hexsha stringlengths 40 40 | size int64 5 2.06M | ext stringclasses 11 values | lang stringclasses 1 value | max_stars_repo_path stringlengths 3 251 | max_stars_repo_name stringlengths 4 130 | max_stars_repo_head_hexsha stringlengths 40 78 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 191k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 3 251 | max_issues_repo_name stringlengths 4 130 | max_issues_repo_head_hexsha stringlengths 40 78 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 116k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 3 251 | max_forks_repo_name stringlengths 4 130 | max_forks_repo_head_hexsha stringlengths 40 78 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 1 1.05M | avg_line_length float64 1 1.02M | max_line_length int64 3 1.04M | alphanum_fraction float64 0 1 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
37e4f2c4b90817314cd77bae4c4800a1c5a1cfd8 | 11,933 | py | Python | alerter/src/monitorables/nodes/chainlink_node.py | SimplyVC/panic | 2f5c327ea0d14b6a49dc8f4599a255048bc2ff6d | [
"Apache-2.0"
] | 41 | 2019-08-23T12:40:42.000Z | 2022-03-28T11:06:02.000Z | alerter/src/monitorables/nodes/chainlink_node.py | SimplyVC/panic | 2f5c327ea0d14b6a49dc8f4599a255048bc2ff6d | [
"Apache-2.0"
] | 147 | 2019-08-30T22:09:48.000Z | 2022-03-30T08:46:26.000Z | alerter/src/monitorables/nodes/chainlink_node.py | SimplyVC/panic | 2f5c327ea0d14b6a49dc8f4599a255048bc2ff6d | [
"Apache-2.0"
] | 3 | 2019-09-03T21:12:28.000Z | 2021-08-18T14:27:56.000Z | from datetime import datetime
from typing import Optional, Dict, List, Union
from schema import Schema, Or
from src.monitorables.nodes.node import Node
from src.utils.exceptions import InvalidDictSchemaException
def get_int_metric_attributes(self) -> List[str]:
    """Return the names of all variables that hold int metrics."""
    # Only prometheus-sourced int metrics exist for this node type.
    return list(self.get_int_prometheus_metric_attributes())
def get_float_metric_attributes(self) -> List[str]:
    """Return the names of all variables that hold float metrics."""
    # Only prometheus-sourced float metrics exist for this node type.
    return list(self.get_float_prometheus_metric_attributes())
def get_dict_metric_attributes(self) -> List[str]:
    """Return the names of all variables that hold dict metrics."""
    # Only prometheus-sourced dict metrics exist for this node type.
    return list(self.get_dict_prometheus_metric_attributes())
def get_str_metric_attributes(self) -> List[str]:
    """Return the names of all variables that hold str metrics."""
    # Only prometheus-sourced str metrics exist for this node type.
    return list(self.get_str_prometheus_metric_attributes())
def get_all_metric_attributes(self) -> List[str]:
    """Return the names of every variable that represents a metric."""
    # All metrics currently come from the prometheus interface.
    return list(self.get_all_prometheus_metric_attributes())
def set_prometheus_as_down(self, downtime: Optional[float]) -> None:
    """Mark the node's prometheus interface as down.

    Records when the interface first went down: the given ``downtime``
    timestamp if provided, otherwise the current timestamp.

    :param downtime: timestamp of when the interface went down, or None
    :return: None
    """
    went_down_at = datetime.now().timestamp() if downtime is None else downtime
    self.set_went_down_at_prometheus(went_down_at)
def set_prometheus_as_up(self) -> None:
    """Mark the node's prometheus interface as up.

    The interface is considered up whenever went_down_at_prometheus is
    None, so clearing the timestamp is all that is required.

    :return: None
    """
    self.set_went_down_at_prometheus(None)
def set_current_gas_price_info(self, new_percentile: Optional[float],
                               new_price: Optional[float]) -> None:
    """Store the current gas price info.

    Writes only the two known keys so the dict keeps its expected schema.

    :param new_percentile: the new percentile to be stored
    :param new_price: the new gas price to be stored
    :return: None
    """
    self._current_gas_price_info.update(
        percentile=new_percentile, price=new_price)
def reset(self) -> None:
    """Restore every metric to its initial (unset) state.

    :return: None
    """
    # (setter name, arguments) pairs, invoked in this exact order.
    initial_state = (
        ("set_went_down_at_prometheus", (None,)),
        ("set_current_height", (None,)),
        ("set_total_block_headers_received", (None,)),
        ("set_max_pending_tx_delay", (None,)),
        ("set_process_start_time_seconds", (None,)),
        ("set_total_gas_bumps", (None,)),
        ("set_total_gas_bumps_exceeds_limit", (None,)),
        ("set_no_of_unconfirmed_txs", (None,)),
        ("set_total_errored_job_runs", (None,)),
        ("set_current_gas_price_info", (None, None)),
        ("set_eth_balance_info", ({},)),
        ("set_last_prometheus_source_used", (None,)),
        ("set_last_monitored_prometheus", (None,)),
    )
    for setter_name, args in initial_state:
        getattr(self, setter_name)(*args)
| 37.407524 | 80 | 0.67636 |
37e57878ec351c326eab8dff88096e5a9b705681 | 8,983 | py | Python | experiments/vgg16/VGG16_utils.py | petrapoklukar/DCA | e5b3f3481433306a4b33e712272f8bbf5e9d05ce | [
"MIT"
] | 2 | 2022-02-14T15:54:22.000Z | 2022-02-15T18:43:36.000Z | experiments/vgg16/VGG16_utils.py | petrapoklukar/DCA | e5b3f3481433306a4b33e712272f8bbf5e9d05ce | [
"MIT"
] | null | null | null | experiments/vgg16/VGG16_utils.py | petrapoklukar/DCA | e5b3f3481433306a4b33e712272f8bbf5e9d05ce | [
"MIT"
] | null | null | null | import pickle
import numpy as np
import os
def _analyze_query_point_assignment(
    query_data_dict: dict,
    init_Rdata_dict: dict,
    init_Edata_dict: dict,
    num_R: int,
    query_point_assignment_array: np.ndarray,
    root: str,
    n_points_to_copy=50,
):
    """
    Analyzes and visualizes qDCA results.
    :param query_data_dict: raw query data.
    :param init_Rdata_dict: raw R data.
    :param init_Edata_dict: raw E data.
    :param num_R: total number of R points.
    :param query_point_assignment_array: query point assignments results.
    :param root: root directory of the experiment.
    :param n_points_to_copy: number of images to save.
    :return: accuracy of qDCA assignments; list of (R, query) points with same label;
    list of (R, query) points with different label
    """
    true_query_data_labels = query_data_dict["labels"]
    # Column 1 holds the assigned index: values < num_R refer to R points,
    # values >= num_R refer to E points (offset by num_R).
    assigned_R = query_point_assignment_array[
        query_point_assignment_array[:, 1] < num_R, 1
    ]
    assigned_E = query_point_assignment_array[
        query_point_assignment_array[:, 1] >= num_R, 1
    ]
    assigned_R_labels = init_Rdata_dict["labels"][assigned_R]
    assigned_E_labels = init_Edata_dict["labels"][assigned_E - num_R]
    # Scatter the R/E labels back into a single array aligned with the
    # row order of query_point_assignment_array.
    assigned_query_data_labels = np.empty(
        shape=query_point_assignment_array.shape[0]
    ).astype(np.int32)
    assigned_query_data_labels[
        query_point_assignment_array[:, 1] < num_R
    ] = assigned_R_labels
    assigned_query_data_labels[
        query_point_assignment_array[:, 1] >= num_R
    ] = assigned_E_labels
    # Fraction of query points whose assigned label matches the true label.
    accuracy = (
        true_query_data_labels == assigned_query_data_labels
    ).sum() / assigned_query_data_labels.shape[0]
    same_label_idx = np.where(true_query_data_labels == assigned_query_data_labels)[0]
    wrong_label_idx = np.where(true_query_data_labels != assigned_query_data_labels)[0]
    # Collect [query path, assigned path, query label, assigned label] for
    # every correctly assigned query point.
    correct_pairs = []
    for i in query_point_assignment_array[same_label_idx]:
        query_idx, init_idx = i
        if init_idx < num_R:
            correct_pairs.append(
                [
                    query_data_dict["paths"].astype(object)[query_idx],
                    init_Rdata_dict["paths"].astype(object)[init_idx],
                    query_data_dict["labels"][query_idx],
                    init_Rdata_dict["labels"][init_idx],
                ]
            )
        else:
            correct_pairs.append(
                [
                    query_data_dict["paths"].astype(object)[query_idx],
                    init_Edata_dict["paths"].astype(object)[init_idx - num_R],
                    query_data_dict["labels"][query_idx],
                    init_Edata_dict["labels"][init_idx - num_R],
                ]
            )
    # Same bookkeeping for the mis-assigned query points.
    wrong_pairs = []
    for i in query_point_assignment_array[wrong_label_idx]:
        query_idx, init_idx = i
        if init_idx < num_R:
            wrong_pairs.append(
                [
                    query_data_dict["paths"].astype(object)[query_idx],
                    init_Rdata_dict["paths"].astype(object)[init_idx],
                    query_data_dict["labels"][query_idx],
                    init_Rdata_dict["labels"][init_idx],
                ]
            )
        else:
            wrong_pairs.append(
                [
                    query_data_dict["paths"].astype(object)[query_idx],
                    init_Edata_dict["paths"].astype(object)[init_idx - num_R],
                    query_data_dict["labels"][query_idx],
                    init_Edata_dict["labels"][init_idx - num_R],
                ]
            )
    # Persist the full analysis so it can be inspected without rerunning.
    with open(
        os.path.join(root, "logs", "analyzed_query_point_assignments.pkl"), "wb"
    ) as f:
        pickle.dump(
            {
                "accuracy": accuracy,
                "same_label_idx": same_label_idx,
                "wrong_label_idx": wrong_label_idx,
                "correct_pairs": correct_pairs,
                "wrong_pairs": wrong_pairs,
                "query_point_assignment_array": query_point_assignment_array,
            },
            f,
        )
    same_label_image_path = os.path.join(root, "visualization", "same_label_images")
    wrong_label_image_path = os.path.join(root, "visualization", "wrong_label_images")
    if not os.path.exists(wrong_label_image_path):
        os.mkdir(wrong_label_image_path)
    if not os.path.exists(same_label_image_path):
        os.mkdir(same_label_image_path)
    # Copy the first n_points_to_copy (assigned, query) image pairs into the
    # visualization folders for manual inspection.
    # NOTE(review): assumes both correct_pairs and wrong_pairs contain at
    # least n_points_to_copy entries — IndexError otherwise; confirm callers.
    for i in range(n_points_to_copy):
        query_image_path, init_image_path, query_label, init_label = correct_pairs[i]
        path_to_copy = os.path.join(
            same_label_image_path,
            "i{0}_init_image_querylabel{1}_initlabel{2}.png".format(
                str(i), str(query_label), str(init_label)
            ),
        )
        os.system("cp {0} {1}".format(init_image_path, path_to_copy))
        path_to_copy2 = os.path.join(
            same_label_image_path,
            "i{0}_query_image_querylabel{1}_initlabel{2}.png".format(
                str(i), str(query_label), str(init_label)
            ),
        )
        os.system("cp {0} {1}".format(query_image_path, path_to_copy2))
        (
            w_query_image_path,
            w_init_image_path,
            w_query_label,
            w_init_label,
        ) = wrong_pairs[i]
        path_to_copy_w = os.path.join(
            wrong_label_image_path,
            "i{0}_init_image_querylabel{1}_initlabel{2}.png".format(
                str(i), str(w_query_label), str(w_init_label)
            ),
        )
        os.system("cp {0} {1}".format(w_init_image_path, path_to_copy_w))
        path_to_copy_w2 = os.path.join(
            wrong_label_image_path,
            "i{0}_query_image_querylabel{1}_initlabel{2}.png".format(
                i, w_query_label, w_init_label
            ),
        )
        os.system("cp {0} {1}".format(w_query_image_path, path_to_copy_w2))
    return accuracy, correct_pairs, wrong_pairs
def _generate_query_sets(version: str, N: int = 5000):
    """
    Generates query sets for qDCA experiment in Section 4.3.
    :param version: either version1 (dogs vs kitchen utils) or version2 (random).
    :param N: number of points to sample for R used in DCA.
    """
    with open(f"representations/vgg16/{version}/Rfeatures.pkl", "rb") as f:
        Rdata_v1 = pickle.load(f)
    with open(f"representations/vgg16/{version}/Efeatures.pkl", "rb") as f:
        Edata_v1 = pickle.load(f)
    # Randomly split both R and E into N "init" points and the remaining
    # "query" points (the two index sets are disjoint by construction).
    init_Ridxs = np.random.choice(
        np.arange(len(Rdata_v1["feat_lin1"])), size=N, replace=False
    )
    query_Ridxs = np.setdiff1d(np.arange(len(Rdata_v1["feat_lin1"])), init_Ridxs)
    init_Eidxs = np.random.choice(
        np.arange(len(Edata_v1["feat_lin1"])), size=N, replace=False
    )
    query_Eidxs = np.setdiff1d(np.arange(len(Edata_v1["feat_lin1"])), init_Eidxs)
    # Persist the sampled init subset for R (features, labels, paths plus
    # the index bookkeeping needed to reproduce the split).
    with open(f"representations/vgg16/{version}/sampled_Rfeatures.pkl", "wb") as f:
        pickle.dump(
            {
                "feat_lin1": Rdata_v1["feat_lin1"][init_Ridxs],
                "feat_lin2": Rdata_v1["feat_lin2"][init_Ridxs],
                "labels": Rdata_v1["labels"][init_Ridxs],
                "paths": np.array(Rdata_v1["paths"])[init_Ridxs],
                "init_Ridx": init_Ridxs,
                "query_Ridx": query_Ridxs,
            },
            f,
        )
    # Same for E.
    with open(f"representations/vgg16/{version}/sampled_Efeatures.pkl", "wb") as f:
        pickle.dump(
            {
                "feat_lin1": Edata_v1["feat_lin1"][init_Eidxs],
                "feat_lin2": Edata_v1["feat_lin2"][init_Eidxs],
                "labels": Edata_v1["labels"][init_Eidxs],
                "paths": np.array(Edata_v1["paths"])[init_Eidxs],
                "init_Eidx": init_Eidxs,
                "query_Eidx": query_Eidxs,
            },
            f,
        )
    # The query set pools the held-out R and E points (R first, then E).
    with open(f"representations/vgg16/{version}/query_features.pkl", "wb") as f:
        pickle.dump(
            {
                "feat_lin1": np.concatenate(
                    [
                        Rdata_v1["feat_lin1"][query_Ridxs],
                        Edata_v1["feat_lin1"][query_Eidxs],
                    ]
                ),
                "feat_lin2": np.concatenate(
                    [
                        Rdata_v1["feat_lin2"][query_Ridxs],
                        Edata_v1["feat_lin2"][query_Eidxs],
                    ]
                ),
                "labels": np.concatenate(
                    [Rdata_v1["labels"][query_Ridxs], Edata_v1["labels"][query_Eidxs]]
                ),
                "paths": np.concatenate(
                    [
                        np.array(Rdata_v1["paths"])[query_Ridxs],
                        np.array(Edata_v1["paths"])[query_Eidxs],
                    ]
                ),
                "init_Eidxs": init_Eidxs,
                "query_Eidxs": query_Eidxs,
                "init_Ridxs": init_Ridxs,
                "query_Ridxs": query_Ridxs,
            },
            f,
        )
| 36.815574 | 87 | 0.571969 |
37e640e884ea7efdcb34d9809f129977c3b8f796 | 2,905 | py | Python | back-end/RawFishSheep/app_cart/views.py | Coldarra/RawFishSheep | 266bd9d8d9832d5c692b63e7515d45fdc4f6acc4 | [
"Apache-2.0"
] | null | null | null | back-end/RawFishSheep/app_cart/views.py | Coldarra/RawFishSheep | 266bd9d8d9832d5c692b63e7515d45fdc4f6acc4 | [
"Apache-2.0"
] | 4 | 2021-10-06T22:49:52.000Z | 2022-02-27T12:28:18.000Z | back-end/RawFishSheep/app_cart/views.py | Coldarra/RawFishSheep | 266bd9d8d9832d5c692b63e7515d45fdc4f6acc4 | [
"Apache-2.0"
] | null | null | null | from .models import *
from decorator import *
from app_goods.views import getGoodsByID
#
| 29.05 | 78 | 0.685714 |
37e6a1c12c2e7ca4fa6cc0bc35bd20189bfd7063 | 7,704 | py | Python | extensions/catsum.py | johannesgiorgis/my-timewarrior-extensions | 1a8b83359298d3cbf002148f02b5ef6f1693a797 | [
"MIT"
] | null | null | null | extensions/catsum.py | johannesgiorgis/my-timewarrior-extensions | 1a8b83359298d3cbf002148f02b5ef6f1693a797 | [
"MIT"
] | 1 | 2022-02-14T16:53:54.000Z | 2022-02-14T16:53:54.000Z | extensions/catsum.py | xoiopure/my-timewarrior-extensions | 1a8b83359298d3cbf002148f02b5ef6f1693a797 | [
"MIT"
] | 1 | 2021-08-29T00:32:18.000Z | 2021-08-29T00:32:18.000Z | #!/usr/bin/env python3
###############################################################################
#
# Category Summaries
#
#
###############################################################################
import datetime
import io
import json
import logging
import pprint
import sys
from typing import Dict, Any
from dateutil import tz
# set logging
logger = logging.getLogger(__name__)
logger.setLevel(logging.INFO)
# create handler
c_handler = logging.StreamHandler()
c_handler.setLevel(logging.INFO)
# Create formatters and add it to handlers
LOG_FORMAT = "[%(asctime)s - %(levelname)-8s - %(module)s:%(name)s ] %(message)s"
c_format = logging.Formatter(LOG_FORMAT)
c_handler.setFormatter(c_format)
# Add handlers to the logger
logger.addHandler(c_handler)
DATE_FORMAT = "%Y%m%dT%H%M%SZ"
# TODO: Convert to defaultdict
# https://www.accelebrate.com/blog/using-defaultdict-python
# https://stackoverflow.com/questions/9358983/dictionaries-and-default-values
# https://docs.python.org/2/library/collections.html#collections.defaultdict
CATEGORIES: dict = {
"PT": "Personal Time",
"PW": "Planned Work",
"UW": "Unplanned Work",
"OW": "Other Work",
}
def format_seconds(seconds: int) -> str:
    """
    Convert seconds to a formatted string.

    Example: 3661 seconds -> "   1:01:01" (hours right-aligned in 4 chars).
    """
    hours, remainder = divmod(seconds, 3600)
    minutes, secs = divmod(remainder, 60)
    return f"{hours:4d}:{minutes:02d}:{secs:02d}"
def print_dotted_line(width: int = 72):
    """Print a horizontal rule made of *width* dash characters."""
    print(width * "-")
# NOTE(review): main() is not defined in the visible portion of this file —
# presumably defined elsewhere in the module; confirm before refactoring.
if __name__ == "__main__":
    main()
| 31.57377 | 98 | 0.641874 |
37e90c8995ed6a6f4dbc2bb7d6d0c967a69b04ab | 3,881 | py | Python | resources/hotel.py | jnascimentocode/REST-API-COM-PYTHON-E-FLASK | c55dca53f3a864c6c1aba8bbde63dcadc3c19347 | [
"MIT"
] | null | null | null | resources/hotel.py | jnascimentocode/REST-API-COM-PYTHON-E-FLASK | c55dca53f3a864c6c1aba8bbde63dcadc3c19347 | [
"MIT"
] | null | null | null | resources/hotel.py | jnascimentocode/REST-API-COM-PYTHON-E-FLASK | c55dca53f3a864c6c1aba8bbde63dcadc3c19347 | [
"MIT"
] | null | null | null | from typing import ParamSpecArgs
from flask_restful import Resource, reqparse
from models.hotel import HotelModel
from flask_jwt_extended import jwt_required
from models.site import SiteModel
from resources.filtros import *
import sqlite3
# Optional query-string filters accepted by the hotel endpoints.
# Field names are Portuguese: 'cidade' = city, 'estrelas' = stars,
# 'diaria' = daily rate; limit/offset drive pagination.
path_params = reqparse.RequestParser()
path_params.add_argument('cidade', type=str)
path_params.add_argument('estrelas_min', type=float)
path_params.add_argument('estrelas_max', type=float)
path_params.add_argument('diaria_min', type=float)
path_params.add_argument('diaria_max', type=float)
path_params.add_argument('limit', type=float)
path_params.add_argument('offset', type=float)
| 34.345133 | 116 | 0.631538 |
37e97b75428a1033eda5441303e4da93aa132446 | 221 | py | Python | src/wormhole/__main__.py | dmgolembiowski/magic-wormhole | d517a10282d5e56f300db462b1a6eec517202af7 | [
"MIT"
] | 2,801 | 2021-01-10T16:37:14.000Z | 2022-03-31T19:02:50.000Z | src/wormhole/__main__.py | dmgolembiowski/magic-wormhole | d517a10282d5e56f300db462b1a6eec517202af7 | [
"MIT"
] | 52 | 2021-01-10T01:54:00.000Z | 2022-03-11T13:12:41.000Z | src/wormhole/__main__.py | dmgolembiowski/magic-wormhole | d517a10282d5e56f300db462b1a6eec517202af7 | [
"MIT"
] | 106 | 2021-01-21T14:32:22.000Z | 2022-03-18T10:33:09.000Z | from __future__ import absolute_import, print_function, unicode_literals
if __name__ == "__main__":
from .cli import cli
cli.wormhole()
else:
# raise ImportError('this module should not be imported')
pass
| 27.625 | 72 | 0.737557 |
37eaf107409d84d5c2fde68eaa08ffa5c4d85c18 | 2,413 | py | Python | testing/berge_equilibrium_cndp.py | Eliezer-Beczi/CNDP | 73decdfaef1c9e546ad94dd7448c89078af27034 | [
"MIT"
] | 1 | 2021-08-13T09:14:40.000Z | 2021-08-13T09:14:40.000Z | testing/berge_equilibrium_cndp.py | Eliezer-Beczi/CNDP | 73decdfaef1c9e546ad94dd7448c89078af27034 | [
"MIT"
] | null | null | null | testing/berge_equilibrium_cndp.py | Eliezer-Beczi/CNDP | 73decdfaef1c9e546ad94dd7448c89078af27034 | [
"MIT"
] | null | null | null | import networkx as nx
import utils.connectivity_metrics as connectivity_metric
from platypus import NSGAII, EpsMOEA, NSGAIII, EpsNSGAII, Problem, Dominance, Subset, TournamentSelector, \
HypervolumeFitnessEvaluator, Archive
import statistics
import multiprocessing as mp
G = nx.read_adjlist("input/Ventresca/BarabasiAlbert_n500m1.txt")
k = 50
num_of_tests = 10
def get_critical_nodes():
    """Run one multi-objective search for critical nodes and return the
    fitness (objective 0) of the first solution found.

    NOTE(review): CNDP, BergeDominance and BergeArchive are not visible in
    this view — presumably a Platypus Problem plus a Berge-equilibrium
    dominance relation and matching archive defined elsewhere; confirm.
    """
    # NSGA-II configured with a Berge-dominance tournament selector/archive.
    algorithm = NSGAII(CNDP(), selector=TournamentSelector(dominance=BergeDominance()), archive=BergeArchive())
    algorithm.run(1000)
    # Report objective 0 of the first result.
    fitness = algorithm.result[0].objectives[0]
    print(fitness)
    return fitness
if __name__ == '__main__':
    # Run the search num_of_tests times in parallel, one task per run.
    pool = mp.Pool(mp.cpu_count())
    samples = pool.starmap_async(get_critical_nodes, [() for _ in range(num_of_tests)]).get()
    pool.close()
    # Summarize the independent runs with mean and sample std deviation.
    avg = sum(samples) / len(samples)
    stdev = statistics.stdev(samples)
    print(f"Average: {avg}")
    print(f"Standard Deviation: {stdev}")
| 25.135417 | 111 | 0.63075 |
37eb1b0657a210c55097430b315fbcd465c4cdfe | 814 | py | Python | policykit/django_db_logger/migrations/0002_initial.py | mashton/policyk | 623523d76d63c06b6d559ad7b477d80512fbd2e7 | [
"MIT"
] | 78 | 2020-05-08T17:25:38.000Z | 2022-01-13T05:44:50.000Z | policykit/django_db_logger/migrations/0002_initial.py | mashton/policyk | 623523d76d63c06b6d559ad7b477d80512fbd2e7 | [
"MIT"
] | 302 | 2020-02-20T07:04:30.000Z | 2022-02-25T17:44:23.000Z | policykit/django_db_logger/migrations/0002_initial.py | mashton/policyk | 623523d76d63c06b6d559ad7b477d80512fbd2e7 | [
"MIT"
] | 13 | 2020-04-17T19:44:26.000Z | 2022-02-25T17:18:04.000Z | # Generated by Django 3.2.2 on 2021-09-02 15:10
from django.db import migrations, models
import django.db.models.deletion
| 29.071429 | 134 | 0.638821 |
37eb3791b2e71065562272b988c38a600939b27b | 3,087 | py | Python | Charm/models/risk_functions.py | TanyaAdams1/Charm | cc6dd64d01f8cb4cf0eb92dadefcb7575d75ec9d | [
"BSD-3-Clause"
] | 17 | 2018-04-23T20:17:58.000Z | 2021-04-12T19:28:40.000Z | Charm/models/risk_functions.py | TanyaAdams1/Charm | cc6dd64d01f8cb4cf0eb92dadefcb7575d75ec9d | [
"BSD-3-Clause"
] | 1 | 2020-02-01T23:57:28.000Z | 2020-02-04T18:03:17.000Z | Charm/models/risk_functions.py | TanyaAdams1/Charm | cc6dd64d01f8cb4cf0eb92dadefcb7575d75ec9d | [
"BSD-3-Clause"
] | 3 | 2018-04-19T19:24:38.000Z | 2020-11-06T00:33:53.000Z | import numpy as np
from mcerp import *
from uncertainties.core import AffineScalarFunc
| 29.4 | 74 | 0.551344 |
37eb8f04b291f998b42a8e819f9ce512c5fabcfb | 277 | py | Python | code/doubanUtils.py | verazuo/douban_crawler | 042e870c74df8b6f4eb1cd2af3b90d5b6699ab8f | [
"MIT"
] | 1 | 2021-04-03T12:41:29.000Z | 2021-04-03T12:41:29.000Z | code/doubanUtils.py | verazuo/douban_crawler | 042e870c74df8b6f4eb1cd2af3b90d5b6699ab8f | [
"MIT"
] | null | null | null | code/doubanUtils.py | verazuo/douban_crawler | 042e870c74df8b6f4eb1cd2af3b90d5b6699ab8f | [
"MIT"
] | null | null | null | import requests
import re
from bs4 import BeautifulSoup
| 27.7 | 54 | 0.722022 |
37eba4f34e7db6830d71aa2b98daa23f3f5f4d63 | 1,565 | py | Python | 491/491.py | kaixiang1992/python-flask | 2b4c597d83f5a6ed662d42d7ff692e563a9adbf8 | [
"MIT"
] | null | null | null | 491/491.py | kaixiang1992/python-flask | 2b4c597d83f5a6ed662d42d7ff692e563a9adbf8 | [
"MIT"
] | null | null | null | 491/491.py | kaixiang1992/python-flask | 2b4c597d83f5a6ed662d42d7ff692e563a9adbf8 | [
"MIT"
] | null | null | null | from sqlalchemy import create_engine, Column, Integer, String, ForeignKey
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import sessionmaker, relationship
# TODO: db_uri
# dialect+driver://username:password@host:port/database?charset=utf8
DB_URI = 'mysql+pymysql://root:root123@127.0.0.1:3300/first_sqlalchemy?charset=utf8'
engine = create_engine(DB_URI)
Base = declarative_base(bind=engine)
session = sessionmaker(bind=engine)()
# TODO: User
# TODO: Article
# TODO:
# Base.metadata.drop_all()
# TODO:
# Base.metadata.create_all()
#
# user = User(name='zhiliao')
# article1 = Article(title='python')
# article2 = Article(title='flask')
#
# user.articles.append(article1)
# user.articles.append(article2)
# TODO:
# session.add(user)
# session.commit()
# TODO: 1.session.delete`nullable=False`
# TODO: 2.session.delete`nullable=False`
# Fetch and delete the first User row; how the related Article rows are
# handled depends on the FK's nullable setting and any configured cascade
# (see the TODO notes earlier in this script).
user = session.query(User).first()
print(user)
session.delete(user)
session.commit()
| 26.525424 | 84 | 0.729073 |
37ebc35183b9314a344aaf25eb9e7de4a348916a | 2,149 | py | Python | spacy/tests/tagger/test_lemmatizer.py | TerminalWitchcraft/spaCy | 29adbef095c04e21a691e912671e4ec21082b047 | [
"MIT"
] | 1 | 2018-09-24T17:00:23.000Z | 2018-09-24T17:00:23.000Z | spacy/tests/tagger/test_lemmatizer.py | TerminalWitchcraft/spaCy | 29adbef095c04e21a691e912671e4ec21082b047 | [
"MIT"
] | null | null | null | spacy/tests/tagger/test_lemmatizer.py | TerminalWitchcraft/spaCy | 29adbef095c04e21a691e912671e4ec21082b047 | [
"MIT"
] | null | null | null | # coding: utf-8
from __future__ import unicode_literals
from ...lemmatizer import read_index, read_exc
import pytest
| 30.7 | 74 | 0.604467 |
37ebf327c9046920009b45cecc899607501e8a37 | 2,530 | py | Python | sdk/communication/azure-communication-phonenumbers/azure/communication/phonenumbers/_generated/models/__init__.py | RAY-316/azure-sdk-for-python | 4f7790deaf46c6f4e965f099f36eb73a7954ad5b | [
"MIT"
] | null | null | null | sdk/communication/azure-communication-phonenumbers/azure/communication/phonenumbers/_generated/models/__init__.py | RAY-316/azure-sdk-for-python | 4f7790deaf46c6f4e965f099f36eb73a7954ad5b | [
"MIT"
] | null | null | null | sdk/communication/azure-communication-phonenumbers/azure/communication/phonenumbers/_generated/models/__init__.py | RAY-316/azure-sdk-for-python | 4f7790deaf46c6f4e965f099f36eb73a7954ad5b | [
"MIT"
] | null | null | null | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
try:
from ._models_py3 import AcquiredPhoneNumbers
from ._models_py3 import CommunicationError
from ._models_py3 import CommunicationErrorResponse
from ._models_py3 import PhoneNumberCapabilities
from ._models_py3 import PhoneNumberCapabilitiesRequest
from ._models_py3 import PhoneNumberCost
from ._models_py3 import PhoneNumberOperation
from ._models_py3 import PhoneNumberPurchaseRequest
from ._models_py3 import PhoneNumberSearchRequest
from ._models_py3 import PhoneNumberSearchResult
from ._models_py3 import PurchasedPhoneNumber
except (SyntaxError, ImportError):
from ._models import AcquiredPhoneNumbers # type: ignore
from ._models import CommunicationError # type: ignore
from ._models import CommunicationErrorResponse # type: ignore
from ._models import PhoneNumberCapabilities # type: ignore
from ._models import PhoneNumberCapabilitiesRequest # type: ignore
from ._models import PhoneNumberCost # type: ignore
from ._models import PhoneNumberOperation # type: ignore
from ._models import PhoneNumberPurchaseRequest # type: ignore
from ._models import PhoneNumberSearchRequest # type: ignore
from ._models import PhoneNumberSearchResult # type: ignore
from ._models import PurchasedPhoneNumber # type: ignore
from ._phone_numbers_client_enums import (
BillingFrequency,
PhoneNumberAssignmentType,
PhoneNumberCapabilityType,
PhoneNumberOperationStatus,
PhoneNumberOperationType,
PhoneNumberType,
)
__all__ = [
'AcquiredPhoneNumbers',
'CommunicationError',
'CommunicationErrorResponse',
'PhoneNumberCapabilities',
'PhoneNumberCapabilitiesRequest',
'PhoneNumberCost',
'PhoneNumberOperation',
'PhoneNumberPurchaseRequest',
'PhoneNumberSearchRequest',
'PhoneNumberSearchResult',
'PurchasedPhoneNumber',
'BillingFrequency',
'PhoneNumberAssignmentType',
'PhoneNumberCapabilityType',
'PhoneNumberOperationStatus',
'PhoneNumberOperationType',
'PhoneNumberType',
]
| 40.806452 | 94 | 0.729644 |
37ee0cfd689d053055f9512b80721598ce49ab1a | 1,845 | py | Python | AlgoNet2/Helper.py | Bhaney44/AlgorandDevelopment | 309e68337227af879f5c4e92c72156928a39fe32 | [
"MIT"
] | null | null | null | AlgoNet2/Helper.py | Bhaney44/AlgorandDevelopment | 309e68337227af879f5c4e92c72156928a39fe32 | [
"MIT"
] | 1 | 2021-04-24T19:24:05.000Z | 2021-04-28T05:32:40.000Z | AlgoNet2/Helper.py | Bhaney44/AlgorandDevelopment | 309e68337227af879f5c4e92c72156928a39fe32 | [
"MIT"
] | 1 | 2022-01-17T18:00:56.000Z | 2022-01-17T18:00:56.000Z | import numpy as np
from keras.models import Sequential
from keras.layers import LSTM, Dense, Dropout
def visualize_training_results(results):
    """
    Plots the loss and accuracy for the training and testing data
    """
    history = results.history

    def _plot_metric(val_key, train_key, title):
        # One figure per metric: validation curve first, then training curve.
        plt.figure(figsize=(12,4))
        plt.plot(history[val_key])
        plt.plot(history[train_key])
        plt.legend([val_key, train_key])
        plt.title(title)
        plt.xlabel('Epochs')
        plt.ylabel(title)
        plt.show()

    _plot_metric('val_loss', 'loss', 'Loss')
    _plot_metric('val_accuracy', 'accuracy', 'Accuracy')
def split_sequence(seq, n_steps_in, n_steps_out):
    """
    Splits the univariate time sequence into (input window, output window)
    pairs: X has shape (samples, n_steps_in), y has (samples, n_steps_out).
    """
    inputs, targets = [], []
    for start in range(len(seq)):
        in_end = start + n_steps_in
        out_end = in_end + n_steps_out
        # Stop once the output window would run past the end of the series.
        if out_end > len(seq):
            break
        inputs.append(seq[start:in_end])
        targets.append(seq[in_end:out_end])
    return np.array(inputs), np.array(targets)
def layer_maker(n_layers, n_nodes, activation, drop=None, d_rate=.5):
    """
    Create a Sequential model with the specified number of LSTM hidden layers.

    :param n_layers: number of hidden LSTM layers to add
    :param n_nodes: number of units in each LSTM layer
    :param activation: activation function for the LSTM layers
    :param drop: optional interval N; a Dropout layer is added after every
        Nth hidden layer to mitigate overfitting (None disables dropout)
    :param d_rate: dropout rate used when `drop` is given
    :return: the assembled (uncompiled) Sequential model
    """
    model = Sequential()
    # Creating the specified number of hidden layers with the specified number of nodes
    for layer_idx in range(1, n_layers + 1):
        model.add(LSTM(n_nodes, activation=activation, return_sequences=True))
        # Explicit check replaces the previous bare try/except, which relied
        # on (and silently swallowed) the TypeError from `x % None`.
        if drop is not None and layer_idx % drop == 0:
            model.add(Dropout(d_rate))
    # Fix: the original built the model but never returned it.
    return model
| 27.954545 | 101 | 0.615176 |
37ef3e58f557478ce40c98955a961b550b4256ca | 14,120 | py | Python | apex/contrib/multihead_attn/self_multihead_attn_func.py | Muflhi01/apex | 79c018776129aad13abeb4ce63d24e1fbb4cd29e | [
"BSD-3-Clause"
] | 6,523 | 2018-04-25T17:35:27.000Z | 2022-03-31T22:49:45.000Z | apex/contrib/multihead_attn/self_multihead_attn_func.py | Muflhi01/apex | 79c018776129aad13abeb4ce63d24e1fbb4cd29e | [
"BSD-3-Clause"
] | 1,100 | 2018-05-18T00:03:34.000Z | 2022-03-30T22:00:33.000Z | apex/contrib/multihead_attn/self_multihead_attn_func.py | Muflhi01/apex | 79c018776129aad13abeb4ce63d24e1fbb4cd29e | [
"BSD-3-Clause"
] | 1,057 | 2018-05-07T13:53:04.000Z | 2022-03-31T09:18:47.000Z | import torch
import torch.nn.functional as F
# Functional entry point of the SelfAttnFunc autograd Function (the class
# itself is defined above this line in the full file).
self_attn_func = SelfAttnFunc.apply
| 45.844156 | 133 | 0.590652 |
37ef3fd76dba247104e4038149d9913b2621526c | 6,481 | py | Python | api/app/reviews/models.py | NikolaSiplakova/Baobab | 180cd3cb492ed47d38ca0b473572fad0ac6f604b | [
"Apache-2.0"
] | null | null | null | api/app/reviews/models.py | NikolaSiplakova/Baobab | 180cd3cb492ed47d38ca0b473572fad0ac6f604b | [
"Apache-2.0"
] | null | null | null | api/app/reviews/models.py | NikolaSiplakova/Baobab | 180cd3cb492ed47d38ca0b473572fad0ac6f604b | [
"Apache-2.0"
] | null | null | null | from datetime import datetime
from app import db
from app.utils import misc
| 42.084416 | 123 | 0.666101 |
37ef5b190722144951d2dc7179cd76d69b1cdbc2 | 3,307 | py | Python | speednet/vae/ConvVae.py | Abhranta/speednet | d15971e946cddc62a644d6a6f3be10a4df5b2ce2 | [
"MIT"
] | 1 | 2021-01-20T14:29:14.000Z | 2021-01-20T14:29:14.000Z | speednet/vae/ConvVae.py | Abhranta/speednet | d15971e946cddc62a644d6a6f3be10a4df5b2ce2 | [
"MIT"
] | null | null | null | speednet/vae/ConvVae.py | Abhranta/speednet | d15971e946cddc62a644d6a6f3be10a4df5b2ce2 | [
"MIT"
] | null | null | null | import torch.nn as nn
import torch
from utils import Flatten , Unflatten , weights_init , down_conv , up_conv | 47.927536 | 133 | 0.556093 |
37f19f659d9ef143b2408f934266bdcc951f5ade | 73,603 | py | Python | nelpy/utils.py | IsaacBusaleh/nelpy | f2663cf6f028c9bd0e630fbf8a527c236f4e0f41 | [
"MIT"
] | 1 | 2021-01-01T17:59:31.000Z | 2021-01-01T17:59:31.000Z | nelpy/utils.py | IsaacBusaleh/nelpy | f2663cf6f028c9bd0e630fbf8a527c236f4e0f41 | [
"MIT"
] | null | null | null | nelpy/utils.py | IsaacBusaleh/nelpy | f2663cf6f028c9bd0e630fbf8a527c236f4e0f41 | [
"MIT"
] | null | null | null | """This module contains helper functions and utilities for nelpy."""
__all__ = ['spatial_information',
'frange',
'swap_cols',
'swap_rows',
'pairwise',
'is_sorted',
'linear_merge',
'PrettyDuration',
'ddt_asa',
'get_contiguous_segments',
'get_events_boundaries',
'get_threshold_crossing_epochs',
'_bst_get_bins']
import numpy as np
import logging
from itertools import tee, repeat
from collections import namedtuple
from math import floor
from scipy.signal import hilbert
import scipy.ndimage.filters #import gaussian_filter1d, gaussian_filter
from numpy import log, ceil
import copy
import sys
import ctypes
from multiprocessing import Array, cpu_count
from multiprocessing.pool import Pool
import pdb
from . import core # so that core.RegularlySampledAnalogSignalArray is exposed
from . import auxiliary # so that auxiliary.TuningCurve1D is epxosed
from . import filtering
from .utils_.decorators import keyword_deprecation
# def sub2ind(array_shape, rows, cols):
# ind = rows*array_shape[1] + cols
# ind[ind < 0] = -1
# ind[ind >= array_shape[0]*array_shape[1]] = -1
# return ind
# def ind2sub(array_shape, ind):
# # see also np.unravel_index(ind, array.shape)
# ind[ind < 0] = -1
# ind[ind >= array_shape[0]*array_shape[1]] = -1
# rows = (ind.astype('int') / array_shape[1])
# cols = ind % array_shape[1]
# return (rows, cols)
def ragged_array(arr):
    """Takes a list of arrays, and returns a ragged array.

    See https://github.com/numpy/numpy/issues/12468
    """
    # Build an object array first, then fill element-wise so numpy does not
    # try to broadcast the (possibly unequal-length) inner arrays.
    out = np.array(len(arr) * [None])
    for idx, element in enumerate(arr):
        out[idx] = element
    return out
def asa_indices_within_epochs(asa, intervalarray):
    """Return sample-index bounds of an ASA within each epoch.

    Returns an array of the form
        [[start, stop]
         ...
         [start, stop]]
    so that data can be associated with asa._data[:,start:stop] for each
    epoch.
    """
    # restrict the epochs to the ASA's support before looking up samples
    restricted = intervalarray[asa.support]
    index_pairs = []
    for bounds in restricted.merge().data:
        # locate the first/last sample falling inside [start, stop)
        frm, to = np.searchsorted(asa._abscissa_vals, (bounds[0], bounds[1]))
        index_pairs.append((frm, to))
    return np.array(index_pairs, ndmin=2)
def frange(start, stop, step):
    """Return evenly spaced values in [start, stop), like ``np.arange`` but
    robust to floating-point step sizes.

    Parameters
    ----------
    start, stop : float
        Half-open interval [start, stop) over which values are generated.
    step : float
        Spacing between consecutive values; must be positive.

    Returns
    -------
    np.ndarray
        Values start, start + step, start + 2*step, ... < stop (up to a
        small floating-point tolerance).
    """
    # BUGFIX: floor((stop-start)/step) silently dropped the final step when
    # the quotient landed just below an integer (e.g. 1/0.1 == 9.999...98),
    # and using `stop` as the linspace endpoint produced a wrong spacing
    # whenever step did not divide (stop - start). Use ceil with a small
    # tolerance (arange semantics) and anchor the endpoint on the exact
    # multiple of step.
    num_steps = int(np.ceil((stop - start) / step - 1e-9))
    return np.linspace(start, start + num_steps * step, num=num_steps,
                       endpoint=False)
def spatial_information(ratemap):
    """Compute the spatial information conveyed by each unit.

    The specificity index examines the amount of information (in bits)
    that a single spike conveys about the animal's location (i.e., how
    well cell firing predicts the animal's location). The spatial
    information content of cell discharge is calculated using the formula

        information content = \\sum_i P_i (R_i/R) log_2(R_i/R)

    where i is the bin number, P_i is the probability for occupancy of
    bin i (assumed uniform here), R_i is the mean firing rate for bin i,
    and R is the overall mean firing rate.

    Reference(s)
    ------------
    Markus, E. J., Barnes, C. A., McNaughton, B. L., Gladden, V. L.,
        and Skaggs, W. E. (1994). "Spatial information content and
        reliability of hippocampal CA1 neurons: effects of visual
        input", Hippocampus, 4(4), 410-421.

    Parameters
    ----------
    ratemap : array of shape (n_units, n_x) or (n_units, n_x, n_y)
        Rate map in Hz.

    Returns
    -------
    si : array of shape (n_units,)
        spatial information (in bits) per unit
    """
    ratemap = copy.deepcopy(ratemap)

    # ensure that the ratemap always has nonzero firing rates,
    # otherwise the spatial information might return NaNs:
    bkg_rate = ratemap[ratemap > 0].min()
    ratemap[ratemap < bkg_rate] = bkg_rate

    number_of_spatial_bins = np.prod(ratemap.shape[1:])
    # Occupancy is assumed uniform, P_i = 1/number_of_spatial_bins. We sum
    # with Pi = 1 and divide by the bin count at the end (equivalent).
    Pi = 1

    if len(ratemap.shape) == 3:
        # we have 2D tuning curves, shape (n_units, n_x, n_y)
        R = ratemap.mean(axis=1).mean(axis=1)  # mean firing rate per unit
        Ri = np.transpose(ratemap, (2,1,0))
        si = np.sum(np.sum((Pi*((Ri / R)*np.log2(Ri / R)).T), axis=1), axis=1)
    elif len(ratemap.shape) == 2:
        # we have 1D tuning curves, shape (n_units, n_x)
        R = ratemap.mean(axis=1)  # mean firing rate per unit
        Ri = ratemap.T
        si = np.sum((Pi*((Ri / R)*np.log2(Ri / R)).T), axis=1)
    else:
        raise TypeError("rate map shape not supported / understood!")

    return si/number_of_spatial_bins
def spatial_sparsity(ratemap):
    """Compute the firing sparsity of each unit's rate map.

    Assuming uniform occupancy (P_i = 1/n_bins), this computes

        sparsity = (\\sum_i P_i R_i) / R**2

    where i is the bin number, R_i is the mean firing rate in bin i, and
    R is the overall mean firing rate.

    NOTE(review): the classic sparsity measure (Skaggs/Markus et al.) is
    (\\sum_i P_i R_i)**2 / \\sum_i P_i R_i**2; this implementation omits
    the squares. Behavior is preserved here -- confirm the intended
    formula against downstream usage before changing it.

    Reference(s)
    ------------
    Markus, E. J., Barnes, C. A., McNaughton, B. L., Gladden, V. L.,
        and Skaggs, W. E. (1994). "Spatial information content and
        reliability of hippocampal CA1 neurons: effects of visual
        input", Hippocampus, 4(4), 410-421.

    Parameters
    ----------
    ratemap : array of shape (n_units, n_x) or (n_units, n_x, n_y)
        Rate map in Hz.

    Returns
    -------
    sparsity : array of shape (n_units,)
        sparsity for each unit
    """
    number_of_spatial_bins = np.prod(ratemap.shape[1:])
    # Occupancy is assumed uniform, P_i = 1/number_of_spatial_bins. We sum
    # with Pi = 1 and divide by the bin count at the end (equivalent).
    Pi = 1

    if len(ratemap.shape) == 3:
        # we have 2D tuning curves, shape (n_units, n_x, n_y)
        R = ratemap.mean(axis=1).mean(axis=1)  # mean firing rate per unit
        Ri = ratemap
        sparsity = np.sum(np.sum((Ri*Pi), axis=1), axis=1)/(R**2)
    elif len(ratemap.shape) == 2:
        # we have 1D tuning curves, shape (n_units, n_x)
        R = ratemap.mean(axis=1)  # mean firing rate per unit
        Ri = ratemap.T
        sparsity = np.sum((Pi*Ri.T), axis=1)/(R**2)
    else:
        raise TypeError("rate map shape not supported / understood!")

    return sparsity/number_of_spatial_bins
def _bst_get_bins_inside_interval(interval, ds, w=1):
"""(np.array) Return bin edges entirely contained inside an interval.
Bin edges always start at interval.start, and continue for as many
bins as would fit entirely inside the interval.
NOTE 1: there are (n+1) bin edges associated with n bins.
WARNING: if an interval is smaller than ds, then no bin will be
associated with the particular interval.
NOTE 2: nelpy uses half-open intervals [a,b), but if the bin
width divides b-a, then the bins will cover the entire
range. For example, if interval = [0,2) and ds = 1, then
bins = [0,1,2], even though [0,2] is not contained in
[0,2). There might be numerical precision deviations from this?
Parameters
----------
interval : EpochArray
EpochArray containing a single interval with a start, and stop
ds : float
Time bin width, in seconds.
w : number of bins to use in a sliding window mode. Default is 1 (no sliding window).
For example, 40 ms bins, with a stride of 5 ms, can be achieved by using (ds=0.005, w=8)
For now, w has to be an integer, and therefore 5 second bins, with a stride of 2 seconds
are not supported within this framework.
Returns
-------
bins : array
Bin edges in an array of shape (n+1,) where n is the number
of bins
centers : array
Bin centers in an array of shape (n,) where n is the number
of bins
"""
if interval.length < ds:
return None, None
n_bins = int(np.floor(interval.length / ds)) # number of bins
# linspace is better than arange for non-integral steps
bins = np.linspace(interval.start, interval.start + n_bins*ds, n_bins+1)
if w > 1:
wn_bins = np.max((1, n_bins - w + 1))
wn_bins = bins[:wn_bins+1] + w/2*ds - ds/2
bins = wn_bins
centers = bins[:-1] + (ds / 2)
return bins, centers
def _bst_get_bins(intervalArray, ds, w=1):
    """Return bin edges, centers, and binned support over an interval array.

    Only bins wholly contained inside each interval are kept (see
    ``_bst_get_bins_inside_interval``); intervals shorter than ``ds``
    contribute no bins.

    Parameters
    ----------
    intervalArray : IntervalArray-like
        Iterable of intervals, each exposing ``start`` and ``length``.
    ds : float
        Bin width, in seconds.
    w : int, optional
        Number of bins per sliding window (default 1 == no sliding).

    Returns
    -------
    bins : np.ndarray
        All bin edges, concatenated across intervals.
    bin_centers : np.ndarray
        All bin centers, concatenated across intervals.
    binned_support : np.ndarray, shape (n_intervals_with_bins, 2)
        [start, stop] (both inclusive) indices into ``bin_centers`` for
        each interval that produced at least one bin.
    support : same type as ``intervalArray``
        The support actually covered by the bins (may be smaller than the
        input support, since partial bins are dropped).
    """
    b = [] # bin list
    c = [] # centers list
    left_edges = []
    right_edges = []
    counter = 0
    for interval in intervalArray:
        bins, centers = _bst_get_bins_inside_interval(interval=interval, ds=ds, w=w)
        if bins is not None:
            # counter tracks the running index into the concatenated centers
            left_edges.append(counter)
            counter += len(centers) - 1
            right_edges.append(counter)
            counter += 1
            b.extend(bins.tolist())
            c.extend(centers.tolist())
    bins = np.array(b)
    bin_centers = np.array(c)
    le = np.array(left_edges)
    le = le[:, np.newaxis]
    re = np.array(right_edges)
    re = re[:, np.newaxis]
    binned_support = np.hstack((le, re))
    lengths = np.atleast_1d((binned_support[:,1] - binned_support[:,0] + 1).squeeze())
    # each interval contributes (n_bins + 1) edges; use the cumulative bin
    # counts to pick out the first and last edge of every interval
    support_starts = bins[np.insert(np.cumsum(lengths+1),0,0)[:-1]]
    support_stops = bins[np.insert(np.cumsum(lengths+1)-1,0,0)[1:]]
    supportdata = np.vstack([support_starts, support_stops]).T
    support = type(intervalArray)(supportdata) # set support to TRUE bin support
    return bins, bin_centers, binned_support, support
def is_odd(n):
    """Return True if the integer ``n`` is odd, False if it is even."""
    return n % 2 == 1
def swap_cols(arr, frm, to):
    """Swap two columns of a 2D np.array, in place.

    For a 1D array, the two elements at ``frm`` and ``to`` are swapped.
    """
    if arr.ndim > 1:
        tmp = arr[:, frm].copy()
        arr[:, frm] = arr[:, to]
        arr[:, to] = tmp
    else:
        tmp = arr[frm]
        arr[frm] = arr[to]
        arr[to] = tmp
def swap_rows(arr, frm, to):
    """Swap two rows of a 2D np.array, in place.

    For a 1D array, the two elements at ``frm`` and ``to`` are swapped.
    """
    if arr.ndim > 1:
        tmp = arr[frm, :].copy()
        arr[frm, :] = arr[to, :]
        arr[to, :] = tmp
    else:
        tmp = arr[frm]
        arr[frm] = arr[to]
        arr[to] = tmp
def pairwise(iterable):
    """Yield all neighboring pairs of ``iterable`` (as a zip object).

    Used as a helper function for is_sorted.

    Example
    -------
    >>> mylist = [2, 3, 6, 8, 7]
    >>> list(pairwise(mylist))
    [(2, 3), (3, 6), (6, 8), (8, 7)]
    """
    left, right = tee(iterable)
    # advance the second iterator by one so each element pairs with its
    # successor; the default None avoids StopIteration on empty input
    next(right, None)
    return zip(left, right)
def is_sorted_general(iterable, key=lambda a, b: a <= b):
    """Check whether ``iterable`` is monotonic increasing (sorted).

    ``key`` is a binary predicate evaluated on every neighboring pair;
    the default tests for non-decreasing order.
    """
    first, second = tee(iterable)
    next(second, None)
    return all(key(a, b) for a, b in zip(first, second))
def is_sorted(x, chunk_size=None):
    """Return True if ``x`` is monotonic increasing (sorted).

    NOTE: intended for a 1D array, list or tuple; raises for anything
    with more than one (squeezed) dimension.

    The comparison is performed chunk-by-chunk (consecutive chunks overlap
    by one element so boundaries are validated too), which bounds the
    memory footprint of the pairwise comparison. ``chunk_size`` defaults
    to 500000; chunk_size = 100000 is probably a good choice.
    """
    if not isinstance(x, (tuple, list, np.ndarray)):
        raise TypeError("Unsupported type {}".format(type(x)))

    data = np.atleast_1d(np.array(x).squeeze())
    if data.ndim > 1:
        raise ValueError("Input x must be 1-dimensional")

    chunk_size = 500000 if chunk_size is None else chunk_size
    n = data.size
    for lo in range(0, n, chunk_size):
        # overlap by one element so cross-chunk pairs are also checked
        hi = int(min(n, lo + chunk_size + 1))
        window = data[lo:hi]
        if not np.all(window[:-1] <= window[1:]):
            return False
    return True
def get_mua_events(mua, fs=None, minLength=None, maxLength=None, PrimaryThreshold=None, minThresholdLength=None, SecondaryThreshold=None):
    """Determine MUA/PBEs from multiunit activity.

    MUA : multiunit activity
    PBE : population burst event

    Parameters
    ----------
    mua : AnalogSignalArray
        AnalogSignalArray with one signal, namely the multiunit firing rate [in Hz].
    fs : float, optional
        Sampling frequency of mua, in Hz. If not specified, it will be inferred from
        mua.fs
    minLength : float, optional
        Minimum event duration, in seconds. Default is 0.050 (50 ms).
    maxLength : float, optional
        Maximum event duration, in seconds. Default is 0.750 (750 ms).
    PrimaryThreshold : float, optional
        Minimum MUA rate an event must reach. Default is mean(mua) + 3*std(mua).
    SecondaryThreshold : float, optional
        Rate that defines the event boundaries. Default is mean(mua).
    minThresholdLength : float, optional
        Minimum time the MUA must exceed PrimaryThreshold. Default is 0.

    Returns
    -------
    mua_epochs : EpochArray
        EpochArray containing all the MUA events / PBEs.

    Example
    -------
    mua = get_mua(spiketrain)
    mua_epochs = get_mua_events(mua)
    PBEs = get_PBEs(spiketrain, min_active=5)
        = get_PBEs(get_mua_events(get_mua(*)), spiketrain, min_active=5)
    """
    if fs is None:
        fs = mua.fs
    if fs is None:
        raise ValueError("fs must either be specified, or must be contained in mua!")

    # fill in defaults (see docstring) for any unspecified criteria:
    if PrimaryThreshold is None:
        PrimaryThreshold = mua.mean() + 3*mua.std()
    if SecondaryThreshold is None:
        SecondaryThreshold = mua.mean()
    if minLength is None:
        minLength = 0.050 # 50 ms minimum event duration
    if maxLength is None:
        maxLength = 0.750 # 750 ms maximum event duration
    if minThresholdLength is None:
        minThresholdLength = 0.0

    # determine MUA event bounds (sample indices into mua.data):
    mua_bounds_idx, maxes, _ = get_events_boundaries(
        x = mua.data,
        PrimaryThreshold = PrimaryThreshold,
        SecondaryThreshold = SecondaryThreshold,
        minThresholdLength = minThresholdLength,
        minLength = minLength,
        maxLength = maxLength,
        ds = 1/fs
    )

    if len(mua_bounds_idx) == 0:
        logging.warning("no mua events detected")
        return core.EpochArray(empty=True)

    # store MUA bounds in an EpochArray
    mua_epochs = core.EpochArray(mua.time[mua_bounds_idx])

    return mua_epochs
def get_contiguous_segments(data, *, step=None, assume_sorted=None,
                            in_core=True, index=False, inclusive=False,
                            fs=None, sort=None, in_memory=None):
    """Compute contiguous segments (separated by step) in a list.

    Note! This function requires that a sorted list is passed.
    It first checks if the list is sorted O(n), and only sorts O(n log(n))
    if necessary. But if you know that the list is already sorted,
    you can pass assume_sorted=True, in which case it will skip
    the O(n) check.

    Returns an array of size (n_segments, 2), with each row
    being of the form ([start, stop]) [inclusive, exclusive].

    NOTE: when possible, use assume_sorted=True, and step=1 as explicit
          arguments to function call.

    WARNING! Step is robustly computed in-core (i.e., when in_core is
        True), but is assumed to be 1 when out-of-core.

    Example
    -------
    >>> data = [1,2,3,4,10,11,12]
    >>> get_contiguous_segments(data)
    ([1,5], [10,13])
    >>> get_contiguous_segments(data, index=True)
    ([0,4], [4,7])

    Parameters
    ----------
    data : array-like
        1D array of sequential data, typically assumed to be integral (sample
        numbers).
    step : float, optional
        Expected step size for neighboring samples. Default uses numpy to find
        the median, but it is much faster and memory efficient to explicitly
        pass in step=1.
    assume_sorted : bool, optional
        If assume_sorted == True, then data is not inspected or re-ordered. This
        can be significantly faster, especially for out-of-core computation, but
        it should only be used when you are confident that the data is indeed
        sorted, otherwise the results from get_contiguous_segments will not be
        reliable.
    in_core : bool, optional
        If True, then we use np.diff which requires all the data to fit
        into memory simultaneously, otherwise we use groupby, which uses
        a generator to process potentially much larger chunks of data,
        but also much slower.
    index : bool, optional
        If True, the indices of segment boundaries will be returned. Otherwise,
        the segment boundaries will be returned in terms of the data itself.
        Default is False.
    inclusive : bool, optional
        If True, the boundaries are returned as [(inclusive idx, inclusive idx)]
        Default is False, and can only be used when index==True.

    Deprecated
    ----------
    in_memory : bool, optional
        This is equivalent to the new 'in-core'.
    sort : bool, optional
        This is equivalent to the new 'assume_sorted'
    fs : sampling rate (Hz) used to extend half-open interval support by 1/fs
    """
    # handle deprecated API calls; compare against None so that an explicit
    # False is honored (BUGFIX: 'if in_memory:' silently ignored
    # in_memory=False / sort=False):
    if in_memory is not None:
        in_core = in_memory
        logging.warning("'in_memory' has been deprecated; use 'in_core' instead")
    if sort is not None:
        assume_sorted = sort
        logging.warning("'sort' has been deprecated; use 'assume_sorted' instead")
    if fs:
        step = 1/fs
        logging.warning("'fs' has been deprecated; use 'step' instead")

    if inclusive:
        assert index, "option 'inclusive' can only be used with 'index=True'"
    if in_core:
        data = np.asarray(data)

        if not assume_sorted:
            if not is_sorted(data):
                data = np.sort(data)  # algorithm assumes sorted list

        # compute first differences once; they are reused for the step
        # estimate, the sanity check, and the break detection below
        diffs = np.diff(data)

        if step is None:
            step = np.median(diffs)

        # assuming that data(t1) is sampled somewhere on [t, t+1/fs) we have a 'continuous' signal as long as
        # data(t2 = t1+1/fs) is sampled somewhere on [t+1/fs, t+2/fs). In the most extreme case, it could happen
        # that t1 = t and t2 = t + 2/fs, i.e. a difference of 2 steps.

        if np.any(diffs < step):
            logging.warning("some steps in the data are smaller than the requested step size.")

        breaks = np.argwhere(diffs >= 2*step)
        starts = np.insert(breaks+1, 0, 0)
        stops = np.append(breaks, len(data)-1)
        bdries = np.vstack((data[starts], data[stops] + step)).T
        if index:
            if inclusive:
                indices = np.vstack((starts, stops)).T
            else:
                indices = np.vstack((starts, stops + 1)).T
            return indices
    else:
        from itertools import groupby
        from operator import itemgetter

        if not assume_sorted:
            if not is_sorted(data):
                # data = np.sort(data)  # algorithm assumes sorted list
                raise NotImplementedError("out-of-core sorting has not been implemented yet...")

        if step is None:
            step = 1

        bdries = []

        if not index:
            # group by (index - value): constant within a contiguous run
            for k, g in groupby(enumerate(data), lambda ix: (ix[0] - ix[1])):
                f = itemgetter(1)
                gen = (f(x) for x in g)
                start = next(gen)
                stop = start
                for stop in gen:
                    pass
                bdries.append([start, stop + step])
        else:
            counter = 0
            for k, g in groupby(enumerate(data), lambda ix: (ix[0] - ix[1])):
                f = itemgetter(1)
                gen = (f(x) for x in g)
                _ = next(gen)
                start = counter
                stop = start
                for _ in gen:
                    stop += 1
                if inclusive:
                    bdries.append([start, stop])
                else:
                    bdries.append([start, stop + 1])
                counter = stop + 1

    return np.asarray(bdries)
def get_direction(asa, *, sigma=None):
    """Return epochs during which an animal was running left to right, or right
    to left.

    Parameters
    ----------
    asa : AnalogSignalArray 1D
        AnalogSignalArray containing the 1D position data.
    sigma : float, optional
        Smoothing to apply to position (x) before computing gradient estimate.
        Default is 0.

    Returns
    -------
    l2r, r2l : EpochArrays
        EpochArrays corresponding to left-to-right and right-to-left movement.
    """
    if sigma is None:
        sigma = 0
    if not isinstance(asa, core.AnalogSignalArray):
        raise TypeError('AnalogSignalArray expected!')
    assert asa.n_signals == 1, "1D AnalogSignalArray expected!"

    # NOTE(review): dxdt_AnalogSignalArray is defined elsewhere in this
    # module; presumably it returns the (unrectified) derivative -- confirm.
    direction = dxdt_AnalogSignalArray(asa.smooth(sigma=sigma),
                                       rectify=False).data
    # quantize the derivative to +/-1 so contiguous runs of constant sign
    # can be extracted below
    direction[direction>=0] = 1
    direction[direction<0] = -1
    direction = direction.squeeze()

    l2r = get_contiguous_segments(np.argwhere(direction>0).squeeze(), step=1)
    l2r[:,1] -= 1 # change bounds from [inclusive, exclusive] to [inclusive, inclusive]
    l2r = core.EpochArray(asa.abscissa_vals[l2r])

    r2l = get_contiguous_segments(np.argwhere(direction<0).squeeze(), step=1)
    r2l[:,1] -= 1 # change bounds from [inclusive, exclusive] to [inclusive, inclusive]
    r2l = core.EpochArray(asa.abscissa_vals[r2l])

    return l2r, r2l
    def __radd__(self, other):
        """b + a"""
        # addition is commutative, so delegate to __add__
        return self.__add__(other)
    def __sub__(self, other):
        """a - b"""
        # re-wrap so the result keeps pretty-printing behavior
        return PrettyDuration(self.duration - other)
    def __rsub__(self, other):
        """b - a"""
        # NOTE(review): unlike __sub__, this returns a plain number rather
        # than a PrettyDuration -- confirm the asymmetry is intentional
        return other - self.duration
    def __mul__(self, other):
        """a * b"""
        # re-wrap so the result keeps pretty-printing behavior
        return PrettyDuration(self.duration * other)
    def __rmul__(self, other):
        """b * a"""
        # multiplication is commutative, so delegate to __mul__
        return self.__mul__(other)
    def __truediv__(self, other):
        """a / b"""
        # re-wrap so the result keeps pretty-printing behavior
        return PrettyDuration(self.duration / other)
def shrinkMatColsTo(mat, numCols):
""" Docstring goes here
Shrinks a NxM1 matrix down to an NxM2 matrix, where M2 <= M1"""
import scipy.ndimage
numCells = mat.shape[0]
numColsMat = mat.shape[1]
a = np.zeros((numCells, numCols))
for row in np.arange(numCells):
niurou = scipy.ndimage.interpolation.zoom(input=mat[row,:], zoom=(numCols/numColsMat), order = 1)
a[row,:] = niurou
return a
def find_threshold_crossing_events(x, threshold, *, mode='above'):
    """Find threshold crossing events. INCLUSIVE

    Parameters
    ----------
    x : numpy array
        Input data
    threshold : float
        The value whose crossing triggers an event
    mode : string, optional in ['above', 'below']; default 'above'
        event triggering above, or below threshold

    Returns
    -------
    eventlist : np.ndarray, shape (n_events, 2)
        [start, stop] indices (both inclusive) of each threshold crossing
    eventmax : np.ndarray, shape (n_events,)
        Maximum value of x within each event
    """
    from itertools import groupby
    from operator import itemgetter

    if mode == 'below':
        cross_threshold = np.where(x <= threshold, 1, 0)
    elif mode == 'above':
        cross_threshold = np.where(x >= threshold, 1, 0)
    else:
        raise NotImplementedError(
            "mode {} not understood for find_threshold_crossing_events".format(str(mode)))

    eventlist = []
    eventmax = []
    # group consecutive samples by whether they satisfy the threshold;
    # each run of 1s constitutes one event
    for k, v in groupby(enumerate(cross_threshold), key=itemgetter(1)):
        if k:
            v = list(v)
            first, last = v[0][0], v[-1][0]
            eventlist.append([first, last])
            # stop index is inclusive, hence the +1 in the slice; the run is
            # non-empty by construction, so .max() cannot fail (BUGFIX:
            # removed a bare try/except that printed debug output)
            eventmax.append(x[first:last + 1].max())

    return np.asarray(eventlist), np.asarray(eventmax)
def get_events_boundaries(x, *, PrimaryThreshold=None,
                          SecondaryThreshold=None,
                          minThresholdLength=None, minLength=None,
                          maxLength=None, ds=None, mode='above'):
    """get event boundaries such that event.max >= PrimaryThreshold
    and the event extent is defined by SecondaryThreshold.

    Note that when PrimaryThreshold==SecondaryThreshold, then this is a
    simple threshold crossing algorithm.

    NB. minLength and maxLength are applied to the SecondaryThreshold
        events, whereas minThresholdLength is applied to the
        PrimaryThreshold events.

    Parameters
    ----------
    x : numpy array
        Input data
    mode : string, optional in ['above', 'below']; default 'above'
        event triggering above, or below threshold
    PrimaryThreshold : float, optional
        If mode=='above', requires that event.max >= PrimaryThreshold
        If mode=='below', requires that event.min <= PrimaryThreshold
    SecondaryThreshold : float, optional
        The value that defines the event extent
    minThresholdLength : float, optional
        Minimum duration for which the PrimaryThreshold is crossed
    minLength : float, optional
        Minimum duration for which the SecondaryThreshold is crossed
    maxLength : float, optional
        Maximum duration for which the SecondaryThreshold is crossed
    ds : float, optional
        Time step of the input data x; required whenever any of the
        duration criteria above are used.

    Returns
    -------
    returns bounds, maxes, events
        where bounds <==> SecondaryThreshold to SecondaryThreshold, inclusive
              maxes  <==> maximum value during each event
              events <==> PrimaryThreshold to PrimaryThreshold, inclusive
    """
    # TODO: x must be a numpy array
    # TODO: ds is often used, but we have no default, and no check for when
    # it is left as None.

    x = x.squeeze()
    if x.ndim > 1:
        raise TypeError("multidimensional arrays not supported!")

    if PrimaryThreshold is None: # by default, threshold is 3 SDs above mean of x
        PrimaryThreshold = np.mean(x) + 3*np.std(x)

    if SecondaryThreshold is None: # by default, revert back to mean of x
        SecondaryThreshold = np.mean(x) # + 0*np.std(x)

    events, _ = \
        find_threshold_crossing_events(x=x,
                                       threshold=PrimaryThreshold,
                                       mode=mode)

    # apply minThresholdLength criterion:
    if minThresholdLength is not None and len(events) > 0:
        durations = (events[:,1] - events[:,0] + 1) * ds
        # BUGFIX: boolean masks must be applied directly; wrapping the mask
        # in a list (events[[mask]]) is an error in modern NumPy
        events = events[durations >= minThresholdLength]

    if len(events) == 0:
        bounds, maxes, events = [], [], []
        logging.warning("no events satisfied criteria")
        return bounds, maxes, events

    # Find periods where value is > SecondaryThreshold; note that the previous periods should be within these!
    if mode == 'above':
        assert SecondaryThreshold <= PrimaryThreshold, \
            "Secondary Threshold by definition should include more data than Primary Threshold"
    elif mode == 'below':
        assert SecondaryThreshold >= PrimaryThreshold, \
            "Secondary Threshold by definition should include more data than Primary Threshold"
    else:
        raise NotImplementedError(
            "mode {} not understood for find_threshold_crossing_events".format(str(mode)))

    bounds, broader_maxes = \
        find_threshold_crossing_events(x=x,
                                       threshold=SecondaryThreshold,
                                       mode=mode)

    # Find corresponding big windows for potential events
    #  Specifically, look for closest left edge that is just smaller
    outer_boundary_indices = np.searchsorted(bounds[:,0], events[:,0], side='right')
    #  searchsorted finds the index after, so subtract one to get index before
    outer_boundary_indices = outer_boundary_indices - 1

    # Find extended boundaries for events by pairing to larger windows
    #   (Note that there may be repeats if the larger window contains multiple > 3SD sections)
    bounds = bounds[outer_boundary_indices,:]
    maxes = broader_maxes[outer_boundary_indices]

    if minLength is not None and len(events) > 0:
        durations = (bounds[:,1] - bounds[:,0] + 1) * ds
        # compute the mask once and apply it to all three arrays so they
        # remain aligned (previously the list-wrapped mask form was used)
        keep = durations >= minLength
        bounds = bounds[keep]
        maxes = maxes[keep]
        events = events[keep]

    if maxLength is not None and len(events) > 0:
        durations = (bounds[:,1] - bounds[:,0] + 1) * ds
        keep = durations <= maxLength
        bounds = bounds[keep]
        maxes = maxes[keep]
        events = events[keep]

    if len(events) == 0:
        bounds, maxes, events = [], [], []
        logging.warning("no events satisfied criteria")
        return bounds, maxes, events

    # Now, since all that we care about are the larger windows, so we should get rid of repeats
    _, unique_idx = np.unique(bounds[:,0], return_index=True)
    bounds = bounds[unique_idx,:] # SecondaryThreshold to SecondaryThreshold
    maxes = maxes[unique_idx]     # maximum value during event
    events = events[unique_idx,:] # PrimaryThreshold to PrimaryThreshold

    return bounds, maxes, events
def signal_envelope1D(data, *, sigma=None, fs=None):
    """Deprecated alias for :func:`signal_envelope_1d`."""
    # BUGFIX: this previously called logging.warnings(...), which raised
    # AttributeError; the correct function is logging.warning.
    logging.warning("'signal_envelope1D' is deprecated; use 'signal_envelope_1d' instead!")
    return signal_envelope_1d(data, sigma=sigma, fs=fs)
def _hilbert_envelope(segment_data, sigma, fs):
    """Return the Hilbert-transform envelope of ``segment_data``.

    ``segment_data`` has shape (n_signals, n_samples). The data is
    zero-padded to a fast FFT length, the magnitude of the analytic signal
    is taken, and the result (truncated back to the original length) is
    optionally Gaussian-smoothed with standard deviation ``sigma``
    (seconds; requires ``fs`` in Hz).
    """
    n_signals, n_samples = segment_data.shape
    # Compute number of samples to compute fast FFTs
    padlen = nextfastpower(n_samples) - n_samples
    # Pad data
    paddeddata = np.hstack((segment_data, np.zeros((n_signals, padlen))))
    # Use hilbert transform to get an envelope
    envelope = np.absolute(hilbert(paddeddata, axis=-1))
    # free up memory
    del paddeddata
    # Truncate results back to original length
    envelope = envelope[..., :n_samples]
    if sigma:
        # Smooth envelope with a gaussian (sigma = 4 ms default); note
        # scipy.ndimage.gaussian_filter1d replaces the deprecated
        # scipy.ndimage.filters namespace
        EnvelopeSmoothingSD = sigma * fs
        envelope = scipy.ndimage.gaussian_filter1d(envelope,
                                                   EnvelopeSmoothingSD,
                                                   mode='constant', axis=-1)
    return envelope


def signal_envelope_1d(data, *, sigma=None, fs=None):
    """Finds the signal envelope by taking the absolute value
    of the Hilbert transform

    Parameters
    ----------
    data : numpy array, list, or RegularlySampledAnalogSignalArray
        Input data
        If data is a numpy array, it is expected to have shape
        (n_signals, n_samples)
        If data is a list, it is expected to have length n_signals,
        where each sublist has length n_samples, i.e. data is not
        jagged
    sigma : float, optional
        Standard deviation of the Gaussian kernel used to
        smooth the envelope after applying the Hilbert transform.
        Units of seconds. Default is 4 ms
    fs : float, optional
        Sampling rate of the signal

    Returns
    -------
    out : same type as the input object
        An object containing the signal envelope

    TODO: this is not yet epoch-aware!
    UPDATE: this is actually epoch-aware by now!
    """
    if sigma is None:
        sigma = 0.004   # 4 ms standard deviation
    if fs is None:
        if isinstance(data, (np.ndarray, list)):
            raise ValueError("sampling frequency must be specified!")
        elif isinstance(data, core.RegularlySampledAnalogSignalArray):
            fs = data.fs

    if isinstance(data, (np.ndarray, list)):
        data_array = np.array(data)
        n_dims = data_array.ndim
        assert n_dims <= 2, "Only 1D signals supported!"
        # promote a 1D signal to a (1, n_samples) row so the helper can
        # treat every input uniformly
        if n_dims == 1:
            input_data = data_array.reshape((1, data_array.size))
        else:
            input_data = data_array
        envelope = _hilbert_envelope(input_data, sigma, fs)
        if isinstance(data, list):
            envelope = envelope.tolist()
        return envelope
    elif isinstance(data, core.RegularlySampledAnalogSignalArray):
        # Only ASA data of shape (n_signals, n_timepoints) -> 2D currently supported
        assert data.data.ndim == 2
        cum_lengths = np.insert(np.cumsum(data.lengths), 0, 0)
        newasa = data.copy()
        # compute the envelope epoch-by-epoch so padding does not bleed
        # across epoch boundaries (duplicate logic now lives in
        # _hilbert_envelope)
        for idx in range(data.n_epochs):
            segment_data = data._data[:, cum_lengths[idx]:cum_lengths[idx+1]]
            envelope = _hilbert_envelope(segment_data, sigma, fs)
            newasa._data[:, cum_lengths[idx]:cum_lengths[idx+1]] = np.atleast_2d(envelope)
        return newasa
def nextpower(n, base=2.0):
    """Return the next integral power of two greater than the given number.

    Specifically, return m such that
        m >= n
        m == base**x
    where x is an integer. Use base argument to specify a base other than 2.
    This is useful for ensuring fast FFT sizes.

    From https://gist.github.com/bhawkins/4479607 (Brian Hawkins)

    Parameters
    ----------
    n : scalar or np.ndarray
        Value(s) to round up.
    base : float, optional
        Base of the power (default 2.0).

    Returns
    -------
    int or np.ndarray of int
        Matches the input kind: arrays in, integer arrays out.
    """
    x = base**ceil(log(n) / log(base))
    # use isinstance (rather than comparing types) so ndarray subclasses
    # are handled correctly too
    if isinstance(n, np.ndarray):
        return np.asarray(x, dtype=int)
    return int(x)
def nextfastpower(n):
    """Return the next integral power of small factors greater than the given
    number. Specifically, return m such that
        m >= n
        m == 2**x * 3**y * 5**z
    where x, y, and z are integers.
    This is useful for ensuring fast FFT sizes.

    From https://gist.github.com/bhawkins/4479607 (Brian Hawkins)

    See also http://scipy.github.io/devdocs/generated/scipy.fftpack.next_fast_len.html
    """
    if n < 7:
        return max (n, 1)
    # x, y, and z are all bounded from above by the formula of nextpower.
    # Compute all possible combinations for powers of 3 and 5.
    # (Not too many for reasonable FFT sizes.)
    # NOTE(review): power_series is defined elsewhere in this module --
    # presumably it returns the powers of its base up to n; confirm.
    n35 = np.outer (power_series (n, 3.0), power_series (n, 5.0))
    n35 = n35[n35<=n]
    # Lump the powers of 3 and 5 together and solve for the powers of 2.
    n2 = nextpower (n / n35)
    return int (min (n2 * n35))
def get_threshold_crossing_epochs(asa, t1=None, t2=None, mode='above'):
    """Return epochs where a signal crosses a compound threshold specified by t1
    and t2.

    Parameters
    ----------
    asa : AnalogSignalArray
        AnalogSignalArray containing a single channel
    t1 : float, optional
        Primary threshold. Minimum signal value that has to be reached /
        exceeded during an event. Default is 3 standard deviations above signal
        mean.
    t2 : float, optional
        Secondary threshold. Signal value that defines the event boundaries.
        Default is signal mean.
    mode : string, optional
        Mode of operation. One of ['above', 'below']. If 'above', then return
        epochs where the signal exceeds the compound threshold, and if 'below',
        then return epochs where the signal falls below the compound threshold.
        Default is 'above'.

    Returns
    -------
    epochs : EpochArray
        EpochArray with all the epochs where the signal satisfied the criteria.
    """
    if asa.n_signals > 1:
        raise TypeError("multidimensional AnalogSignalArrays not supported!")

    signal = asa.data.squeeze()

    # fill in default thresholds: mean + 3 SD (primary), mean (secondary)
    primary = np.mean(signal) + 3*np.std(signal) if t1 is None else t1
    secondary = np.mean(signal) if t2 is None else t2

    # compute periods where signal exceeds compound threshold
    epoch_bounds, _, _ = get_events_boundaries(
        x=signal,
        PrimaryThreshold=primary,
        SecondaryThreshold=secondary,
        mode=mode
    )
    # convert sample-index bounds to time, in seconds
    epoch_bounds = asa.time[epoch_bounds]
    if len(epoch_bounds) == 0:
        return type(asa._abscissa.support)(empty=True)
    # stops are inclusive sample times; extend by one sample period so the
    # resulting epochs are half-open
    epoch_bounds[:, 1] += 1/asa.fs
    return type(asa._abscissa.support)(epoch_bounds)
def get_run_epochs(speed, v1=10, v2=8):
    """Return epochs where animal is running at least as fast as
    specified by v1 and v2.

    Parameters
    ----------
    speed : AnalogSignalArray
        AnalogSignalArray containing single channel speed, in units/sec
    v1 : float, optional
        Minimum speed (in same units as speed) that has to be reached /
        exceeded during an event. Default is 10 [units/sec]
    v2 : float, optional
        Speed that defines the event boundaries. Default is 8 [units/sec]

    Returns
    -------
    run_epochs : EpochArray
        EpochArray with all the epochs where speed satisfied the criteria.
    """
    # running epochs are where speed exceeds the compound (v1, v2) threshold
    return get_threshold_crossing_epochs(asa=speed, t1=v1, t2=v2, mode='above')
def get_inactive_epochs(speed, v1=5, v2=7):
    """Return epochs where animal is running no faster than specified by
    v1 and v2.

    Parameters
    ----------
    speed : AnalogSignalArray
        AnalogSignalArray containing single channel speed, in units/sec
    v1 : float, optional
        Speed the animal must drop to / below during an event.
        Default is 5 [units/sec]
    v2 : float, optional
        Speed that defines the event boundaries. Default is 7 [units/sec]

    Returns
    -------
    inactive_epochs : EpochArray
        EpochArray with all the epochs where speed satisfied the criteria.
    """
    # inactive epochs are where speed falls below the compound threshold
    return get_threshold_crossing_epochs(asa=speed, t1=v1, t2=v2, mode='below')
def spiketrain_union(st1, st2):
    """Merge two spiketrains into one spanning the union of their supports.

    WARNING! This function should be improved a lot!
    """
    assert st1.n_units == st2.n_units
    merged_support = st1.support.join(st2.support)
    # Concatenate the spike times of each unit across the two trains.
    merged_times = [
        np.append(st1.time[unit], st2.time[unit]) for unit in range(st1.n_units)
    ]
    # Keep the sampling rate only when both trains agree on it.
    fs = st1.fs if st1.fs == st2.fs else None
    return core.SpikeTrainArray(merged_times, support=merged_support, fs=fs)
########################################################################
# uncurated below this line!
########################################################################
def find_nearest_idx(array, val):
    """Return the index of the element of *array* closest to *val*.

    Parameters
    ----------
    array : np.array
    val : float

    Returns
    -------
    Index into *array* whose element is nearest to *val*; ties resolve
    to the first (lowest) index.

    TODO: a vectorized np.searchsorted-based implementation would avoid
    scanning the whole array for every query value.
    """
    distances = np.abs(array - val)
    return np.argmin(distances)
def find_nearest_indices(array, vals):
    """Find the index in *array* nearest to each value in *vals*.

    Vectorized with NumPy broadcasting instead of a Python-level loop
    calling ``find_nearest_idx`` once per value.  Note this materializes
    a temporary (len(vals), len(array)) distance matrix.

    Parameters
    ----------
    array : np.array
        This is the array you wish to index into.
    vals : np.array
        This is the array that you are getting your indices from.

    Returns
    -------
    Integer indices into *array* that are closest to *vals*, one per
    value; ties resolve to the first (lowest) index, matching
    find_nearest_idx().
    """
    arr = np.asarray(array)
    targets = np.atleast_1d(np.asarray(vals))
    # argmin along axis 1 picks the first minimum, same tie-breaking as
    # the scalar helper did.
    return np.abs(arr - targets[:, None]).argmin(axis=1).astype(int)
def get_sort_idx(tuning_curves):
    """Finds indices to sort neurons by max firing in tuning curve.

    Parameters
    ----------
    tuning_curves : list of lists
        Where each inner list is the tuning curves for an individual
        neuron.

    Returns
    -------
    sorted_idx : list
        List of integers that correspond to the neuron in sorted order;
        neurons tied on peak location keep their original relative order.
    """
    # np.argmax returns the first occurrence of the maximum, which matches
    # the original np.where(tc == np.max(tc))[0][0] idiom; sorted() is
    # stable, so ties preserve input order.
    peak_locations = [int(np.argmax(neuron_tc)) for neuron_tc in tuning_curves]
    return sorted(range(len(peak_locations)), key=peak_locations.__getitem__)
def collapse_time(obj, gap=0):
    """Collapse all epochs in a SpikeTrainArray and collapse them into a single, contiguous SpikeTrainArray

    Left-shifts each epoch so that epochs become back-to-back, optionally
    separated by ``gap`` seconds (analog signals only).  Returns a new
    object of the same type; the input is not modified.
    """
    # TODO: redo SpikeTrainArray so as to keep the epochs separate!, and to support gaps!
    # We'll have to adjust all the spikes per epoch... and we'll have to compute a new support. Also set a flag!
    # If it's a SpikeTrainArray, then we left-shift the spike times. If it's an AnalogSignalArray, then we
    # left-shift the time and tdata.
    # Also set a new attribute, with the boundaries in seconds.
    if isinstance(obj, core.RegularlySampledAnalogSignalArray):
        # Analog case: data stays as-is; only time/support are rewritten.
        new_obj = type(obj)(empty=True)
        new_obj._data = obj._data
        durations = obj.support.durations
        # Epoch k starts after the cumulative duration (plus gaps) of epochs < k.
        starts = np.insert(np.cumsum(durations + gap),0,0)[:-1]
        stops = starts + durations
        newsupport = type(obj._abscissa.support)(np.vstack((starts, stops)).T)
        new_obj._support = newsupport
        new_time = obj.time.astype(float) # fast copy
        # time_idx[k]:time_idx[k+1] are the sample indices belonging to epoch k.
        time_idx = np.insert(np.cumsum(obj.lengths),0,0)
        new_offset = 0
        for epidx in range(obj.n_epochs):
            if epidx > 0:
                # Shift this epoch's samples so it starts at new_offset + gap.
                new_time[time_idx[epidx]:time_idx[epidx+1]] = new_time[time_idx[epidx]:time_idx[epidx+1]] - obj.time[time_idx[epidx]] + new_offset + gap
                new_offset += durations[epidx] + gap
            else:
                # First epoch: no gap before it.
                new_time[time_idx[epidx]:time_idx[epidx+1]] = new_time[time_idx[epidx]:time_idx[epidx+1]] - obj.time[time_idx[epidx]] + new_offset
                new_offset += durations[epidx]
        new_obj._time = new_time
        new_obj._fs = obj._fs
    elif isinstance(obj, core.EventArray):
        if gap > 0:
            raise ValueError("gaps not supported for SpikeTrainArrays yet!")
        new_obj = type(obj)(empty=True)
        new_time = [[] for _ in range(obj.n_series)]
        duration = 0
        # Iterating the EventArray yields one per-epoch object at a time;
        # each epoch's event times are left-shifted by its own start and
        # appended after the running total duration.
        for st_ in obj:
            le = st_.support.start
            for unit_ in range(obj.n_series):
                new_time[unit_].extend(st_._data[unit_] - le + duration)
            duration += st_.support.duration
        new_time = np.asanyarray([np.asanyarray(unittime) for unittime in new_time])
        new_obj._data = new_time
        # Collapsed support is a single interval [0, total duration].
        new_obj.support = type(obj._abscissa.support)([0, duration])
        new_obj._series_ids = obj._series_ids
        new_obj._series_labels = obj._series_labels
        new_obj._series_tags = obj._series_tags
    elif isinstance(obj, core.BinnedEventArray):
        raise NotImplementedError("BinnedEventArrays are not yet supported, but bst.data is essentially already collapsed!")
    else:
        raise TypeError("unsupported type for collapse_time")
    return new_obj
def cartesian(xcenters, ycenters):
    """Return every (x, y) combination of elements in the two arrays.

    Parameters
    ----------
    xcenters : np.array
    ycenters : np.array

    Returns
    -------
    cartesian : np.array
        With shape (n_sample, 2); x varies fastest within each y value.
    """
    xs = np.tile(xcenters, len(ycenters))
    ys = np.repeat(ycenters, len(xcenters))
    return np.transpose([xs, ys])
| 37.135721 | 190 | 0.615709 |
37f1a13e31b524b47983953b4e76242354934ac4 | 23,625 | py | Python | python/paddle/fluid/contrib/slim/quantization/imperative/qat.py | logan-siyao-peng/Paddle | 10a8f3e5c3151c1abb810fba2994cc30e1232bec | [
"Apache-2.0"
] | null | null | null | python/paddle/fluid/contrib/slim/quantization/imperative/qat.py | logan-siyao-peng/Paddle | 10a8f3e5c3151c1abb810fba2994cc30e1232bec | [
"Apache-2.0"
] | null | null | null | python/paddle/fluid/contrib/slim/quantization/imperative/qat.py | logan-siyao-peng/Paddle | 10a8f3e5c3151c1abb810fba2994cc30e1232bec | [
"Apache-2.0"
] | 1 | 2021-01-17T01:11:45.000Z | 2021-01-17T01:11:45.000Z | # Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import collections
import logging
import numpy as np
import sys
import os
import paddle
from paddle.fluid import dygraph, core, framework
from paddle.fluid.executor import Executor
from paddle.fluid.dygraph.io import INFER_MODEL_SUFFIX, INFER_PARAMS_SUFFIX
from paddle.nn import Linear, Conv2D, Conv2DTranspose, MaxPool2D, MaxPool1D, BatchNorm1D, BatchNorm2D, BatchNorm3D
from paddle.fluid.dygraph.nn import BatchNorm, Pool2D
from paddle.fluid.io import load_inference_model, save_inference_model
from paddle.nn.layer.activation import ReLU, LeakyReLU, Sigmoid, ReLU6, Tanh, Softmax, PReLU, Swish
from paddle.fluid.log_helper import get_logger
from . import quant_nn
from .. import quantization_pass
# Public API of this module.
__all__ = ['ImperativeQuantAware', 'ImperativeCalcOutScale']
# Module-level logger with a timestamped "LEVEL: message" format.
_logger = get_logger(
    __name__, logging.INFO, fmt='%(asctime)s-%(levelname)s: %(message)s')
# Maps an operator type to its argument slot names as
# [[input slot names], [output slot names]] -- presumably the "real"
# in/out names used when rewriting quantized graphs; confirm against
# the quantization passes that consume this table.
_op_real_in_out_name = {
    "conv2d": [["Input", "Filter"], ["Output"]],
    "conv2d_transpose": [["Input", "Filter"], ["Output"]],
    "pool2d": [["X"], ["Out"]],
    "elementwise_add": [["X", "Y"], ["Out"]],
    "softmax": [["X"], ["Out"]],
    "relu": [["X"], ["Out"]],
    "relu6": [["X"], ["Out"]],
    "leaky_relu": [["X"], ["Out"]],
    "prelu": [["X"], ["Out"]],
    "tanh": [["X"], ["Out"]],
    "batch_norm": [["X"], ["Y"]],
    "sigmoid": [["X"], ["Out"]],
    "swish": [["X"], ["Out"]],
}
| 46.232877 | 114 | 0.600974 |
37f415fea8c9ac7d7647ab03f1f9ceb7a0593bde | 1,815 | py | Python | sc/northwind.py | elliotgunn/DS-Unit-3-Sprint-2-SQL-and-Databases | c730e2b3e66199226fa7549511cbb7801eb7a694 | [
"MIT"
] | null | null | null | sc/northwind.py | elliotgunn/DS-Unit-3-Sprint-2-SQL-and-Databases | c730e2b3e66199226fa7549511cbb7801eb7a694 | [
"MIT"
] | null | null | null | sc/northwind.py | elliotgunn/DS-Unit-3-Sprint-2-SQL-and-Databases | c730e2b3e66199226fa7549511cbb7801eb7a694 | [
"MIT"
] | null | null | null | import pandas as pd
# Exploration queries against the Northwind sample database (sqlite3).
# Requires northwind_small.sqlite3 in the working directory.
import sqlite3
from pandas import DataFrame
# NOTE(review): DataFrame is imported but never used below -- presumably
# left over from an earlier iteration; confirm before removing.
n_conn = sqlite3.connect('northwind_small.sqlite3')
n_curs = n_conn.cursor()
# What are the ten most expensive items (per unit price) in the database?
query = """
SELECT ProductName, UnitPrice
FROM Product
ORDER BY UnitPrice DESC
LIMIT 10
"""
n_curs.execute(query)
print(n_curs.fetchall())
# What is the average age of an employee at the time of their hiring? (Hint: a
# lot of arithmetic works with dates.)
query = """
SELECT AVG(HireDate-BirthDate)
FROM Employee
"""
n_curs.execute(query)
print(n_curs.fetchall())
# answer: 37.22
# (*Stretch*) How does the average age of employee at hire vary by city?
query = """SELECT City, AVG(HireDate-BirthDate)
FROM Employee
GROUP BY City
"""
n_curs.execute(query)
print(n_curs.fetchall())
# What are the ten most expensive items (per unit price)
# in the database *and* their suppliers?
query = """
SELECT ProductName, UnitPrice, CompanyName
FROM Product as p
JOIN Supplier as s
ON p.SupplierID = s.ID
ORDER BY UnitPrice DESC
LIMIT 10
"""
n_curs.execute(query)
print(n_curs.fetchall())
# What is the largest category (by number of unique products in it)?
query = """
SELECT CategoryName, COUNT(CategoryName)
FROM Category as c
JOIN Product as p
ON c.ID=p.CategoryID
GROUP BY CategoryName
ORDER by COUNT(CategoryName) DESC
"""
n_curs.execute(query)
print(n_curs.fetchall())
# largest category is Confections 13
# (*Stretch*) Who's the employee with the most territories? Use `TerritoryId`
# (not name, region, or other fields) as the unique identifier for territories.
# EMPLOYEE ID 7
query = """
SELECT EmployeeId, TerritoryId, COUNT(DISTINCT TerritoryId)
FROM EmployeeTerritory
GROUP BY EmployeeId
ORDER BY COUNT(DISTINCT TerritoryId) DESC
"""
n_curs.execute(query)
print(n_curs.fetchall())
# Release the database handles now that all queries have run
# (the original script leaked the open connection).
n_curs.close()
n_conn.close()
| 22.6875 | 79 | 0.755923 |
37f57e46f9f9adb655b1a0930224aed655bce6c7 | 1,396 | py | Python | tests/test_app/library/loans/admin.py | Pijuli/django-jazzmin | e3f9d45183d58f78bf4c6793969490631a84681d | [
"MIT"
] | 972 | 2020-05-12T19:51:01.000Z | 2022-03-31T20:18:33.000Z | tests/test_app/library/loans/admin.py | Pijuli/django-jazzmin | e3f9d45183d58f78bf4c6793969490631a84681d | [
"MIT"
] | 290 | 2020-05-12T17:35:21.000Z | 2022-03-31T15:18:59.000Z | tests/test_app/library/loans/admin.py | Pijuli/django-jazzmin | e3f9d45183d58f78bf4c6793969490631a84681d | [
"MIT"
] | 166 | 2020-06-11T10:50:47.000Z | 2022-03-24T12:19:00.000Z | from django.contrib import admin
from django.urls import path
from .models import BookLoan, Library
from .views import CustomView
| 25.851852 | 85 | 0.593123 |
37f5b2546c850f56d3d094ea379b377bba04af7c | 2,051 | py | Python | lib/MergeMetabolicAnnotations/utils/CompareAnnotationsUtil.py | jeffkimbrel/MergeMetabolicAnnotations | ec971d114d57942cef73dc2980c8faf48cea7afe | [
"MIT"
] | 1 | 2021-08-04T15:42:46.000Z | 2021-08-04T15:42:46.000Z | lib/MergeMetabolicAnnotations/utils/CompareAnnotationsUtil.py | jeffkimbrel/MergeMetabolicAnnotations | ec971d114d57942cef73dc2980c8faf48cea7afe | [
"MIT"
] | 3 | 2019-02-01T22:14:02.000Z | 2021-02-03T03:16:52.000Z | lib/MergeMetabolicAnnotations/utils/CompareAnnotationsUtil.py | jeffkimbrel/MergeMetabolicAnnotations | ec971d114d57942cef73dc2980c8faf48cea7afe | [
"MIT"
] | 3 | 2018-11-30T21:31:00.000Z | 2021-01-12T16:13:01.000Z | import os
import datetime
import logging
import json
import uuid
from installed_clients.WorkspaceClient import Workspace as Workspace
from installed_clients.KBaseReportClient import KBaseReport
from installed_clients.annotation_ontology_apiServiceClient import annotation_ontology_api
import MergeMetabolicAnnotations.utils.functions as f
| 36.625 | 92 | 0.686494 |
37f7e625de3ee5f43165604bef1b04155036f942 | 56 | py | Python | models/__init__.py | TvSeriesFans/CineMonster | 036a3223618afd536932d21b0e86d18d0fba3b28 | [
"Apache-2.0"
] | 15 | 2017-09-17T17:52:43.000Z | 2020-08-31T15:41:12.000Z | models/__init__.py | TvSeriesFans/CineMonster | 036a3223618afd536932d21b0e86d18d0fba3b28 | [
"Apache-2.0"
] | 13 | 2017-03-14T13:24:14.000Z | 2021-08-20T13:52:54.000Z | models/__init__.py | TvSeriesFans/CineMonster | 036a3223618afd536932d21b0e86d18d0fba3b28 | [
"Apache-2.0"
] | 27 | 2017-07-01T18:33:49.000Z | 2021-08-05T09:13:18.000Z | from models.Model import Player, Group, Session, engine
| 28 | 55 | 0.803571 |
37f85d17e2772b9092e4ca6adf7715edc27bc547 | 1,168 | py | Python | src/backend/tests/test_game/test_models.py | ToJestKrzysio/TheJungleGame | 904dd4adc937145df2c8c353eb83bec3b5dd1f7e | [
"MIT"
] | null | null | null | src/backend/tests/test_game/test_models.py | ToJestKrzysio/TheJungleGame | 904dd4adc937145df2c8c353eb83bec3b5dd1f7e | [
"MIT"
] | null | null | null | src/backend/tests/test_game/test_models.py | ToJestKrzysio/TheJungleGame | 904dd4adc937145df2c8c353eb83bec3b5dd1f7e | [
"MIT"
] | null | null | null | from unittest.mock import Mock, patch
import numpy as np
from game.models import ValuePolicyModel
| 27.809524 | 85 | 0.673801 |
37f98cbd357a8c5c3fa954e32959aafbe7a7882b | 653 | py | Python | sina_spider/items.py | yanwen0614/Weibo | 5d85e39d0cf7fc848bf5a06df08acbf38661db8d | [
"Apache-2.0"
] | null | null | null | sina_spider/items.py | yanwen0614/Weibo | 5d85e39d0cf7fc848bf5a06df08acbf38661db8d | [
"Apache-2.0"
] | null | null | null | sina_spider/items.py | yanwen0614/Weibo | 5d85e39d0cf7fc848bf5a06df08acbf38661db8d | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
# Define here the models for your scraped items
#
# See documentation in:
# http://doc.scrapy.org/en/latest/topics/items.html
from scrapy import Item, Field
def main():
    # NOTE(review): TopicItem is not defined in this module -- presumably a
    # scrapy Item subclass that was meant to be declared above; confirm.
    # The created item is never used, so this looks like scaffolding code.
    item = TopicItem()
    pass
if __name__ == '__main__':
main() | 18.138889 | 51 | 0.603369 |
37fa8681617c1303e48ae88c3c02ae64abad5b16 | 9,259 | py | Python | emission/core/wrapper/client.py | Andrew-Tan/e-mission-server | 91d59bee86e63d803e401f10f4b6a2502effedda | [
"BSD-3-Clause"
] | null | null | null | emission/core/wrapper/client.py | Andrew-Tan/e-mission-server | 91d59bee86e63d803e401f10f4b6a2502effedda | [
"BSD-3-Clause"
] | 1 | 2017-08-31T19:54:16.000Z | 2017-08-31T19:54:16.000Z | emission/core/wrapper/client.py | Andrew-Tan/e-mission-server | 91d59bee86e63d803e401f10f4b6a2502effedda | [
"BSD-3-Clause"
] | null | null | null | import json
import logging
import dateutil.parser
from datetime import datetime
# Our imports
from emission.core.get_database import get_profile_db, get_client_db, get_pending_signup_db
import emission.clients.common
| 38.102881 | 186 | 0.696836 |
37fb65065000540ac24e1a720effb1e2f2805f1c | 57 | py | Python | setup.py | karianjahi/advent_of_code | 16939cc7c475465c35d8750328b9b7aef60fc4d6 | [
"MIT"
] | null | null | null | setup.py | karianjahi/advent_of_code | 16939cc7c475465c35d8750328b9b7aef60fc4d6 | [
"MIT"
] | null | null | null | setup.py | karianjahi/advent_of_code | 16939cc7c475465c35d8750328b9b7aef60fc4d6 | [
"MIT"
] | null | null | null | import setuptools
setuptools.setup(name='advent_of_code') | 28.5 | 39 | 0.859649 |
37fbd5a54d581539270bd58bff9f475311ff3236 | 68 | py | Python | test/sanity_import_vpp_papi.py | amithbraj/vpp | edf1da94dc099c6e2ab1d455ce8652fada3cdb04 | [
"Apache-2.0"
] | 751 | 2017-07-13T06:16:46.000Z | 2022-03-30T09:14:35.000Z | test/sanity_import_vpp_papi.py | amithbraj/vpp | edf1da94dc099c6e2ab1d455ce8652fada3cdb04 | [
"Apache-2.0"
] | 63 | 2018-06-11T09:48:35.000Z | 2021-01-05T09:11:03.000Z | test/sanity_import_vpp_papi.py | amithbraj/vpp | edf1da94dc099c6e2ab1d455ce8652fada3cdb04 | [
"Apache-2.0"
] | 479 | 2017-07-13T06:17:26.000Z | 2022-03-31T18:20:43.000Z | #!/usr/bin/env python3
""" sanity check script """
import vpp_papi
| 13.6 | 27 | 0.691176 |
37fbd663edc97f78d91a3917050b5ae91d7a6023 | 2,191 | py | Python | examples/labs/demo_dmtx.py | yarikoptic/nipy | 749302c7ffa8ea714cc32d405f0df521102bbc6f | [
"BSD-3-Clause"
] | null | null | null | examples/labs/demo_dmtx.py | yarikoptic/nipy | 749302c7ffa8ea714cc32d405f0df521102bbc6f | [
"BSD-3-Clause"
] | null | null | null | examples/labs/demo_dmtx.py | yarikoptic/nipy | 749302c7ffa8ea714cc32d405f0df521102bbc6f | [
"BSD-3-Clause"
] | null | null | null | #!/usr/bin/env python
# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*-
# vi: set ft=python sts=4 ts=4 sw=4 et:
from __future__ import print_function # Python 2/3 compatibility
__doc__ = """
Examples of design matrices specification and and computation (event-related
design, FIR design, etc)
Requires matplotlib
Author : Bertrand Thirion: 2009-2010
"""
print(__doc__)
import numpy as np
try:
    import matplotlib.pyplot as plt
except ImportError:
    raise RuntimeError("This script needs the matplotlib library")
from nipy.modalities.fmri.design_matrix import make_dmtx
from nipy.modalities.fmri.experimental_paradigm import (EventRelatedParadigm,
                                                        BlockParadigm)
# frame times: 128 scans sampled every `tr` seconds
tr = 1.0
nscans = 128
frametimes = np.linspace(0, (nscans - 1) * tr, nscans)
# experimental paradigm: 9 events across three conditions
conditions = ['c0', 'c0', 'c0', 'c1', 'c1', 'c1', 'c3', 'c3', 'c3']
onsets = [30, 70, 100, 10, 30, 90, 30, 40, 60]
hrf_model = 'canonical'
# NOTE(review): hrf_model is assigned here (and reassigned to 'FIR' below)
# but never passed to make_dmtx; the FIR call uses the literal 'fir'.
# random-walk regressors passed as additional (motion-like) regressors
motion = np.cumsum(np.random.randn(128, 6), 0)
add_reg_names = ['tx', 'ty', 'tz', 'rx', 'ry', 'rz']
#event-related design matrix
paradigm = EventRelatedParadigm(conditions, onsets)
X1 = make_dmtx(
    frametimes, paradigm, drift_model='polynomial', drift_order=3,
    add_regs=motion, add_reg_names=add_reg_names)
# block design matrix: same onsets, each event lasting 7 s
duration = 7 * np.ones(9)
paradigm = BlockParadigm(con_id=conditions, onset=onsets,
                         duration=duration)
X2 = make_dmtx(frametimes, paradigm, drift_model='polynomial',
               drift_order=3)
# FIR model with delays 1..5
paradigm = EventRelatedParadigm(conditions, onsets)
hrf_model = 'FIR'
X3 = make_dmtx(frametimes, paradigm, hrf_model='fir',
               drift_model='polynomial', drift_order=3,
               fir_delays=np.arange(1, 6))
# plot the three design matrices side by side
fig = plt.figure(figsize=(10, 6))
ax = plt.subplot(1, 3, 1)
X1.show(ax=ax)
ax.set_title('Event-related design matrix', fontsize=12)
ax = plt.subplot(1, 3, 2)
X2.show(ax=ax)
ax.set_title('Block design matrix', fontsize=12)
ax = plt.subplot(1, 3, 3)
X3.show(ax=ax)
ax.set_title('FIR design matrix', fontsize=12)
plt.subplots_adjust(top=0.9, bottom=0.25)
plt.show()
| 30.013699 | 77 | 0.685075 |
37fdb024ea14a002f56310787cf60b4ca3d52485 | 36,821 | py | Python | fbpcs/private_computation/test/service/test_private_computation.py | yelixu2/fbpcs | 31b1154bf1a207471fa207a0b0e4c74693f09608 | [
"MIT"
] | null | null | null | fbpcs/private_computation/test/service/test_private_computation.py | yelixu2/fbpcs | 31b1154bf1a207471fa207a0b0e4c74693f09608 | [
"MIT"
] | null | null | null | fbpcs/private_computation/test/service/test_private_computation.py | yelixu2/fbpcs | 31b1154bf1a207471fa207a0b0e4c74693f09608 | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import unittest
from collections import defaultdict
from typing import List, Optional, Tuple
from unittest.mock import MagicMock, call, patch
from fbpcp.entity.container_instance import ContainerInstance, ContainerInstanceStatus
from fbpcp.service.mpc import MPCInstanceStatus, MPCParty, MPCService
from fbpcp.service.onedocker import OneDockerService
from fbpcs.common.entity.pcs_mpc_instance import PCSMPCInstance
from fbpcs.data_processing.lift_id_combiner.lift_id_spine_combiner_cpp import (
CppLiftIdSpineCombinerService,
)
from fbpcs.data_processing.sharding.sharding_cpp import CppShardingService
from fbpcs.onedocker_binary_config import OneDockerBinaryConfig
from fbpcs.onedocker_binary_names import OneDockerBinaryNames
from fbpcs.onedocker_service_config import OneDockerServiceConfig
from fbpcs.pcf.tests.async_utils import to_sync
from fbpcs.pid.entity.pid_instance import (
PIDInstance,
PIDInstanceStatus,
PIDProtocol,
PIDRole,
)
from fbpcs.pid.service.pid_service.pid import PIDService
from fbpcs.private_computation.entity.private_computation_instance import (
PrivateComputationGameType,
PrivateComputationInstance,
PrivateComputationInstanceStatus,
PrivateComputationRole,
UnionedPCInstance,
)
from fbpcs.private_computation.entity.private_computation_stage_type import (
PrivateComputationStageType,
)
from fbpcs.private_computation.repository.private_computation_game import GameNames
from fbpcs.private_computation.service.errors import (
PrivateComputationServiceValidationError,
)
from fbpcs.private_computation.service.private_computation import (
PrivateComputationService,
NUM_NEW_SHARDS_PER_FILE,
DEFAULT_K_ANONYMITY_THRESHOLD,
)
from fbpcs.private_computation.service.private_computation_stage_service import (
PrivateComputationStageService,
)
# TODO T94666166: libfb won't work in OSS
from libfb.py.asyncio.mock import AsyncMock
from libfb.py.testutil import data_provider
from fbpcs.private_computation.service.utils import (
create_and_start_mpc_instance,
gen_mpc_game_args_to_retry,
map_private_computation_role_to_mpc_party,
DEFAULT_CONTAINER_TIMEOUT_IN_SEC,
)
def test_map_private_computation_role_to_mpc_party(self):
        """PUBLISHER maps to the MPC server role, PARTNER to the client role."""
        self.assertEqual(
            MPCParty.SERVER,
            map_private_computation_role_to_mpc_party(PrivateComputationRole.PUBLISHER),
        )
        self.assertEqual(
            MPCParty.CLIENT,
            map_private_computation_role_to_mpc_party(PrivateComputationRole.PARTNER),
        )
def test_get_status_from_stage(self):
        """_get_status_from_stage translates stage-instance status to PC status."""
        # Test get status from an MPC stage
        mpc_instance = PCSMPCInstance.create_instance(
            instance_id="test_mpc_id",
            game_name=GameNames.SHARD_AGGREGATOR.value,
            mpc_party=MPCParty.SERVER,
            num_workers=2,
            status=MPCInstanceStatus.FAILED,
        )
        # A failed shard-aggregator MPC instance maps to AGGREGATION_FAILED.
        self.assertEqual(
            PrivateComputationInstanceStatus.AGGREGATION_FAILED,
            self.private_computation_service._get_status_from_stage(mpc_instance),
        )
        # Test get status from the PID stage
        pid_instance = PIDInstance(
            instance_id="test_pid_id",
            protocol=PIDProtocol.UNION_PID,
            pid_role=PIDRole.PUBLISHER,
            num_shards=4,
            input_path="input",
            output_path="output",
            stages_containers={},
            stages_status={},
            status=PIDInstanceStatus.COMPLETED,
        )
        # A completed PID instance maps to ID_MATCHING_COMPLETED.
        self.assertEqual(
            PrivateComputationInstanceStatus.ID_MATCHING_COMPLETED,
            self.private_computation_service._get_status_from_stage(pid_instance),
        )
def test_prepare_data(self):
        """prepare_data on a fresh (CREATED) instance combines and shards."""
        private_computation_instance = self.create_sample_instance(
            status=PrivateComputationInstanceStatus.CREATED,
        )
        self.private_computation_service.instance_repository.read = MagicMock(
            return_value=private_computation_instance
        )
        # Patch out the container-launching services so no real work runs.
        with patch.object(
            CppLiftIdSpineCombinerService,
            "combine_on_container_async",
        ) as mock_combine, patch.object(
            CppShardingService,
            "shard_on_container_async",
        ) as mock_shard:
            # call prepare_data
            self.private_computation_service.prepare_data(
                instance_id=self.test_private_computation_id,
                dry_run=True,
            )
            binary_config = self.onedocker_binary_config_map[
                OneDockerBinaryNames.LIFT_ID_SPINE_COMBINER.value
            ]
            # The combiner must be invoked exactly once with the instance's
            # PID-stage paths and the configured binary version.
            mock_combine.assert_called_once_with(
                spine_path=private_computation_instance.pid_stage_output_spine_path,
                data_path=private_computation_instance.pid_stage_output_data_path,
                output_path=private_computation_instance.data_processing_output_path
                + "_combine",
                num_shards=self.test_num_containers,
                onedocker_svc=self.onedocker_service,
                binary_version=binary_config.binary_version,
                tmp_directory=binary_config.tmp_directory,
            )
            mock_shard.assert_called()
def test_prepare_data_tasks_skipped(self):
        """prepare_data is a no-op when recovering from a compute failure."""
        private_computation_instance = self.create_sample_instance(
            status=PrivateComputationInstanceStatus.COMPUTATION_FAILED,
        )
        private_computation_instance.partial_container_retry_enabled = True
        self.private_computation_service.instance_repository.read = MagicMock(
            return_value=private_computation_instance
        )
        with patch.object(
            CppLiftIdSpineCombinerService,
            "combine_on_container_async",
        ) as mock_combine, patch.object(
            CppShardingService,
            "shard_on_container_async",
        ) as mock_shard:
            # call prepare_data
            self.private_computation_service.prepare_data(
                instance_id=self.test_private_computation_id,
            )
            # expect combining and sharding skipped because this private_computation_instance has
            # status PrivateComputationInstanceStatus.COMPUTATION_FAILED, so this run
            # is to recover from a previous compute metrics failure, meaning data
            # preparation should have been done
            mock_combine.assert_not_called()
            mock_shard.assert_not_called()
def test_validate_metrics_results_doesnt_match(self):
        """validate_metrics raises when aggregated and expected results differ.

        The two stubbed storage reads return JSON payloads that differ in
        controlClicks (1 vs 0), so validation must fail.
        """
        self.private_computation_service.pid_svc.storage_svc.read = MagicMock()
        self.private_computation_service.pid_svc.storage_svc.read.side_effect = [
            '{"subGroupMetrics":[],"metrics":{"controlClicks":1,"testSpend":0,"controlImpressions":0,"testImpressions":0,"controlMatchCount":0,"testMatchCount":0,"controlNumConvSquared":0,"testNumConvSquared":0,"testValueSquared":0,"controlValue":0,"testValue":0,"testConverters":0,"testConversions":0,"testPopulation":0,"controlClickers":0,"testClickers":0,"controlReach":0,"testReach":0,"controlSpend":0,"testClicks":0,"controlValueSquared":0,"controlConverters":0,"controlConversions":0,"controlPopulation":0}}',
            '{"subGroupMetrics":[],"metrics":{"testSpend":0,"controlClicks":0,"controlImpressions":0,"testImpressions":0,"controlMatchCount":0,"testMatchCount":0,"controlNumConvSquared":0,"testNumConvSquared":0,"testValueSquared":0,"controlValue":0,"testValue":0,"testConverters":0,"testConversions":0,"testPopulation":0,"controlClickers":0,"testClickers":0,"controlReach":0,"testReach":0,"controlSpend":0,"testClicks":0,"controlValueSquared":0,"controlConverters":0,"controlConversions":0,"controlPopulation":0}}',
        ]
        with self.assertRaises(PrivateComputationServiceValidationError):
            self.private_computation_service.validate_metrics(
                instance_id="test_id",
                aggregated_result_path="aggregated_result_path",
                expected_result_path="expected_result_path",
            )
def test_cancel_current_stage(self):
        """Cancelling a running compute stage marks the instance COMPUTATION_FAILED."""
        test_mpc_id = self.test_private_computation_id + "_compute_metrics"
        test_game_name = GameNames.LIFT.value
        test_mpc_party = MPCParty.CLIENT
        # prepare the pl instance that will be read in to memory from the repository
        # at the beginning of the cancel_current_stage function
        mpc_instance_started = PCSMPCInstance.create_instance(
            instance_id=test_mpc_id,
            game_name=test_game_name,
            mpc_party=test_mpc_party,
            num_workers=self.test_num_containers,
            status=MPCInstanceStatus.STARTED,
        )
        private_computation_instance = self.create_sample_instance(
            status=PrivateComputationInstanceStatus.COMPUTATION_STARTED,
            role=PrivateComputationRole.PARTNER,
            instances=[mpc_instance_started],
        )
        self.private_computation_service.instance_repository.read = MagicMock(
            return_value=private_computation_instance
        )
        # prepare the mpc instance that's returned from mpc_service.stop_instance()
        mpc_instance_canceled = PCSMPCInstance.create_instance(
            instance_id=test_mpc_id,
            game_name=test_game_name,
            mpc_party=test_mpc_party,
            num_workers=self.test_num_containers,
            status=MPCInstanceStatus.CANCELED,
        )
        self.private_computation_service.mpc_svc.stop_instance = MagicMock(
            return_value=mpc_instance_canceled
        )
        self.private_computation_service.mpc_svc.instance_repository.read = MagicMock(
            return_value=mpc_instance_canceled
        )
        # call cancel, expect no exception
        private_computation_instance = (
            self.private_computation_service.cancel_current_stage(
                instance_id=self.test_private_computation_id,
            )
        )
        # assert the pl instance returned has the correct status
        self.assertEqual(
            PrivateComputationInstanceStatus.COMPUTATION_FAILED,
            private_computation_instance.status,
        )
def test_gen_game_args_to_retry(self):
        """Retry args include only the game args of FAILED containers."""
        test_input = "test_input_retry"
        # One failed and one completed container; only the failed one's
        # game args (index 0) should be retried.
        mpc_instance = PCSMPCInstance.create_instance(
            instance_id="mpc_instance",
            game_name=GameNames.LIFT.value,
            mpc_party=MPCParty.SERVER,
            num_workers=2,
            status=MPCInstanceStatus.FAILED,
            containers=[
                ContainerInstance(
                    instance_id="container_instance_0",
                    status=ContainerInstanceStatus.FAILED,
                ),
                ContainerInstance(
                    instance_id="container_instance_1",
                    status=ContainerInstanceStatus.COMPLETED,
                ),
            ],
            game_args=[
                {
                    "input_filenames": test_input,
                },
                {
                    "input_filenames": "input_filenames",
                },
            ],
        )
        private_computation_instance = self.create_sample_instance(
            status=PrivateComputationInstanceStatus.COMPUTATION_FAILED,
            instances=[mpc_instance],
        )
        game_args = gen_mpc_game_args_to_retry(
            private_computation_instance
        )
        self.assertEqual(1, len(game_args)) # only 1 failed container
        self.assertEqual(test_input, game_args[0]["input_filenames"])
def create_sample_instance(
        self,
        status: PrivateComputationInstanceStatus,
        role: PrivateComputationRole = PrivateComputationRole.PUBLISHER,
        instances: Optional[List[UnionedPCInstance]] = None,
    ) -> PrivateComputationInstance:
        """Build a LIFT PrivateComputationInstance fixture with the given
        status/role/stage-instances and the test's shared ids, paths, and
        container counts (from setUp)."""
        return PrivateComputationInstance(
            instance_id=self.test_private_computation_id,
            role=role,
            instances=instances or [],
            status=status,
            status_update_ts=1600000000,
            num_pid_containers=self.test_num_containers,
            num_mpc_containers=self.test_num_containers,
            concurrency=self.test_concurrency,
            num_files_per_mpc_container=NUM_NEW_SHARDS_PER_FILE,
            game_type=PrivateComputationGameType.LIFT,
            input_path=self.test_input_path,
            output_dir=self.test_output_dir,
            fail_fast=True,
            k_anonymity_threshold=DEFAULT_K_ANONYMITY_THRESHOLD,
        )
| 41.558691 | 515 | 0.677222 |
53009d254c6a8eb6bd006c01a55209b99684addf | 174 | py | Python | app.py | Eubule/Store-Manager-With-Datastructure | c21a7307cc59b53516fb40437b1a359ca48a4f2e | [
"MIT"
] | null | null | null | app.py | Eubule/Store-Manager-With-Datastructure | c21a7307cc59b53516fb40437b1a359ca48a4f2e | [
"MIT"
] | 4 | 2018-10-25T07:33:03.000Z | 2019-10-18T17:20:55.000Z | app.py | Eubule/Store-Manager-With-Datastructure | c21a7307cc59b53516fb40437b1a359ca48a4f2e | [
"MIT"
] | null | null | null | from app import app
from app.database.db import Database
if __name__ == "__main__":
db = Database()
db.create_tables()
db.create_admin()
app.run(debug=True) | 19.333333 | 36 | 0.683908 |
5300ca645e4b887401ddf050ab62d54008d81798 | 680 | py | Python | src/main.py | ryuichi1208/scraping-py | 43036dff75cc47d3169e012096f0de70dea0296b | [
"MIT"
] | 2 | 2018-12-19T22:40:19.000Z | 2019-06-19T06:38:18.000Z | src/main.py | ryuichi1208/scraping_py | 43036dff75cc47d3169e012096f0de70dea0296b | [
"MIT"
] | null | null | null | src/main.py | ryuichi1208/scraping_py | 43036dff75cc47d3169e012096f0de70dea0296b | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
# flake8: noqa
from flask import Flask
from flask_themes2 import Themes
import config
from util.auth import is_admin
from util.converter import RegexConverter
from util.csrf import generate_csrf_token
app = Flask(__name__.split('.')[0])
app.secret_key = config.SECRET_KEY
app.url_map.converters['regex'] = RegexConverter
app.jinja_env.globals['config'] = config
app.jinja_env.globals['csrf_token'] = generate_csrf_token
app.jinja_env.globals['is_admin'] = is_admin
Themes(app, app_identifier='yelplove')
# if debug property is present, let's use it
try:
app.debug = config.DEBUG
except AttributeError:
app.debug = False
import views
| 24.285714 | 57 | 0.757353 |
5302a9b7f4d36ed1d8c39a9e74b3775344df1bd4 | 2,028 | py | Python | HoursSelect.py | Maxahoy/ClassVolumeSilencer | 9a05f9dd4efbbbddc74377a27027fa40b2167d02 | [
"MIT"
] | null | null | null | HoursSelect.py | Maxahoy/ClassVolumeSilencer | 9a05f9dd4efbbbddc74377a27027fa40b2167d02 | [
"MIT"
] | null | null | null | HoursSelect.py | Maxahoy/ClassVolumeSilencer | 9a05f9dd4efbbbddc74377a27027fa40b2167d02 | [
"MIT"
] | null | null | null | """
This is how I'm gonna schedule hours
IDEA: import the format example file that I'm using and is saved in the same directory
"""
import csv
import pprint
from tkinter import *
from tkinter.filedialog import askopenfilename
import StringProcessing
"""
Receives a file location, opens the csv
The format looks like this:
CLASS STARTS,Class name (optional),MON,TUES,WED,THURS,FRI,,CLASS ENDS,MON,TUES,WED,THURS,FRI
1, Stats, 10:20:00 AM,,10:20:00 AM,,10:20:00 AM,,,11:15:00 AM,,11:15:00 AM,,11:15:00 AM
2,,,09:35:00 AM,,09:35:00 AM,,,,,10:55:00 AM,,10:55:00 AM,
3,,,11:30:00 AM,11:30:00 AM,11:30:00 AM,11:30:00 AM,,,,12:25:00 PM,12:25:00 PM,12:25:00 PM,12:25:00 PM
4,,,,,,09:10:00 AM,,,,,,,10:05:00 AM
5,,12:00:00 PM,01:00:00 PM,01:00:00 PM,01:00:00 PM,01:00:00 PM,,,,04:30:00 PM,04:30:00 PM,04:30:00 PM,04:30:00 PM
6,,,,,,,,,,,,,
7,,,,,,,,,,,,,
8,,,,,,,,,,,,,
9,,,,,,,,,,,,,
10,,,,,,,,,,,,,
11,,,,,,,,,,,,,
12,,,,,,,,,,,,,
13,,,,,,,,,,,,,
14,,,,,,,,,,,,,
15,,,,,,,,,,,,,
"""
| 28.971429 | 113 | 0.607988 |
5305fe341e34b46ded5f2b3c486985048c12d0b5 | 415 | py | Python | src/bsmu/bone_age/models/dense_net/configs.py | IvanKosik/bone-age-models | 07f20a94951a3b584ee7b6d9a11805c37878214a | [
"BSD-3-Clause"
] | null | null | null | src/bsmu/bone_age/models/dense_net/configs.py | IvanKosik/bone-age-models | 07f20a94951a3b584ee7b6d9a11805c37878214a | [
"BSD-3-Clause"
] | null | null | null | src/bsmu/bone_age/models/dense_net/configs.py | IvanKosik/bone-age-models | 07f20a94951a3b584ee7b6d9a11805c37878214a | [
"BSD-3-Clause"
] | null | null | null | from pathlib import Path
from bsmu.bone_age.models import constants
IMAGE_DIR = Path('C:/MyDiskBackup/Projects/BoneAge/Data/SmallImages500_NoPads')
TRAIN_DATA_CSV_PATH = constants.TRAIN_DATA_CSV_PATH
VALID_DATA_CSV_PATH = constants.VALID_DATA_CSV_PATH
TEST_DATA_CSV_PATH = constants.TEST_DATA_CSV_PATH
BATCH_SIZE = 7
MODEL_NAME_PREFIX = 'DenseNet169'
MODEL_NAME_POSTFIX = 'AllImages3_MoreAugments'
| 29.642857 | 80 | 0.826506 |
53066ef029d7bd3ef7be8bb9baad9578898d6c71 | 2,325 | py | Python | projection.py | ogawan/nisa | d758e41e4983cc35477e81d944689b0226f00ef5 | [
"MIT"
] | null | null | null | projection.py | ogawan/nisa | d758e41e4983cc35477e81d944689b0226f00ef5 | [
"MIT"
] | null | null | null | projection.py | ogawan/nisa | d758e41e4983cc35477e81d944689b0226f00ef5 | [
"MIT"
] | null | null | null | from matplotlib import pyplot as plt
def nisa_projection(years=30, annual_deposit=80, initial_budget=100):
"""
This is a function to plot deposit of TSUMITATE NISA
Parameters:
---------------
years: integer
How many years are you going to continue?
annual_depoist: integer
Annual deposit into the NISA account.
initial_budget: integer
The initial budget.
Returns:
--------------
matplotlib figure
"""
for j in [1.00,1.01, 1.02, 1.03, 1.04, 1.05]:
original = initial_budget
ganbon = []
box = []
for i in range(0,years):
if i == 0:
box.append(original)
ganbon.append(original)
gan = ganbon[-1] + annual_deposit
original = original * j + annual_deposit
if i > 0:
box.append(original)
ganbon.append(gan)
plt.scatter(list(range(0,years)), box)
plt.legend(["0%", "1%", "2%", "3%", "4%", "5%"])
plt.xlabel("Years")
plt.ylabel("Money (Man yen)")
# Reference: https://plotly.com/python/figure-labels/
import pandas as pd
import plotly.graph_objects as go
def nisa_projection_plotly(years=30, annual_deposit=80, initial_budget=100):
"""
This is a function to plot deposit of TSUMITATE NISA
Parameters:
---------------
years: integer
How many years are you going to continue?
annual_depoist: integer
Annual deposit into the NISA account.
initial_budget: integer
The initial budget.
Returns:
--------------
plotly figures.
"""
dic_ = {}
for j in [1.00,1.01, 1.02, 1.03, 1.04, 1.05]:
original = initial_budget
ganbon = []
box = []
for i in range(0,years):
if i == 0:
box.append(original)
ganbon.append(original)
gan = ganbon[-1] + annual_deposit
original = original * j + annual_deposit
if i > 0:
box.append(original)
ganbon.append(gan)
dic_["{} %".format(str(j)[-1])] = box
df = pd.DataFrame(dic_)
fig = go.Figure()
for i in df.columns:
fig.add_trace(go.Scatter(x=df.index, y=df[i],name=i))
fig.update_layout(
title="NISA PLOT",
xaxis_title="Years",
yaxis_title="Man Yen",
width=500,
height=400,
)
fig.show()
nisa_projection(30, 80, 100)
nisa_projection_plotly(30, 80, 100)
| 21.728972 | 76 | 0.594409 |
530685b38b63bb864c23c036e780f7efc9f20c41 | 440,655 | py | Python | tensorflow-ops-generator/resources/gen_ops/gen_math_ops.py | wumo/sim-world | 2a3a5118239b27eeb268cd1e7bdbfe5f5604dab6 | [
"MIT"
] | 1 | 2019-01-12T13:17:32.000Z | 2019-01-12T13:17:32.000Z | rcnn/lib/python3.6/site-packages/tensorflow/python/ops/gen_math_ops.py | dreamingweaver/making_passportImage | 68f23411780ff82abe934dfae5fc04acb80f2c49 | [
"MIT"
] | null | null | null | rcnn/lib/python3.6/site-packages/tensorflow/python/ops/gen_math_ops.py | dreamingweaver/making_passportImage | 68f23411780ff82abe934dfae5fc04acb80f2c49 | [
"MIT"
] | null | null | null | """Python wrappers around TensorFlow ops.
This file is MACHINE GENERATED! Do not edit.
Original C++ source file: math_ops.cc
"""
import collections as _collections
import six as _six
from tensorflow.python import pywrap_tensorflow as _pywrap_tensorflow
from tensorflow.python.eager import context as _context
from tensorflow.python.eager import core as _core
from tensorflow.python.eager import execute as _execute
from tensorflow.python.framework import dtypes as _dtypes
from tensorflow.python.framework import errors as _errors
from tensorflow.python.framework import tensor_shape as _tensor_shape
from tensorflow.core.framework import op_def_pb2 as _op_def_pb2
# Needed to trigger the call to _set_call_cpp_shape_fn.
from tensorflow.python.framework import common_shapes as _common_shapes
from tensorflow.python.framework import op_def_registry as _op_def_registry
from tensorflow.python.framework import ops as _ops
from tensorflow.python.framework import op_def_library as _op_def_library
from tensorflow.python.util.tf_export import tf_export
def _abs(x, name=None):
r"""Computes the absolute value of a tensor.
Given a tensor `x`, this operation returns a tensor containing the absolute
value of each element in `x`. For example, if x is an input element and y is
an output element, this operation computes \\(y = |x|\\).
Args:
x: A `Tensor`. Must be one of the following types: `bfloat16`, `half`, `float32`, `float64`, `int32`, `int64`.
name: A name for the operation (optional).
Returns:
A `Tensor`. Has the same type as `x`.
"""
_ctx = _context._context
if _ctx is None or not _ctx._eager_context.is_eager:
_, _, _op = _op_def_lib._apply_op_helper(
"Abs", x=x, name=name)
_result = _op.outputs[:]
_inputs_flat = _op.inputs
_attrs = ("T", _op.get_attr("T"))
_execute.record_gradient(
"Abs", _inputs_flat, _attrs, _result, name)
_result, = _result
return _result
else:
try:
_result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
_ctx._context_handle, _ctx._eager_context.device_name, "Abs", name,
_ctx._post_execution_callbacks, x)
return _result
except _core._FallbackException:
return _abs_eager_fallback(
x, name=name, ctx=_ctx)
except _core._NotOkStatusException as e:
if name is not None:
message = e.message + " name: " + name
else:
message = e.message
_six.raise_from(_core._status_to_exception(e.code, message), None)
def _abs_eager_fallback(x, name=None, ctx=None):
r"""This is the slowpath function for Eager mode.
This is for function _abs
"""
_ctx = ctx if ctx else _context.context()
_attr_T, (x,) = _execute.args_to_matching_eager([x], _ctx)
_inputs_flat = [x]
_attrs = ("T", _attr_T)
_result = _execute.execute(b"Abs", 1, inputs=_inputs_flat, attrs=_attrs,
ctx=_ctx, name=name)
_execute.record_gradient(
"Abs", _inputs_flat, _attrs, _result, name)
_result, = _result
return _result
def accumulate_nv2(inputs, shape, name=None):
r"""Returns the element-wise sum of a list of tensors.
`tf.accumulate_n_v2` performs the same operation as `tf.add_n`, but does not
wait for all of its inputs to be ready before beginning to sum. This can
save memory if inputs are ready at different times, since minimum temporary
storage is proportional to the output size rather than the inputs size.
Unlike the original `accumulate_n`, `accumulate_n_v2` is differentiable.
Returns a `Tensor` of same shape and type as the elements of `inputs`.
Args:
inputs: A list of at least 1 `Tensor` objects with the same type in: `float32`, `float64`, `int32`, `uint8`, `int16`, `int8`, `complex64`, `int64`, `qint8`, `quint8`, `qint32`, `bfloat16`, `uint16`, `complex128`, `half`, `uint32`, `uint64`.
A list of `Tensor` objects, each with same shape and type.
shape: A `tf.TensorShape` or list of `ints`.
Shape of elements of `inputs`.
name: A name for the operation (optional).
Returns:
A `Tensor`. Has the same type as `inputs`.
"""
_ctx = _context._context
if _ctx is None or not _ctx._eager_context.is_eager:
if not isinstance(inputs, (list, tuple)):
raise TypeError(
"Expected list for 'inputs' argument to "
"'accumulate_nv2' Op, not %r." % inputs)
_attr_N = len(inputs)
shape = _execute.make_shape(shape, "shape")
_, _, _op = _op_def_lib._apply_op_helper(
"AccumulateNV2", inputs=inputs, shape=shape, name=name)
_result = _op.outputs[:]
_inputs_flat = _op.inputs
_attrs = ("N", _op.get_attr("N"), "T", _op.get_attr("T"), "shape",
_op.get_attr("shape"))
_execute.record_gradient(
"AccumulateNV2", _inputs_flat, _attrs, _result, name)
_result, = _result
return _result
else:
try:
_result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
_ctx._context_handle, _ctx._eager_context.device_name,
"AccumulateNV2", name, _ctx._post_execution_callbacks, inputs,
"shape", shape)
return _result
except _core._FallbackException:
return accumulate_nv2_eager_fallback(
inputs, shape=shape, name=name, ctx=_ctx)
except _core._NotOkStatusException as e:
if name is not None:
message = e.message + " name: " + name
else:
message = e.message
_six.raise_from(_core._status_to_exception(e.code, message), None)
def accumulate_nv2_eager_fallback(inputs, shape, name=None, ctx=None):
r"""This is the slowpath function for Eager mode.
This is for function accumulate_nv2
"""
_ctx = ctx if ctx else _context.context()
if not isinstance(inputs, (list, tuple)):
raise TypeError(
"Expected list for 'inputs' argument to "
"'accumulate_nv2' Op, not %r." % inputs)
_attr_N = len(inputs)
shape = _execute.make_shape(shape, "shape")
_attr_T, inputs = _execute.args_to_matching_eager(list(inputs), _ctx)
_inputs_flat = list(inputs)
_attrs = ("N", _attr_N, "T", _attr_T, "shape", shape)
_result = _execute.execute(b"AccumulateNV2", 1, inputs=_inputs_flat,
attrs=_attrs, ctx=_ctx, name=name)
_execute.record_gradient(
"AccumulateNV2", _inputs_flat, _attrs, _result, name)
_result, = _result
return _result
def acos_eager_fallback(x, name=None, ctx=None):
r"""This is the slowpath function for Eager mode.
This is for function acos
"""
_ctx = ctx if ctx else _context.context()
_attr_T, (x,) = _execute.args_to_matching_eager([x], _ctx)
_inputs_flat = [x]
_attrs = ("T", _attr_T)
_result = _execute.execute(b"Acos", 1, inputs=_inputs_flat, attrs=_attrs,
ctx=_ctx, name=name)
_execute.record_gradient(
"Acos", _inputs_flat, _attrs, _result, name)
_result, = _result
return _result
def acosh_eager_fallback(x, name=None, ctx=None):
r"""This is the slowpath function for Eager mode.
This is for function acosh
"""
_ctx = ctx if ctx else _context.context()
_attr_T, (x,) = _execute.args_to_matching_eager([x], _ctx)
_inputs_flat = [x]
_attrs = ("T", _attr_T)
_result = _execute.execute(b"Acosh", 1, inputs=_inputs_flat, attrs=_attrs,
ctx=_ctx, name=name)
_execute.record_gradient(
"Acosh", _inputs_flat, _attrs, _result, name)
_result, = _result
return _result
def add_eager_fallback(x, y, name=None, ctx=None):
r"""This is the slowpath function for Eager mode.
This is for function add
"""
_ctx = ctx if ctx else _context.context()
_attr_T, _inputs_T = _execute.args_to_matching_eager([x, y], _ctx)
(x, y) = _inputs_T
_inputs_flat = [x, y]
_attrs = ("T", _attr_T)
_result = _execute.execute(b"Add", 1, inputs=_inputs_flat, attrs=_attrs,
ctx=_ctx, name=name)
_execute.record_gradient(
"Add", _inputs_flat, _attrs, _result, name)
_result, = _result
return _result
def add_n(inputs, name=None):
r"""Add all input tensors element wise.
Args:
inputs: A list of at least 1 `Tensor` objects with the same type in: `float32`, `float64`, `int32`, `uint8`, `int16`, `int8`, `complex64`, `int64`, `qint8`, `quint8`, `qint32`, `bfloat16`, `uint16`, `complex128`, `half`, `uint32`, `uint64`, `variant`.
Must all be the same size and shape.
name: A name for the operation (optional).
Returns:
A `Tensor`. Has the same type as `inputs`.
"""
_ctx = _context._context
if _ctx is None or not _ctx._eager_context.is_eager:
if not isinstance(inputs, (list, tuple)):
raise TypeError(
"Expected list for 'inputs' argument to "
"'add_n' Op, not %r." % inputs)
_attr_N = len(inputs)
_, _, _op = _op_def_lib._apply_op_helper(
"AddN", inputs=inputs, name=name)
_result = _op.outputs[:]
_inputs_flat = _op.inputs
_attrs = ("N", _op.get_attr("N"), "T", _op.get_attr("T"))
_execute.record_gradient(
"AddN", _inputs_flat, _attrs, _result, name)
_result, = _result
return _result
else:
try:
_result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
_ctx._context_handle, _ctx._eager_context.device_name, "AddN", name,
_ctx._post_execution_callbacks, inputs)
return _result
except _core._FallbackException:
return add_n_eager_fallback(
inputs, name=name, ctx=_ctx)
except _core._NotOkStatusException as e:
if name is not None:
message = e.message + " name: " + name
else:
message = e.message
_six.raise_from(_core._status_to_exception(e.code, message), None)
def add_n_eager_fallback(inputs, name=None, ctx=None):
r"""This is the slowpath function for Eager mode.
This is for function add_n
"""
_ctx = ctx if ctx else _context.context()
if not isinstance(inputs, (list, tuple)):
raise TypeError(
"Expected list for 'inputs' argument to "
"'add_n' Op, not %r." % inputs)
_attr_N = len(inputs)
_attr_T, inputs = _execute.args_to_matching_eager(list(inputs), _ctx)
_inputs_flat = list(inputs)
_attrs = ("N", _attr_N, "T", _attr_T)
_result = _execute.execute(b"AddN", 1, inputs=_inputs_flat, attrs=_attrs,
ctx=_ctx, name=name)
_execute.record_gradient(
"AddN", _inputs_flat, _attrs, _result, name)
_result, = _result
return _result
def add_v2(x, y, name=None):
r"""Returns x + y element-wise.
*NOTE*: `Add` supports broadcasting. `AddN` does not. More about broadcasting
[here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html)
Args:
x: A `Tensor`. Must be one of the following types: `bfloat16`, `half`, `float32`, `float64`, `uint8`, `int8`, `int16`, `int32`, `int64`, `complex64`, `complex128`.
y: A `Tensor`. Must have the same type as `x`.
name: A name for the operation (optional).
Returns:
A `Tensor`. Has the same type as `x`.
"""
_ctx = _context._context
if _ctx is None or not _ctx._eager_context.is_eager:
_, _, _op = _op_def_lib._apply_op_helper(
"AddV2", x=x, y=y, name=name)
_result = _op.outputs[:]
_inputs_flat = _op.inputs
_attrs = ("T", _op.get_attr("T"))
_execute.record_gradient(
"AddV2", _inputs_flat, _attrs, _result, name)
_result, = _result
return _result
else:
try:
_result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
_ctx._context_handle, _ctx._eager_context.device_name, "AddV2", name,
_ctx._post_execution_callbacks, x, y)
return _result
except _core._FallbackException:
return add_v2_eager_fallback(
x, y, name=name, ctx=_ctx)
except _core._NotOkStatusException as e:
if name is not None:
message = e.message + " name: " + name
else:
message = e.message
_six.raise_from(_core._status_to_exception(e.code, message), None)
def add_v2_eager_fallback(x, y, name=None, ctx=None):
r"""This is the slowpath function for Eager mode.
This is for function add_v2
"""
_ctx = ctx if ctx else _context.context()
_attr_T, _inputs_T = _execute.args_to_matching_eager([x, y], _ctx)
(x, y) = _inputs_T
_inputs_flat = [x, y]
_attrs = ("T", _attr_T)
_result = _execute.execute(b"AddV2", 1, inputs=_inputs_flat, attrs=_attrs,
ctx=_ctx, name=name)
_execute.record_gradient(
"AddV2", _inputs_flat, _attrs, _result, name)
_result, = _result
return _result
def _all(input, axis, keep_dims=False, name=None):
r"""Computes the "logical and" of elements across dimensions of a tensor.
Reduces `input` along the dimensions given in `axis`. Unless
`keep_dims` is true, the rank of the tensor is reduced by 1 for each entry in
`axis`. If `keep_dims` is true, the reduced dimensions are
retained with length 1.
Args:
input: A `Tensor` of type `bool`. The tensor to reduce.
axis: A `Tensor`. Must be one of the following types: `int32`, `int64`.
The dimensions to reduce. Must be in the range
`[-rank(input), rank(input))`.
keep_dims: An optional `bool`. Defaults to `False`.
If true, retain reduced dimensions with length 1.
name: A name for the operation (optional).
Returns:
A `Tensor` of type `bool`.
"""
_ctx = _context._context
if _ctx is None or not _ctx._eager_context.is_eager:
if keep_dims is None:
keep_dims = False
keep_dims = _execute.make_bool(keep_dims, "keep_dims")
_, _, _op = _op_def_lib._apply_op_helper(
"All", input=input, reduction_indices=axis, keep_dims=keep_dims,
name=name)
_result = _op.outputs[:]
_inputs_flat = _op.inputs
_attrs = ("keep_dims", _op.get_attr("keep_dims"), "Tidx",
_op.get_attr("Tidx"))
_execute.record_gradient(
"All", _inputs_flat, _attrs, _result, name)
_result, = _result
return _result
else:
try:
_result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
_ctx._context_handle, _ctx._eager_context.device_name, "All", name,
_ctx._post_execution_callbacks, input, axis, "keep_dims", keep_dims)
return _result
except _core._FallbackException:
return _all_eager_fallback(
input, axis, keep_dims=keep_dims, name=name, ctx=_ctx)
except _core._NotOkStatusException as e:
if name is not None:
message = e.message + " name: " + name
else:
message = e.message
_six.raise_from(_core._status_to_exception(e.code, message), None)
def _all_eager_fallback(input, axis, keep_dims=False, name=None, ctx=None):
r"""This is the slowpath function for Eager mode.
This is for function _all
"""
_ctx = ctx if ctx else _context.context()
if keep_dims is None:
keep_dims = False
keep_dims = _execute.make_bool(keep_dims, "keep_dims")
_attr_Tidx, (axis,) = _execute.args_to_matching_eager([axis], _ctx, _dtypes.int32)
input = _ops.convert_to_tensor(input, _dtypes.bool)
_inputs_flat = [input, axis]
_attrs = ("keep_dims", keep_dims, "Tidx", _attr_Tidx)
_result = _execute.execute(b"All", 1, inputs=_inputs_flat, attrs=_attrs,
ctx=_ctx, name=name)
_execute.record_gradient(
"All", _inputs_flat, _attrs, _result, name)
_result, = _result
return _result
def angle(input, Tout=_dtypes.float32, name=None):
r"""Returns the argument of a complex number.
Given a tensor `input` of complex numbers, this operation returns a tensor of
type `float` that is the argument of each element in `input`. All elements in
`input` must be complex numbers of the form \\(a + bj\\), where *a*
is the real part and *b* is the imaginary part.
The argument returned by this operation is of the form \\(atan2(b, a)\\).
For example:
```
# tensor 'input' is [-2.25 + 4.75j, 3.25 + 5.75j]
tf.angle(input) ==> [2.0132, 1.056]
```
@compatibility(numpy)
Equivalent to np.angle.
@end_compatibility
Args:
input: A `Tensor`. Must be one of the following types: `complex64`, `complex128`.
Tout: An optional `tf.DType` from: `tf.float32, tf.float64`. Defaults to `tf.float32`.
name: A name for the operation (optional).
Returns:
A `Tensor` of type `Tout`.
"""
_ctx = _context._context
if _ctx is None or not _ctx._eager_context.is_eager:
if Tout is None:
Tout = _dtypes.float32
Tout = _execute.make_type(Tout, "Tout")
_, _, _op = _op_def_lib._apply_op_helper(
"Angle", input=input, Tout=Tout, name=name)
_result = _op.outputs[:]
_inputs_flat = _op.inputs
_attrs = ("T", _op.get_attr("T"), "Tout", _op.get_attr("Tout"))
_execute.record_gradient(
"Angle", _inputs_flat, _attrs, _result, name)
_result, = _result
return _result
else:
try:
_result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
_ctx._context_handle, _ctx._eager_context.device_name, "Angle", name,
_ctx._post_execution_callbacks, input, "Tout", Tout)
return _result
except _core._FallbackException:
return angle_eager_fallback(
input, Tout=Tout, name=name, ctx=_ctx)
except _core._NotOkStatusException as e:
if name is not None:
message = e.message + " name: " + name
else:
message = e.message
_six.raise_from(_core._status_to_exception(e.code, message), None)
def angle_eager_fallback(input, Tout=_dtypes.float32, name=None, ctx=None):
r"""This is the slowpath function for Eager mode.
This is for function angle
"""
_ctx = ctx if ctx else _context.context()
if Tout is None:
Tout = _dtypes.float32
Tout = _execute.make_type(Tout, "Tout")
_attr_T, (input,) = _execute.args_to_matching_eager([input], _ctx, _dtypes.complex64)
_inputs_flat = [input]
_attrs = ("T", _attr_T, "Tout", Tout)
_result = _execute.execute(b"Angle", 1, inputs=_inputs_flat, attrs=_attrs,
ctx=_ctx, name=name)
_execute.record_gradient(
"Angle", _inputs_flat, _attrs, _result, name)
_result, = _result
return _result
def _any(input, axis, keep_dims=False, name=None):
r"""Computes the "logical or" of elements across dimensions of a tensor.
Reduces `input` along the dimensions given in `axis`. Unless
`keep_dims` is true, the rank of the tensor is reduced by 1 for each entry in
`axis`. If `keep_dims` is true, the reduced dimensions are
retained with length 1.
Args:
input: A `Tensor` of type `bool`. The tensor to reduce.
axis: A `Tensor`. Must be one of the following types: `int32`, `int64`.
The dimensions to reduce. Must be in the range
`[-rank(input), rank(input))`.
keep_dims: An optional `bool`. Defaults to `False`.
If true, retain reduced dimensions with length 1.
name: A name for the operation (optional).
Returns:
A `Tensor` of type `bool`.
"""
_ctx = _context._context
if _ctx is None or not _ctx._eager_context.is_eager:
if keep_dims is None:
keep_dims = False
keep_dims = _execute.make_bool(keep_dims, "keep_dims")
_, _, _op = _op_def_lib._apply_op_helper(
"Any", input=input, reduction_indices=axis, keep_dims=keep_dims,
name=name)
_result = _op.outputs[:]
_inputs_flat = _op.inputs
_attrs = ("keep_dims", _op.get_attr("keep_dims"), "Tidx",
_op.get_attr("Tidx"))
_execute.record_gradient(
"Any", _inputs_flat, _attrs, _result, name)
_result, = _result
return _result
else:
try:
_result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
_ctx._context_handle, _ctx._eager_context.device_name, "Any", name,
_ctx._post_execution_callbacks, input, axis, "keep_dims", keep_dims)
return _result
except _core._FallbackException:
return _any_eager_fallback(
input, axis, keep_dims=keep_dims, name=name, ctx=_ctx)
except _core._NotOkStatusException as e:
if name is not None:
message = e.message + " name: " + name
else:
message = e.message
_six.raise_from(_core._status_to_exception(e.code, message), None)
def _any_eager_fallback(input, axis, keep_dims=False, name=None, ctx=None):
r"""This is the slowpath function for Eager mode.
This is for function _any
"""
_ctx = ctx if ctx else _context.context()
if keep_dims is None:
keep_dims = False
keep_dims = _execute.make_bool(keep_dims, "keep_dims")
_attr_Tidx, (axis,) = _execute.args_to_matching_eager([axis], _ctx, _dtypes.int32)
input = _ops.convert_to_tensor(input, _dtypes.bool)
_inputs_flat = [input, axis]
_attrs = ("keep_dims", keep_dims, "Tidx", _attr_Tidx)
_result = _execute.execute(b"Any", 1, inputs=_inputs_flat, attrs=_attrs,
ctx=_ctx, name=name)
_execute.record_gradient(
"Any", _inputs_flat, _attrs, _result, name)
_result, = _result
return _result
def approximate_equal(x, y, tolerance=1e-05, name=None):
r"""Returns the truth value of abs(x-y) < tolerance element-wise.
Args:
x: A `Tensor`. Must be one of the following types: `float32`, `float64`, `int32`, `uint8`, `int16`, `int8`, `complex64`, `int64`, `qint8`, `quint8`, `qint32`, `bfloat16`, `uint16`, `complex128`, `half`, `uint32`, `uint64`.
y: A `Tensor`. Must have the same type as `x`.
tolerance: An optional `float`. Defaults to `1e-05`.
name: A name for the operation (optional).
Returns:
A `Tensor` of type `bool`.
"""
_ctx = _context._context
if _ctx is None or not _ctx._eager_context.is_eager:
if tolerance is None:
tolerance = 1e-05
tolerance = _execute.make_float(tolerance, "tolerance")
_, _, _op = _op_def_lib._apply_op_helper(
"ApproximateEqual", x=x, y=y, tolerance=tolerance, name=name)
_result = _op.outputs[:]
_inputs_flat = _op.inputs
_attrs = ("T", _op.get_attr("T"), "tolerance", _op.get_attr("tolerance"))
_execute.record_gradient(
"ApproximateEqual", _inputs_flat, _attrs, _result, name)
_result, = _result
return _result
else:
try:
_result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
_ctx._context_handle, _ctx._eager_context.device_name,
"ApproximateEqual", name, _ctx._post_execution_callbacks, x, y,
"tolerance", tolerance)
return _result
except _core._FallbackException:
return approximate_equal_eager_fallback(
x, y, tolerance=tolerance, name=name, ctx=_ctx)
except _core._NotOkStatusException as e:
if name is not None:
message = e.message + " name: " + name
else:
message = e.message
_six.raise_from(_core._status_to_exception(e.code, message), None)
def approximate_equal_eager_fallback(x, y, tolerance=1e-05, name=None, ctx=None):
r"""This is the slowpath function for Eager mode.
This is for function approximate_equal
"""
_ctx = ctx if ctx else _context.context()
if tolerance is None:
tolerance = 1e-05
tolerance = _execute.make_float(tolerance, "tolerance")
_attr_T, _inputs_T = _execute.args_to_matching_eager([x, y], _ctx)
(x, y) = _inputs_T
_inputs_flat = [x, y]
_attrs = ("T", _attr_T, "tolerance", tolerance)
_result = _execute.execute(b"ApproximateEqual", 1, inputs=_inputs_flat,
attrs=_attrs, ctx=_ctx, name=name)
_execute.record_gradient(
"ApproximateEqual", _inputs_flat, _attrs, _result, name)
_result, = _result
return _result
def arg_max(input, dimension, output_type=_dtypes.int64, name=None):
r"""Returns the index with the largest value across dimensions of a tensor.
Note that in case of ties the identity of the return value is not guaranteed.
Args:
input: A `Tensor`. Must be one of the following types: `float32`, `float64`, `int32`, `uint8`, `int16`, `int8`, `complex64`, `int64`, `qint8`, `quint8`, `qint32`, `bfloat16`, `uint16`, `complex128`, `half`, `uint32`, `uint64`.
dimension: A `Tensor`. Must be one of the following types: `int32`, `int64`.
int32 or int64, must be in the range `[-rank(input), rank(input))`.
Describes which dimension of the input Tensor to reduce across. For vectors,
use dimension = 0.
output_type: An optional `tf.DType` from: `tf.int32, tf.int64`. Defaults to `tf.int64`.
name: A name for the operation (optional).
Returns:
A `Tensor` of type `output_type`.
"""
_ctx = _context._context
if _ctx is None or not _ctx._eager_context.is_eager:
if output_type is None:
output_type = _dtypes.int64
output_type = _execute.make_type(output_type, "output_type")
_, _, _op = _op_def_lib._apply_op_helper(
"ArgMax", input=input, dimension=dimension, output_type=output_type,
name=name)
_result = _op.outputs[:]
_inputs_flat = _op.inputs
_attrs = ("T", _op.get_attr("T"), "Tidx", _op.get_attr("Tidx"),
"output_type", _op.get_attr("output_type"))
_execute.record_gradient(
"ArgMax", _inputs_flat, _attrs, _result, name)
_result, = _result
return _result
else:
try:
_result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
_ctx._context_handle, _ctx._eager_context.device_name, "ArgMax", name,
_ctx._post_execution_callbacks, input, dimension, "output_type",
output_type)
return _result
except _core._FallbackException:
return arg_max_eager_fallback(
input, dimension, output_type=output_type, name=name, ctx=_ctx)
except _core._NotOkStatusException as e:
if name is not None:
message = e.message + " name: " + name
else:
message = e.message
_six.raise_from(_core._status_to_exception(e.code, message), None)
def arg_max_eager_fallback(input, dimension, output_type=_dtypes.int64, name=None, ctx=None):
r"""This is the slowpath function for Eager mode.
This is for function arg_max
"""
_ctx = ctx if ctx else _context.context()
if output_type is None:
output_type = _dtypes.int64
output_type = _execute.make_type(output_type, "output_type")
_attr_T, (input,) = _execute.args_to_matching_eager([input], _ctx)
_attr_Tidx, (dimension,) = _execute.args_to_matching_eager([dimension], _ctx, _dtypes.int32)
_inputs_flat = [input, dimension]
_attrs = ("T", _attr_T, "Tidx", _attr_Tidx, "output_type", output_type)
_result = _execute.execute(b"ArgMax", 1, inputs=_inputs_flat, attrs=_attrs,
ctx=_ctx, name=name)
_execute.record_gradient(
"ArgMax", _inputs_flat, _attrs, _result, name)
_result, = _result
return _result
def arg_min(input, dimension, output_type=_dtypes.int64, name=None):
r"""Returns the index with the smallest value across dimensions of a tensor.
Note that in case of ties the identity of the return value is not guaranteed.
Args:
input: A `Tensor`. Must be one of the following types: `float32`, `float64`, `int32`, `uint8`, `int16`, `int8`, `complex64`, `int64`, `qint8`, `quint8`, `qint32`, `bfloat16`, `uint16`, `complex128`, `half`, `uint32`, `uint64`.
dimension: A `Tensor`. Must be one of the following types: `int32`, `int64`.
int32 or int64, must be in the range `[-rank(input), rank(input))`.
Describes which dimension of the input Tensor to reduce across. For vectors,
use dimension = 0.
output_type: An optional `tf.DType` from: `tf.int32, tf.int64`. Defaults to `tf.int64`.
name: A name for the operation (optional).
Returns:
A `Tensor` of type `output_type`.
"""
_ctx = _context._context
if _ctx is None or not _ctx._eager_context.is_eager:
if output_type is None:
output_type = _dtypes.int64
output_type = _execute.make_type(output_type, "output_type")
_, _, _op = _op_def_lib._apply_op_helper(
"ArgMin", input=input, dimension=dimension, output_type=output_type,
name=name)
_result = _op.outputs[:]
_inputs_flat = _op.inputs
_attrs = ("T", _op.get_attr("T"), "Tidx", _op.get_attr("Tidx"),
"output_type", _op.get_attr("output_type"))
_execute.record_gradient(
"ArgMin", _inputs_flat, _attrs, _result, name)
_result, = _result
return _result
else:
try:
_result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
_ctx._context_handle, _ctx._eager_context.device_name, "ArgMin", name,
_ctx._post_execution_callbacks, input, dimension, "output_type",
output_type)
return _result
except _core._FallbackException:
return arg_min_eager_fallback(
input, dimension, output_type=output_type, name=name, ctx=_ctx)
except _core._NotOkStatusException as e:
if name is not None:
message = e.message + " name: " + name
else:
message = e.message
_six.raise_from(_core._status_to_exception(e.code, message), None)
def arg_min_eager_fallback(input, dimension, output_type=_dtypes.int64, name=None, ctx=None):
r"""This is the slowpath function for Eager mode.
This is for function arg_min
"""
_ctx = ctx if ctx else _context.context()
if output_type is None:
output_type = _dtypes.int64
output_type = _execute.make_type(output_type, "output_type")
_attr_T, (input,) = _execute.args_to_matching_eager([input], _ctx)
_attr_Tidx, (dimension,) = _execute.args_to_matching_eager([dimension], _ctx, _dtypes.int32)
_inputs_flat = [input, dimension]
_attrs = ("T", _attr_T, "Tidx", _attr_Tidx, "output_type", output_type)
_result = _execute.execute(b"ArgMin", 1, inputs=_inputs_flat, attrs=_attrs,
ctx=_ctx, name=name)
_execute.record_gradient(
"ArgMin", _inputs_flat, _attrs, _result, name)
_result, = _result
return _result
def asin_eager_fallback(x, name=None, ctx=None):
r"""This is the slowpath function for Eager mode.
This is for function asin
"""
_ctx = ctx if ctx else _context.context()
_attr_T, (x,) = _execute.args_to_matching_eager([x], _ctx)
_inputs_flat = [x]
_attrs = ("T", _attr_T)
_result = _execute.execute(b"Asin", 1, inputs=_inputs_flat, attrs=_attrs,
ctx=_ctx, name=name)
_execute.record_gradient(
"Asin", _inputs_flat, _attrs, _result, name)
_result, = _result
return _result
def asinh_eager_fallback(x, name=None, ctx=None):
  r"""Slow-path eager executor for the Asinh op.
  Invoked when the C fast path for `asinh` raises a fallback; resolves
  the `T` dtype attribute in Python and dispatches through the generic
  execute machinery.
  """
  run_ctx = ctx or _context.context()
  attr_t, (x,) = _execute.args_to_matching_eager([x], run_ctx)
  flat_inputs = [x]
  op_attrs = ("T", attr_t)
  outputs = _execute.execute(b"Asinh", 1, inputs=flat_inputs,
                             attrs=op_attrs, ctx=run_ctx, name=name)
  _execute.record_gradient("Asinh", flat_inputs, op_attrs, outputs, name)
  final, = outputs
  return final
def atan_eager_fallback(x, name=None, ctx=None):
  r"""Slow-path eager executor for the Atan op.
  Invoked when the C fast path for `atan` raises a fallback; resolves
  the `T` dtype attribute in Python and dispatches through the generic
  execute machinery.
  """
  run_ctx = ctx or _context.context()
  attr_t, (x,) = _execute.args_to_matching_eager([x], run_ctx)
  flat_inputs = [x]
  op_attrs = ("T", attr_t)
  outputs = _execute.execute(b"Atan", 1, inputs=flat_inputs,
                             attrs=op_attrs, ctx=run_ctx, name=name)
  _execute.record_gradient("Atan", flat_inputs, op_attrs, outputs, name)
  final, = outputs
  return final
def atan2_eager_fallback(y, x, name=None, ctx=None):
  r"""Slow-path eager executor for the Atan2 op.
  Invoked when the C fast path for `atan2` raises a fallback; coerces
  `y` and `x` to one matching dtype, then dispatches through the generic
  execute machinery.
  """
  run_ctx = ctx or _context.context()
  attr_t, matched = _execute.args_to_matching_eager([y, x], run_ctx)
  y, x = matched
  flat_inputs = [y, x]
  op_attrs = ("T", attr_t)
  outputs = _execute.execute(b"Atan2", 1, inputs=flat_inputs,
                             attrs=op_attrs, ctx=run_ctx, name=name)
  _execute.record_gradient("Atan2", flat_inputs, op_attrs, outputs, name)
  final, = outputs
  return final
def atanh_eager_fallback(x, name=None, ctx=None):
  r"""Slow-path eager executor for the Atanh op.
  Invoked when the C fast path for `atanh` raises a fallback; resolves
  the `T` dtype attribute in Python and dispatches through the generic
  execute machinery.
  """
  run_ctx = ctx or _context.context()
  attr_t, (x,) = _execute.args_to_matching_eager([x], run_ctx)
  flat_inputs = [x]
  op_attrs = ("T", attr_t)
  outputs = _execute.execute(b"Atanh", 1, inputs=flat_inputs,
                             attrs=op_attrs, ctx=run_ctx, name=name)
  _execute.record_gradient("Atanh", flat_inputs, op_attrs, outputs, name)
  final, = outputs
  return final
def batch_mat_mul(x, y, adj_x=False, adj_y=False, name=None):
  r"""Multiplies slices of two tensors in batches.
  Multiplies all slices of `Tensor` `x` and `y` (each slice can be
  viewed as an element of a batch), and arranges the individual results
  in a single output tensor of the same batch size. Each of the
  individual slices can optionally be adjointed (to adjoint a matrix
  means to transpose and conjugate it) before multiplication by setting
  the `adj_x` or `adj_y` flag to `True`, which are by default `False`.
  The input tensors `x` and `y` are 2-D or higher with shape `[..., r_x, c_x]`
  and `[..., r_y, c_y]`.
  The output tensor is 2-D or higher with shape `[..., r_o, c_o]`, where:
      r_o = c_x if adj_x else r_x
      c_o = r_y if adj_y else c_y
  It is computed as:
      output[..., :, :] = matrix(x[..., :, :]) * matrix(y[..., :, :])
  Args:
    x: A `Tensor`. Must be one of the following types: `bfloat16`, `half`, `float32`, `float64`, `int32`, `complex64`, `complex128`.
      2-D or higher with shape `[..., r_x, c_x]`.
    y: A `Tensor`. Must have the same type as `x`.
      2-D or higher with shape `[..., r_y, c_y]`.
    adj_x: An optional `bool`. Defaults to `False`.
      If `True`, adjoint the slices of `x`. Defaults to `False`.
    adj_y: An optional `bool`. Defaults to `False`.
      If `True`, adjoint the slices of `y`. Defaults to `False`.
    name: A name for the operation (optional).
  Returns:
    A `Tensor`. Has the same type as `x`.
  """
  _ctx = _context._context
  # Graph mode (no active eager context): build a BatchMatMul graph node.
  if _ctx is None or not _ctx._eager_context.is_eager:
    if adj_x is None:
      adj_x = False
    adj_x = _execute.make_bool(adj_x, "adj_x")
    if adj_y is None:
      adj_y = False
    adj_y = _execute.make_bool(adj_y, "adj_y")
    _, _, _op = _op_def_lib._apply_op_helper(
        "BatchMatMul", x=x, y=y, adj_x=adj_x, adj_y=adj_y, name=name)
    _result = _op.outputs[:]
    _inputs_flat = _op.inputs
    _attrs = ("T", _op.get_attr("T"), "adj_x", _op.get_attr("adj_x"), "adj_y",
              _op.get_attr("adj_y"))
    _execute.record_gradient(
      "BatchMatMul", _inputs_flat, _attrs, _result, name)
    _result, = _result
    return _result
  else:
    # Eager mode: attempt the C fast path first; attr names and values are
    # interleaved positionally after the inputs.
    try:
      _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
        _ctx._context_handle, _ctx._eager_context.device_name, "BatchMatMul",
        name, _ctx._post_execution_callbacks, x, y, "adj_x", adj_x, "adj_y",
        adj_y)
      return _result
    except _core._FallbackException:
      # Fast path rejected these inputs; retry via the Python slow path.
      return batch_mat_mul_eager_fallback(
          x, y, adj_x=adj_x, adj_y=adj_y, name=name, ctx=_ctx)
    except _core._NotOkStatusException as e:
      if name is not None:
        message = e.message + " name: " + name
      else:
        message = e.message
      # Surface the C-layer status as a Python exception, suppressing context.
      _six.raise_from(_core._status_to_exception(e.code, message), None)
def batch_mat_mul_eager_fallback(x, y, adj_x=False, adj_y=False, name=None, ctx=None):
  r"""Slow-path eager executor for the BatchMatMul op.
  Invoked when the C fast path for `batch_mat_mul` raises a fallback;
  normalizes the bool attrs, coerces `x` and `y` to a matching dtype,
  and dispatches through the generic execute machinery.
  """
  run_ctx = ctx or _context.context()
  adj_x = _execute.make_bool(False if adj_x is None else adj_x, "adj_x")
  adj_y = _execute.make_bool(False if adj_y is None else adj_y, "adj_y")
  attr_t, matched = _execute.args_to_matching_eager([x, y], run_ctx)
  x, y = matched
  flat_inputs = [x, y]
  op_attrs = ("T", attr_t, "adj_x", adj_x, "adj_y", adj_y)
  outputs = _execute.execute(b"BatchMatMul", 1, inputs=flat_inputs,
                             attrs=op_attrs, ctx=run_ctx, name=name)
  _execute.record_gradient("BatchMatMul", flat_inputs, op_attrs, outputs, name)
  final, = outputs
  return final
def bessel_i0e(x, name=None):
  r"""Computes the Bessel i0e function of `x` element-wise.
  Exponentially scaled modified Bessel function of order 0 defined as
  `bessel_i0e(x) = exp(-abs(x)) bessel_i0(x)`.
  This function is faster and numerically stabler than `bessel_i0(x)`.
  Args:
    x: A `Tensor`. Must be one of the following types: `bfloat16`, `half`, `float32`, `float64`.
    name: A name for the operation (optional).
  Returns:
    A `Tensor`. Has the same type as `x`.
  """
  _ctx = _context._context
  # Graph mode (no active eager context): build a BesselI0e graph node.
  if _ctx is None or not _ctx._eager_context.is_eager:
    _, _, _op = _op_def_lib._apply_op_helper(
        "BesselI0e", x=x, name=name)
    _result = _op.outputs[:]
    _inputs_flat = _op.inputs
    _attrs = ("T", _op.get_attr("T"))
    _execute.record_gradient(
      "BesselI0e", _inputs_flat, _attrs, _result, name)
    _result, = _result
    return _result
  else:
    # Eager mode: attempt the C fast path first.
    try:
      _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
        _ctx._context_handle, _ctx._eager_context.device_name, "BesselI0e",
        name, _ctx._post_execution_callbacks, x)
      return _result
    except _core._FallbackException:
      # Fast path rejected these inputs; retry via the Python slow path.
      return bessel_i0e_eager_fallback(
          x, name=name, ctx=_ctx)
    except _core._NotOkStatusException as e:
      if name is not None:
        message = e.message + " name: " + name
      else:
        message = e.message
      # Surface the C-layer status as a Python exception, suppressing context.
      _six.raise_from(_core._status_to_exception(e.code, message), None)
def bessel_i0e_eager_fallback(x, name=None, ctx=None):
  r"""Slow-path eager executor for the BesselI0e op.
  Invoked when the C fast path for `bessel_i0e` raises a fallback;
  resolves the `T` dtype attribute in Python and dispatches through the
  generic execute machinery.
  """
  run_ctx = ctx or _context.context()
  attr_t, (x,) = _execute.args_to_matching_eager([x], run_ctx)
  flat_inputs = [x]
  op_attrs = ("T", attr_t)
  outputs = _execute.execute(b"BesselI0e", 1, inputs=flat_inputs,
                             attrs=op_attrs, ctx=run_ctx, name=name)
  _execute.record_gradient("BesselI0e", flat_inputs, op_attrs, outputs, name)
  final, = outputs
  return final
def bessel_i1e(x, name=None):
  r"""Computes the Bessel i1e function of `x` element-wise.
  Exponentially scaled modified Bessel function of order 0 defined as
  `bessel_i1e(x) = exp(-abs(x)) bessel_i1(x)`.
  This function is faster and numerically stabler than `bessel_i1(x)`.
  Args:
    x: A `Tensor`. Must be one of the following types: `bfloat16`, `half`, `float32`, `float64`.
    name: A name for the operation (optional).
  Returns:
    A `Tensor`. Has the same type as `x`.
  """
  _ctx = _context._context
  # Graph mode (no active eager context): build a BesselI1e graph node.
  if _ctx is None or not _ctx._eager_context.is_eager:
    _, _, _op = _op_def_lib._apply_op_helper(
        "BesselI1e", x=x, name=name)
    _result = _op.outputs[:]
    _inputs_flat = _op.inputs
    _attrs = ("T", _op.get_attr("T"))
    _execute.record_gradient(
      "BesselI1e", _inputs_flat, _attrs, _result, name)
    _result, = _result
    return _result
  else:
    # Eager mode: attempt the C fast path first.
    try:
      _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
        _ctx._context_handle, _ctx._eager_context.device_name, "BesselI1e",
        name, _ctx._post_execution_callbacks, x)
      return _result
    except _core._FallbackException:
      # Fast path rejected these inputs; retry via the Python slow path.
      return bessel_i1e_eager_fallback(
          x, name=name, ctx=_ctx)
    except _core._NotOkStatusException as e:
      if name is not None:
        message = e.message + " name: " + name
      else:
        message = e.message
      # Surface the C-layer status as a Python exception, suppressing context.
      _six.raise_from(_core._status_to_exception(e.code, message), None)
def bessel_i1e_eager_fallback(x, name=None, ctx=None):
  r"""Slow-path eager executor for the BesselI1e op.
  Invoked when the C fast path for `bessel_i1e` raises a fallback;
  resolves the `T` dtype attribute in Python and dispatches through the
  generic execute machinery.
  """
  run_ctx = ctx or _context.context()
  attr_t, (x,) = _execute.args_to_matching_eager([x], run_ctx)
  flat_inputs = [x]
  op_attrs = ("T", attr_t)
  outputs = _execute.execute(b"BesselI1e", 1, inputs=flat_inputs,
                             attrs=op_attrs, ctx=run_ctx, name=name)
  _execute.record_gradient("BesselI1e", flat_inputs, op_attrs, outputs, name)
  final, = outputs
  return final
def betainc_eager_fallback(a, b, x, name=None, ctx=None):
  r"""Slow-path eager executor for the Betainc op.
  Invoked when the C fast path for `betainc` raises a fallback; coerces
  `a`, `b` and `x` to one matching dtype, then dispatches through the
  generic execute machinery.
  """
  run_ctx = ctx or _context.context()
  attr_t, matched = _execute.args_to_matching_eager([a, b, x], run_ctx)
  a, b, x = matched
  flat_inputs = [a, b, x]
  op_attrs = ("T", attr_t)
  outputs = _execute.execute(b"Betainc", 1, inputs=flat_inputs,
                             attrs=op_attrs, ctx=run_ctx, name=name)
  _execute.record_gradient("Betainc", flat_inputs, op_attrs, outputs, name)
  final, = outputs
  return final
def bincount(arr, size, weights, name=None):
  r"""Counts the number of occurrences of each value in an integer array.
  Outputs a vector with length `size` and the same dtype as `weights`. If
  `weights` are empty, then index `i` stores the number of times the value `i` is
  counted in `arr`. If `weights` are non-empty, then index `i` stores the sum of
  the value in `weights` at each index where the corresponding value in `arr` is
  `i`.
  Values in `arr` outside of the range [0, size) are ignored.
  Args:
    arr: A `Tensor` of type `int32`. int32 `Tensor`.
    size: A `Tensor` of type `int32`. non-negative int32 scalar `Tensor`.
    weights: A `Tensor`. Must be one of the following types: `int32`, `int64`, `float32`, `float64`.
      is an int32, int64, float32, or float64 `Tensor` with the same
      shape as `arr`, or a length-0 `Tensor`, in which case it acts as all weights
      equal to 1.
    name: A name for the operation (optional).
  Returns:
    A `Tensor`. Has the same type as `weights`.
  """
  _ctx = _context._context
  # Graph mode (no active eager context): build a Bincount graph node.
  if _ctx is None or not _ctx._eager_context.is_eager:
    _, _, _op = _op_def_lib._apply_op_helper(
        "Bincount", arr=arr, size=size, weights=weights, name=name)
    _result = _op.outputs[:]
    _inputs_flat = _op.inputs
    _attrs = ("T", _op.get_attr("T"))
    _execute.record_gradient(
      "Bincount", _inputs_flat, _attrs, _result, name)
    _result, = _result
    return _result
  else:
    # Eager mode: attempt the C fast path first.
    try:
      _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
        _ctx._context_handle, _ctx._eager_context.device_name, "Bincount",
        name, _ctx._post_execution_callbacks, arr, size, weights)
      return _result
    except _core._FallbackException:
      # Fast path rejected these inputs; retry via the Python slow path.
      return bincount_eager_fallback(
          arr, size, weights, name=name, ctx=_ctx)
    except _core._NotOkStatusException as e:
      if name is not None:
        message = e.message + " name: " + name
      else:
        message = e.message
      # Surface the C-layer status as a Python exception, suppressing context.
      _six.raise_from(_core._status_to_exception(e.code, message), None)
def bincount_eager_fallback(arr, size, weights, name=None, ctx=None):
  r"""Slow-path eager executor for the Bincount op.
  Invoked when the C fast path for `bincount` raises a fallback;
  `arr` and `size` are fixed int32 inputs while the `T` attribute is
  derived from `weights`.
  """
  run_ctx = ctx or _context.context()
  attr_t, (weights,) = _execute.args_to_matching_eager([weights], run_ctx)
  arr = _ops.convert_to_tensor(arr, _dtypes.int32)
  size = _ops.convert_to_tensor(size, _dtypes.int32)
  flat_inputs = [arr, size, weights]
  op_attrs = ("T", attr_t)
  outputs = _execute.execute(b"Bincount", 1, inputs=flat_inputs,
                             attrs=op_attrs, ctx=run_ctx, name=name)
  _execute.record_gradient("Bincount", flat_inputs, op_attrs, outputs, name)
  final, = outputs
  return final
def bucketize(input, boundaries, name=None):
  r"""Bucketizes 'input' based on 'boundaries'.
  For example, if the inputs are
      boundaries = [0, 10, 100]
      input = [[-5, 10000]
               [150, 10]
               [5, 100]]
  then the output will be
      output = [[0, 3]
                [3, 2]
                [1, 3]]
  Args:
    input: A `Tensor`. Must be one of the following types: `int32`, `int64`, `float32`, `float64`.
      Any shape of Tensor contains with int or float type.
    boundaries: A list of `floats`.
      A sorted list of floats gives the boundary of the buckets.
    name: A name for the operation (optional).
  Returns:
    A `Tensor` of type `int32`.
  """
  _ctx = _context._context
  # Graph mode (no active eager context): validate attrs and build a node.
  if _ctx is None or not _ctx._eager_context.is_eager:
    if not isinstance(boundaries, (list, tuple)):
      raise TypeError(
          "Expected list for 'boundaries' argument to "
          "'bucketize' Op, not %r." % boundaries)
    # Coerce every boundary to a float attr value.
    boundaries = [_execute.make_float(_f, "boundaries") for _f in boundaries]
    _, _, _op = _op_def_lib._apply_op_helper(
        "Bucketize", input=input, boundaries=boundaries, name=name)
    _result = _op.outputs[:]
    _inputs_flat = _op.inputs
    _attrs = ("T", _op.get_attr("T"), "boundaries",
              _op.get_attr("boundaries"))
    _execute.record_gradient(
      "Bucketize", _inputs_flat, _attrs, _result, name)
    _result, = _result
    return _result
  else:
    # Eager mode: attempt the C fast path first.
    try:
      _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
        _ctx._context_handle, _ctx._eager_context.device_name, "Bucketize",
        name, _ctx._post_execution_callbacks, input, "boundaries", boundaries)
      return _result
    except _core._FallbackException:
      # Fast path rejected these inputs; retry via the Python slow path.
      return bucketize_eager_fallback(
          input, boundaries=boundaries, name=name, ctx=_ctx)
    except _core._NotOkStatusException as e:
      if name is not None:
        message = e.message + " name: " + name
      else:
        message = e.message
      # Surface the C-layer status as a Python exception, suppressing context.
      _six.raise_from(_core._status_to_exception(e.code, message), None)
def bucketize_eager_fallback(input, boundaries, name=None, ctx=None):
  r"""Slow-path eager executor for the Bucketize op.
  Invoked when the C fast path for `bucketize` raises a fallback;
  validates and normalizes the `boundaries` attr, resolves the `T`
  dtype attribute, and dispatches through the generic execute machinery.
  """
  run_ctx = ctx or _context.context()
  if not isinstance(boundaries, (list, tuple)):
    raise TypeError(
        "Expected list for 'boundaries' argument to "
        "'bucketize' Op, not %r." % boundaries)
  boundaries = [_execute.make_float(edge, "boundaries") for edge in boundaries]
  attr_t, (input,) = _execute.args_to_matching_eager([input], run_ctx)
  flat_inputs = [input]
  op_attrs = ("T", attr_t, "boundaries", boundaries)
  outputs = _execute.execute(b"Bucketize", 1, inputs=flat_inputs,
                             attrs=op_attrs, ctx=run_ctx, name=name)
  _execute.record_gradient("Bucketize", flat_inputs, op_attrs, outputs, name)
  final, = outputs
  return final
def cast(x, DstT, name=None):
  r"""Cast x of type SrcT to y of DstT.
  Args:
    x: A `Tensor`.
    DstT: A `tf.DType`.
    name: A name for the operation (optional).
  Returns:
    A `Tensor` of type `DstT`.
  """
  _ctx = _context._context
  # Graph mode (no active eager context): build a Cast graph node.
  if _ctx is None or not _ctx._eager_context.is_eager:
    DstT = _execute.make_type(DstT, "DstT")
    _, _, _op = _op_def_lib._apply_op_helper(
        "Cast", x=x, DstT=DstT, name=name)
    _result = _op.outputs[:]
    _inputs_flat = _op.inputs
    _attrs = ("SrcT", _op.get_attr("SrcT"), "DstT", _op.get_attr("DstT"))
    _execute.record_gradient(
      "Cast", _inputs_flat, _attrs, _result, name)
    _result, = _result
    return _result
  else:
    # Eager mode: attempt the C fast path first.
    try:
      _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
        _ctx._context_handle, _ctx._eager_context.device_name, "Cast", name,
        _ctx._post_execution_callbacks, x, "DstT", DstT)
      return _result
    except _core._FallbackException:
      # Fast path rejected these inputs; retry via the Python slow path.
      return cast_eager_fallback(
          x, DstT=DstT, name=name, ctx=_ctx)
    except _core._NotOkStatusException as e:
      if name is not None:
        message = e.message + " name: " + name
      else:
        message = e.message
      # Surface the C-layer status as a Python exception, suppressing context.
      _six.raise_from(_core._status_to_exception(e.code, message), None)
def cast_eager_fallback(x, DstT, name=None, ctx=None):
  r"""Slow-path eager executor for the Cast op.
  Invoked when the C fast path for `cast` raises a fallback; derives the
  `SrcT` attribute from `x` and normalizes the `DstT` attr, then
  dispatches through the generic execute machinery.
  """
  run_ctx = ctx or _context.context()
  DstT = _execute.make_type(DstT, "DstT")
  attr_src, (x,) = _execute.args_to_matching_eager([x], run_ctx)
  flat_inputs = [x]
  op_attrs = ("SrcT", attr_src, "DstT", DstT)
  outputs = _execute.execute(b"Cast", 1, inputs=flat_inputs,
                             attrs=op_attrs, ctx=run_ctx, name=name)
  _execute.record_gradient("Cast", flat_inputs, op_attrs, outputs, name)
  final, = outputs
  return final
def ceil_eager_fallback(x, name=None, ctx=None):
  r"""Slow-path eager executor for the Ceil op.
  Invoked when the C fast path for `ceil` raises a fallback; resolves
  the `T` dtype attribute in Python and dispatches through the generic
  execute machinery.
  """
  run_ctx = ctx or _context.context()
  attr_t, (x,) = _execute.args_to_matching_eager([x], run_ctx)
  flat_inputs = [x]
  op_attrs = ("T", attr_t)
  outputs = _execute.execute(b"Ceil", 1, inputs=flat_inputs,
                             attrs=op_attrs, ctx=run_ctx, name=name)
  _execute.record_gradient("Ceil", flat_inputs, op_attrs, outputs, name)
  final, = outputs
  return final
def _clip_by_value(t, clip_value_min, clip_value_max, name=None):
  r"""Clips tensor values to a specified min and max.
  Given a tensor `t`, this operation returns a tensor of the same type and
  shape as `t` with its values clipped to `clip_value_min` and `clip_value_max`.
  Any values less than `clip_value_min` are set to `clip_value_min`. Any values
  greater than `clip_value_max` are set to `clip_value_max`.
  Args:
    t: A `Tensor`. Must be one of the following types: `float32`, `float64`, `int32`, `uint8`, `int16`, `int8`, `complex64`, `int64`, `qint8`, `quint8`, `qint32`, `bfloat16`, `uint16`, `complex128`, `half`, `uint32`, `uint64`.
      A `Tensor`.
    clip_value_min: A `Tensor`. Must have the same type as `t`.
      A 0-D (scalar) `Tensor`, or a `Tensor` with the same shape
      as `t`. The minimum value to clip by.
    clip_value_max: A `Tensor`. Must have the same type as `t`.
      A 0-D (scalar) `Tensor`, or a `Tensor` with the same shape
      as `t`. The maximum value to clip by.
    name: A name for the operation (optional).
  Returns:
    A `Tensor`. Has the same type as `t`.
  """
  _ctx = _context._context
  # Graph mode (no active eager context): build a ClipByValue graph node.
  if _ctx is None or not _ctx._eager_context.is_eager:
    _, _, _op = _op_def_lib._apply_op_helper(
        "ClipByValue", t=t, clip_value_min=clip_value_min,
        clip_value_max=clip_value_max, name=name)
    _result = _op.outputs[:]
    _inputs_flat = _op.inputs
    _attrs = ("T", _op.get_attr("T"))
    _execute.record_gradient(
      "ClipByValue", _inputs_flat, _attrs, _result, name)
    _result, = _result
    return _result
  else:
    # Eager mode: attempt the C fast path first.
    try:
      _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
        _ctx._context_handle, _ctx._eager_context.device_name, "ClipByValue",
        name, _ctx._post_execution_callbacks, t, clip_value_min,
        clip_value_max)
      return _result
    except _core._FallbackException:
      # Fast path rejected these inputs; retry via the Python slow path.
      return _clip_by_value_eager_fallback(
          t, clip_value_min, clip_value_max, name=name, ctx=_ctx)
    except _core._NotOkStatusException as e:
      if name is not None:
        message = e.message + " name: " + name
      else:
        message = e.message
      # Surface the C-layer status as a Python exception, suppressing context.
      _six.raise_from(_core._status_to_exception(e.code, message), None)
def _clip_by_value_eager_fallback(t, clip_value_min, clip_value_max, name=None, ctx=None):
  r"""Slow-path eager executor for the ClipByValue op.
  Invoked when the C fast path for `_clip_by_value` raises a fallback;
  coerces all three inputs to one matching dtype, then dispatches
  through the generic execute machinery.
  """
  run_ctx = ctx or _context.context()
  attr_t, matched = _execute.args_to_matching_eager(
      [t, clip_value_min, clip_value_max], run_ctx)
  t, clip_value_min, clip_value_max = matched
  flat_inputs = [t, clip_value_min, clip_value_max]
  op_attrs = ("T", attr_t)
  outputs = _execute.execute(b"ClipByValue", 1, inputs=flat_inputs,
                             attrs=op_attrs, ctx=run_ctx, name=name)
  _execute.record_gradient("ClipByValue", flat_inputs, op_attrs, outputs, name)
  final, = outputs
  return final
def compare_and_bitpack(input, threshold, name=None):
  r"""Compare values of `input` to `threshold` and pack resulting bits into a `uint8`.
  Each comparison returns a boolean `true` (if `input_value > threshold`)
  or and `false` otherwise.
  This operation is useful for Locality-Sensitive-Hashing (LSH) and other
  algorithms that use hashing approximations of cosine and `L2` distances;
  codes can be generated from an input via:
  ```python
  codebook_size = 50
  codebook_bits = codebook_size * 32
  codebook = tf.get_variable('codebook', [x.shape[-1].value, codebook_bits],
                             dtype=x.dtype,
                             initializer=tf.orthogonal_initializer())
  codes = compare_and_threshold(tf.matmul(x, codebook), threshold=0.)
  codes = tf.bitcast(codes, tf.int32)  # go from uint8 to int32
  # now codes has shape x.shape[:-1] + [codebook_size]
  ```
  **NOTE**: Currently, the innermost dimension of the tensor must be divisible
  by 8.
  Given an `input` shaped `[s0, s1, ..., s_n]`, the output is
  a `uint8` tensor shaped `[s0, s1, ..., s_n / 8]`.
  Args:
    input: A `Tensor`. Must be one of the following types: `bool`, `half`, `float32`, `float64`, `int8`, `int16`, `int32`, `int64`.
      Values to compare against `threshold` and bitpack.
    threshold: A `Tensor`. Must have the same type as `input`.
      Threshold to compare against.
    name: A name for the operation (optional).
  Returns:
    A `Tensor` of type `uint8`.
  """
  _ctx = _context._context
  # Graph mode (no active eager context): build a CompareAndBitpack node.
  if _ctx is None or not _ctx._eager_context.is_eager:
    _, _, _op = _op_def_lib._apply_op_helper(
        "CompareAndBitpack", input=input, threshold=threshold, name=name)
    _result = _op.outputs[:]
    _inputs_flat = _op.inputs
    _attrs = ("T", _op.get_attr("T"))
    _execute.record_gradient(
      "CompareAndBitpack", _inputs_flat, _attrs, _result, name)
    _result, = _result
    return _result
  else:
    # Eager mode: attempt the C fast path first.
    try:
      _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
        _ctx._context_handle, _ctx._eager_context.device_name,
        "CompareAndBitpack", name, _ctx._post_execution_callbacks, input,
        threshold)
      return _result
    except _core._FallbackException:
      # Fast path rejected these inputs; retry via the Python slow path.
      return compare_and_bitpack_eager_fallback(
          input, threshold, name=name, ctx=_ctx)
    except _core._NotOkStatusException as e:
      if name is not None:
        message = e.message + " name: " + name
      else:
        message = e.message
      # Surface the C-layer status as a Python exception, suppressing context.
      _six.raise_from(_core._status_to_exception(e.code, message), None)
def compare_and_bitpack_eager_fallback(input, threshold, name=None, ctx=None):
  r"""Slow-path eager executor for the CompareAndBitpack op.
  Invoked when the C fast path for `compare_and_bitpack` raises a
  fallback; coerces `input` and `threshold` to one matching dtype, then
  dispatches through the generic execute machinery.
  """
  run_ctx = ctx or _context.context()
  attr_t, matched = _execute.args_to_matching_eager([input, threshold], run_ctx)
  input, threshold = matched
  flat_inputs = [input, threshold]
  op_attrs = ("T", attr_t)
  outputs = _execute.execute(b"CompareAndBitpack", 1, inputs=flat_inputs,
                             attrs=op_attrs, ctx=run_ctx, name=name)
  _execute.record_gradient("CompareAndBitpack", flat_inputs, op_attrs,
                           outputs, name)
  final, = outputs
  return final
def _complex(real, imag, Tout=_dtypes.complex64, name=None):
  r"""Converts two real numbers to a complex number.
  Given a tensor `real` representing the real part of a complex number, and a
  tensor `imag` representing the imaginary part of a complex number, this
  operation returns complex numbers elementwise of the form \\(a + bj\\), where
  *a* represents the `real` part and *b* represents the `imag` part.
  The input tensors `real` and `imag` must have the same shape.
  For example:
  ```
  # tensor 'real' is [2.25, 3.25]
  # tensor `imag` is [4.75, 5.75]
  tf.complex(real, imag) ==> [[2.25 + 4.75j], [3.25 + 5.75j]]
  ```
  Args:
    real: A `Tensor`. Must be one of the following types: `float32`, `float64`.
    imag: A `Tensor`. Must have the same type as `real`.
    Tout: An optional `tf.DType` from: `tf.complex64, tf.complex128`. Defaults to `tf.complex64`.
    name: A name for the operation (optional).
  Returns:
    A `Tensor` of type `Tout`.
  """
  _ctx = _context._context
  # Graph mode (no active eager context): build a Complex graph node.
  if _ctx is None or not _ctx._eager_context.is_eager:
    if Tout is None:
      Tout = _dtypes.complex64
    Tout = _execute.make_type(Tout, "Tout")
    _, _, _op = _op_def_lib._apply_op_helper(
        "Complex", real=real, imag=imag, Tout=Tout, name=name)
    _result = _op.outputs[:]
    _inputs_flat = _op.inputs
    _attrs = ("T", _op.get_attr("T"), "Tout", _op.get_attr("Tout"))
    _execute.record_gradient(
      "Complex", _inputs_flat, _attrs, _result, name)
    _result, = _result
    return _result
  else:
    # Eager mode: attempt the C fast path first.
    try:
      _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
        _ctx._context_handle, _ctx._eager_context.device_name, "Complex",
        name, _ctx._post_execution_callbacks, real, imag, "Tout", Tout)
      return _result
    except _core._FallbackException:
      # Fast path rejected these inputs; retry via the Python slow path.
      return _complex_eager_fallback(
          real, imag, Tout=Tout, name=name, ctx=_ctx)
    except _core._NotOkStatusException as e:
      if name is not None:
        message = e.message + " name: " + name
      else:
        message = e.message
      # Surface the C-layer status as a Python exception, suppressing context.
      _six.raise_from(_core._status_to_exception(e.code, message), None)
def _complex_eager_fallback(real, imag, Tout=_dtypes.complex64, name=None, ctx=None):
  r"""Slow-path eager executor for the Complex op.
  Invoked when the C fast path for `_complex` raises a fallback;
  coerces `real` and `imag` to one matching dtype (float32 by default)
  and dispatches through the generic execute machinery.
  """
  run_ctx = ctx or _context.context()
  if Tout is None:
    Tout = _dtypes.complex64
  Tout = _execute.make_type(Tout, "Tout")
  attr_t, matched = _execute.args_to_matching_eager(
      [real, imag], run_ctx, _dtypes.float32)
  real, imag = matched
  flat_inputs = [real, imag]
  op_attrs = ("T", attr_t, "Tout", Tout)
  outputs = _execute.execute(b"Complex", 1, inputs=flat_inputs,
                             attrs=op_attrs, ctx=run_ctx, name=name)
  _execute.record_gradient("Complex", flat_inputs, op_attrs, outputs, name)
  final, = outputs
  return final
def complex_abs(x, Tout=_dtypes.float32, name=None):
  r"""Computes the complex absolute value of a tensor.
  Given a tensor `x` of complex numbers, this operation returns a tensor of type
  `float` or `double` that is the absolute value of each element in `x`. All
  elements in `x` must be complex numbers of the form \\(a + bj\\). The absolute
  value is computed as \\( \sqrt{a^2 + b^2}\\).
  Args:
    x: A `Tensor`. Must be one of the following types: `complex64`, `complex128`.
    Tout: An optional `tf.DType` from: `tf.float32, tf.float64`. Defaults to `tf.float32`.
    name: A name for the operation (optional).
  Returns:
    A `Tensor` of type `Tout`.
  """
  _ctx = _context._context
  # Graph mode (no active eager context): build a ComplexAbs graph node.
  if _ctx is None or not _ctx._eager_context.is_eager:
    if Tout is None:
      Tout = _dtypes.float32
    Tout = _execute.make_type(Tout, "Tout")
    _, _, _op = _op_def_lib._apply_op_helper(
        "ComplexAbs", x=x, Tout=Tout, name=name)
    _result = _op.outputs[:]
    _inputs_flat = _op.inputs
    _attrs = ("T", _op.get_attr("T"), "Tout", _op.get_attr("Tout"))
    _execute.record_gradient(
      "ComplexAbs", _inputs_flat, _attrs, _result, name)
    _result, = _result
    return _result
  else:
    # Eager mode: attempt the C fast path first.
    try:
      _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
        _ctx._context_handle, _ctx._eager_context.device_name, "ComplexAbs",
        name, _ctx._post_execution_callbacks, x, "Tout", Tout)
      return _result
    except _core._FallbackException:
      # Fast path rejected these inputs; retry via the Python slow path.
      return complex_abs_eager_fallback(
          x, Tout=Tout, name=name, ctx=_ctx)
    except _core._NotOkStatusException as e:
      if name is not None:
        message = e.message + " name: " + name
      else:
        message = e.message
      # Surface the C-layer status as a Python exception, suppressing context.
      _six.raise_from(_core._status_to_exception(e.code, message), None)
def complex_abs_eager_fallback(x, Tout=_dtypes.float32, name=None, ctx=None):
  r"""Slow-path eager executor for the ComplexAbs op.
  Invoked when the C fast path for `complex_abs` raises a fallback;
  resolves the `T` attribute (complex64 by default) and the `Tout`
  attr, then dispatches through the generic execute machinery.
  """
  run_ctx = ctx or _context.context()
  if Tout is None:
    Tout = _dtypes.float32
  Tout = _execute.make_type(Tout, "Tout")
  attr_t, (x,) = _execute.args_to_matching_eager([x], run_ctx, _dtypes.complex64)
  flat_inputs = [x]
  op_attrs = ("T", attr_t, "Tout", Tout)
  outputs = _execute.execute(b"ComplexAbs", 1, inputs=flat_inputs,
                             attrs=op_attrs, ctx=run_ctx, name=name)
  _execute.record_gradient("ComplexAbs", flat_inputs, op_attrs, outputs, name)
  final, = outputs
  return final
def conj(input, name=None):
  r"""Returns the complex conjugate of a complex number.
  Given a tensor `input` of complex numbers, this operation returns a tensor of
  complex numbers that are the complex conjugate of each element in `input`. The
  complex numbers in `input` must be of the form \\(a + bj\\), where *a* is the
  real part and *b* is the imaginary part.
  The complex conjugate returned by this operation is of the form \\(a - bj\\).
  For example:
  ```
  # tensor 'input' is [-2.25 + 4.75j, 3.25 + 5.75j]
  tf.conj(input) ==> [-2.25 - 4.75j, 3.25 - 5.75j]
  ```
  Args:
    input: A `Tensor`. Must be one of the following types: `complex64`, `complex128`, `variant`.
    name: A name for the operation (optional).
  Returns:
    A `Tensor`. Has the same type as `input`.
  """
  _ctx = _context._context
  # Graph mode (no active eager context): build a Conj graph node.
  if _ctx is None or not _ctx._eager_context.is_eager:
    _, _, _op = _op_def_lib._apply_op_helper(
        "Conj", input=input, name=name)
    _result = _op.outputs[:]
    _inputs_flat = _op.inputs
    _attrs = ("T", _op.get_attr("T"))
    _execute.record_gradient(
      "Conj", _inputs_flat, _attrs, _result, name)
    _result, = _result
    return _result
  else:
    # Eager mode: attempt the C fast path first.
    try:
      _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
        _ctx._context_handle, _ctx._eager_context.device_name, "Conj", name,
        _ctx._post_execution_callbacks, input)
      return _result
    except _core._FallbackException:
      # Fast path rejected these inputs; retry via the Python slow path.
      return conj_eager_fallback(
          input, name=name, ctx=_ctx)
    except _core._NotOkStatusException as e:
      if name is not None:
        message = e.message + " name: " + name
      else:
        message = e.message
      # Surface the C-layer status as a Python exception, suppressing context.
      _six.raise_from(_core._status_to_exception(e.code, message), None)
def conj_eager_fallback(input, name=None, ctx=None):
  r"""Slow-path eager executor for the Conj op.
  Invoked when the C fast path for `conj` raises a fallback; resolves
  the `T` dtype attribute (complex64 by default) and dispatches through
  the generic execute machinery.
  """
  run_ctx = ctx or _context.context()
  attr_t, (input,) = _execute.args_to_matching_eager(
      [input], run_ctx, _dtypes.complex64)
  flat_inputs = [input]
  op_attrs = ("T", attr_t)
  outputs = _execute.execute(b"Conj", 1, inputs=flat_inputs,
                             attrs=op_attrs, ctx=run_ctx, name=name)
  _execute.record_gradient("Conj", flat_inputs, op_attrs, outputs, name)
  final, = outputs
  return final
def cos_eager_fallback(x, name=None, ctx=None):
  r"""Slow-path eager executor for the Cos op.
  Invoked when the C fast path for `cos` raises a fallback; resolves
  the `T` dtype attribute in Python and dispatches through the generic
  execute machinery.
  """
  run_ctx = ctx or _context.context()
  attr_t, (x,) = _execute.args_to_matching_eager([x], run_ctx)
  flat_inputs = [x]
  op_attrs = ("T", attr_t)
  outputs = _execute.execute(b"Cos", 1, inputs=flat_inputs,
                             attrs=op_attrs, ctx=run_ctx, name=name)
  _execute.record_gradient("Cos", flat_inputs, op_attrs, outputs, name)
  final, = outputs
  return final
def cosh_eager_fallback(x, name=None, ctx=None):
  r"""Slow-path eager executor for the Cosh op.
  Invoked when the C fast path for `cosh` raises a fallback; resolves
  the `T` dtype attribute in Python and dispatches through the generic
  execute machinery.
  """
  run_ctx = ctx or _context.context()
  attr_t, (x,) = _execute.args_to_matching_eager([x], run_ctx)
  flat_inputs = [x]
  op_attrs = ("T", attr_t)
  outputs = _execute.execute(b"Cosh", 1, inputs=flat_inputs,
                             attrs=op_attrs, ctx=run_ctx, name=name)
  _execute.record_gradient("Cosh", flat_inputs, op_attrs, outputs, name)
  final, = outputs
  return final
def cross_eager_fallback(a, b, name=None, ctx=None):
  r"""Slow-path eager executor for the Cross op.
  Invoked when the C fast path for `cross` raises a fallback; coerces
  `a` and `b` to one matching dtype, then dispatches through the generic
  execute machinery.
  """
  run_ctx = ctx or _context.context()
  attr_t, matched = _execute.args_to_matching_eager([a, b], run_ctx)
  a, b = matched
  flat_inputs = [a, b]
  op_attrs = ("T", attr_t)
  outputs = _execute.execute(b"Cross", 1, inputs=flat_inputs,
                             attrs=op_attrs, ctx=run_ctx, name=name)
  _execute.record_gradient("Cross", flat_inputs, op_attrs, outputs, name)
  final, = outputs
  return final
def cumprod(x, axis, exclusive=False, reverse=False, name=None):
  r"""Compute the cumulative product of the tensor `x` along `axis`.

  By default, this op performs an inclusive cumprod, which means that the first
  element of the input is identical to the first element of the output:

  ```python
  tf.cumprod([a, b, c])  # => [a, a * b, a * b * c]
  ```

  By setting the `exclusive` kwarg to `True`, an exclusive cumprod is
  performed instead:

  ```python
  tf.cumprod([a, b, c], exclusive=True)  # => [1, a, a * b]
  ```

  By setting the `reverse` kwarg to `True`, the cumprod is performed in the
  opposite direction:

  ```python
  tf.cumprod([a, b, c], reverse=True)  # => [a * b * c, b * c, c]
  ```

  This is more efficient than using separate `tf.reverse` ops.

  The `reverse` and `exclusive` kwargs can also be combined:

  ```python
  tf.cumprod([a, b, c], exclusive=True, reverse=True)  # => [b * c, c, 1]
  ```

  Args:
    x: A `Tensor`. Must be one of the following types: `float32`, `float64`, `int32`, `uint8`, `int16`, `int8`, `complex64`, `int64`, `qint8`, `quint8`, `qint32`, `bfloat16`, `uint16`, `complex128`, `half`, `uint32`, `uint64`.
      A `Tensor`. Must be one of the following types: `float32`, `float64`,
      `int64`, `int32`, `uint8`, `uint16`, `int16`, `int8`, `complex64`,
      `complex128`, `qint8`, `quint8`, `qint32`, `half`.
    axis: A `Tensor`. Must be one of the following types: `int32`, `int64`.
      A `Tensor` of type `int32` (default: 0). Must be in the range
      `[-rank(x), rank(x))`.
    exclusive: An optional `bool`. Defaults to `False`.
      If `True`, perform exclusive cumprod.
    reverse: An optional `bool`. Defaults to `False`.
      A `bool` (default: False).
    name: A name for the operation (optional).

  Returns:
    A `Tensor`. Has the same type as `x`.
  """
  _ctx = _context._context
  if _ctx is None or not _ctx._eager_context.is_eager:
    # Graph mode: normalize the bool attrs, register the op in the current
    # graph, and record the gradient against the created op's attributes.
    if exclusive is None:
      exclusive = False
    exclusive = _execute.make_bool(exclusive, "exclusive")
    if reverse is None:
      reverse = False
    reverse = _execute.make_bool(reverse, "reverse")
    _, _, _op = _op_def_lib._apply_op_helper(
        "Cumprod", x=x, axis=axis, exclusive=exclusive, reverse=reverse,
        name=name)
    _result = _op.outputs[:]
    _inputs_flat = _op.inputs
    _attrs = ("exclusive", _op.get_attr("exclusive"), "reverse",
              _op.get_attr("reverse"), "T", _op.get_attr("T"), "Tidx",
              _op.get_attr("Tidx"))
    _execute.record_gradient(
        "Cumprod", _inputs_flat, _attrs, _result, name)
    _result, = _result
    return _result
  else:
    try:
      # Eager fast path: dispatch directly through the C API.
      _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
        _ctx._context_handle, _ctx._eager_context.device_name, "Cumprod",
        name, _ctx._post_execution_callbacks, x, axis, "exclusive", exclusive,
        "reverse", reverse)
      return _result
    except _core._FallbackException:
      # Fast path refused the inputs; retry via the generic eager executor.
      return cumprod_eager_fallback(
          x, axis, exclusive=exclusive, reverse=reverse, name=name, ctx=_ctx)
    except _core._NotOkStatusException as e:
      # Surface the C++ status as a Python exception, tagging the op name.
      if name is not None:
        message = e.message + " name: " + name
      else:
        message = e.message
      _six.raise_from(_core._status_to_exception(e.code, message), None)
def cumprod_eager_fallback(x, axis, exclusive=False, reverse=False, name=None, ctx=None):
  r"""Slow-path eager executor for cumprod.

  Normalizes the bool attrs, coerces inputs, dispatches the Cumprod op via
  the generic eager executor, and records the gradient.
  """
  run_ctx = ctx or _context.context()
  exclusive = _execute.make_bool(
      False if exclusive is None else exclusive, "exclusive")
  reverse = _execute.make_bool(
      False if reverse is None else reverse, "reverse")
  attr_t, (x,) = _execute.args_to_matching_eager([x], run_ctx)
  # axis defaults to int32 when it is not already a tensor.
  attr_tidx, (axis,) = _execute.args_to_matching_eager([axis], run_ctx,
                                                       _dtypes.int32)
  flat_inputs = [x, axis]
  op_attrs = ("exclusive", exclusive, "reverse", reverse, "T", attr_t,
              "Tidx", attr_tidx)
  outputs = _execute.execute(b"Cumprod", 1, inputs=flat_inputs,
                             attrs=op_attrs, ctx=run_ctx, name=name)
  _execute.record_gradient("Cumprod", flat_inputs, op_attrs, outputs, name)
  result, = outputs
  return result
def cumsum(x, axis, exclusive=False, reverse=False, name=None):
  r"""Compute the cumulative sum of the tensor `x` along `axis`.

  By default, this op performs an inclusive cumsum, which means that the first
  element of the input is identical to the first element of the output:

  ```python
  tf.cumsum([a, b, c])  # => [a, a + b, a + b + c]
  ```

  By setting the `exclusive` kwarg to `True`, an exclusive cumsum is
  performed instead:

  ```python
  tf.cumsum([a, b, c], exclusive=True)  # => [0, a, a + b]
  ```

  By setting the `reverse` kwarg to `True`, the cumsum is performed in the
  opposite direction:

  ```python
  tf.cumsum([a, b, c], reverse=True)  # => [a + b + c, b + c, c]
  ```

  This is more efficient than using separate `tf.reverse` ops.

  The `reverse` and `exclusive` kwargs can also be combined:

  ```python
  tf.cumsum([a, b, c], exclusive=True, reverse=True)  # => [b + c, c, 0]
  ```

  Args:
    x: A `Tensor`. Must be one of the following types: `float32`, `float64`, `int32`, `uint8`, `int16`, `int8`, `complex64`, `int64`, `qint8`, `quint8`, `qint32`, `bfloat16`, `uint16`, `complex128`, `half`, `uint32`, `uint64`.
      A `Tensor`. Must be one of the following types: `float32`, `float64`,
      `int64`, `int32`, `uint8`, `uint16`, `int16`, `int8`, `complex64`,
      `complex128`, `qint8`, `quint8`, `qint32`, `half`.
    axis: A `Tensor`. Must be one of the following types: `int32`, `int64`.
      A `Tensor` of type `int32` (default: 0). Must be in the range
      `[-rank(x), rank(x))`.
    exclusive: An optional `bool`. Defaults to `False`.
      If `True`, perform exclusive cumsum.
    reverse: An optional `bool`. Defaults to `False`.
      A `bool` (default: False).
    name: A name for the operation (optional).

  Returns:
    A `Tensor`. Has the same type as `x`.
  """
  _ctx = _context._context
  if _ctx is None or not _ctx._eager_context.is_eager:
    # Graph mode: normalize the bool attrs, register the op in the current
    # graph, and record the gradient against the created op's attributes.
    if exclusive is None:
      exclusive = False
    exclusive = _execute.make_bool(exclusive, "exclusive")
    if reverse is None:
      reverse = False
    reverse = _execute.make_bool(reverse, "reverse")
    _, _, _op = _op_def_lib._apply_op_helper(
        "Cumsum", x=x, axis=axis, exclusive=exclusive, reverse=reverse,
        name=name)
    _result = _op.outputs[:]
    _inputs_flat = _op.inputs
    _attrs = ("exclusive", _op.get_attr("exclusive"), "reverse",
              _op.get_attr("reverse"), "T", _op.get_attr("T"), "Tidx",
              _op.get_attr("Tidx"))
    _execute.record_gradient(
        "Cumsum", _inputs_flat, _attrs, _result, name)
    _result, = _result
    return _result
  else:
    try:
      # Eager fast path: dispatch directly through the C API.
      _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
        _ctx._context_handle, _ctx._eager_context.device_name, "Cumsum", name,
        _ctx._post_execution_callbacks, x, axis, "exclusive", exclusive,
        "reverse", reverse)
      return _result
    except _core._FallbackException:
      # Fast path refused the inputs; retry via the generic eager executor.
      return cumsum_eager_fallback(
          x, axis, exclusive=exclusive, reverse=reverse, name=name, ctx=_ctx)
    except _core._NotOkStatusException as e:
      # Surface the C++ status as a Python exception, tagging the op name.
      if name is not None:
        message = e.message + " name: " + name
      else:
        message = e.message
      _six.raise_from(_core._status_to_exception(e.code, message), None)
def cumsum_eager_fallback(x, axis, exclusive=False, reverse=False, name=None, ctx=None):
  r"""Slow-path eager executor for cumsum.

  Normalizes the bool attrs, coerces inputs, dispatches the Cumsum op via
  the generic eager executor, and records the gradient.
  """
  run_ctx = ctx or _context.context()
  exclusive = _execute.make_bool(
      False if exclusive is None else exclusive, "exclusive")
  reverse = _execute.make_bool(
      False if reverse is None else reverse, "reverse")
  attr_t, (x,) = _execute.args_to_matching_eager([x], run_ctx)
  # axis defaults to int32 when it is not already a tensor.
  attr_tidx, (axis,) = _execute.args_to_matching_eager([axis], run_ctx,
                                                       _dtypes.int32)
  flat_inputs = [x, axis]
  op_attrs = ("exclusive", exclusive, "reverse", reverse, "T", attr_t,
              "Tidx", attr_tidx)
  outputs = _execute.execute(b"Cumsum", 1, inputs=flat_inputs,
                             attrs=op_attrs, ctx=run_ctx, name=name)
  _execute.record_gradient("Cumsum", flat_inputs, op_attrs, outputs, name)
  result, = outputs
  return result
def digamma_eager_fallback(x, name=None, ctx=None):
  r"""Slow-path eager executor for digamma.

  Dispatches the Digamma op via the generic eager executor and records the
  gradient, returning the single output tensor.
  """
  run_ctx = ctx or _context.context()
  attr_t, (x,) = _execute.args_to_matching_eager([x], run_ctx)
  flat_inputs = [x]
  op_attrs = ("T", attr_t)
  outputs = _execute.execute(b"Digamma", 1, inputs=flat_inputs,
                             attrs=op_attrs, ctx=run_ctx, name=name)
  _execute.record_gradient("Digamma", flat_inputs, op_attrs, outputs, name)
  result, = outputs
  return result
def div(x, y, name=None):
  r"""Returns x / y element-wise.

  *NOTE*: `Div` supports broadcasting. More about broadcasting
  [here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html)

  Args:
    x: A `Tensor`. Must be one of the following types: `bfloat16`, `half`, `float32`, `float64`, `uint8`, `int8`, `uint16`, `int16`, `int32`, `int64`, `complex64`, `complex128`.
    y: A `Tensor`. Must have the same type as `x`.
    name: A name for the operation (optional).

  Returns:
    A `Tensor`. Has the same type as `x`.
  """
  _ctx = _context._context
  if _ctx is None or not _ctx._eager_context.is_eager:
    # Graph mode: register the op in the current graph and record the
    # gradient against the created op's attributes.
    _, _, _op = _op_def_lib._apply_op_helper(
        "Div", x=x, y=y, name=name)
    _result = _op.outputs[:]
    _inputs_flat = _op.inputs
    _attrs = ("T", _op.get_attr("T"))
    _execute.record_gradient(
        "Div", _inputs_flat, _attrs, _result, name)
    _result, = _result
    return _result
  else:
    try:
      # Eager fast path: dispatch directly through the C API.
      _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
        _ctx._context_handle, _ctx._eager_context.device_name, "Div", name,
        _ctx._post_execution_callbacks, x, y)
      return _result
    except _core._FallbackException:
      # Fast path refused the inputs; retry via the generic eager executor.
      return div_eager_fallback(
          x, y, name=name, ctx=_ctx)
    except _core._NotOkStatusException as e:
      # Surface the C++ status as a Python exception, tagging the op name.
      if name is not None:
        message = e.message + " name: " + name
      else:
        message = e.message
      _six.raise_from(_core._status_to_exception(e.code, message), None)
def div_eager_fallback(x, y, name=None, ctx=None):
  r"""Slow-path eager executor for div.

  Coerces both operands to a common dtype, dispatches the Div op via the
  generic eager executor, and records the gradient.
  """
  run_ctx = ctx or _context.context()
  attr_t, coerced = _execute.args_to_matching_eager([x, y], run_ctx)
  x, y = coerced
  flat_inputs = [x, y]
  op_attrs = ("T", attr_t)
  outputs = _execute.execute(b"Div", 1, inputs=flat_inputs, attrs=op_attrs,
                             ctx=run_ctx, name=name)
  _execute.record_gradient("Div", flat_inputs, op_attrs, outputs, name)
  result, = outputs
  return result
def equal_eager_fallback(x, y, name=None, ctx=None):
  r"""Slow-path eager executor for equal.

  Coerces both operands to a common dtype, dispatches the Equal op via the
  generic eager executor, and records the gradient.
  """
  run_ctx = ctx or _context.context()
  attr_t, coerced = _execute.args_to_matching_eager([x, y], run_ctx)
  x, y = coerced
  flat_inputs = [x, y]
  op_attrs = ("T", attr_t)
  outputs = _execute.execute(b"Equal", 1, inputs=flat_inputs, attrs=op_attrs,
                             ctx=run_ctx, name=name)
  _execute.record_gradient("Equal", flat_inputs, op_attrs, outputs, name)
  result, = outputs
  return result
def erf(x, name=None):
  r"""Computes the Gauss error function of `x` element-wise.

  Args:
    x: A `Tensor`. Must be one of the following types: `bfloat16`, `half`, `float32`, `float64`.
    name: A name for the operation (optional).

  Returns:
    A `Tensor`. Has the same type as `x`.
  """
  _ctx = _context._context
  if _ctx is None or not _ctx._eager_context.is_eager:
    # Graph mode: register the op in the current graph and record the
    # gradient against the created op's attributes.
    _, _, _op = _op_def_lib._apply_op_helper(
        "Erf", x=x, name=name)
    _result = _op.outputs[:]
    _inputs_flat = _op.inputs
    _attrs = ("T", _op.get_attr("T"))
    _execute.record_gradient(
        "Erf", _inputs_flat, _attrs, _result, name)
    _result, = _result
    return _result
  else:
    try:
      # Eager fast path: dispatch directly through the C API.
      _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
        _ctx._context_handle, _ctx._eager_context.device_name, "Erf", name,
        _ctx._post_execution_callbacks, x)
      return _result
    except _core._FallbackException:
      # Fast path refused the inputs; retry via the generic eager executor.
      return erf_eager_fallback(
          x, name=name, ctx=_ctx)
    except _core._NotOkStatusException as e:
      # Surface the C++ status as a Python exception, tagging the op name.
      if name is not None:
        message = e.message + " name: " + name
      else:
        message = e.message
      _six.raise_from(_core._status_to_exception(e.code, message), None)
def erf_eager_fallback(x, name=None, ctx=None):
  r"""Slow-path eager executor for erf.

  Dispatches the Erf op via the generic eager executor and records the
  gradient, returning the single output tensor.
  """
  run_ctx = ctx or _context.context()
  attr_t, (x,) = _execute.args_to_matching_eager([x], run_ctx)
  flat_inputs = [x]
  op_attrs = ("T", attr_t)
  outputs = _execute.execute(b"Erf", 1, inputs=flat_inputs, attrs=op_attrs,
                             ctx=run_ctx, name=name)
  _execute.record_gradient("Erf", flat_inputs, op_attrs, outputs, name)
  result, = outputs
  return result
def erfc_eager_fallback(x, name=None, ctx=None):
  r"""Slow-path eager executor for erfc.

  Dispatches the Erfc op via the generic eager executor and records the
  gradient, returning the single output tensor.
  """
  run_ctx = ctx or _context.context()
  attr_t, (x,) = _execute.args_to_matching_eager([x], run_ctx)
  flat_inputs = [x]
  op_attrs = ("T", attr_t)
  outputs = _execute.execute(b"Erfc", 1, inputs=flat_inputs, attrs=op_attrs,
                             ctx=run_ctx, name=name)
  _execute.record_gradient("Erfc", flat_inputs, op_attrs, outputs, name)
  result, = outputs
  return result
def exp_eager_fallback(x, name=None, ctx=None):
  r"""Slow-path eager executor for exp.

  Dispatches the Exp op via the generic eager executor and records the
  gradient, returning the single output tensor.
  """
  run_ctx = ctx or _context.context()
  attr_t, (x,) = _execute.args_to_matching_eager([x], run_ctx)
  flat_inputs = [x]
  op_attrs = ("T", attr_t)
  outputs = _execute.execute(b"Exp", 1, inputs=flat_inputs, attrs=op_attrs,
                             ctx=run_ctx, name=name)
  _execute.record_gradient("Exp", flat_inputs, op_attrs, outputs, name)
  result, = outputs
  return result
def expm1_eager_fallback(x, name=None, ctx=None):
  r"""Slow-path eager executor for expm1.

  Dispatches the Expm1 op via the generic eager executor and records the
  gradient, returning the single output tensor.
  """
  run_ctx = ctx or _context.context()
  attr_t, (x,) = _execute.args_to_matching_eager([x], run_ctx)
  flat_inputs = [x]
  op_attrs = ("T", attr_t)
  outputs = _execute.execute(b"Expm1", 1, inputs=flat_inputs, attrs=op_attrs,
                             ctx=run_ctx, name=name)
  _execute.record_gradient("Expm1", flat_inputs, op_attrs, outputs, name)
  result, = outputs
  return result
def floor_eager_fallback(x, name=None, ctx=None):
  r"""Slow-path eager executor for floor.

  Dispatches the Floor op via the generic eager executor and records the
  gradient, returning the single output tensor.
  """
  run_ctx = ctx or _context.context()
  attr_t, (x,) = _execute.args_to_matching_eager([x], run_ctx)
  flat_inputs = [x]
  op_attrs = ("T", attr_t)
  outputs = _execute.execute(b"Floor", 1, inputs=flat_inputs, attrs=op_attrs,
                             ctx=run_ctx, name=name)
  _execute.record_gradient("Floor", flat_inputs, op_attrs, outputs, name)
  result, = outputs
  return result
def floor_div(x, y, name=None):
  r"""Returns x // y element-wise.

  *NOTE*: `FloorDiv` supports broadcasting. More about broadcasting
  [here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html)

  Args:
    x: A `Tensor`. Must be one of the following types: `bfloat16`, `half`, `float32`, `float64`, `uint8`, `int8`, `uint16`, `int16`, `int32`, `int64`, `complex64`, `complex128`.
    y: A `Tensor`. Must have the same type as `x`.
    name: A name for the operation (optional).

  Returns:
    A `Tensor`. Has the same type as `x`.
  """
  _ctx = _context._context
  if _ctx is None or not _ctx._eager_context.is_eager:
    # Graph mode: register the op in the current graph and record the
    # gradient against the created op's attributes.
    _, _, _op = _op_def_lib._apply_op_helper(
        "FloorDiv", x=x, y=y, name=name)
    _result = _op.outputs[:]
    _inputs_flat = _op.inputs
    _attrs = ("T", _op.get_attr("T"))
    _execute.record_gradient(
        "FloorDiv", _inputs_flat, _attrs, _result, name)
    _result, = _result
    return _result
  else:
    try:
      # Eager fast path: dispatch directly through the C API.
      _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
        _ctx._context_handle, _ctx._eager_context.device_name, "FloorDiv",
        name, _ctx._post_execution_callbacks, x, y)
      return _result
    except _core._FallbackException:
      # Fast path refused the inputs; retry via the generic eager executor.
      return floor_div_eager_fallback(
          x, y, name=name, ctx=_ctx)
    except _core._NotOkStatusException as e:
      # Surface the C++ status as a Python exception, tagging the op name.
      if name is not None:
        message = e.message + " name: " + name
      else:
        message = e.message
      _six.raise_from(_core._status_to_exception(e.code, message), None)
def floor_div_eager_fallback(x, y, name=None, ctx=None):
  r"""Slow-path eager executor for floor_div.

  Coerces both operands to a common dtype, dispatches the FloorDiv op via
  the generic eager executor, and records the gradient.
  """
  run_ctx = ctx or _context.context()
  attr_t, coerced = _execute.args_to_matching_eager([x, y], run_ctx)
  x, y = coerced
  flat_inputs = [x, y]
  op_attrs = ("T", attr_t)
  outputs = _execute.execute(b"FloorDiv", 1, inputs=flat_inputs,
                             attrs=op_attrs, ctx=run_ctx, name=name)
  _execute.record_gradient("FloorDiv", flat_inputs, op_attrs, outputs, name)
  result, = outputs
  return result
def floor_mod(x, y, name=None):
  r"""Returns element-wise remainder of division. When `x < 0` xor `y < 0` is

  true, this follows Python semantics in that the result here is consistent
  with a flooring divide. E.g. `floor(x / y) * y + mod(x, y) = x`.

  *NOTE*: `FloorMod` supports broadcasting. More about broadcasting
  [here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html)

  Args:
    x: A `Tensor`. Must be one of the following types: `int32`, `int64`, `bfloat16`, `half`, `float32`, `float64`.
    y: A `Tensor`. Must have the same type as `x`.
    name: A name for the operation (optional).

  Returns:
    A `Tensor`. Has the same type as `x`.
  """
  _ctx = _context._context
  if _ctx is None or not _ctx._eager_context.is_eager:
    # Graph mode: register the op in the current graph and record the
    # gradient against the created op's attributes.
    _, _, _op = _op_def_lib._apply_op_helper(
        "FloorMod", x=x, y=y, name=name)
    _result = _op.outputs[:]
    _inputs_flat = _op.inputs
    _attrs = ("T", _op.get_attr("T"))
    _execute.record_gradient(
        "FloorMod", _inputs_flat, _attrs, _result, name)
    _result, = _result
    return _result
  else:
    try:
      # Eager fast path: dispatch directly through the C API.
      _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
        _ctx._context_handle, _ctx._eager_context.device_name, "FloorMod",
        name, _ctx._post_execution_callbacks, x, y)
      return _result
    except _core._FallbackException:
      # Fast path refused the inputs; retry via the generic eager executor.
      return floor_mod_eager_fallback(
          x, y, name=name, ctx=_ctx)
    except _core._NotOkStatusException as e:
      # Surface the C++ status as a Python exception, tagging the op name.
      if name is not None:
        message = e.message + " name: " + name
      else:
        message = e.message
      _six.raise_from(_core._status_to_exception(e.code, message), None)
def floor_mod_eager_fallback(x, y, name=None, ctx=None):
  r"""Slow-path eager executor for floor_mod.

  Coerces both operands to a common dtype, dispatches the FloorMod op via
  the generic eager executor, and records the gradient.
  """
  run_ctx = ctx or _context.context()
  attr_t, coerced = _execute.args_to_matching_eager([x, y], run_ctx)
  x, y = coerced
  flat_inputs = [x, y]
  op_attrs = ("T", attr_t)
  outputs = _execute.execute(b"FloorMod", 1, inputs=flat_inputs,
                             attrs=op_attrs, ctx=run_ctx, name=name)
  _execute.record_gradient("FloorMod", flat_inputs, op_attrs, outputs, name)
  result, = outputs
  return result
def greater_eager_fallback(x, y, name=None, ctx=None):
  r"""Slow-path eager executor for greater.

  Coerces both operands to a common dtype, dispatches the Greater op via
  the generic eager executor, and records the gradient.
  """
  run_ctx = ctx or _context.context()
  attr_t, coerced = _execute.args_to_matching_eager([x, y], run_ctx)
  x, y = coerced
  flat_inputs = [x, y]
  op_attrs = ("T", attr_t)
  outputs = _execute.execute(b"Greater", 1, inputs=flat_inputs,
                             attrs=op_attrs, ctx=run_ctx, name=name)
  _execute.record_gradient("Greater", flat_inputs, op_attrs, outputs, name)
  result, = outputs
  return result
def greater_equal_eager_fallback(x, y, name=None, ctx=None):
  r"""Slow-path eager executor for greater_equal.

  Coerces both operands to a common dtype, dispatches the GreaterEqual op
  via the generic eager executor, and records the gradient.
  """
  run_ctx = ctx or _context.context()
  attr_t, coerced = _execute.args_to_matching_eager([x, y], run_ctx)
  x, y = coerced
  flat_inputs = [x, y]
  op_attrs = ("T", attr_t)
  outputs = _execute.execute(b"GreaterEqual", 1, inputs=flat_inputs,
                             attrs=op_attrs, ctx=run_ctx, name=name)
  _execute.record_gradient("GreaterEqual", flat_inputs, op_attrs, outputs,
                           name)
  result, = outputs
  return result
def _histogram_fixed_width(values, value_range, nbins, dtype=_dtypes.int32, name=None):
  r"""Return histogram of values.

  Given the tensor `values`, this operation returns a rank 1 histogram counting
  the number of entries in `values` that fall into every bin.  The bins are
  equal width and determined by the arguments `value_range` and `nbins`.

  ```python
  # Bins will be:  (-inf, 1), [1, 2), [2, 3), [3, 4), [4, inf)
  nbins = 5
  value_range = [0.0, 5.0]
  new_values = [-1.0, 0.0, 1.5, 2.0, 5.0, 15]

  with tf.get_default_session() as sess:
    hist = tf.histogram_fixed_width(new_values, value_range, nbins=5)
    variables.global_variables_initializer().run()
    sess.run(hist) => [2, 1, 1, 0, 2]
  ```

  Args:
    values: A `Tensor`. Must be one of the following types: `int32`, `int64`, `float32`, `float64`.
      Numeric `Tensor`.
    value_range: A `Tensor`. Must have the same type as `values`.
      Shape [2] `Tensor` of same `dtype` as `values`.
      values <= value_range[0] will be mapped to hist[0],
      values >= value_range[1] will be mapped to hist[-1].
    nbins: A `Tensor` of type `int32`.
      Scalar `int32 Tensor`.  Number of histogram bins.
    dtype: An optional `tf.DType` from: `tf.int32, tf.int64`. Defaults to `tf.int32`.
    name: A name for the operation (optional).

  Returns:
    A `Tensor` of type `dtype`.
  """
  _ctx = _context._context
  if _ctx is None or not _ctx._eager_context.is_eager:
    # Graph mode: normalize the dtype attr, register the op in the current
    # graph, and record the gradient against the created op's attributes.
    if dtype is None:
      dtype = _dtypes.int32
    dtype = _execute.make_type(dtype, "dtype")
    _, _, _op = _op_def_lib._apply_op_helper(
        "HistogramFixedWidth", values=values, value_range=value_range,
        nbins=nbins, dtype=dtype, name=name)
    _result = _op.outputs[:]
    _inputs_flat = _op.inputs
    _attrs = ("T", _op.get_attr("T"), "dtype", _op.get_attr("dtype"))
    _execute.record_gradient(
        "HistogramFixedWidth", _inputs_flat, _attrs, _result, name)
    _result, = _result
    return _result
  else:
    try:
      # Eager fast path: dispatch directly through the C API.
      _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
        _ctx._context_handle, _ctx._eager_context.device_name,
        "HistogramFixedWidth", name, _ctx._post_execution_callbacks, values,
        value_range, nbins, "dtype", dtype)
      return _result
    except _core._FallbackException:
      # Fast path refused the inputs; retry via the generic eager executor.
      return _histogram_fixed_width_eager_fallback(
          values, value_range, nbins, dtype=dtype, name=name, ctx=_ctx)
    except _core._NotOkStatusException as e:
      # Surface the C++ status as a Python exception, tagging the op name.
      if name is not None:
        message = e.message + " name: " + name
      else:
        message = e.message
      _six.raise_from(_core._status_to_exception(e.code, message), None)
def _histogram_fixed_width_eager_fallback(values, value_range, nbins, dtype=_dtypes.int32, name=None, ctx=None):
  r"""Slow-path eager executor for _histogram_fixed_width.

  Normalizes the dtype attr, coerces inputs, dispatches the
  HistogramFixedWidth op via the generic eager executor, and records the
  gradient.
  """
  run_ctx = ctx or _context.context()
  dtype = _execute.make_type(
      _dtypes.int32 if dtype is None else dtype, "dtype")
  attr_t, coerced = _execute.args_to_matching_eager(
      [values, value_range], run_ctx)
  values, value_range = coerced
  # nbins is always an int32 scalar tensor.
  nbins = _ops.convert_to_tensor(nbins, _dtypes.int32)
  flat_inputs = [values, value_range, nbins]
  op_attrs = ("T", attr_t, "dtype", dtype)
  outputs = _execute.execute(b"HistogramFixedWidth", 1, inputs=flat_inputs,
                             attrs=op_attrs, ctx=run_ctx, name=name)
  _execute.record_gradient("HistogramFixedWidth", flat_inputs, op_attrs,
                           outputs, name)
  result, = outputs
  return result
def igamma_eager_fallback(a, x, name=None, ctx=None):
  r"""Slow-path eager executor for igamma.

  Coerces both operands to a common dtype, dispatches the Igamma op via the
  generic eager executor, and records the gradient.
  """
  run_ctx = ctx or _context.context()
  attr_t, coerced = _execute.args_to_matching_eager([a, x], run_ctx)
  a, x = coerced
  flat_inputs = [a, x]
  op_attrs = ("T", attr_t)
  outputs = _execute.execute(b"Igamma", 1, inputs=flat_inputs, attrs=op_attrs,
                             ctx=run_ctx, name=name)
  _execute.record_gradient("Igamma", flat_inputs, op_attrs, outputs, name)
  result, = outputs
  return result
def igamma_grad_a(a, x, name=None):
  r"""Computes the gradient of `igamma(a, x)` wrt `a`.

  Args:
    a: A `Tensor`. Must be one of the following types: `float32`, `float64`.
    x: A `Tensor`. Must have the same type as `a`.
    name: A name for the operation (optional).

  Returns:
    A `Tensor`. Has the same type as `a`.
  """
  _ctx = _context._context
  if _ctx is None or not _ctx._eager_context.is_eager:
    # Graph mode: register the op in the current graph and record the
    # gradient against the created op's attributes.
    _, _, _op = _op_def_lib._apply_op_helper(
        "IgammaGradA", a=a, x=x, name=name)
    _result = _op.outputs[:]
    _inputs_flat = _op.inputs
    _attrs = ("T", _op.get_attr("T"))
    _execute.record_gradient(
        "IgammaGradA", _inputs_flat, _attrs, _result, name)
    _result, = _result
    return _result
  else:
    try:
      # Eager fast path: dispatch directly through the C API.
      _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
        _ctx._context_handle, _ctx._eager_context.device_name, "IgammaGradA",
        name, _ctx._post_execution_callbacks, a, x)
      return _result
    except _core._FallbackException:
      # Fast path refused the inputs; retry via the generic eager executor.
      return igamma_grad_a_eager_fallback(
          a, x, name=name, ctx=_ctx)
    except _core._NotOkStatusException as e:
      # Surface the C++ status as a Python exception, tagging the op name.
      if name is not None:
        message = e.message + " name: " + name
      else:
        message = e.message
      _six.raise_from(_core._status_to_exception(e.code, message), None)
def igamma_grad_a_eager_fallback(a, x, name=None, ctx=None):
  r"""Slow-path eager executor for igamma_grad_a.

  Coerces both operands to a common dtype, dispatches the IgammaGradA op
  via the generic eager executor, and records the gradient.
  """
  run_ctx = ctx or _context.context()
  attr_t, coerced = _execute.args_to_matching_eager([a, x], run_ctx)
  a, x = coerced
  flat_inputs = [a, x]
  op_attrs = ("T", attr_t)
  outputs = _execute.execute(b"IgammaGradA", 1, inputs=flat_inputs,
                             attrs=op_attrs, ctx=run_ctx, name=name)
  _execute.record_gradient("IgammaGradA", flat_inputs, op_attrs, outputs,
                           name)
  result, = outputs
  return result
def igammac_eager_fallback(a, x, name=None, ctx=None):
  r"""Slow-path eager executor for igammac.

  Coerces both operands to a common dtype, dispatches the Igammac op via
  the generic eager executor, and records the gradient.
  """
  run_ctx = ctx or _context.context()
  attr_t, coerced = _execute.args_to_matching_eager([a, x], run_ctx)
  a, x = coerced
  flat_inputs = [a, x]
  op_attrs = ("T", attr_t)
  outputs = _execute.execute(b"Igammac", 1, inputs=flat_inputs,
                             attrs=op_attrs, ctx=run_ctx, name=name)
  _execute.record_gradient("Igammac", flat_inputs, op_attrs, outputs, name)
  result, = outputs
  return result
def imag(input, Tout=_dtypes.float32, name=None):
  r"""Returns the imaginary part of a complex number.

  Given a tensor `input` of complex numbers, this operation returns a tensor of
  type `float` that is the imaginary part of each element in `input`. All
  elements in `input` must be complex numbers of the form \\(a + bj\\), where *a*
  is the real part and *b* is the imaginary part returned by this operation.

  For example:

  ```
  # tensor 'input' is [-2.25 + 4.75j, 3.25 + 5.75j]
  tf.imag(input) ==> [4.75, 5.75]
  ```

  Args:
    input: A `Tensor`. Must be one of the following types: `complex64`, `complex128`.
    Tout: An optional `tf.DType` from: `tf.float32, tf.float64`. Defaults to `tf.float32`.
    name: A name for the operation (optional).

  Returns:
    A `Tensor` of type `Tout`.
  """
  _ctx = _context._context
  if _ctx is None or not _ctx._eager_context.is_eager:
    # Graph mode: normalize the Tout attr, register the op in the current
    # graph, and record the gradient against the created op's attributes.
    if Tout is None:
      Tout = _dtypes.float32
    Tout = _execute.make_type(Tout, "Tout")
    _, _, _op = _op_def_lib._apply_op_helper(
        "Imag", input=input, Tout=Tout, name=name)
    _result = _op.outputs[:]
    _inputs_flat = _op.inputs
    _attrs = ("T", _op.get_attr("T"), "Tout", _op.get_attr("Tout"))
    _execute.record_gradient(
        "Imag", _inputs_flat, _attrs, _result, name)
    _result, = _result
    return _result
  else:
    try:
      # Eager fast path: dispatch directly through the C API.
      _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
        _ctx._context_handle, _ctx._eager_context.device_name, "Imag", name,
        _ctx._post_execution_callbacks, input, "Tout", Tout)
      return _result
    except _core._FallbackException:
      # Fast path refused the inputs; retry via the generic eager executor.
      return imag_eager_fallback(
          input, Tout=Tout, name=name, ctx=_ctx)
    except _core._NotOkStatusException as e:
      # Surface the C++ status as a Python exception, tagging the op name.
      if name is not None:
        message = e.message + " name: " + name
      else:
        message = e.message
      _six.raise_from(_core._status_to_exception(e.code, message), None)
def imag_eager_fallback(input, Tout=_dtypes.float32, name=None, ctx=None):
  r"""Slow-path eager executor for imag.

  Normalizes the Tout attr, coerces the input (complex64 by default),
  dispatches the Imag op via the generic eager executor, and records the
  gradient.
  """
  run_ctx = ctx or _context.context()
  Tout = _execute.make_type(
      _dtypes.float32 if Tout is None else Tout, "Tout")
  attr_t, (input,) = _execute.args_to_matching_eager([input], run_ctx,
                                                     _dtypes.complex64)
  flat_inputs = [input]
  op_attrs = ("T", attr_t, "Tout", Tout)
  outputs = _execute.execute(b"Imag", 1, inputs=flat_inputs, attrs=op_attrs,
                             ctx=run_ctx, name=name)
  _execute.record_gradient("Imag", flat_inputs, op_attrs, outputs, name)
  result, = outputs
  return result
def inv(x, name=None):
  r"""Computes the reciprocal of x element-wise.

  I.e., \\(y = 1 / x\\).

  Args:
    x: A `Tensor`. Must be one of the following types: `bfloat16`, `half`, `float32`, `float64`, `int32`, `int64`, `complex64`, `complex128`.
    name: A name for the operation (optional).

  Returns:
    A `Tensor`. Has the same type as `x`.
  """
  _ctx = _context._context
  if _ctx is None or not _ctx._eager_context.is_eager:
    # Graph mode: register the op in the current graph and record the
    # gradient against the created op's attributes.
    _, _, _op = _op_def_lib._apply_op_helper(
        "Inv", x=x, name=name)
    _result = _op.outputs[:]
    _inputs_flat = _op.inputs
    _attrs = ("T", _op.get_attr("T"))
    _execute.record_gradient(
        "Inv", _inputs_flat, _attrs, _result, name)
    _result, = _result
    return _result
  else:
    try:
      # Eager fast path: dispatch directly through the C API.
      _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
        _ctx._context_handle, _ctx._eager_context.device_name, "Inv", name,
        _ctx._post_execution_callbacks, x)
      return _result
    except _core._FallbackException:
      # Fast path refused the inputs; retry via the generic eager executor.
      return inv_eager_fallback(
          x, name=name, ctx=_ctx)
    except _core._NotOkStatusException as e:
      # Surface the C++ status as a Python exception, tagging the op name.
      if name is not None:
        message = e.message + " name: " + name
      else:
        message = e.message
      _six.raise_from(_core._status_to_exception(e.code, message), None)
def inv_eager_fallback(x, name=None, ctx=None):
  r"""Slow-path eager executor for inv.

  Dispatches the Inv op via the generic eager executor and records the
  gradient, returning the single output tensor.
  """
  run_ctx = ctx or _context.context()
  attr_t, (x,) = _execute.args_to_matching_eager([x], run_ctx)
  flat_inputs = [x]
  op_attrs = ("T", attr_t)
  outputs = _execute.execute(b"Inv", 1, inputs=flat_inputs, attrs=op_attrs,
                             ctx=run_ctx, name=name)
  _execute.record_gradient("Inv", flat_inputs, op_attrs, outputs, name)
  result, = outputs
  return result
def inv_grad(y, dy, name=None):
  r"""Computes the gradient for the inverse of `x` wrt its input.

  Specifically, `grad = -dy * y*y`, where `y = 1/x`, and `dy`
  is the corresponding input gradient.

  Args:
    y: A `Tensor`. Must be one of the following types: `bfloat16`, `half`, `float32`, `float64`, `complex64`, `complex128`.
    dy: A `Tensor`. Must have the same type as `y`.
    name: A name for the operation (optional).

  Returns:
    A `Tensor`. Has the same type as `y`.
  """
  _ctx = _context._context
  if _ctx is None or not _ctx._eager_context.is_eager:
    # Graph mode: register the op in the current graph and record the
    # gradient against the created op's attributes.
    _, _, _op = _op_def_lib._apply_op_helper(
        "InvGrad", y=y, dy=dy, name=name)
    _result = _op.outputs[:]
    _inputs_flat = _op.inputs
    _attrs = ("T", _op.get_attr("T"))
    _execute.record_gradient(
        "InvGrad", _inputs_flat, _attrs, _result, name)
    _result, = _result
    return _result
  else:
    try:
      # Eager fast path: dispatch directly through the C API.
      _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
        _ctx._context_handle, _ctx._eager_context.device_name, "InvGrad",
        name, _ctx._post_execution_callbacks, y, dy)
      return _result
    except _core._FallbackException:
      # Fast path refused the inputs; retry via the generic eager executor.
      return inv_grad_eager_fallback(
          y, dy, name=name, ctx=_ctx)
    except _core._NotOkStatusException as e:
      # Surface the C++ status as a Python exception, tagging the op name.
      if name is not None:
        message = e.message + " name: " + name
      else:
        message = e.message
      _six.raise_from(_core._status_to_exception(e.code, message), None)
def inv_grad_eager_fallback(y, dy, name=None, ctx=None):
  r"""Slow-path eager executor for inv_grad.

  Coerces both operands to a common dtype, dispatches the InvGrad op via
  the generic eager executor, and records the gradient.
  """
  run_ctx = ctx or _context.context()
  attr_t, coerced = _execute.args_to_matching_eager([y, dy], run_ctx)
  y, dy = coerced
  flat_inputs = [y, dy]
  op_attrs = ("T", attr_t)
  outputs = _execute.execute(b"InvGrad", 1, inputs=flat_inputs,
                             attrs=op_attrs, ctx=run_ctx, name=name)
  _execute.record_gradient("InvGrad", flat_inputs, op_attrs, outputs, name)
  result, = outputs
  return result
def is_finite_eager_fallback(x, name=None, ctx=None):
  r"""Slow-path eager executor for is_finite.

  Dispatches the IsFinite op via the generic eager executor and records the
  gradient, returning the single output tensor.
  """
  run_ctx = ctx or _context.context()
  attr_t, (x,) = _execute.args_to_matching_eager([x], run_ctx)
  flat_inputs = [x]
  op_attrs = ("T", attr_t)
  outputs = _execute.execute(b"IsFinite", 1, inputs=flat_inputs,
                             attrs=op_attrs, ctx=run_ctx, name=name)
  _execute.record_gradient("IsFinite", flat_inputs, op_attrs, outputs, name)
  result, = outputs
  return result
def is_inf_eager_fallback(x, name=None, ctx=None):
  r"""Slow-path eager executor for is_inf.

  Dispatches the IsInf op via the generic eager executor and records the
  gradient, returning the single output tensor.
  """
  run_ctx = ctx or _context.context()
  attr_t, (x,) = _execute.args_to_matching_eager([x], run_ctx)
  flat_inputs = [x]
  op_attrs = ("T", attr_t)
  outputs = _execute.execute(b"IsInf", 1, inputs=flat_inputs, attrs=op_attrs,
                             ctx=run_ctx, name=name)
  _execute.record_gradient("IsInf", flat_inputs, op_attrs, outputs, name)
  result, = outputs
  return result
def is_nan_eager_fallback(x, name=None, ctx=None):
  r"""Slow-path eager executor for is_nan.

  Dispatches the IsNan op via the generic eager executor and records the
  gradient, returning the single output tensor.
  """
  run_ctx = ctx or _context.context()
  attr_t, (x,) = _execute.args_to_matching_eager([x], run_ctx)
  flat_inputs = [x]
  op_attrs = ("T", attr_t)
  outputs = _execute.execute(b"IsNan", 1, inputs=flat_inputs, attrs=op_attrs,
                             ctx=run_ctx, name=name)
  _execute.record_gradient("IsNan", flat_inputs, op_attrs, outputs, name)
  result, = outputs
  return result
def less_eager_fallback(x, y, name=None, ctx=None):
  r"""Eager-mode slow path for the Less op.

  Invoked when the fast C++ execution path raises a fallback exception.
  """
  eager_ctx = ctx or _context.context()
  attr_t, matched = _execute.args_to_matching_eager([x, y], eager_ctx)
  x, y = matched
  flat_inputs = [x, y]
  op_attrs = ("T", attr_t)
  outputs = _execute.execute(b"Less", 1, inputs=flat_inputs, attrs=op_attrs,
                             ctx=eager_ctx, name=name)
  _execute.record_gradient("Less", flat_inputs, op_attrs, outputs, name)
  (out,) = outputs
  return out
def less_equal_eager_fallback(x, y, name=None, ctx=None):
  r"""Eager-mode slow path for the LessEqual op.

  Invoked when the fast C++ execution path raises a fallback exception.
  """
  eager_ctx = ctx or _context.context()
  attr_t, matched = _execute.args_to_matching_eager([x, y], eager_ctx)
  x, y = matched
  flat_inputs = [x, y]
  op_attrs = ("T", attr_t)
  outputs = _execute.execute(b"LessEqual", 1, inputs=flat_inputs,
                             attrs=op_attrs, ctx=eager_ctx, name=name)
  _execute.record_gradient("LessEqual", flat_inputs, op_attrs, outputs, name)
  (out,) = outputs
  return out
def lgamma_eager_fallback(x, name=None, ctx=None):
  r"""Eager-mode slow path for the Lgamma op.

  Invoked when the fast C++ execution path raises a fallback exception.
  """
  eager_ctx = ctx or _context.context()
  attr_t, (x,) = _execute.args_to_matching_eager([x], eager_ctx)
  flat_inputs = [x]
  op_attrs = ("T", attr_t)
  outputs = _execute.execute(b"Lgamma", 1, inputs=flat_inputs, attrs=op_attrs,
                             ctx=eager_ctx, name=name)
  _execute.record_gradient("Lgamma", flat_inputs, op_attrs, outputs, name)
  (out,) = outputs
  return out
def lin_space_eager_fallback(start, stop, num, name=None, ctx=None):
  r"""Eager-mode slow path for the LinSpace op.

  Invoked when the fast C++ execution path raises a fallback exception.
  """
  eager_ctx = ctx or _context.context()
  attr_t, matched = _execute.args_to_matching_eager([start, stop], eager_ctx)
  start, stop = matched
  # `num` has its own type attribute, defaulting to int32.
  attr_tidx, (num,) = _execute.args_to_matching_eager([num], eager_ctx,
                                                      _dtypes.int32)
  flat_inputs = [start, stop, num]
  op_attrs = ("T", attr_t, "Tidx", attr_tidx)
  outputs = _execute.execute(b"LinSpace", 1, inputs=flat_inputs,
                             attrs=op_attrs, ctx=eager_ctx, name=name)
  _execute.record_gradient("LinSpace", flat_inputs, op_attrs, outputs, name)
  (out,) = outputs
  return out
def log_eager_fallback(x, name=None, ctx=None):
  r"""Eager-mode slow path for the Log op.

  Invoked when the fast C++ execution path raises a fallback exception.
  """
  eager_ctx = ctx or _context.context()
  attr_t, (x,) = _execute.args_to_matching_eager([x], eager_ctx)
  flat_inputs = [x]
  op_attrs = ("T", attr_t)
  outputs = _execute.execute(b"Log", 1, inputs=flat_inputs, attrs=op_attrs,
                             ctx=eager_ctx, name=name)
  _execute.record_gradient("Log", flat_inputs, op_attrs, outputs, name)
  (out,) = outputs
  return out
def log1p_eager_fallback(x, name=None, ctx=None):
  r"""Eager-mode slow path for the Log1p op.

  Invoked when the fast C++ execution path raises a fallback exception.
  """
  eager_ctx = ctx or _context.context()
  attr_t, (x,) = _execute.args_to_matching_eager([x], eager_ctx)
  flat_inputs = [x]
  op_attrs = ("T", attr_t)
  outputs = _execute.execute(b"Log1p", 1, inputs=flat_inputs, attrs=op_attrs,
                             ctx=eager_ctx, name=name)
  _execute.record_gradient("Log1p", flat_inputs, op_attrs, outputs, name)
  (out,) = outputs
  return out
def logical_and_eager_fallback(x, y, name=None, ctx=None):
  r"""Eager-mode slow path for the LogicalAnd op.

  Invoked when the fast C++ execution path raises a fallback exception.
  """
  eager_ctx = ctx or _context.context()
  # Inputs are fixed at bool, so no type attribute is matched.
  x = _ops.convert_to_tensor(x, _dtypes.bool)
  y = _ops.convert_to_tensor(y, _dtypes.bool)
  flat_inputs = [x, y]
  outputs = _execute.execute(b"LogicalAnd", 1, inputs=flat_inputs, attrs=None,
                             ctx=eager_ctx, name=name)
  _execute.record_gradient("LogicalAnd", flat_inputs, None, outputs, name)
  (out,) = outputs
  return out
def logical_not_eager_fallback(x, name=None, ctx=None):
  r"""Eager-mode slow path for the LogicalNot op.

  Invoked when the fast C++ execution path raises a fallback exception.
  """
  eager_ctx = ctx or _context.context()
  # Input is fixed at bool, so no type attribute is matched.
  x = _ops.convert_to_tensor(x, _dtypes.bool)
  flat_inputs = [x]
  outputs = _execute.execute(b"LogicalNot", 1, inputs=flat_inputs, attrs=None,
                             ctx=eager_ctx, name=name)
  _execute.record_gradient("LogicalNot", flat_inputs, None, outputs, name)
  (out,) = outputs
  return out
def logical_or_eager_fallback(x, y, name=None, ctx=None):
  r"""Eager-mode slow path for the LogicalOr op.

  Invoked when the fast C++ execution path raises a fallback exception.
  """
  eager_ctx = ctx or _context.context()
  # Inputs are fixed at bool, so no type attribute is matched.
  x = _ops.convert_to_tensor(x, _dtypes.bool)
  y = _ops.convert_to_tensor(y, _dtypes.bool)
  flat_inputs = [x, y]
  outputs = _execute.execute(b"LogicalOr", 1, inputs=flat_inputs, attrs=None,
                             ctx=eager_ctx, name=name)
  _execute.record_gradient("LogicalOr", flat_inputs, None, outputs, name)
  (out,) = outputs
  return out
def mat_mul(a, b, transpose_a=False, transpose_b=False, name=None):
  r"""Multiply the matrix "a" by the matrix "b".
  The inputs must be two-dimensional matrices and the inner dimension of
  "a" (after being transposed if transpose_a is true) must match the
  outer dimension of "b" (after being transposed if transposed_b is
  true).
  *Note*: The default kernel implementation for MatMul on GPUs uses
  cublas.
  Args:
    a: A `Tensor`. Must be one of the following types: `bfloat16`, `half`, `float32`, `float64`, `int32`, `complex64`, `complex128`.
    b: A `Tensor`. Must have the same type as `a`.
    transpose_a: An optional `bool`. Defaults to `False`.
      If true, "a" is transposed before multiplication.
    transpose_b: An optional `bool`. Defaults to `False`.
      If true, "b" is transposed before multiplication.
    name: A name for the operation (optional).
  Returns:
    A `Tensor`. Has the same type as `a`.
  """
  _ctx = _context._context
  # Graph mode (or eager context not yet initialized): build a MatMul node
  # via the op-def library and record it for gradient computation.
  if _ctx is None or not _ctx._eager_context.is_eager:
    if transpose_a is None:
      transpose_a = False
    transpose_a = _execute.make_bool(transpose_a, "transpose_a")
    if transpose_b is None:
      transpose_b = False
    transpose_b = _execute.make_bool(transpose_b, "transpose_b")
    _, _, _op = _op_def_lib._apply_op_helper(
        "MatMul", a=a, b=b, transpose_a=transpose_a, transpose_b=transpose_b,
        name=name)
    _result = _op.outputs[:]
    _inputs_flat = _op.inputs
    _attrs = ("transpose_a", _op.get_attr("transpose_a"), "transpose_b",
              _op.get_attr("transpose_b"), "T", _op.get_attr("T"))
    _execute.record_gradient(
        "MatMul", _inputs_flat, _attrs, _result, name)
    _result, = _result
    return _result
  else:
    # Eager mode: try the fast C++ execution path first; drop to the Python
    # slow path only if it signals a fallback.
    try:
      _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
        _ctx._context_handle, _ctx._eager_context.device_name, "MatMul", name,
        _ctx._post_execution_callbacks, a, b, "transpose_a", transpose_a,
        "transpose_b", transpose_b)
      return _result
    except _core._FallbackException:
      return mat_mul_eager_fallback(
          a, b, transpose_a=transpose_a, transpose_b=transpose_b, name=name,
          ctx=_ctx)
    except _core._NotOkStatusException as e:
      # Append the op name to the error message for easier debugging, then
      # re-raise as the mapped TF error type without chained context.
      if name is not None:
        message = e.message + " name: " + name
      else:
        message = e.message
      _six.raise_from(_core._status_to_exception(e.code, message), None)
def mat_mul_eager_fallback(a, b, transpose_a=False, transpose_b=False, name=None, ctx=None):
  r"""Eager-mode slow path for the MatMul op.

  Invoked when the fast C++ execution path raises a fallback exception.
  """
  eager_ctx = ctx or _context.context()
  transpose_a = _execute.make_bool(
      False if transpose_a is None else transpose_a, "transpose_a")
  transpose_b = _execute.make_bool(
      False if transpose_b is None else transpose_b, "transpose_b")
  attr_t, matched = _execute.args_to_matching_eager([a, b], eager_ctx)
  a, b = matched
  flat_inputs = [a, b]
  op_attrs = ("transpose_a", transpose_a, "transpose_b", transpose_b, "T",
              attr_t)
  outputs = _execute.execute(b"MatMul", 1, inputs=flat_inputs, attrs=op_attrs,
                             ctx=eager_ctx, name=name)
  _execute.record_gradient("MatMul", flat_inputs, op_attrs, outputs, name)
  (out,) = outputs
  return out
def _max(input, axis, keep_dims=False, name=None):
  r"""Computes the maximum of elements across dimensions of a tensor.
  Reduces `input` along the dimensions given in `axis`. Unless
  `keep_dims` is true, the rank of the tensor is reduced by 1 for each entry in
  `axis`. If `keep_dims` is true, the reduced dimensions are
  retained with length 1.
  Args:
    input: A `Tensor`. Must be one of the following types: `float32`, `float64`, `int32`, `uint8`, `int16`, `int8`, `complex64`, `int64`, `qint8`, `quint8`, `qint32`, `bfloat16`, `uint16`, `complex128`, `half`, `uint32`, `uint64`.
      The tensor to reduce.
    axis: A `Tensor`. Must be one of the following types: `int32`, `int64`.
      The dimensions to reduce. Must be in the range
      `[-rank(input), rank(input))`.
    keep_dims: An optional `bool`. Defaults to `False`.
      If true, retain reduced dimensions with length 1.
    name: A name for the operation (optional).
  Returns:
    A `Tensor`. Has the same type as `input`.
  """
  _ctx = _context._context
  # Graph mode (or eager context not yet initialized): build a Max node via
  # the op-def library and record it for gradient computation.
  if _ctx is None or not _ctx._eager_context.is_eager:
    if keep_dims is None:
      keep_dims = False
    keep_dims = _execute.make_bool(keep_dims, "keep_dims")
    _, _, _op = _op_def_lib._apply_op_helper(
        "Max", input=input, reduction_indices=axis, keep_dims=keep_dims,
        name=name)
    _result = _op.outputs[:]
    _inputs_flat = _op.inputs
    _attrs = ("keep_dims", _op.get_attr("keep_dims"), "T", _op.get_attr("T"),
              "Tidx", _op.get_attr("Tidx"))
    _execute.record_gradient(
        "Max", _inputs_flat, _attrs, _result, name)
    _result, = _result
    return _result
  else:
    # Eager mode: try the fast C++ execution path first; drop to the Python
    # slow path only if it signals a fallback.
    try:
      _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
        _ctx._context_handle, _ctx._eager_context.device_name, "Max", name,
        _ctx._post_execution_callbacks, input, axis, "keep_dims", keep_dims)
      return _result
    except _core._FallbackException:
      return _max_eager_fallback(
          input, axis, keep_dims=keep_dims, name=name, ctx=_ctx)
    except _core._NotOkStatusException as e:
      # Append the op name to the error message for easier debugging, then
      # re-raise as the mapped TF error type without chained context.
      if name is not None:
        message = e.message + " name: " + name
      else:
        message = e.message
      _six.raise_from(_core._status_to_exception(e.code, message), None)
def _max_eager_fallback(input, axis, keep_dims=False, name=None, ctx=None):
  r"""Eager-mode slow path for the Max op.

  Invoked when the fast C++ execution path raises a fallback exception.
  """
  eager_ctx = ctx or _context.context()
  keep_dims = _execute.make_bool(
      False if keep_dims is None else keep_dims, "keep_dims")
  attr_t, (input,) = _execute.args_to_matching_eager([input], eager_ctx)
  attr_tidx, (axis,) = _execute.args_to_matching_eager([axis], eager_ctx,
                                                       _dtypes.int32)
  flat_inputs = [input, axis]
  op_attrs = ("keep_dims", keep_dims, "T", attr_t, "Tidx", attr_tidx)
  outputs = _execute.execute(b"Max", 1, inputs=flat_inputs, attrs=op_attrs,
                             ctx=eager_ctx, name=name)
  _execute.record_gradient("Max", flat_inputs, op_attrs, outputs, name)
  (out,) = outputs
  return out
def maximum_eager_fallback(x, y, name=None, ctx=None):
  r"""Eager-mode slow path for the Maximum op.

  Invoked when the fast C++ execution path raises a fallback exception.
  """
  eager_ctx = ctx or _context.context()
  attr_t, matched = _execute.args_to_matching_eager([x, y], eager_ctx)
  x, y = matched
  flat_inputs = [x, y]
  op_attrs = ("T", attr_t)
  outputs = _execute.execute(b"Maximum", 1, inputs=flat_inputs,
                             attrs=op_attrs, ctx=eager_ctx, name=name)
  _execute.record_gradient("Maximum", flat_inputs, op_attrs, outputs, name)
  (out,) = outputs
  return out
def mean(input, axis, keep_dims=False, name=None):
  r"""Computes the mean of elements across dimensions of a tensor.
  Reduces `input` along the dimensions given in `axis`. Unless
  `keep_dims` is true, the rank of the tensor is reduced by 1 for each entry in
  `axis`. If `keep_dims` is true, the reduced dimensions are
  retained with length 1.
  Args:
    input: A `Tensor`. Must be one of the following types: `float32`, `float64`, `int32`, `uint8`, `int16`, `int8`, `complex64`, `int64`, `qint8`, `quint8`, `qint32`, `bfloat16`, `uint16`, `complex128`, `half`, `uint32`, `uint64`.
      The tensor to reduce.
    axis: A `Tensor`. Must be one of the following types: `int32`, `int64`.
      The dimensions to reduce. Must be in the range
      `[-rank(input), rank(input))`.
    keep_dims: An optional `bool`. Defaults to `False`.
      If true, retain reduced dimensions with length 1.
    name: A name for the operation (optional).
  Returns:
    A `Tensor`. Has the same type as `input`.
  """
  _ctx = _context._context
  # Graph mode (or eager context not yet initialized): build a Mean node via
  # the op-def library and record it for gradient computation.
  if _ctx is None or not _ctx._eager_context.is_eager:
    if keep_dims is None:
      keep_dims = False
    keep_dims = _execute.make_bool(keep_dims, "keep_dims")
    _, _, _op = _op_def_lib._apply_op_helper(
        "Mean", input=input, reduction_indices=axis, keep_dims=keep_dims,
        name=name)
    _result = _op.outputs[:]
    _inputs_flat = _op.inputs
    _attrs = ("keep_dims", _op.get_attr("keep_dims"), "T", _op.get_attr("T"),
              "Tidx", _op.get_attr("Tidx"))
    _execute.record_gradient(
        "Mean", _inputs_flat, _attrs, _result, name)
    _result, = _result
    return _result
  else:
    # Eager mode: try the fast C++ execution path first; drop to the Python
    # slow path only if it signals a fallback.
    try:
      _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
        _ctx._context_handle, _ctx._eager_context.device_name, "Mean", name,
        _ctx._post_execution_callbacks, input, axis, "keep_dims", keep_dims)
      return _result
    except _core._FallbackException:
      return mean_eager_fallback(
          input, axis, keep_dims=keep_dims, name=name, ctx=_ctx)
    except _core._NotOkStatusException as e:
      # Append the op name to the error message for easier debugging, then
      # re-raise as the mapped TF error type without chained context.
      if name is not None:
        message = e.message + " name: " + name
      else:
        message = e.message
      _six.raise_from(_core._status_to_exception(e.code, message), None)
def mean_eager_fallback(input, axis, keep_dims=False, name=None, ctx=None):
  r"""Eager-mode slow path for the Mean op.

  Invoked when the fast C++ execution path raises a fallback exception.
  """
  eager_ctx = ctx or _context.context()
  keep_dims = _execute.make_bool(
      False if keep_dims is None else keep_dims, "keep_dims")
  attr_t, (input,) = _execute.args_to_matching_eager([input], eager_ctx)
  attr_tidx, (axis,) = _execute.args_to_matching_eager([axis], eager_ctx,
                                                       _dtypes.int32)
  flat_inputs = [input, axis]
  op_attrs = ("keep_dims", keep_dims, "T", attr_t, "Tidx", attr_tidx)
  outputs = _execute.execute(b"Mean", 1, inputs=flat_inputs, attrs=op_attrs,
                             ctx=eager_ctx, name=name)
  _execute.record_gradient("Mean", flat_inputs, op_attrs, outputs, name)
  (out,) = outputs
  return out
def _min(input, axis, keep_dims=False, name=None):
  r"""Computes the minimum of elements across dimensions of a tensor.
  Reduces `input` along the dimensions given in `axis`. Unless
  `keep_dims` is true, the rank of the tensor is reduced by 1 for each entry in
  `axis`. If `keep_dims` is true, the reduced dimensions are
  retained with length 1.
  Args:
    input: A `Tensor`. Must be one of the following types: `float32`, `float64`, `int32`, `uint8`, `int16`, `int8`, `complex64`, `int64`, `qint8`, `quint8`, `qint32`, `bfloat16`, `uint16`, `complex128`, `half`, `uint32`, `uint64`.
      The tensor to reduce.
    axis: A `Tensor`. Must be one of the following types: `int32`, `int64`.
      The dimensions to reduce. Must be in the range
      `[-rank(input), rank(input))`.
    keep_dims: An optional `bool`. Defaults to `False`.
      If true, retain reduced dimensions with length 1.
    name: A name for the operation (optional).
  Returns:
    A `Tensor`. Has the same type as `input`.
  """
  _ctx = _context._context
  # Graph mode (or eager context not yet initialized): build a Min node via
  # the op-def library and record it for gradient computation.
  if _ctx is None or not _ctx._eager_context.is_eager:
    if keep_dims is None:
      keep_dims = False
    keep_dims = _execute.make_bool(keep_dims, "keep_dims")
    _, _, _op = _op_def_lib._apply_op_helper(
        "Min", input=input, reduction_indices=axis, keep_dims=keep_dims,
        name=name)
    _result = _op.outputs[:]
    _inputs_flat = _op.inputs
    _attrs = ("keep_dims", _op.get_attr("keep_dims"), "T", _op.get_attr("T"),
              "Tidx", _op.get_attr("Tidx"))
    _execute.record_gradient(
        "Min", _inputs_flat, _attrs, _result, name)
    _result, = _result
    return _result
  else:
    # Eager mode: try the fast C++ execution path first; drop to the Python
    # slow path only if it signals a fallback.
    try:
      _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
        _ctx._context_handle, _ctx._eager_context.device_name, "Min", name,
        _ctx._post_execution_callbacks, input, axis, "keep_dims", keep_dims)
      return _result
    except _core._FallbackException:
      return _min_eager_fallback(
          input, axis, keep_dims=keep_dims, name=name, ctx=_ctx)
    except _core._NotOkStatusException as e:
      # Append the op name to the error message for easier debugging, then
      # re-raise as the mapped TF error type without chained context.
      if name is not None:
        message = e.message + " name: " + name
      else:
        message = e.message
      _six.raise_from(_core._status_to_exception(e.code, message), None)
def _min_eager_fallback(input, axis, keep_dims=False, name=None, ctx=None):
  r"""Eager-mode slow path for the Min op.

  Invoked when the fast C++ execution path raises a fallback exception.
  """
  eager_ctx = ctx or _context.context()
  keep_dims = _execute.make_bool(
      False if keep_dims is None else keep_dims, "keep_dims")
  attr_t, (input,) = _execute.args_to_matching_eager([input], eager_ctx)
  attr_tidx, (axis,) = _execute.args_to_matching_eager([axis], eager_ctx,
                                                       _dtypes.int32)
  flat_inputs = [input, axis]
  op_attrs = ("keep_dims", keep_dims, "T", attr_t, "Tidx", attr_tidx)
  outputs = _execute.execute(b"Min", 1, inputs=flat_inputs, attrs=op_attrs,
                             ctx=eager_ctx, name=name)
  _execute.record_gradient("Min", flat_inputs, op_attrs, outputs, name)
  (out,) = outputs
  return out
def minimum_eager_fallback(x, y, name=None, ctx=None):
  r"""Eager-mode slow path for the Minimum op.

  Invoked when the fast C++ execution path raises a fallback exception.
  """
  eager_ctx = ctx or _context.context()
  attr_t, matched = _execute.args_to_matching_eager([x, y], eager_ctx)
  x, y = matched
  flat_inputs = [x, y]
  op_attrs = ("T", attr_t)
  outputs = _execute.execute(b"Minimum", 1, inputs=flat_inputs,
                             attrs=op_attrs, ctx=eager_ctx, name=name)
  _execute.record_gradient("Minimum", flat_inputs, op_attrs, outputs, name)
  (out,) = outputs
  return out
def mod(x, y, name=None):
  r"""Returns element-wise remainder of division. This emulates C semantics in that
  the result here is consistent with a truncating divide. E.g.
  `tf.truncatediv(x, y) * y + truncate_mod(x, y) = x`.
  *NOTE*: `Mod` supports broadcasting. More about broadcasting
  [here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html)
  Args:
    x: A `Tensor`. Must be one of the following types: `int32`, `int64`, `half`, `bfloat16`, `float32`, `float64`.
    y: A `Tensor`. Must have the same type as `x`.
    name: A name for the operation (optional).
  Returns:
    A `Tensor`. Has the same type as `x`.
  """
  _ctx = _context._context
  # Graph mode (or eager context not yet initialized): build a Mod node via
  # the op-def library and record it for gradient computation.
  if _ctx is None or not _ctx._eager_context.is_eager:
    _, _, _op = _op_def_lib._apply_op_helper(
        "Mod", x=x, y=y, name=name)
    _result = _op.outputs[:]
    _inputs_flat = _op.inputs
    _attrs = ("T", _op.get_attr("T"))
    _execute.record_gradient(
        "Mod", _inputs_flat, _attrs, _result, name)
    _result, = _result
    return _result
  else:
    # Eager mode: try the fast C++ execution path first; drop to the Python
    # slow path only if it signals a fallback.
    try:
      _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
        _ctx._context_handle, _ctx._eager_context.device_name, "Mod", name,
        _ctx._post_execution_callbacks, x, y)
      return _result
    except _core._FallbackException:
      return mod_eager_fallback(
          x, y, name=name, ctx=_ctx)
    except _core._NotOkStatusException as e:
      # Append the op name to the error message for easier debugging, then
      # re-raise as the mapped TF error type without chained context.
      if name is not None:
        message = e.message + " name: " + name
      else:
        message = e.message
      _six.raise_from(_core._status_to_exception(e.code, message), None)
def mod_eager_fallback(x, y, name=None, ctx=None):
  r"""Eager-mode slow path for the Mod op.

  Invoked when the fast C++ execution path raises a fallback exception.
  """
  eager_ctx = ctx or _context.context()
  attr_t, matched = _execute.args_to_matching_eager([x, y], eager_ctx)
  x, y = matched
  flat_inputs = [x, y]
  op_attrs = ("T", attr_t)
  outputs = _execute.execute(b"Mod", 1, inputs=flat_inputs, attrs=op_attrs,
                             ctx=eager_ctx, name=name)
  _execute.record_gradient("Mod", flat_inputs, op_attrs, outputs, name)
  (out,) = outputs
  return out
def mul(x, y, name=None):
  r"""Returns x * y element-wise.
  *NOTE*: `Multiply` supports broadcasting. More about broadcasting
  [here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html)
  Args:
    x: A `Tensor`. Must be one of the following types: `bfloat16`, `half`, `float32`, `float64`, `uint8`, `int8`, `uint16`, `int16`, `int32`, `int64`, `complex64`, `complex128`.
    y: A `Tensor`. Must have the same type as `x`.
    name: A name for the operation (optional).
  Returns:
    A `Tensor`. Has the same type as `x`.
  """
  _ctx = _context._context
  # Graph mode (or eager context not yet initialized): build a Mul node via
  # the op-def library and record it for gradient computation.
  if _ctx is None or not _ctx._eager_context.is_eager:
    _, _, _op = _op_def_lib._apply_op_helper(
        "Mul", x=x, y=y, name=name)
    _result = _op.outputs[:]
    _inputs_flat = _op.inputs
    _attrs = ("T", _op.get_attr("T"))
    _execute.record_gradient(
        "Mul", _inputs_flat, _attrs, _result, name)
    _result, = _result
    return _result
  else:
    # Eager mode: try the fast C++ execution path first; drop to the Python
    # slow path only if it signals a fallback.
    try:
      _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
        _ctx._context_handle, _ctx._eager_context.device_name, "Mul", name,
        _ctx._post_execution_callbacks, x, y)
      return _result
    except _core._FallbackException:
      return mul_eager_fallback(
          x, y, name=name, ctx=_ctx)
    except _core._NotOkStatusException as e:
      # Append the op name to the error message for easier debugging, then
      # re-raise as the mapped TF error type without chained context.
      if name is not None:
        message = e.message + " name: " + name
      else:
        message = e.message
      _six.raise_from(_core._status_to_exception(e.code, message), None)
def mul_eager_fallback(x, y, name=None, ctx=None):
  r"""Eager-mode slow path for the Mul op.

  Invoked when the fast C++ execution path raises a fallback exception.
  """
  eager_ctx = ctx or _context.context()
  attr_t, matched = _execute.args_to_matching_eager([x, y], eager_ctx)
  x, y = matched
  flat_inputs = [x, y]
  op_attrs = ("T", attr_t)
  outputs = _execute.execute(b"Mul", 1, inputs=flat_inputs, attrs=op_attrs,
                             ctx=eager_ctx, name=name)
  _execute.record_gradient("Mul", flat_inputs, op_attrs, outputs, name)
  (out,) = outputs
  return out
def neg(x, name=None):
  r"""Computes numerical negative value element-wise.
  I.e., \\(y = -x\\).
  Args:
    x: A `Tensor`. Must be one of the following types: `bfloat16`, `half`, `float32`, `float64`, `int32`, `int64`, `complex64`, `complex128`.
    name: A name for the operation (optional).
  Returns:
    A `Tensor`. Has the same type as `x`.
  """
  _ctx = _context._context
  # Graph mode (or eager context not yet initialized): build a Neg node via
  # the op-def library and record it for gradient computation.
  if _ctx is None or not _ctx._eager_context.is_eager:
    _, _, _op = _op_def_lib._apply_op_helper(
        "Neg", x=x, name=name)
    _result = _op.outputs[:]
    _inputs_flat = _op.inputs
    _attrs = ("T", _op.get_attr("T"))
    _execute.record_gradient(
        "Neg", _inputs_flat, _attrs, _result, name)
    _result, = _result
    return _result
  else:
    # Eager mode: try the fast C++ execution path first; drop to the Python
    # slow path only if it signals a fallback.
    try:
      _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
        _ctx._context_handle, _ctx._eager_context.device_name, "Neg", name,
        _ctx._post_execution_callbacks, x)
      return _result
    except _core._FallbackException:
      return neg_eager_fallback(
          x, name=name, ctx=_ctx)
    except _core._NotOkStatusException as e:
      # Append the op name to the error message for easier debugging, then
      # re-raise as the mapped TF error type without chained context.
      if name is not None:
        message = e.message + " name: " + name
      else:
        message = e.message
      _six.raise_from(_core._status_to_exception(e.code, message), None)
def neg_eager_fallback(x, name=None, ctx=None):
  r"""Eager-mode slow path for the Neg op.

  Invoked when the fast C++ execution path raises a fallback exception.
  """
  eager_ctx = ctx or _context.context()
  attr_t, (x,) = _execute.args_to_matching_eager([x], eager_ctx)
  flat_inputs = [x]
  op_attrs = ("T", attr_t)
  outputs = _execute.execute(b"Neg", 1, inputs=flat_inputs, attrs=op_attrs,
                             ctx=eager_ctx, name=name)
  _execute.record_gradient("Neg", flat_inputs, op_attrs, outputs, name)
  (out,) = outputs
  return out
def not_equal_eager_fallback(x, y, name=None, ctx=None):
  r"""Eager-mode slow path for the NotEqual op.

  Invoked when the fast C++ execution path raises a fallback exception.
  """
  eager_ctx = ctx or _context.context()
  attr_t, matched = _execute.args_to_matching_eager([x, y], eager_ctx)
  x, y = matched
  flat_inputs = [x, y]
  op_attrs = ("T", attr_t)
  outputs = _execute.execute(b"NotEqual", 1, inputs=flat_inputs,
                             attrs=op_attrs, ctx=eager_ctx, name=name)
  _execute.record_gradient("NotEqual", flat_inputs, op_attrs, outputs, name)
  (out,) = outputs
  return out
def polygamma_eager_fallback(a, x, name=None, ctx=None):
  r"""Eager-mode slow path for the Polygamma op.

  Invoked when the fast C++ execution path raises a fallback exception.
  """
  eager_ctx = ctx or _context.context()
  attr_t, matched = _execute.args_to_matching_eager([a, x], eager_ctx)
  a, x = matched
  flat_inputs = [a, x]
  op_attrs = ("T", attr_t)
  outputs = _execute.execute(b"Polygamma", 1, inputs=flat_inputs,
                             attrs=op_attrs, ctx=eager_ctx, name=name)
  _execute.record_gradient("Polygamma", flat_inputs, op_attrs, outputs, name)
  (out,) = outputs
  return out
def _pow(x, y, name=None):
  r"""Computes the power of one value to another.
  Given a tensor `x` and a tensor `y`, this operation computes \\(x^y\\) for
  corresponding elements in `x` and `y`. For example:
  ```
  # tensor 'x' is [[2, 2], [3, 3]]
  # tensor 'y' is [[8, 16], [2, 3]]
  tf.pow(x, y) ==> [[256, 65536], [9, 27]]
  ```
  Args:
    x: A `Tensor`. Must be one of the following types: `bfloat16`, `float32`, `half`, `float64`, `int32`, `int64`, `complex64`, `complex128`.
    y: A `Tensor`. Must have the same type as `x`.
    name: A name for the operation (optional).
  Returns:
    A `Tensor`. Has the same type as `x`.
  """
  _ctx = _context._context
  # Graph mode (or eager context not yet initialized): build a Pow node via
  # the op-def library and record it for gradient computation.
  if _ctx is None or not _ctx._eager_context.is_eager:
    _, _, _op = _op_def_lib._apply_op_helper(
        "Pow", x=x, y=y, name=name)
    _result = _op.outputs[:]
    _inputs_flat = _op.inputs
    _attrs = ("T", _op.get_attr("T"))
    _execute.record_gradient(
        "Pow", _inputs_flat, _attrs, _result, name)
    _result, = _result
    return _result
  else:
    # Eager mode: try the fast C++ execution path first; drop to the Python
    # slow path only if it signals a fallback.
    try:
      _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
        _ctx._context_handle, _ctx._eager_context.device_name, "Pow", name,
        _ctx._post_execution_callbacks, x, y)
      return _result
    except _core._FallbackException:
      return _pow_eager_fallback(
          x, y, name=name, ctx=_ctx)
    except _core._NotOkStatusException as e:
      # Append the op name to the error message for easier debugging, then
      # re-raise as the mapped TF error type without chained context.
      if name is not None:
        message = e.message + " name: " + name
      else:
        message = e.message
      _six.raise_from(_core._status_to_exception(e.code, message), None)
def _pow_eager_fallback(x, y, name=None, ctx=None):
  r"""Eager-mode slow path for the Pow op.

  Invoked when the fast C++ execution path raises a fallback exception.
  """
  eager_ctx = ctx or _context.context()
  attr_t, matched = _execute.args_to_matching_eager([x, y], eager_ctx)
  x, y = matched
  flat_inputs = [x, y]
  op_attrs = ("T", attr_t)
  outputs = _execute.execute(b"Pow", 1, inputs=flat_inputs, attrs=op_attrs,
                             ctx=eager_ctx, name=name)
  _execute.record_gradient("Pow", flat_inputs, op_attrs, outputs, name)
  (out,) = outputs
  return out
def prod(input, axis, keep_dims=False, name=None):
  r"""Computes the product of elements across dimensions of a tensor.
  Reduces `input` along the dimensions given in `axis`. Unless
  `keep_dims` is true, the rank of the tensor is reduced by 1 for each entry in
  `axis`. If `keep_dims` is true, the reduced dimensions are
  retained with length 1.
  Args:
    input: A `Tensor`. Must be one of the following types: `float32`, `float64`, `int32`, `uint8`, `int16`, `int8`, `complex64`, `int64`, `qint8`, `quint8`, `qint32`, `bfloat16`, `uint16`, `complex128`, `half`, `uint32`, `uint64`.
      The tensor to reduce.
    axis: A `Tensor`. Must be one of the following types: `int32`, `int64`.
      The dimensions to reduce. Must be in the range
      `[-rank(input), rank(input))`.
    keep_dims: An optional `bool`. Defaults to `False`.
      If true, retain reduced dimensions with length 1.
    name: A name for the operation (optional).
  Returns:
    A `Tensor`. Has the same type as `input`.
  """
  _ctx = _context._context
  # Graph mode (or eager context not yet initialized): build a Prod node via
  # the op-def library and record it for gradient computation.
  if _ctx is None or not _ctx._eager_context.is_eager:
    if keep_dims is None:
      keep_dims = False
    keep_dims = _execute.make_bool(keep_dims, "keep_dims")
    _, _, _op = _op_def_lib._apply_op_helper(
        "Prod", input=input, reduction_indices=axis, keep_dims=keep_dims,
        name=name)
    _result = _op.outputs[:]
    _inputs_flat = _op.inputs
    _attrs = ("keep_dims", _op.get_attr("keep_dims"), "T", _op.get_attr("T"),
              "Tidx", _op.get_attr("Tidx"))
    _execute.record_gradient(
        "Prod", _inputs_flat, _attrs, _result, name)
    _result, = _result
    return _result
  else:
    # Eager mode: try the fast C++ execution path first; drop to the Python
    # slow path only if it signals a fallback.
    try:
      _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
        _ctx._context_handle, _ctx._eager_context.device_name, "Prod", name,
        _ctx._post_execution_callbacks, input, axis, "keep_dims", keep_dims)
      return _result
    except _core._FallbackException:
      return prod_eager_fallback(
          input, axis, keep_dims=keep_dims, name=name, ctx=_ctx)
    except _core._NotOkStatusException as e:
      # Append the op name to the error message for easier debugging, then
      # re-raise as the mapped TF error type without chained context.
      if name is not None:
        message = e.message + " name: " + name
      else:
        message = e.message
      _six.raise_from(_core._status_to_exception(e.code, message), None)
def prod_eager_fallback(input, axis, keep_dims=False, name=None, ctx=None):
  r"""Eager-mode slow path for the Prod op.

  Invoked when the fast C++ execution path raises a fallback exception.
  """
  eager_ctx = ctx or _context.context()
  keep_dims = _execute.make_bool(
      False if keep_dims is None else keep_dims, "keep_dims")
  attr_t, (input,) = _execute.args_to_matching_eager([input], eager_ctx)
  attr_tidx, (axis,) = _execute.args_to_matching_eager([axis], eager_ctx,
                                                       _dtypes.int32)
  flat_inputs = [input, axis]
  op_attrs = ("keep_dims", keep_dims, "T", attr_t, "Tidx", attr_tidx)
  outputs = _execute.execute(b"Prod", 1, inputs=flat_inputs, attrs=op_attrs,
                             ctx=eager_ctx, name=name)
  _execute.record_gradient("Prod", flat_inputs, op_attrs, outputs, name)
  (out,) = outputs
  return out
# Named output structure for the QuantizeDownAndShrinkRange op: the
# requantized tensor plus the float min/max its quantized range represents.
_quantize_down_and_shrink_range_outputs = ["output", "output_min",
                                          "output_max"]
_QuantizeDownAndShrinkRangeOutput = _collections.namedtuple(
    "QuantizeDownAndShrinkRange", _quantize_down_and_shrink_range_outputs)
def quantize_down_and_shrink_range(input, input_min, input_max, out_type, name=None):
  r"""Convert the quantized 'input' tensor into a lower-precision 'output', using the
  actual distribution of the values to maximize the usage of the lower bit depth
  and adjusting the output min and max ranges accordingly.
  [input_min, input_max] are scalar floats that specify the range for the float
  interpretation of the 'input' data. For example, if input_min is -1.0f and
  input_max is 1.0f, and we are dealing with quint16 quantized data, then a 0
  value in the 16-bit data should be interpreted as -1.0f, and a 65535 means 1.0f.
  This operator tries to squeeze as much precision as possible into an output with
  a lower bit depth by calculating the actual min and max values found in the
  data. For example, maybe that quint16 input has no values lower than 16,384 and
  none higher than 49,152. That means only half the range is actually needed, all
  the float interpretations are between -0.5f and 0.5f, so if we want to compress
  the data into a quint8 output, we can use that range rather than the theoretical
  -1.0f to 1.0f that is suggested by the input min and max.
  In practice, this is most useful for taking output from operations like
  QuantizedMatMul that can produce higher bit-depth outputs than their inputs and
  may have large potential output ranges, but in practice have a distribution of
  input values that only uses a small fraction of the possible range. By feeding
  that output into this operator, we can reduce it from 32 bits down to 8 with
  minimal loss of accuracy.
  Args:
    input: A `Tensor`. Must be one of the following types: `qint8`, `quint8`, `qint32`, `qint16`, `quint16`.
    input_min: A `Tensor` of type `float32`.
      The float value that the minimum quantized input value represents.
    input_max: A `Tensor` of type `float32`.
      The float value that the maximum quantized input value represents.
    out_type: A `tf.DType` from: `tf.qint8, tf.quint8, tf.qint32, tf.qint16, tf.quint16`.
      The type of the output. Should be a lower bit depth than Tinput.
    name: A name for the operation (optional).
  Returns:
    A tuple of `Tensor` objects (output, output_min, output_max).
    output: A `Tensor` of type `out_type`.
    output_min: A `Tensor` of type `float32`.
    output_max: A `Tensor` of type `float32`.
  """
  _ctx = _context._context
  # Graph mode (or eager context not yet initialized): build the op node via
  # the op-def library and record it for gradient computation.
  if _ctx is None or not _ctx._eager_context.is_eager:
    out_type = _execute.make_type(out_type, "out_type")
    _, _, _op = _op_def_lib._apply_op_helper(
        "QuantizeDownAndShrinkRange", input=input, input_min=input_min,
        input_max=input_max, out_type=out_type, name=name)
    _result = _op.outputs[:]
    _inputs_flat = _op.inputs
    _attrs = ("Tinput", _op.get_attr("Tinput"), "out_type",
              _op.get_attr("out_type"))
    _execute.record_gradient(
        "QuantizeDownAndShrinkRange", _inputs_flat, _attrs, _result, name)
    # Wrap the three raw outputs in the named tuple for attribute access.
    _result = _QuantizeDownAndShrinkRangeOutput._make(_result)
    return _result
  else:
    # Eager mode: try the fast C++ execution path first; drop to the Python
    # slow path only if it signals a fallback.
    try:
      _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
        _ctx._context_handle, _ctx._eager_context.device_name,
        "QuantizeDownAndShrinkRange", name, _ctx._post_execution_callbacks,
        input, input_min, input_max, "out_type", out_type)
      _result = _QuantizeDownAndShrinkRangeOutput._make(_result)
      return _result
    except _core._FallbackException:
      return quantize_down_and_shrink_range_eager_fallback(
          input, input_min, input_max, out_type=out_type, name=name, ctx=_ctx)
    except _core._NotOkStatusException as e:
      # Append the op name to the error message for easier debugging, then
      # re-raise as the mapped TF error type without chained context.
      if name is not None:
        message = e.message + " name: " + name
      else:
        message = e.message
      _six.raise_from(_core._status_to_exception(e.code, message), None)
def quantize_down_and_shrink_range_eager_fallback(input, input_min, input_max, out_type, name=None, ctx=None):
  r"""Slow-path eager dispatcher for quantize_down_and_shrink_range.

  Manually flattens the inputs, resolves the op attrs, and runs the op
  through the generic execute machinery instead of the C fast path.
  """
  eager_ctx = ctx or _context.context()
  out_type = _execute.make_type(out_type, "out_type")
  # Infer the Tinput attr from the (possibly non-Tensor) input argument.
  tinput_attr, (input,) = _execute.args_to_matching_eager([input], eager_ctx)
  input_min = _ops.convert_to_tensor(input_min, _dtypes.float32)
  input_max = _ops.convert_to_tensor(input_max, _dtypes.float32)
  flat_inputs = [input, input_min, input_max]
  op_attrs = ("Tinput", tinput_attr, "out_type", out_type)
  outputs = _execute.execute(b"QuantizeDownAndShrinkRange", 3,
                             inputs=flat_inputs, attrs=op_attrs,
                             ctx=eager_ctx, name=name)
  _execute.record_gradient(
      "QuantizeDownAndShrinkRange", flat_inputs, op_attrs, outputs, name)
  return _QuantizeDownAndShrinkRangeOutput._make(outputs)
# Output names and result container for the QuantizedAdd op: (z, min_z, max_z).
_quantized_add_outputs = ["z", "min_z", "max_z"]
_QuantizedAddOutput = _collections.namedtuple(
    "QuantizedAdd", _quantized_add_outputs)
def quantized_add(x, y, min_x, max_x, min_y, max_y, Toutput=_dtypes.qint32, name=None):
  r"""Returns x + y element-wise, working on quantized buffers.
  Args:
    x: A `Tensor`. Must be one of the following types: `qint8`, `quint8`, `qint32`, `qint16`, `quint16`.
    y: A `Tensor`. Must be one of the following types: `qint8`, `quint8`, `qint32`, `qint16`, `quint16`.
    min_x: A `Tensor` of type `float32`.
      The float value that the lowest quantized `x` value represents.
    max_x: A `Tensor` of type `float32`.
      The float value that the highest quantized `x` value represents.
    min_y: A `Tensor` of type `float32`.
      The float value that the lowest quantized `y` value represents.
    max_y: A `Tensor` of type `float32`.
      The float value that the highest quantized `y` value represents.
    Toutput: An optional `tf.DType` from: `tf.qint8, tf.quint8, tf.qint32, tf.qint16, tf.quint16`. Defaults to `tf.qint32`.
    name: A name for the operation (optional).
  Returns:
    A tuple of `Tensor` objects (z, min_z, max_z).
    z: A `Tensor` of type `Toutput`.
    min_z: A `Tensor` of type `float32`.
    max_z: A `Tensor` of type `float32`.
  """
  _ctx = _context._context
  if _ctx is None or not _ctx._eager_context.is_eager:
    # Graph mode (no active eager context): build a graph op through the
    # op-def library and register the symbolic gradient.
    if Toutput is None:
      Toutput = _dtypes.qint32
    Toutput = _execute.make_type(Toutput, "Toutput")
    _, _, _op = _op_def_lib._apply_op_helper(
        "QuantizedAdd", x=x, y=y, min_x=min_x, max_x=max_x, min_y=min_y,
        max_y=max_y, Toutput=Toutput, name=name)
    _result = _op.outputs[:]
    _inputs_flat = _op.inputs
    _attrs = ("T1", _op.get_attr("T1"), "T2", _op.get_attr("T2"), "Toutput",
              _op.get_attr("Toutput"))
    _execute.record_gradient(
      "QuantizedAdd", _inputs_flat, _attrs, _result, name)
    _result = _QuantizedAddOutput._make(_result)
    return _result
  else:
    try:
      # Eager fast path: dispatch directly into the C++ runtime.
      _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
        _ctx._context_handle, _ctx._eager_context.device_name, "QuantizedAdd",
        name, _ctx._post_execution_callbacks, x, y, min_x, max_x, min_y,
        max_y, "Toutput", Toutput)
      _result = _QuantizedAddOutput._make(_result)
      return _result
    except _core._FallbackException:
      # The fast path rejected the arguments (e.g. non-Tensor inputs);
      # retry through the slower Python dispatcher.
      return quantized_add_eager_fallback(
          x, y, min_x, max_x, min_y, max_y, Toutput=Toutput, name=name,
          ctx=_ctx)
    except _core._NotOkStatusException as e:
      # Surface the C++ error status as the matching Python exception,
      # tagging on the op name when one was supplied.
      if name is not None:
        message = e.message + " name: " + name
      else:
        message = e.message
      _six.raise_from(_core._status_to_exception(e.code, message), None)
def quantized_add_eager_fallback(x, y, min_x, max_x, min_y, max_y, Toutput=_dtypes.qint32, name=None, ctx=None):
  r"""Slow-path eager dispatcher for quantized_add.

  Resolves attrs and converts inputs by hand, then executes the op through
  the generic execute machinery.
  """
  eager_ctx = ctx or _context.context()
  Toutput = _execute.make_type(
      _dtypes.qint32 if Toutput is None else Toutput, "Toutput")
  t1_attr, (x,) = _execute.args_to_matching_eager([x], eager_ctx)
  t2_attr, (y,) = _execute.args_to_matching_eager([y], eager_ctx)
  min_x = _ops.convert_to_tensor(min_x, _dtypes.float32)
  max_x = _ops.convert_to_tensor(max_x, _dtypes.float32)
  min_y = _ops.convert_to_tensor(min_y, _dtypes.float32)
  max_y = _ops.convert_to_tensor(max_y, _dtypes.float32)
  flat_inputs = [x, y, min_x, max_x, min_y, max_y]
  op_attrs = ("T1", t1_attr, "T2", t2_attr, "Toutput", Toutput)
  outputs = _execute.execute(b"QuantizedAdd", 3, inputs=flat_inputs,
                             attrs=op_attrs, ctx=eager_ctx, name=name)
  _execute.record_gradient("QuantizedAdd", flat_inputs, op_attrs, outputs,
                           name)
  return _QuantizedAddOutput._make(outputs)
# Output names and result container for QuantizedMatMul: (out, min_out, max_out).
_quantized_mat_mul_outputs = ["out", "min_out", "max_out"]
_QuantizedMatMulOutput = _collections.namedtuple(
    "QuantizedMatMul", _quantized_mat_mul_outputs)
def quantized_mat_mul(a, b, min_a, max_a, min_b, max_b, Toutput=_dtypes.qint32, transpose_a=False, transpose_b=False, Tactivation=_dtypes.quint8, name=None):
  r"""Perform a quantized matrix multiplication of `a` by the matrix `b`.
  The inputs must be two-dimensional matrices and the inner dimension of
  `a` (after being transposed if `transpose_a` is non-zero) must match the
  outer dimension of `b` (after being transposed if `transposed_b` is
  non-zero).
  Args:
    a: A `Tensor`. Must be one of the following types: `qint8`, `quint8`, `qint32`, `qint16`, `quint16`.
      Must be a two-dimensional tensor.
    b: A `Tensor`. Must be one of the following types: `qint8`, `quint8`, `qint32`, `qint16`, `quint16`.
      Must be a two-dimensional tensor.
    min_a: A `Tensor` of type `float32`.
      The float value that the lowest quantized `a` value represents.
    max_a: A `Tensor` of type `float32`.
      The float value that the highest quantized `a` value represents.
    min_b: A `Tensor` of type `float32`.
      The float value that the lowest quantized `b` value represents.
    max_b: A `Tensor` of type `float32`.
      The float value that the highest quantized `b` value represents.
    Toutput: An optional `tf.DType` from: `tf.qint8, tf.quint8, tf.qint32, tf.qint16, tf.quint16`. Defaults to `tf.qint32`.
    transpose_a: An optional `bool`. Defaults to `False`.
      If true, `a` is transposed before multiplication.
    transpose_b: An optional `bool`. Defaults to `False`.
      If true, `b` is transposed before multiplication.
    Tactivation: An optional `tf.DType` from: `tf.qint8, tf.quint8, tf.qint32, tf.qint16, tf.quint16`. Defaults to `tf.quint8`.
      The type of output produced by activation function
      following this operation.
    name: A name for the operation (optional).
  Returns:
    A tuple of `Tensor` objects (out, min_out, max_out).
    out: A `Tensor` of type `Toutput`.
    min_out: A `Tensor` of type `float32`.
    max_out: A `Tensor` of type `float32`.
  """
  _ctx = _context._context
  if _ctx is None or not _ctx._eager_context.is_eager:
    # Graph mode (no active eager context): normalize the optional attrs,
    # then build a graph op and register the symbolic gradient.
    if Toutput is None:
      Toutput = _dtypes.qint32
    Toutput = _execute.make_type(Toutput, "Toutput")
    if transpose_a is None:
      transpose_a = False
    transpose_a = _execute.make_bool(transpose_a, "transpose_a")
    if transpose_b is None:
      transpose_b = False
    transpose_b = _execute.make_bool(transpose_b, "transpose_b")
    if Tactivation is None:
      Tactivation = _dtypes.quint8
    Tactivation = _execute.make_type(Tactivation, "Tactivation")
    _, _, _op = _op_def_lib._apply_op_helper(
        "QuantizedMatMul", a=a, b=b, min_a=min_a, max_a=max_a, min_b=min_b,
        max_b=max_b, Toutput=Toutput, transpose_a=transpose_a,
        transpose_b=transpose_b, Tactivation=Tactivation, name=name)
    _result = _op.outputs[:]
    _inputs_flat = _op.inputs
    _attrs = ("T1", _op.get_attr("T1"), "T2", _op.get_attr("T2"), "Toutput",
              _op.get_attr("Toutput"), "transpose_a",
              _op.get_attr("transpose_a"), "transpose_b",
              _op.get_attr("transpose_b"), "Tactivation",
              _op.get_attr("Tactivation"))
    _execute.record_gradient(
      "QuantizedMatMul", _inputs_flat, _attrs, _result, name)
    _result = _QuantizedMatMulOutput._make(_result)
    return _result
  else:
    try:
      # Eager fast path: dispatch directly into the C++ runtime.
      _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
        _ctx._context_handle, _ctx._eager_context.device_name,
        "QuantizedMatMul", name, _ctx._post_execution_callbacks, a, b, min_a,
        max_a, min_b, max_b, "Toutput", Toutput, "transpose_a", transpose_a,
        "transpose_b", transpose_b, "Tactivation", Tactivation)
      _result = _QuantizedMatMulOutput._make(_result)
      return _result
    except _core._FallbackException:
      # The fast path rejected the arguments (e.g. non-Tensor inputs);
      # retry through the slower Python dispatcher.
      return quantized_mat_mul_eager_fallback(
          a, b, min_a, max_a, min_b, max_b, Toutput=Toutput,
          transpose_a=transpose_a, transpose_b=transpose_b,
          Tactivation=Tactivation, name=name, ctx=_ctx)
    except _core._NotOkStatusException as e:
      # Surface the C++ error status as the matching Python exception,
      # tagging on the op name when one was supplied.
      if name is not None:
        message = e.message + " name: " + name
      else:
        message = e.message
      _six.raise_from(_core._status_to_exception(e.code, message), None)
def quantized_mat_mul_eager_fallback(a, b, min_a, max_a, min_b, max_b, Toutput=_dtypes.qint32, transpose_a=False, transpose_b=False, Tactivation=_dtypes.quint8, name=None, ctx=None):
  r"""Slow-path eager dispatcher for quantized_mat_mul.

  Normalizes all optional attrs, converts the inputs by hand, and executes
  the op through the generic execute machinery.
  """
  eager_ctx = ctx or _context.context()
  Toutput = _execute.make_type(
      _dtypes.qint32 if Toutput is None else Toutput, "Toutput")
  transpose_a = _execute.make_bool(
      False if transpose_a is None else transpose_a, "transpose_a")
  transpose_b = _execute.make_bool(
      False if transpose_b is None else transpose_b, "transpose_b")
  Tactivation = _execute.make_type(
      _dtypes.quint8 if Tactivation is None else Tactivation, "Tactivation")
  t1_attr, (a,) = _execute.args_to_matching_eager([a], eager_ctx)
  t2_attr, (b,) = _execute.args_to_matching_eager([b], eager_ctx)
  min_a = _ops.convert_to_tensor(min_a, _dtypes.float32)
  max_a = _ops.convert_to_tensor(max_a, _dtypes.float32)
  min_b = _ops.convert_to_tensor(min_b, _dtypes.float32)
  max_b = _ops.convert_to_tensor(max_b, _dtypes.float32)
  flat_inputs = [a, b, min_a, max_a, min_b, max_b]
  op_attrs = ("T1", t1_attr, "T2", t2_attr, "Toutput", Toutput,
              "transpose_a", transpose_a, "transpose_b", transpose_b,
              "Tactivation", Tactivation)
  outputs = _execute.execute(b"QuantizedMatMul", 3, inputs=flat_inputs,
                             attrs=op_attrs, ctx=eager_ctx, name=name)
  _execute.record_gradient("QuantizedMatMul", flat_inputs, op_attrs, outputs,
                           name)
  return _QuantizedMatMulOutput._make(outputs)
# Output names and result container for the QuantizedMul op: (z, min_z, max_z).
_quantized_mul_outputs = ["z", "min_z", "max_z"]
_QuantizedMulOutput = _collections.namedtuple(
    "QuantizedMul", _quantized_mul_outputs)
def quantized_mul(x, y, min_x, max_x, min_y, max_y, Toutput=_dtypes.qint32, name=None):
  r"""Returns x * y element-wise, working on quantized buffers.
  Args:
    x: A `Tensor`. Must be one of the following types: `qint8`, `quint8`, `qint32`, `qint16`, `quint16`.
    y: A `Tensor`. Must be one of the following types: `qint8`, `quint8`, `qint32`, `qint16`, `quint16`.
    min_x: A `Tensor` of type `float32`.
      The float value that the lowest quantized `x` value represents.
    max_x: A `Tensor` of type `float32`.
      The float value that the highest quantized `x` value represents.
    min_y: A `Tensor` of type `float32`.
      The float value that the lowest quantized `y` value represents.
    max_y: A `Tensor` of type `float32`.
      The float value that the highest quantized `y` value represents.
    Toutput: An optional `tf.DType` from: `tf.qint8, tf.quint8, tf.qint32, tf.qint16, tf.quint16`. Defaults to `tf.qint32`.
    name: A name for the operation (optional).
  Returns:
    A tuple of `Tensor` objects (z, min_z, max_z).
    z: A `Tensor` of type `Toutput`.
    min_z: A `Tensor` of type `float32`.
    max_z: A `Tensor` of type `float32`.
  """
  _ctx = _context._context
  if _ctx is None or not _ctx._eager_context.is_eager:
    # Graph mode (no active eager context): build a graph op through the
    # op-def library and register the symbolic gradient.
    if Toutput is None:
      Toutput = _dtypes.qint32
    Toutput = _execute.make_type(Toutput, "Toutput")
    _, _, _op = _op_def_lib._apply_op_helper(
        "QuantizedMul", x=x, y=y, min_x=min_x, max_x=max_x, min_y=min_y,
        max_y=max_y, Toutput=Toutput, name=name)
    _result = _op.outputs[:]
    _inputs_flat = _op.inputs
    _attrs = ("T1", _op.get_attr("T1"), "T2", _op.get_attr("T2"), "Toutput",
              _op.get_attr("Toutput"))
    _execute.record_gradient(
      "QuantizedMul", _inputs_flat, _attrs, _result, name)
    _result = _QuantizedMulOutput._make(_result)
    return _result
  else:
    try:
      # Eager fast path: dispatch directly into the C++ runtime.
      _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
        _ctx._context_handle, _ctx._eager_context.device_name, "QuantizedMul",
        name, _ctx._post_execution_callbacks, x, y, min_x, max_x, min_y,
        max_y, "Toutput", Toutput)
      _result = _QuantizedMulOutput._make(_result)
      return _result
    except _core._FallbackException:
      # The fast path rejected the arguments (e.g. non-Tensor inputs);
      # retry through the slower Python dispatcher.
      return quantized_mul_eager_fallback(
          x, y, min_x, max_x, min_y, max_y, Toutput=Toutput, name=name,
          ctx=_ctx)
    except _core._NotOkStatusException as e:
      # Surface the C++ error status as the matching Python exception,
      # tagging on the op name when one was supplied.
      if name is not None:
        message = e.message + " name: " + name
      else:
        message = e.message
      _six.raise_from(_core._status_to_exception(e.code, message), None)
def quantized_mul_eager_fallback(x, y, min_x, max_x, min_y, max_y, Toutput=_dtypes.qint32, name=None, ctx=None):
  r"""Slow-path eager dispatcher for quantized_mul.

  Resolves attrs and converts inputs by hand, then executes the op through
  the generic execute machinery.
  """
  eager_ctx = ctx or _context.context()
  Toutput = _execute.make_type(
      _dtypes.qint32 if Toutput is None else Toutput, "Toutput")
  t1_attr, (x,) = _execute.args_to_matching_eager([x], eager_ctx)
  t2_attr, (y,) = _execute.args_to_matching_eager([y], eager_ctx)
  min_x = _ops.convert_to_tensor(min_x, _dtypes.float32)
  max_x = _ops.convert_to_tensor(max_x, _dtypes.float32)
  min_y = _ops.convert_to_tensor(min_y, _dtypes.float32)
  max_y = _ops.convert_to_tensor(max_y, _dtypes.float32)
  flat_inputs = [x, y, min_x, max_x, min_y, max_y]
  op_attrs = ("T1", t1_attr, "T2", t2_attr, "Toutput", Toutput)
  outputs = _execute.execute(b"QuantizedMul", 3, inputs=flat_inputs,
                             attrs=op_attrs, ctx=eager_ctx, name=name)
  _execute.record_gradient("QuantizedMul", flat_inputs, op_attrs, outputs,
                           name)
  return _QuantizedMulOutput._make(outputs)
def _range(start, limit, delta, name=None):
  r"""Creates a sequence of numbers.
  This operation creates a sequence of numbers that begins at `start` and
  extends by increments of `delta` up to but not including `limit`.
  For example:
  ```
  # 'start' is 3
  # 'limit' is 18
  # 'delta' is 3
  tf.range(start, limit, delta) ==> [3, 6, 9, 12, 15]
  ```
  Args:
    start: A `Tensor`. Must be one of the following types: `bfloat16`, `float32`, `float64`, `int32`, `int64`.
      0-D (scalar). First entry in the sequence.
    limit: A `Tensor`. Must have the same type as `start`.
      0-D (scalar). Upper limit of sequence, exclusive.
    delta: A `Tensor`. Must have the same type as `start`.
      0-D (scalar). Optional. Default is 1. Number that increments `start`.
    name: A name for the operation (optional).
  Returns:
    A `Tensor`. Has the same type as `start`.
  """
  _ctx = _context._context
  if _ctx is None or not _ctx._eager_context.is_eager:
    # Graph mode (no active eager context): build a graph op through the
    # op-def library and register the symbolic gradient.
    _, _, _op = _op_def_lib._apply_op_helper(
        "Range", start=start, limit=limit, delta=delta, name=name)
    _result = _op.outputs[:]
    _inputs_flat = _op.inputs
    _attrs = ("Tidx", _op.get_attr("Tidx"))
    _execute.record_gradient(
      "Range", _inputs_flat, _attrs, _result, name)
    _result, = _result
    return _result
  else:
    try:
      # Eager fast path: dispatch directly into the C++ runtime.
      _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
        _ctx._context_handle, _ctx._eager_context.device_name, "Range", name,
        _ctx._post_execution_callbacks, start, limit, delta)
      return _result
    except _core._FallbackException:
      # The fast path rejected the arguments (e.g. non-Tensor inputs);
      # retry through the slower Python dispatcher.
      return _range_eager_fallback(
          start, limit, delta, name=name, ctx=_ctx)
    except _core._NotOkStatusException as e:
      # Surface the C++ error status as the matching Python exception,
      # tagging on the op name when one was supplied.
      if name is not None:
        message = e.message + " name: " + name
      else:
        message = e.message
      _six.raise_from(_core._status_to_exception(e.code, message), None)
def _range_eager_fallback(start, limit, delta, name=None, ctx=None):
  r"""Slow-path eager dispatcher for _range.

  Coerces start/limit/delta to one matching dtype (defaulting to int32)
  and executes the op through the generic execute machinery.
  """
  eager_ctx = ctx or _context.context()
  tidx_attr, matched = _execute.args_to_matching_eager(
      [start, limit, delta], eager_ctx, _dtypes.int32)
  start, limit, delta = matched
  flat_inputs = [start, limit, delta]
  op_attrs = ("Tidx", tidx_attr)
  outputs = _execute.execute(b"Range", 1, inputs=flat_inputs, attrs=op_attrs,
                             ctx=eager_ctx, name=name)
  _execute.record_gradient("Range", flat_inputs, op_attrs, outputs, name)
  (result,) = outputs
  return result
def real(input, Tout=_dtypes.float32, name=None):
  r"""Returns the real part of a complex number.
  Given a tensor `input` of complex numbers, this operation returns a tensor of
  type `float` that is the real part of each element in `input`. All elements in
  `input` must be complex numbers of the form \\(a + bj\\), where *a* is the real
  part returned by this operation and *b* is the imaginary part.
  For example:
  ```
  # tensor 'input' is [-2.25 + 4.75j, 3.25 + 5.75j]
  tf.real(input) ==> [-2.25, 3.25]
  ```
  Args:
    input: A `Tensor`. Must be one of the following types: `complex64`, `complex128`.
    Tout: An optional `tf.DType` from: `tf.float32, tf.float64`. Defaults to `tf.float32`.
    name: A name for the operation (optional).
  Returns:
    A `Tensor` of type `Tout`.
  """
  _ctx = _context._context
  if _ctx is None or not _ctx._eager_context.is_eager:
    # Graph mode (no active eager context): build a graph op through the
    # op-def library and register the symbolic gradient.
    if Tout is None:
      Tout = _dtypes.float32
    Tout = _execute.make_type(Tout, "Tout")
    _, _, _op = _op_def_lib._apply_op_helper(
        "Real", input=input, Tout=Tout, name=name)
    _result = _op.outputs[:]
    _inputs_flat = _op.inputs
    _attrs = ("T", _op.get_attr("T"), "Tout", _op.get_attr("Tout"))
    _execute.record_gradient(
      "Real", _inputs_flat, _attrs, _result, name)
    _result, = _result
    return _result
  else:
    try:
      # Eager fast path: dispatch directly into the C++ runtime.
      _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
        _ctx._context_handle, _ctx._eager_context.device_name, "Real", name,
        _ctx._post_execution_callbacks, input, "Tout", Tout)
      return _result
    except _core._FallbackException:
      # The fast path rejected the arguments (e.g. non-Tensor inputs);
      # retry through the slower Python dispatcher.
      return real_eager_fallback(
          input, Tout=Tout, name=name, ctx=_ctx)
    except _core._NotOkStatusException as e:
      # Surface the C++ error status as the matching Python exception,
      # tagging on the op name when one was supplied.
      if name is not None:
        message = e.message + " name: " + name
      else:
        message = e.message
      _six.raise_from(_core._status_to_exception(e.code, message), None)
def real_eager_fallback(input, Tout=_dtypes.float32, name=None, ctx=None):
  r"""Slow-path eager dispatcher for real.

  Resolves the Tout attr, coerces the input (defaulting to complex64), and
  executes the op through the generic execute machinery.
  """
  eager_ctx = ctx or _context.context()
  Tout = _execute.make_type(
      _dtypes.float32 if Tout is None else Tout, "Tout")
  t_attr, (input,) = _execute.args_to_matching_eager(
      [input], eager_ctx, _dtypes.complex64)
  flat_inputs = [input]
  op_attrs = ("T", t_attr, "Tout", Tout)
  outputs = _execute.execute(b"Real", 1, inputs=flat_inputs, attrs=op_attrs,
                             ctx=eager_ctx, name=name)
  _execute.record_gradient("Real", flat_inputs, op_attrs, outputs, name)
  (result,) = outputs
  return result
def real_div(x, y, name=None):
  r"""Returns x / y element-wise for real types.
  If `x` and `y` are reals, this will return the floating-point division.
  *NOTE*: `Div` supports broadcasting. More about broadcasting
  [here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html)
  Args:
    x: A `Tensor`. Must be one of the following types: `bfloat16`, `half`, `float32`, `float64`, `uint8`, `int8`, `uint16`, `int16`, `int32`, `int64`, `complex64`, `complex128`.
    y: A `Tensor`. Must have the same type as `x`.
    name: A name for the operation (optional).
  Returns:
    A `Tensor`. Has the same type as `x`.
  """
  _ctx = _context._context
  if _ctx is None or not _ctx._eager_context.is_eager:
    # Graph mode (no active eager context): build a graph op through the
    # op-def library and register the symbolic gradient.
    _, _, _op = _op_def_lib._apply_op_helper(
        "RealDiv", x=x, y=y, name=name)
    _result = _op.outputs[:]
    _inputs_flat = _op.inputs
    _attrs = ("T", _op.get_attr("T"))
    _execute.record_gradient(
      "RealDiv", _inputs_flat, _attrs, _result, name)
    _result, = _result
    return _result
  else:
    try:
      # Eager fast path: dispatch directly into the C++ runtime.
      _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
        _ctx._context_handle, _ctx._eager_context.device_name, "RealDiv",
        name, _ctx._post_execution_callbacks, x, y)
      return _result
    except _core._FallbackException:
      # The fast path rejected the arguments (e.g. non-Tensor inputs);
      # retry through the slower Python dispatcher.
      return real_div_eager_fallback(
          x, y, name=name, ctx=_ctx)
    except _core._NotOkStatusException as e:
      # Surface the C++ error status as the matching Python exception,
      # tagging on the op name when one was supplied.
      if name is not None:
        message = e.message + " name: " + name
      else:
        message = e.message
      _six.raise_from(_core._status_to_exception(e.code, message), None)
def real_div_eager_fallback(x, y, name=None, ctx=None):
  r"""Slow-path eager dispatcher for real_div.

  Coerces x and y to one matching dtype and executes the op through the
  generic execute machinery.
  """
  eager_ctx = ctx or _context.context()
  t_attr, matched = _execute.args_to_matching_eager([x, y], eager_ctx)
  x, y = matched
  flat_inputs = [x, y]
  op_attrs = ("T", t_attr)
  outputs = _execute.execute(b"RealDiv", 1, inputs=flat_inputs,
                             attrs=op_attrs, ctx=eager_ctx, name=name)
  _execute.record_gradient("RealDiv", flat_inputs, op_attrs, outputs, name)
  (result,) = outputs
  return result
def reciprocal_eager_fallback(x, name=None, ctx=None):
  r"""Slow-path eager dispatcher for reciprocal.

  Coerces x to a tensor, resolves its dtype attr, and executes the op
  through the generic execute machinery.
  """
  eager_ctx = ctx or _context.context()
  t_attr, (x,) = _execute.args_to_matching_eager([x], eager_ctx)
  flat_inputs = [x]
  op_attrs = ("T", t_attr)
  outputs = _execute.execute(b"Reciprocal", 1, inputs=flat_inputs,
                             attrs=op_attrs, ctx=eager_ctx, name=name)
  _execute.record_gradient("Reciprocal", flat_inputs, op_attrs, outputs,
                           name)
  (result,) = outputs
  return result
def reciprocal_grad(y, dy, name=None):
  r"""Computes the gradient for the inverse of `x` wrt its input.
  Specifically, `grad = -dy * y*y`, where `y = 1/x`, and `dy`
  is the corresponding input gradient.
  Args:
    y: A `Tensor`. Must be one of the following types: `bfloat16`, `half`, `float32`, `float64`, `complex64`, `complex128`.
    dy: A `Tensor`. Must have the same type as `y`.
    name: A name for the operation (optional).
  Returns:
    A `Tensor`. Has the same type as `y`.
  """
  _ctx = _context._context
  if _ctx is None or not _ctx._eager_context.is_eager:
    # Graph mode (no active eager context): build a graph op through the
    # op-def library and register the symbolic gradient.
    _, _, _op = _op_def_lib._apply_op_helper(
        "ReciprocalGrad", y=y, dy=dy, name=name)
    _result = _op.outputs[:]
    _inputs_flat = _op.inputs
    _attrs = ("T", _op.get_attr("T"))
    _execute.record_gradient(
      "ReciprocalGrad", _inputs_flat, _attrs, _result, name)
    _result, = _result
    return _result
  else:
    try:
      # Eager fast path: dispatch directly into the C++ runtime.
      _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
        _ctx._context_handle, _ctx._eager_context.device_name,
        "ReciprocalGrad", name, _ctx._post_execution_callbacks, y, dy)
      return _result
    except _core._FallbackException:
      # The fast path rejected the arguments (e.g. non-Tensor inputs);
      # retry through the slower Python dispatcher.
      return reciprocal_grad_eager_fallback(
          y, dy, name=name, ctx=_ctx)
    except _core._NotOkStatusException as e:
      # Surface the C++ error status as the matching Python exception,
      # tagging on the op name when one was supplied.
      if name is not None:
        message = e.message + " name: " + name
      else:
        message = e.message
      _six.raise_from(_core._status_to_exception(e.code, message), None)
def reciprocal_grad_eager_fallback(y, dy, name=None, ctx=None):
  r"""Slow-path eager dispatcher for reciprocal_grad.

  Coerces y and dy to one matching dtype and executes the op through the
  generic execute machinery.
  """
  eager_ctx = ctx or _context.context()
  t_attr, matched = _execute.args_to_matching_eager([y, dy], eager_ctx)
  y, dy = matched
  flat_inputs = [y, dy]
  op_attrs = ("T", t_attr)
  outputs = _execute.execute(b"ReciprocalGrad", 1, inputs=flat_inputs,
                             attrs=op_attrs, ctx=eager_ctx, name=name)
  _execute.record_gradient("ReciprocalGrad", flat_inputs, op_attrs, outputs,
                           name)
  (result,) = outputs
  return result
# Output names and result container for RequantizationRange: (output_min, output_max).
_requantization_range_outputs = ["output_min", "output_max"]
_RequantizationRangeOutput = _collections.namedtuple(
    "RequantizationRange", _requantization_range_outputs)
def requantization_range(input, input_min, input_max, name=None):
  r"""Given a quantized tensor described by (input, input_min, input_max), outputs a
  range that covers the actual values present in that tensor. This op is
  typically used to produce the requested_output_min and requested_output_max for
  Requantize.
  Args:
    input: A `Tensor`. Must be one of the following types: `qint8`, `quint8`, `qint32`, `qint16`, `quint16`.
    input_min: A `Tensor` of type `float32`.
      The float value that the minimum quantized input value represents.
    input_max: A `Tensor` of type `float32`.
      The float value that the maximum quantized input value represents.
    name: A name for the operation (optional).
  Returns:
    A tuple of `Tensor` objects (output_min, output_max).
    output_min: A `Tensor` of type `float32`.
    output_max: A `Tensor` of type `float32`.
  """
  _ctx = _context._context
  if _ctx is None or not _ctx._eager_context.is_eager:
    # Graph mode (no active eager context): build a graph op through the
    # op-def library and register the symbolic gradient.
    _, _, _op = _op_def_lib._apply_op_helper(
        "RequantizationRange", input=input, input_min=input_min,
        input_max=input_max, name=name)
    _result = _op.outputs[:]
    _inputs_flat = _op.inputs
    _attrs = ("Tinput", _op.get_attr("Tinput"))
    _execute.record_gradient(
      "RequantizationRange", _inputs_flat, _attrs, _result, name)
    _result = _RequantizationRangeOutput._make(_result)
    return _result
  else:
    try:
      # Eager fast path: dispatch directly into the C++ runtime.
      _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
        _ctx._context_handle, _ctx._eager_context.device_name,
        "RequantizationRange", name, _ctx._post_execution_callbacks, input,
        input_min, input_max)
      _result = _RequantizationRangeOutput._make(_result)
      return _result
    except _core._FallbackException:
      # The fast path rejected the arguments (e.g. non-Tensor inputs);
      # retry through the slower Python dispatcher.
      return requantization_range_eager_fallback(
          input, input_min, input_max, name=name, ctx=_ctx)
    except _core._NotOkStatusException as e:
      # Surface the C++ error status as the matching Python exception,
      # tagging on the op name when one was supplied.
      if name is not None:
        message = e.message + " name: " + name
      else:
        message = e.message
      _six.raise_from(_core._status_to_exception(e.code, message), None)
def requantization_range_eager_fallback(input, input_min, input_max, name=None, ctx=None):
  r"""Slow-path eager dispatcher for requantization_range.

  Flattens the inputs, resolves the Tinput attr, and executes the op
  through the generic execute machinery.
  """
  eager_ctx = ctx or _context.context()
  tinput_attr, (input,) = _execute.args_to_matching_eager([input], eager_ctx)
  input_min = _ops.convert_to_tensor(input_min, _dtypes.float32)
  input_max = _ops.convert_to_tensor(input_max, _dtypes.float32)
  flat_inputs = [input, input_min, input_max]
  op_attrs = ("Tinput", tinput_attr)
  outputs = _execute.execute(b"RequantizationRange", 2, inputs=flat_inputs,
                             attrs=op_attrs, ctx=eager_ctx, name=name)
  _execute.record_gradient("RequantizationRange", flat_inputs, op_attrs,
                           outputs, name)
  return _RequantizationRangeOutput._make(outputs)
# Output names and result container for Requantize: (output, output_min, output_max).
_requantize_outputs = ["output", "output_min", "output_max"]
_RequantizeOutput = _collections.namedtuple(
    "Requantize", _requantize_outputs)
def requantize(input, input_min, input_max, requested_output_min, requested_output_max, out_type, name=None):
  r"""Convert the quantized 'input' tensor into a lower-precision 'output', using the
  output range specified with 'requested_output_min' and 'requested_output_max'.
  [input_min, input_max] are scalar floats that specify the range for the float
  interpretation of the 'input' data. For example, if input_min is -1.0f and
  input_max is 1.0f, and we are dealing with quint16 quantized data, then a 0
  value in the 16-bit data should be interpreted as -1.0f, and a 65535 means 1.0f.
  Args:
    input: A `Tensor`. Must be one of the following types: `qint8`, `quint8`, `qint32`, `qint16`, `quint16`.
    input_min: A `Tensor` of type `float32`.
      The float value that the minimum quantized input value represents.
    input_max: A `Tensor` of type `float32`.
      The float value that the maximum quantized input value represents.
    requested_output_min: A `Tensor` of type `float32`.
      The float value that the minimum quantized output value represents.
    requested_output_max: A `Tensor` of type `float32`.
      The float value that the maximum quantized output value represents.
    out_type: A `tf.DType` from: `tf.qint8, tf.quint8, tf.qint32, tf.qint16, tf.quint16`.
      The type of the output. Should be a lower bit depth than Tinput.
    name: A name for the operation (optional).
  Returns:
    A tuple of `Tensor` objects (output, output_min, output_max).
    output: A `Tensor` of type `out_type`.
    output_min: A `Tensor` of type `float32`.
    output_max: A `Tensor` of type `float32`.
  """
  _ctx = _context._context
  if _ctx is None or not _ctx._eager_context.is_eager:
    # Graph mode (no active eager context): build a graph op through the
    # op-def library and register the symbolic gradient.
    out_type = _execute.make_type(out_type, "out_type")
    _, _, _op = _op_def_lib._apply_op_helper(
        "Requantize", input=input, input_min=input_min, input_max=input_max,
        requested_output_min=requested_output_min,
        requested_output_max=requested_output_max, out_type=out_type,
        name=name)
    _result = _op.outputs[:]
    _inputs_flat = _op.inputs
    _attrs = ("Tinput", _op.get_attr("Tinput"), "out_type",
              _op.get_attr("out_type"))
    _execute.record_gradient(
      "Requantize", _inputs_flat, _attrs, _result, name)
    _result = _RequantizeOutput._make(_result)
    return _result
  else:
    try:
      # Eager fast path: dispatch directly into the C++ runtime.
      _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
        _ctx._context_handle, _ctx._eager_context.device_name, "Requantize",
        name, _ctx._post_execution_callbacks, input, input_min, input_max,
        requested_output_min, requested_output_max, "out_type", out_type)
      _result = _RequantizeOutput._make(_result)
      return _result
    except _core._FallbackException:
      # The fast path rejected the arguments (e.g. non-Tensor inputs);
      # retry through the slower Python dispatcher.
      return requantize_eager_fallback(
          input, input_min, input_max, requested_output_min,
          requested_output_max, out_type=out_type, name=name, ctx=_ctx)
    except _core._NotOkStatusException as e:
      # Surface the C++ error status as the matching Python exception,
      # tagging on the op name when one was supplied.
      if name is not None:
        message = e.message + " name: " + name
      else:
        message = e.message
      _six.raise_from(_core._status_to_exception(e.code, message), None)
def requantize_eager_fallback(input, input_min, input_max, requested_output_min, requested_output_max, out_type, name=None, ctx=None):
  r"""Slow-path eager dispatcher for requantize.

  Flattens the inputs, resolves the attrs, and executes the op through the
  generic execute machinery.
  """
  eager_ctx = ctx or _context.context()
  out_type = _execute.make_type(out_type, "out_type")
  tinput_attr, (input,) = _execute.args_to_matching_eager([input], eager_ctx)
  input_min = _ops.convert_to_tensor(input_min, _dtypes.float32)
  input_max = _ops.convert_to_tensor(input_max, _dtypes.float32)
  requested_output_min = _ops.convert_to_tensor(
      requested_output_min, _dtypes.float32)
  requested_output_max = _ops.convert_to_tensor(
      requested_output_max, _dtypes.float32)
  flat_inputs = [input, input_min, input_max, requested_output_min,
                 requested_output_max]
  op_attrs = ("Tinput", tinput_attr, "out_type", out_type)
  outputs = _execute.execute(b"Requantize", 3, inputs=flat_inputs,
                             attrs=op_attrs, ctx=eager_ctx, name=name)
  _execute.record_gradient("Requantize", flat_inputs, op_attrs, outputs,
                           name)
  return _RequantizeOutput._make(outputs)
def rint_eager_fallback(x, name=None, ctx=None):
  r"""Slow-path eager dispatcher for rint.

  Coerces x to a tensor, resolves its dtype attr, and executes the op
  through the generic execute machinery.
  """
  eager_ctx = ctx or _context.context()
  t_attr, (x,) = _execute.args_to_matching_eager([x], eager_ctx)
  flat_inputs = [x]
  op_attrs = ("T", t_attr)
  outputs = _execute.execute(b"Rint", 1, inputs=flat_inputs, attrs=op_attrs,
                             ctx=eager_ctx, name=name)
  _execute.record_gradient("Rint", flat_inputs, op_attrs, outputs, name)
  (result,) = outputs
  return result
def round(x, name=None):
  r"""Rounds the values of a tensor to the nearest integer, element-wise.
  Rounds half to even. Also known as bankers rounding. If you want to round
  according to the current system rounding mode use std::rint.
  Args:
    x: A `Tensor`. Must be one of the following types: `bfloat16`, `half`, `float32`, `float64`, `int32`, `int64`, `complex64`, `complex128`.
    name: A name for the operation (optional).
  Returns:
    A `Tensor`. Has the same type as `x`.
  """
  _ctx = _context._context
  if _ctx is None or not _ctx._eager_context.is_eager:
    # Graph mode (no active eager context): build a graph op through the
    # op-def library and register the symbolic gradient.
    _, _, _op = _op_def_lib._apply_op_helper(
        "Round", x=x, name=name)
    _result = _op.outputs[:]
    _inputs_flat = _op.inputs
    _attrs = ("T", _op.get_attr("T"))
    _execute.record_gradient(
      "Round", _inputs_flat, _attrs, _result, name)
    _result, = _result
    return _result
  else:
    try:
      # Eager fast path: dispatch directly into the C++ runtime.
      _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
        _ctx._context_handle, _ctx._eager_context.device_name, "Round", name,
        _ctx._post_execution_callbacks, x)
      return _result
    except _core._FallbackException:
      # The fast path rejected the arguments (e.g. non-Tensor inputs);
      # retry through the slower Python dispatcher.
      return round_eager_fallback(
          x, name=name, ctx=_ctx)
    except _core._NotOkStatusException as e:
      # Surface the C++ error status as the matching Python exception,
      # tagging on the op name when one was supplied.
      if name is not None:
        message = e.message + " name: " + name
      else:
        message = e.message
      _six.raise_from(_core._status_to_exception(e.code, message), None)
def round_eager_fallback(x, name=None, ctx=None):
  r"""Slow-path eager dispatcher for round.

  Coerces x to a tensor, resolves its dtype attr, and executes the op
  through the generic execute machinery.
  """
  eager_ctx = ctx or _context.context()
  t_attr, (x,) = _execute.args_to_matching_eager([x], eager_ctx)
  flat_inputs = [x]
  op_attrs = ("T", t_attr)
  outputs = _execute.execute(b"Round", 1, inputs=flat_inputs, attrs=op_attrs,
                             ctx=eager_ctx, name=name)
  _execute.record_gradient("Round", flat_inputs, op_attrs, outputs, name)
  (result,) = outputs
  return result
def rsqrt_eager_fallback(x, name=None, ctx=None):
  r"""Slow-path eager dispatcher for rsqrt.

  Coerces x to a tensor, resolves its dtype attr, and executes the op
  through the generic execute machinery.
  """
  eager_ctx = ctx or _context.context()
  t_attr, (x,) = _execute.args_to_matching_eager([x], eager_ctx)
  flat_inputs = [x]
  op_attrs = ("T", t_attr)
  outputs = _execute.execute(b"Rsqrt", 1, inputs=flat_inputs, attrs=op_attrs,
                             ctx=eager_ctx, name=name)
  _execute.record_gradient("Rsqrt", flat_inputs, op_attrs, outputs, name)
  (result,) = outputs
  return result
def rsqrt_grad(y, dy, name=None):
  r"""Computes the gradient for the rsqrt of `x` wrt its input.
  Specifically, `grad = dy * -0.5 * y^3`, where `y = rsqrt(x)`, and `dy`
  is the corresponding input gradient.
  Args:
    y: A `Tensor`. Must be one of the following types: `bfloat16`, `half`, `float32`, `float64`, `complex64`, `complex128`.
    dy: A `Tensor`. Must have the same type as `y`.
    name: A name for the operation (optional).
  Returns:
    A `Tensor`. Has the same type as `y`.
  """
  _ctx = _context._context
  if _ctx is None or not _ctx._eager_context.is_eager:
    # Graph mode: register an "RsqrtGrad" node via the op-def library.
    _, _, _op = _op_def_lib._apply_op_helper(
        "RsqrtGrad", y=y, dy=dy, name=name)
    _result = _op.outputs[:]
    _inputs_flat = _op.inputs
    _attrs = ("T", _op.get_attr("T"))
    _execute.record_gradient(
      "RsqrtGrad", _inputs_flat, _attrs, _result, name)
    # The op has exactly one output; unwrap it from the output list.
    _result, = _result
    return _result
  else:
    try:
      # Eager fast path: dispatch directly to the C++ runtime.
      _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
        _ctx._context_handle, _ctx._eager_context.device_name, "RsqrtGrad",
        name, _ctx._post_execution_callbacks, y, dy)
      return _result
    except _core._FallbackException:
      # Fast path rejected the arguments; retry via the Python slow path.
      return rsqrt_grad_eager_fallback(
          y, dy, name=name, ctx=_ctx)
    except _core._NotOkStatusException as e:
      # Convert the C++ status into the matching Python exception.
      if name is not None:
        message = e.message + " name: " + name
      else:
        message = e.message
      _six.raise_from(_core._status_to_exception(e.code, message), None)
def rsqrt_grad_eager_fallback(y, dy, name=None, ctx=None):
  r"""Slow-path eager-mode implementation of `rsqrt_grad`.

  Used when the C++ fast path cannot handle the supplied arguments.
  """
  eager_ctx = ctx if ctx else _context.context()
  # Coerce both inputs to eager tensors of one matching dtype.
  _attr_T, (y, dy) = _execute.args_to_matching_eager([y, dy], eager_ctx)
  flat_inputs = [y, dy]
  op_attrs = ("T", _attr_T)
  out = _execute.execute(b"RsqrtGrad", 1, inputs=flat_inputs,
                         attrs=op_attrs, ctx=eager_ctx, name=name)
  _execute.record_gradient(
      "RsqrtGrad", flat_inputs, op_attrs, out, name)
  # Exactly one output: unwrap it from the list.
  result, = out
  return result
def segment_max_eager_fallback(data, segment_ids, name=None, ctx=None):
  r"""Slow-path eager-mode implementation of `segment_max`.

  Used when the C++ fast path cannot handle the supplied arguments.
  """
  eager_ctx = ctx if ctx else _context.context()
  # Infer the data dtype and the segment-id index dtype separately.
  _attr_T, (data,) = _execute.args_to_matching_eager([data], eager_ctx)
  _attr_Tindices, (segment_ids,) = _execute.args_to_matching_eager(
      [segment_ids], eager_ctx)
  flat_inputs = [data, segment_ids]
  op_attrs = ("T", _attr_T, "Tindices", _attr_Tindices)
  out = _execute.execute(b"SegmentMax", 1, inputs=flat_inputs,
                         attrs=op_attrs, ctx=eager_ctx, name=name)
  _execute.record_gradient(
      "SegmentMax", flat_inputs, op_attrs, out, name)
  # Exactly one output: unwrap it from the list.
  result, = out
  return result
def segment_mean_eager_fallback(data, segment_ids, name=None, ctx=None):
  r"""Slow-path eager-mode implementation of `segment_mean`.

  Used when the C++ fast path cannot handle the supplied arguments.
  """
  eager_ctx = ctx if ctx else _context.context()
  # Infer the data dtype and the segment-id index dtype separately.
  _attr_T, (data,) = _execute.args_to_matching_eager([data], eager_ctx)
  _attr_Tindices, (segment_ids,) = _execute.args_to_matching_eager(
      [segment_ids], eager_ctx)
  flat_inputs = [data, segment_ids]
  op_attrs = ("T", _attr_T, "Tindices", _attr_Tindices)
  out = _execute.execute(b"SegmentMean", 1, inputs=flat_inputs,
                         attrs=op_attrs, ctx=eager_ctx, name=name)
  _execute.record_gradient(
      "SegmentMean", flat_inputs, op_attrs, out, name)
  # Exactly one output: unwrap it from the list.
  result, = out
  return result
def segment_min_eager_fallback(data, segment_ids, name=None, ctx=None):
  r"""Slow-path eager-mode implementation of `segment_min`.

  Used when the C++ fast path cannot handle the supplied arguments.
  """
  eager_ctx = ctx if ctx else _context.context()
  # Infer the data dtype and the segment-id index dtype separately.
  _attr_T, (data,) = _execute.args_to_matching_eager([data], eager_ctx)
  _attr_Tindices, (segment_ids,) = _execute.args_to_matching_eager(
      [segment_ids], eager_ctx)
  flat_inputs = [data, segment_ids]
  op_attrs = ("T", _attr_T, "Tindices", _attr_Tindices)
  out = _execute.execute(b"SegmentMin", 1, inputs=flat_inputs,
                         attrs=op_attrs, ctx=eager_ctx, name=name)
  _execute.record_gradient(
      "SegmentMin", flat_inputs, op_attrs, out, name)
  # Exactly one output: unwrap it from the list.
  result, = out
  return result
def segment_prod_eager_fallback(data, segment_ids, name=None, ctx=None):
  r"""Slow-path eager-mode implementation of `segment_prod`.

  Used when the C++ fast path cannot handle the supplied arguments.
  """
  eager_ctx = ctx if ctx else _context.context()
  # Infer the data dtype and the segment-id index dtype separately.
  _attr_T, (data,) = _execute.args_to_matching_eager([data], eager_ctx)
  _attr_Tindices, (segment_ids,) = _execute.args_to_matching_eager(
      [segment_ids], eager_ctx)
  flat_inputs = [data, segment_ids]
  op_attrs = ("T", _attr_T, "Tindices", _attr_Tindices)
  out = _execute.execute(b"SegmentProd", 1, inputs=flat_inputs,
                         attrs=op_attrs, ctx=eager_ctx, name=name)
  _execute.record_gradient(
      "SegmentProd", flat_inputs, op_attrs, out, name)
  # Exactly one output: unwrap it from the list.
  result, = out
  return result
def segment_sum_eager_fallback(data, segment_ids, name=None, ctx=None):
  r"""Slow-path eager-mode implementation of `segment_sum`.

  Used when the C++ fast path cannot handle the supplied arguments.
  """
  eager_ctx = ctx if ctx else _context.context()
  # Infer the data dtype and the segment-id index dtype separately.
  _attr_T, (data,) = _execute.args_to_matching_eager([data], eager_ctx)
  _attr_Tindices, (segment_ids,) = _execute.args_to_matching_eager(
      [segment_ids], eager_ctx)
  flat_inputs = [data, segment_ids]
  op_attrs = ("T", _attr_T, "Tindices", _attr_Tindices)
  out = _execute.execute(b"SegmentSum", 1, inputs=flat_inputs,
                         attrs=op_attrs, ctx=eager_ctx, name=name)
  _execute.record_gradient(
      "SegmentSum", flat_inputs, op_attrs, out, name)
  # Exactly one output: unwrap it from the list.
  result, = out
  return result
def select(condition, x, y, name=None):
  r"""Selects elements from `x` or `y`, depending on `condition`.
  The `x`, and `y` tensors must all have the same shape, and the
  output will also have that shape.
  The `condition` tensor must be a scalar if `x` and `y` are scalars.
  If `x` and `y` are vectors or higher rank, then `condition` must be either a
  scalar, a vector with size matching the first dimension of `x`, or must have
  the same shape as `x`.
  The `condition` tensor acts as a mask that chooses, based on the value at each
  element, whether the corresponding element / row in the output should be
  taken from `x` (if true) or `y` (if false).
  If `condition` is a vector and `x` and `y` are higher rank matrices, then
  it chooses which row (outer dimension) to copy from `x` and `y`.
  If `condition` has the same shape as `x` and `y`, then it chooses which
  element to copy from `x` and `y`.
  For example:
  ```python
  # 'condition' tensor is [[True, False]
  #                        [False, True]]
  # 't' is [[1, 2],
  #         [3, 4]]
  # 'e' is [[5, 6],
  #         [7, 8]]
  select(condition, t, e)  # => [[1, 6], [7, 4]]
  # 'condition' tensor is [True, False]
  # 't' is [[1, 2],
  #         [3, 4]]
  # 'e' is [[5, 6],
  #         [7, 8]]
  select(condition, t, e) ==> [[1, 2],
                               [7, 8]]
  ```
  Args:
    condition: A `Tensor` of type `bool`.
    x: A `Tensor` which may have the same shape as `condition`.
      If `condition` is rank 1, `x` may have higher rank,
      but its first dimension must match the size of `condition`.
    y: A `Tensor` with the same type and shape as `x`.
    name: A name for the operation (optional).
  Returns:
    A `Tensor`. Has the same type as `x`.
  """
  _ctx = _context._context
  if _ctx is None or not _ctx._eager_context.is_eager:
    # Graph mode: register a "Select" node; the Python args x/y map to the
    # op-def input names t/e.
    _, _, _op = _op_def_lib._apply_op_helper(
        "Select", condition=condition, t=x, e=y, name=name)
    _result = _op.outputs[:]
    _inputs_flat = _op.inputs
    _attrs = ("T", _op.get_attr("T"))
    _execute.record_gradient(
      "Select", _inputs_flat, _attrs, _result, name)
    # The op has exactly one output; unwrap it from the output list.
    _result, = _result
    return _result
  else:
    try:
      # Eager fast path: dispatch directly to the C++ runtime.
      _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
        _ctx._context_handle, _ctx._eager_context.device_name, "Select", name,
        _ctx._post_execution_callbacks, condition, x, y)
      return _result
    except _core._FallbackException:
      # Fast path rejected the arguments; retry via the Python slow path.
      return select_eager_fallback(
          condition, x, y, name=name, ctx=_ctx)
    except _core._NotOkStatusException as e:
      # Convert the C++ status into the matching Python exception.
      if name is not None:
        message = e.message + " name: " + name
      else:
        message = e.message
      _six.raise_from(_core._status_to_exception(e.code, message), None)
def select_eager_fallback(condition, x, y, name=None, ctx=None):
  r"""Slow-path eager-mode implementation of `select`.

  Used when the C++ fast path cannot handle the supplied arguments.
  """
  eager_ctx = ctx if ctx else _context.context()
  # x and y must share one dtype; condition is always bool.
  _attr_T, (x, y) = _execute.args_to_matching_eager([x, y], eager_ctx)
  condition = _ops.convert_to_tensor(condition, _dtypes.bool)
  flat_inputs = [condition, x, y]
  op_attrs = ("T", _attr_T)
  out = _execute.execute(b"Select", 1, inputs=flat_inputs, attrs=op_attrs,
                         ctx=eager_ctx, name=name)
  _execute.record_gradient(
      "Select", flat_inputs, op_attrs, out, name)
  # Exactly one output: unwrap it from the list.
  result, = out
  return result
def sigmoid(x, name=None):
  r"""Computes sigmoid of `x` element-wise.
  Specifically, `y = 1 / (1 + exp(-x))`.
  Args:
    x: A `Tensor`. Must be one of the following types: `bfloat16`, `half`, `float32`, `float64`, `complex64`, `complex128`.
    name: A name for the operation (optional).
  Returns:
    A `Tensor`. Has the same type as `x`.
  """
  _ctx = _context._context
  if _ctx is None or not _ctx._eager_context.is_eager:
    # Graph mode: register a "Sigmoid" node via the op-def library.
    _, _, _op = _op_def_lib._apply_op_helper(
        "Sigmoid", x=x, name=name)
    _result = _op.outputs[:]
    _inputs_flat = _op.inputs
    _attrs = ("T", _op.get_attr("T"))
    _execute.record_gradient(
      "Sigmoid", _inputs_flat, _attrs, _result, name)
    # The op has exactly one output; unwrap it from the output list.
    _result, = _result
    return _result
  else:
    try:
      # Eager fast path: dispatch directly to the C++ runtime.
      _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
        _ctx._context_handle, _ctx._eager_context.device_name, "Sigmoid",
        name, _ctx._post_execution_callbacks, x)
      return _result
    except _core._FallbackException:
      # Fast path rejected the arguments; retry via the Python slow path.
      return sigmoid_eager_fallback(
          x, name=name, ctx=_ctx)
    except _core._NotOkStatusException as e:
      # Convert the C++ status into the matching Python exception.
      if name is not None:
        message = e.message + " name: " + name
      else:
        message = e.message
      _six.raise_from(_core._status_to_exception(e.code, message), None)
def sigmoid_eager_fallback(x, name=None, ctx=None):
  r"""Slow-path eager-mode implementation of `sigmoid`.

  Used when the C++ fast path cannot handle the supplied arguments.
  """
  eager_ctx = ctx if ctx else _context.context()
  # Coerce the input to an eager tensor and infer the dtype attr.
  _attr_T, (x,) = _execute.args_to_matching_eager([x], eager_ctx)
  flat_inputs = [x]
  op_attrs = ("T", _attr_T)
  out = _execute.execute(b"Sigmoid", 1, inputs=flat_inputs, attrs=op_attrs,
                         ctx=eager_ctx, name=name)
  _execute.record_gradient(
      "Sigmoid", flat_inputs, op_attrs, out, name)
  # Exactly one output: unwrap it from the list.
  result, = out
  return result
def sigmoid_grad(y, dy, name=None):
  r"""Computes the gradient of the sigmoid of `x` wrt its input.
  Specifically, `grad = dy * y * (1 - y)`, where `y = sigmoid(x)`, and
  `dy` is the corresponding input gradient.
  Args:
    y: A `Tensor`. Must be one of the following types: `bfloat16`, `half`, `float32`, `float64`, `complex64`, `complex128`.
    dy: A `Tensor`. Must have the same type as `y`.
    name: A name for the operation (optional).
  Returns:
    A `Tensor`. Has the same type as `y`.
  """
  _ctx = _context._context
  if _ctx is None or not _ctx._eager_context.is_eager:
    # Graph mode: register a "SigmoidGrad" node via the op-def library.
    _, _, _op = _op_def_lib._apply_op_helper(
        "SigmoidGrad", y=y, dy=dy, name=name)
    _result = _op.outputs[:]
    _inputs_flat = _op.inputs
    _attrs = ("T", _op.get_attr("T"))
    _execute.record_gradient(
      "SigmoidGrad", _inputs_flat, _attrs, _result, name)
    # The op has exactly one output; unwrap it from the output list.
    _result, = _result
    return _result
  else:
    try:
      # Eager fast path: dispatch directly to the C++ runtime.
      _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
        _ctx._context_handle, _ctx._eager_context.device_name, "SigmoidGrad",
        name, _ctx._post_execution_callbacks, y, dy)
      return _result
    except _core._FallbackException:
      # Fast path rejected the arguments; retry via the Python slow path.
      return sigmoid_grad_eager_fallback(
          y, dy, name=name, ctx=_ctx)
    except _core._NotOkStatusException as e:
      # Convert the C++ status into the matching Python exception.
      if name is not None:
        message = e.message + " name: " + name
      else:
        message = e.message
      _six.raise_from(_core._status_to_exception(e.code, message), None)
def sigmoid_grad_eager_fallback(y, dy, name=None, ctx=None):
  r"""Slow-path eager-mode implementation of `sigmoid_grad`.

  Used when the C++ fast path cannot handle the supplied arguments.
  """
  eager_ctx = ctx if ctx else _context.context()
  # Coerce both inputs to eager tensors of one matching dtype.
  _attr_T, (y, dy) = _execute.args_to_matching_eager([y, dy], eager_ctx)
  flat_inputs = [y, dy]
  op_attrs = ("T", _attr_T)
  out = _execute.execute(b"SigmoidGrad", 1, inputs=flat_inputs,
                         attrs=op_attrs, ctx=eager_ctx, name=name)
  _execute.record_gradient(
      "SigmoidGrad", flat_inputs, op_attrs, out, name)
  # Exactly one output: unwrap it from the list.
  result, = out
  return result
def sign(x, name=None):
  r"""Returns an element-wise indication of the sign of a number.
  `y = sign(x) = -1` if `x < 0`; 0 if `x == 0`; 1 if `x > 0`.
  For complex numbers, `y = sign(x) = x / |x|` if `x != 0`, otherwise `y = 0`.
  Args:
    x: A `Tensor`. Must be one of the following types: `bfloat16`, `half`, `float32`, `float64`, `int32`, `int64`, `complex64`, `complex128`.
    name: A name for the operation (optional).
  Returns:
    A `Tensor`. Has the same type as `x`.
  """
  _ctx = _context._context
  if _ctx is None or not _ctx._eager_context.is_eager:
    # Graph mode: register a "Sign" node via the op-def library.
    _, _, _op = _op_def_lib._apply_op_helper(
        "Sign", x=x, name=name)
    _result = _op.outputs[:]
    _inputs_flat = _op.inputs
    _attrs = ("T", _op.get_attr("T"))
    _execute.record_gradient(
      "Sign", _inputs_flat, _attrs, _result, name)
    # The op has exactly one output; unwrap it from the output list.
    _result, = _result
    return _result
  else:
    try:
      # Eager fast path: dispatch directly to the C++ runtime.
      _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
        _ctx._context_handle, _ctx._eager_context.device_name, "Sign", name,
        _ctx._post_execution_callbacks, x)
      return _result
    except _core._FallbackException:
      # Fast path rejected the arguments; retry via the Python slow path.
      return sign_eager_fallback(
          x, name=name, ctx=_ctx)
    except _core._NotOkStatusException as e:
      # Convert the C++ status into the matching Python exception.
      if name is not None:
        message = e.message + " name: " + name
      else:
        message = e.message
      _six.raise_from(_core._status_to_exception(e.code, message), None)
def sign_eager_fallback(x, name=None, ctx=None):
  r"""Slow-path eager-mode implementation of `sign`.

  Used when the C++ fast path cannot handle the supplied arguments.
  """
  eager_ctx = ctx if ctx else _context.context()
  # Coerce the input to an eager tensor and infer the dtype attr.
  _attr_T, (x,) = _execute.args_to_matching_eager([x], eager_ctx)
  flat_inputs = [x]
  op_attrs = ("T", _attr_T)
  out = _execute.execute(b"Sign", 1, inputs=flat_inputs, attrs=op_attrs,
                         ctx=eager_ctx, name=name)
  _execute.record_gradient(
      "Sign", flat_inputs, op_attrs, out, name)
  # Exactly one output: unwrap it from the list.
  result, = out
  return result
def sin_eager_fallback(x, name=None, ctx=None):
  r"""Slow-path eager-mode implementation of `sin`.

  Used when the C++ fast path cannot handle the supplied arguments.
  """
  eager_ctx = ctx if ctx else _context.context()
  # Coerce the input to an eager tensor and infer the dtype attr.
  _attr_T, (x,) = _execute.args_to_matching_eager([x], eager_ctx)
  flat_inputs = [x]
  op_attrs = ("T", _attr_T)
  out = _execute.execute(b"Sin", 1, inputs=flat_inputs, attrs=op_attrs,
                         ctx=eager_ctx, name=name)
  _execute.record_gradient(
      "Sin", flat_inputs, op_attrs, out, name)
  # Exactly one output: unwrap it from the list.
  result, = out
  return result
def sinh_eager_fallback(x, name=None, ctx=None):
  r"""Slow-path eager-mode implementation of `sinh`.

  Used when the C++ fast path cannot handle the supplied arguments.
  """
  eager_ctx = ctx if ctx else _context.context()
  # Coerce the input to an eager tensor and infer the dtype attr.
  _attr_T, (x,) = _execute.args_to_matching_eager([x], eager_ctx)
  flat_inputs = [x]
  op_attrs = ("T", _attr_T)
  out = _execute.execute(b"Sinh", 1, inputs=flat_inputs, attrs=op_attrs,
                         ctx=eager_ctx, name=name)
  _execute.record_gradient(
      "Sinh", flat_inputs, op_attrs, out, name)
  # Exactly one output: unwrap it from the list.
  result, = out
  return result
def sparse_mat_mul(a, b, transpose_a=False, transpose_b=False, a_is_sparse=False, b_is_sparse=False, name=None):
  r"""Multiply matrix "a" by matrix "b".
  The inputs must be two-dimensional matrices and the inner dimension of "a" must
  match the outer dimension of "b". Both "a" and "b" must be `Tensor`s not
  `SparseTensor`s. This op is optimized for the case where at least one of "a" or
  "b" is sparse, in the sense that they have a large proportion of zero values.
  The breakeven for using this versus a dense matrix multiply on one platform was
  30% zero values in the sparse matrix.
  The gradient computation of this operation will only take advantage of sparsity
  in the input gradient when that gradient comes from a Relu.
  Args:
    a: A `Tensor`. Must be one of the following types: `float32`, `bfloat16`.
    b: A `Tensor`. Must be one of the following types: `float32`, `bfloat16`.
    transpose_a: An optional `bool`. Defaults to `False`.
    transpose_b: An optional `bool`. Defaults to `False`.
    a_is_sparse: An optional `bool`. Defaults to `False`.
    b_is_sparse: An optional `bool`. Defaults to `False`.
    name: A name for the operation (optional).
  Returns:
    A `Tensor` of type `float32`.
  """
  _ctx = _context._context
  if _ctx is None or not _ctx._eager_context.is_eager:
    # Graph mode: normalize each optional flag (None -> False) to a concrete
    # bool attr value before building the op.
    if transpose_a is None:
      transpose_a = False
    transpose_a = _execute.make_bool(transpose_a, "transpose_a")
    if transpose_b is None:
      transpose_b = False
    transpose_b = _execute.make_bool(transpose_b, "transpose_b")
    if a_is_sparse is None:
      a_is_sparse = False
    a_is_sparse = _execute.make_bool(a_is_sparse, "a_is_sparse")
    if b_is_sparse is None:
      b_is_sparse = False
    b_is_sparse = _execute.make_bool(b_is_sparse, "b_is_sparse")
    _, _, _op = _op_def_lib._apply_op_helper(
        "SparseMatMul", a=a, b=b, transpose_a=transpose_a,
        transpose_b=transpose_b, a_is_sparse=a_is_sparse,
        b_is_sparse=b_is_sparse, name=name)
    _result = _op.outputs[:]
    _inputs_flat = _op.inputs
    _attrs = ("transpose_a", _op.get_attr("transpose_a"), "transpose_b",
              _op.get_attr("transpose_b"), "a_is_sparse",
              _op.get_attr("a_is_sparse"), "b_is_sparse",
              _op.get_attr("b_is_sparse"), "Ta", _op.get_attr("Ta"), "Tb",
              _op.get_attr("Tb"))
    _execute.record_gradient(
      "SparseMatMul", _inputs_flat, _attrs, _result, name)
    # The op has exactly one output; unwrap it from the output list.
    _result, = _result
    return _result
  else:
    try:
      # Eager fast path: dispatch directly to the C++ runtime; attrs are
      # passed inline as (name, value) pairs.
      _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
        _ctx._context_handle, _ctx._eager_context.device_name, "SparseMatMul",
        name, _ctx._post_execution_callbacks, a, b, "transpose_a",
        transpose_a, "transpose_b", transpose_b, "a_is_sparse", a_is_sparse,
        "b_is_sparse", b_is_sparse)
      return _result
    except _core._FallbackException:
      # Fast path rejected the arguments; retry via the Python slow path.
      return sparse_mat_mul_eager_fallback(
          a, b, transpose_a=transpose_a, transpose_b=transpose_b,
          a_is_sparse=a_is_sparse, b_is_sparse=b_is_sparse, name=name,
          ctx=_ctx)
    except _core._NotOkStatusException as e:
      # Convert the C++ status into the matching Python exception.
      if name is not None:
        message = e.message + " name: " + name
      else:
        message = e.message
      _six.raise_from(_core._status_to_exception(e.code, message), None)
def sparse_mat_mul_eager_fallback(a, b, transpose_a=False, transpose_b=False, a_is_sparse=False, b_is_sparse=False, name=None, ctx=None):
  r"""Slow-path eager-mode implementation of `sparse_mat_mul`.

  Used when the C++ fast path cannot handle the supplied arguments.
  """
  eager_ctx = ctx if ctx else _context.context()
  # Normalize every optional flag (None -> False) to a concrete bool attr.
  transpose_a = _execute.make_bool(
      False if transpose_a is None else transpose_a, "transpose_a")
  transpose_b = _execute.make_bool(
      False if transpose_b is None else transpose_b, "transpose_b")
  a_is_sparse = _execute.make_bool(
      False if a_is_sparse is None else a_is_sparse, "a_is_sparse")
  b_is_sparse = _execute.make_bool(
      False if b_is_sparse is None else b_is_sparse, "b_is_sparse")
  # Each matrix gets its own dtype attr, defaulting to float32.
  _attr_Ta, (a,) = _execute.args_to_matching_eager([a], eager_ctx,
                                                   _dtypes.float32)
  _attr_Tb, (b,) = _execute.args_to_matching_eager([b], eager_ctx,
                                                   _dtypes.float32)
  flat_inputs = [a, b]
  op_attrs = ("transpose_a", transpose_a, "transpose_b", transpose_b,
              "a_is_sparse", a_is_sparse, "b_is_sparse", b_is_sparse,
              "Ta", _attr_Ta, "Tb", _attr_Tb)
  out = _execute.execute(b"SparseMatMul", 1, inputs=flat_inputs,
                         attrs=op_attrs, ctx=eager_ctx, name=name)
  _execute.record_gradient(
      "SparseMatMul", flat_inputs, op_attrs, out, name)
  # Exactly one output: unwrap it from the list.
  result, = out
  return result
def sparse_segment_mean(data, indices, segment_ids, name=None):
  r"""Computes the mean along sparse segments of a tensor.
  Read @{$math_ops#Segmentation$the section on segmentation} for an explanation of
  segments.
  Like `SegmentMean`, but `segment_ids` can have rank less than `data`'s first
  dimension, selecting a subset of dimension 0, specified by `indices`.
  Args:
    data: A `Tensor`. Must be one of the following types: `float32`, `float64`.
    indices: A `Tensor`. Must be one of the following types: `int32`, `int64`.
      A 1-D tensor. Has same rank as `segment_ids`.
    segment_ids: A `Tensor` of type `int32`.
      A 1-D tensor. Values should be sorted and can be repeated.
    name: A name for the operation (optional).
  Returns:
    A `Tensor`. Has the same type as `data`.
  """
  _ctx = _context._context
  if _ctx is None or not _ctx._eager_context.is_eager:
    # Graph mode: register a "SparseSegmentMean" node via the op-def library.
    _, _, _op = _op_def_lib._apply_op_helper(
        "SparseSegmentMean", data=data, indices=indices,
        segment_ids=segment_ids, name=name)
    _result = _op.outputs[:]
    _inputs_flat = _op.inputs
    _attrs = ("T", _op.get_attr("T"), "Tidx", _op.get_attr("Tidx"))
    _execute.record_gradient(
      "SparseSegmentMean", _inputs_flat, _attrs, _result, name)
    # The op has exactly one output; unwrap it from the output list.
    _result, = _result
    return _result
  else:
    try:
      # Eager fast path: dispatch directly to the C++ runtime.
      _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
        _ctx._context_handle, _ctx._eager_context.device_name,
        "SparseSegmentMean", name, _ctx._post_execution_callbacks, data,
        indices, segment_ids)
      return _result
    except _core._FallbackException:
      # Fast path rejected the arguments; retry via the Python slow path.
      return sparse_segment_mean_eager_fallback(
          data, indices, segment_ids, name=name, ctx=_ctx)
    except _core._NotOkStatusException as e:
      # Convert the C++ status into the matching Python exception.
      if name is not None:
        message = e.message + " name: " + name
      else:
        message = e.message
      _six.raise_from(_core._status_to_exception(e.code, message), None)
def sparse_segment_mean_eager_fallback(data, indices, segment_ids, name=None, ctx=None):
  r"""Slow-path eager-mode implementation of `sparse_segment_mean`.

  Used when the C++ fast path cannot handle the supplied arguments.
  """
  eager_ctx = ctx if ctx else _context.context()
  _attr_T, (data,) = _execute.args_to_matching_eager([data], eager_ctx)
  # Index dtype defaults to int32; segment ids are always int32.
  _attr_Tidx, (indices,) = _execute.args_to_matching_eager(
      [indices], eager_ctx, _dtypes.int32)
  segment_ids = _ops.convert_to_tensor(segment_ids, _dtypes.int32)
  flat_inputs = [data, indices, segment_ids]
  op_attrs = ("T", _attr_T, "Tidx", _attr_Tidx)
  out = _execute.execute(b"SparseSegmentMean", 1, inputs=flat_inputs,
                         attrs=op_attrs, ctx=eager_ctx, name=name)
  _execute.record_gradient(
      "SparseSegmentMean", flat_inputs, op_attrs, out, name)
  # Exactly one output: unwrap it from the list.
  result, = out
  return result
def sparse_segment_mean_grad(grad, indices, segment_ids, output_dim0, name=None):
  r"""Computes gradients for SparseSegmentMean.
  Returns tensor "output" with same shape as grad, except for dimension 0 whose
  value is output_dim0.
  Args:
    grad: A `Tensor`. Must be one of the following types: `float32`, `float64`.
      gradient propagated to the SparseSegmentMean op.
    indices: A `Tensor`. Must be one of the following types: `int32`, `int64`.
      indices passed to the corresponding SparseSegmentMean op.
    segment_ids: A `Tensor` of type `int32`.
      segment_ids passed to the corresponding SparseSegmentMean op.
    output_dim0: A `Tensor` of type `int32`.
      dimension 0 of "data" passed to SparseSegmentMean op.
    name: A name for the operation (optional).
  Returns:
    A `Tensor`. Has the same type as `grad`.
  """
  _ctx = _context._context
  if _ctx is None or not _ctx._eager_context.is_eager:
    # Graph mode: register a "SparseSegmentMeanGrad" node.
    _, _, _op = _op_def_lib._apply_op_helper(
        "SparseSegmentMeanGrad", grad=grad, indices=indices,
        segment_ids=segment_ids, output_dim0=output_dim0, name=name)
    _result = _op.outputs[:]
    _inputs_flat = _op.inputs
    _attrs = ("T", _op.get_attr("T"), "Tidx", _op.get_attr("Tidx"))
    _execute.record_gradient(
      "SparseSegmentMeanGrad", _inputs_flat, _attrs, _result, name)
    # The op has exactly one output; unwrap it from the output list.
    _result, = _result
    return _result
  else:
    try:
      # Eager fast path: dispatch directly to the C++ runtime.
      _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
        _ctx._context_handle, _ctx._eager_context.device_name,
        "SparseSegmentMeanGrad", name, _ctx._post_execution_callbacks, grad,
        indices, segment_ids, output_dim0)
      return _result
    except _core._FallbackException:
      # Fast path rejected the arguments; retry via the Python slow path.
      return sparse_segment_mean_grad_eager_fallback(
          grad, indices, segment_ids, output_dim0, name=name, ctx=_ctx)
    except _core._NotOkStatusException as e:
      # Convert the C++ status into the matching Python exception.
      if name is not None:
        message = e.message + " name: " + name
      else:
        message = e.message
      _six.raise_from(_core._status_to_exception(e.code, message), None)
def sparse_segment_mean_grad_eager_fallback(grad, indices, segment_ids, output_dim0, name=None, ctx=None):
  r"""Slow-path eager-mode implementation of `sparse_segment_mean_grad`.

  Used when the C++ fast path cannot handle the supplied arguments.
  """
  eager_ctx = ctx if ctx else _context.context()
  _attr_T, (grad,) = _execute.args_to_matching_eager([grad], eager_ctx)
  # Index dtype defaults to int32; the remaining inputs are fixed int32.
  _attr_Tidx, (indices,) = _execute.args_to_matching_eager(
      [indices], eager_ctx, _dtypes.int32)
  segment_ids = _ops.convert_to_tensor(segment_ids, _dtypes.int32)
  output_dim0 = _ops.convert_to_tensor(output_dim0, _dtypes.int32)
  flat_inputs = [grad, indices, segment_ids, output_dim0]
  op_attrs = ("T", _attr_T, "Tidx", _attr_Tidx)
  out = _execute.execute(b"SparseSegmentMeanGrad", 1, inputs=flat_inputs,
                         attrs=op_attrs, ctx=eager_ctx, name=name)
  _execute.record_gradient(
      "SparseSegmentMeanGrad", flat_inputs, op_attrs, out, name)
  # Exactly one output: unwrap it from the list.
  result, = out
  return result
def sparse_segment_mean_with_num_segments(data, indices, segment_ids, num_segments, name=None):
  r"""Computes the mean along sparse segments of a tensor.
  Like `SparseSegmentMean`, but allows missing ids in `segment_ids`. If an id is
  missing, the `output` tensor at that position will be zeroed.
  Read @{$math_ops#Segmentation$the section on segmentation} for an explanation of
  segments.
  Args:
    data: A `Tensor`. Must be one of the following types: `float32`, `float64`.
    indices: A `Tensor`. Must be one of the following types: `int32`, `int64`.
      A 1-D tensor. Has same rank as `segment_ids`.
    segment_ids: A `Tensor` of type `int32`.
      A 1-D tensor. Values should be sorted and can be repeated.
    num_segments: A `Tensor`. Must be one of the following types: `int32`, `int64`.
      Should equal the number of distinct segment IDs.
    name: A name for the operation (optional).
  Returns:
    A `Tensor`. Has the same type as `data`.
  """
  _ctx = _context._context
  if _ctx is None or not _ctx._eager_context.is_eager:
    # Graph mode: register a "SparseSegmentMeanWithNumSegments" node.
    _, _, _op = _op_def_lib._apply_op_helper(
        "SparseSegmentMeanWithNumSegments", data=data, indices=indices,
        segment_ids=segment_ids, num_segments=num_segments, name=name)
    _result = _op.outputs[:]
    _inputs_flat = _op.inputs
    _attrs = ("T", _op.get_attr("T"), "Tidx", _op.get_attr("Tidx"),
              "Tnumsegments", _op.get_attr("Tnumsegments"))
    _execute.record_gradient(
      "SparseSegmentMeanWithNumSegments", _inputs_flat, _attrs, _result, name)
    # The op has exactly one output; unwrap it from the output list.
    _result, = _result
    return _result
  else:
    try:
      # Eager fast path: dispatch directly to the C++ runtime.
      _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
        _ctx._context_handle, _ctx._eager_context.device_name,
        "SparseSegmentMeanWithNumSegments", name,
        _ctx._post_execution_callbacks, data, indices, segment_ids,
        num_segments)
      return _result
    except _core._FallbackException:
      # Fast path rejected the arguments; retry via the Python slow path.
      return sparse_segment_mean_with_num_segments_eager_fallback(
          data, indices, segment_ids, num_segments, name=name, ctx=_ctx)
    except _core._NotOkStatusException as e:
      # Convert the C++ status into the matching Python exception.
      if name is not None:
        message = e.message + " name: " + name
      else:
        message = e.message
      _six.raise_from(_core._status_to_exception(e.code, message), None)
def sparse_segment_mean_with_num_segments_eager_fallback(data, indices, segment_ids, num_segments, name=None, ctx=None):
  r"""Slow-path eager-mode implementation of `sparse_segment_mean_with_num_segments`.

  Used when the C++ fast path cannot handle the supplied arguments.
  """
  eager_ctx = ctx if ctx else _context.context()
  _attr_T, (data,) = _execute.args_to_matching_eager([data], eager_ctx)
  # Both index-like inputs default to int32; segment ids are fixed int32.
  _attr_Tidx, (indices,) = _execute.args_to_matching_eager(
      [indices], eager_ctx, _dtypes.int32)
  _attr_Tnumsegments, (num_segments,) = _execute.args_to_matching_eager(
      [num_segments], eager_ctx, _dtypes.int32)
  segment_ids = _ops.convert_to_tensor(segment_ids, _dtypes.int32)
  flat_inputs = [data, indices, segment_ids, num_segments]
  op_attrs = ("T", _attr_T, "Tidx", _attr_Tidx, "Tnumsegments",
              _attr_Tnumsegments)
  out = _execute.execute(b"SparseSegmentMeanWithNumSegments", 1,
                         inputs=flat_inputs, attrs=op_attrs, ctx=eager_ctx,
                         name=name)
  _execute.record_gradient(
      "SparseSegmentMeanWithNumSegments", flat_inputs, op_attrs, out, name)
  # Exactly one output: unwrap it from the list.
  result, = out
  return result
def sparse_segment_sqrt_n(data, indices, segment_ids, name=None):
  r"""Computes the sum along sparse segments of a tensor divided by the sqrt of N.
  N is the size of the segment being reduced.
  Read @{$math_ops#Segmentation$the section on segmentation} for an explanation of
  segments.
  Args:
    data: A `Tensor`. Must be one of the following types: `float32`, `float64`.
    indices: A `Tensor`. Must be one of the following types: `int32`, `int64`.
      A 1-D tensor. Has same rank as `segment_ids`.
    segment_ids: A `Tensor` of type `int32`.
      A 1-D tensor. Values should be sorted and can be repeated.
    name: A name for the operation (optional).
  Returns:
    A `Tensor`. Has the same type as `data`.
  """
  _ctx = _context._context
  if _ctx is None or not _ctx._eager_context.is_eager:
    # Graph mode: register a "SparseSegmentSqrtN" node via the op-def library.
    _, _, _op = _op_def_lib._apply_op_helper(
        "SparseSegmentSqrtN", data=data, indices=indices,
        segment_ids=segment_ids, name=name)
    _result = _op.outputs[:]
    _inputs_flat = _op.inputs
    _attrs = ("T", _op.get_attr("T"), "Tidx", _op.get_attr("Tidx"))
    _execute.record_gradient(
      "SparseSegmentSqrtN", _inputs_flat, _attrs, _result, name)
    # The op has exactly one output; unwrap it from the output list.
    _result, = _result
    return _result
  else:
    try:
      # Eager fast path: dispatch directly to the C++ runtime.
      _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
        _ctx._context_handle, _ctx._eager_context.device_name,
        "SparseSegmentSqrtN", name, _ctx._post_execution_callbacks, data,
        indices, segment_ids)
      return _result
    except _core._FallbackException:
      # Fast path rejected the arguments; retry via the Python slow path.
      return sparse_segment_sqrt_n_eager_fallback(
          data, indices, segment_ids, name=name, ctx=_ctx)
    except _core._NotOkStatusException as e:
      # Convert the C++ status into the matching Python exception.
      if name is not None:
        message = e.message + " name: " + name
      else:
        message = e.message
      _six.raise_from(_core._status_to_exception(e.code, message), None)
def sparse_segment_sqrt_n_eager_fallback(data, indices, segment_ids, name=None, ctx=None):
  r"""Slow-path eager-mode implementation of `sparse_segment_sqrt_n`.

  Used when the C++ fast path cannot handle the supplied arguments.
  """
  eager_ctx = ctx if ctx else _context.context()
  _attr_T, (data,) = _execute.args_to_matching_eager([data], eager_ctx)
  # Index dtype defaults to int32; segment ids are always int32.
  _attr_Tidx, (indices,) = _execute.args_to_matching_eager(
      [indices], eager_ctx, _dtypes.int32)
  segment_ids = _ops.convert_to_tensor(segment_ids, _dtypes.int32)
  flat_inputs = [data, indices, segment_ids]
  op_attrs = ("T", _attr_T, "Tidx", _attr_Tidx)
  out = _execute.execute(b"SparseSegmentSqrtN", 1, inputs=flat_inputs,
                         attrs=op_attrs, ctx=eager_ctx, name=name)
  _execute.record_gradient(
      "SparseSegmentSqrtN", flat_inputs, op_attrs, out, name)
  # Exactly one output: unwrap it from the list.
  result, = out
  return result
def sparse_segment_sqrt_n_grad(grad, indices, segment_ids, output_dim0, name=None):
  r"""Computes gradients for SparseSegmentSqrtN.
  Returns tensor "output" with same shape as grad, except for dimension 0 whose
  value is output_dim0.
  Args:
    grad: A `Tensor`. Must be one of the following types: `float32`, `float64`.
      gradient propagated to the SparseSegmentSqrtN op.
    indices: A `Tensor`. Must be one of the following types: `int32`, `int64`.
      indices passed to the corresponding SparseSegmentSqrtN op.
    segment_ids: A `Tensor` of type `int32`.
      segment_ids passed to the corresponding SparseSegmentSqrtN op.
    output_dim0: A `Tensor` of type `int32`.
      dimension 0 of "data" passed to SparseSegmentSqrtN op.
    name: A name for the operation (optional).
  Returns:
    A `Tensor`. Has the same type as `grad`.
  """
  _ctx = _context._context
  if _ctx is None or not _ctx._eager_context.is_eager:
    # Graph mode: register a "SparseSegmentSqrtNGrad" node.
    _, _, _op = _op_def_lib._apply_op_helper(
        "SparseSegmentSqrtNGrad", grad=grad, indices=indices,
        segment_ids=segment_ids, output_dim0=output_dim0, name=name)
    _result = _op.outputs[:]
    _inputs_flat = _op.inputs
    _attrs = ("T", _op.get_attr("T"), "Tidx", _op.get_attr("Tidx"))
    _execute.record_gradient(
      "SparseSegmentSqrtNGrad", _inputs_flat, _attrs, _result, name)
    # The op has exactly one output; unwrap it from the output list.
    _result, = _result
    return _result
  else:
    try:
      # Eager fast path: dispatch directly to the C++ runtime.
      _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
        _ctx._context_handle, _ctx._eager_context.device_name,
        "SparseSegmentSqrtNGrad", name, _ctx._post_execution_callbacks, grad,
        indices, segment_ids, output_dim0)
      return _result
    except _core._FallbackException:
      # Fast path rejected the arguments; retry via the Python slow path.
      return sparse_segment_sqrt_n_grad_eager_fallback(
          grad, indices, segment_ids, output_dim0, name=name, ctx=_ctx)
    except _core._NotOkStatusException as e:
      # Convert the C++ status into the matching Python exception.
      if name is not None:
        message = e.message + " name: " + name
      else:
        message = e.message
      _six.raise_from(_core._status_to_exception(e.code, message), None)
def sparse_segment_sqrt_n_grad_eager_fallback(grad, indices, segment_ids, output_dim0, name=None, ctx=None):
  r"""Slow-path eager-mode implementation of `sparse_segment_sqrt_n_grad`.

  Used when the C++ fast path cannot handle the supplied arguments.
  """
  eager_ctx = ctx if ctx else _context.context()
  _attr_T, (grad,) = _execute.args_to_matching_eager([grad], eager_ctx)
  # Index dtype defaults to int32; the remaining inputs are fixed int32.
  _attr_Tidx, (indices,) = _execute.args_to_matching_eager(
      [indices], eager_ctx, _dtypes.int32)
  segment_ids = _ops.convert_to_tensor(segment_ids, _dtypes.int32)
  output_dim0 = _ops.convert_to_tensor(output_dim0, _dtypes.int32)
  flat_inputs = [grad, indices, segment_ids, output_dim0]
  op_attrs = ("T", _attr_T, "Tidx", _attr_Tidx)
  out = _execute.execute(b"SparseSegmentSqrtNGrad", 1, inputs=flat_inputs,
                         attrs=op_attrs, ctx=eager_ctx, name=name)
  _execute.record_gradient(
      "SparseSegmentSqrtNGrad", flat_inputs, op_attrs, out, name)
  # Exactly one output: unwrap it from the list.
  result, = out
  return result
def sparse_segment_sqrt_n_with_num_segments(data, indices, segment_ids, num_segments, name=None):
  r"""Computes the sum along sparse segments of a tensor divided by the sqrt of N.
  N is the size of the segment being reduced.
  Like `SparseSegmentSqrtN`, but allows missing ids in `segment_ids`. If an id is
  missing, the `output` tensor at that position will be zeroed.
  Read @{$math_ops#Segmentation$the section on segmentation} for an explanation of
  segments.
  Args:
    data: A `Tensor`. Must be one of the following types: `float32`, `float64`.
    indices: A `Tensor`. Must be one of the following types: `int32`, `int64`.
      A 1-D tensor. Has same rank as `segment_ids`.
    segment_ids: A `Tensor` of type `int32`.
      A 1-D tensor. Values should be sorted and can be repeated.
    num_segments: A `Tensor`. Must be one of the following types: `int32`, `int64`.
      Should equal the number of distinct segment IDs.
    name: A name for the operation (optional).
  Returns:
    A `Tensor`. Has the same type as `data`.
  """
  _ctx = _context._context
  if _ctx is None or not _ctx._eager_context.is_eager:
    # Graph mode: add the op to the current graph via the op-def library.
    _, _, _op = _op_def_lib._apply_op_helper(
        "SparseSegmentSqrtNWithNumSegments", data=data, indices=indices,
        segment_ids=segment_ids, num_segments=num_segments, name=name)
    _result = _op.outputs[:]
    _inputs_flat = _op.inputs
    _attrs = ("T", _op.get_attr("T"), "Tidx", _op.get_attr("Tidx"),
              "Tnumsegments", _op.get_attr("Tnumsegments"))
    _execute.record_gradient(
      "SparseSegmentSqrtNWithNumSegments", _inputs_flat, _attrs, _result, name)
    _result, = _result
    return _result
  else:
    try:
      # Eager mode: try the C fast-path execution first.
      _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
        _ctx._context_handle, _ctx._eager_context.device_name,
        "SparseSegmentSqrtNWithNumSegments", name,
        _ctx._post_execution_callbacks, data, indices, segment_ids,
        num_segments)
      return _result
    except _core._FallbackException:
      # Fast path rejected these inputs; fall back to the Python slow path.
      return sparse_segment_sqrt_n_with_num_segments_eager_fallback(
          data, indices, segment_ids, num_segments, name=name, ctx=_ctx)
    except _core._NotOkStatusException as e:
      # Re-raise the C++ status as the matching Python exception.
      if name is not None:
        message = e.message + " name: " + name
      else:
        message = e.message
      _six.raise_from(_core._status_to_exception(e.code, message), None)
def sparse_segment_sqrt_n_with_num_segments_eager_fallback(data, indices, segment_ids, num_segments, name=None, ctx=None):
  r"""Eager-mode slow path for `sparse_segment_sqrt_n_with_num_segments`.
  Used when the C fast path raises `_FallbackException`; resolves attribute
  dtypes in Python and dispatches via `_execute.execute`.
  """
  _ctx = ctx if ctx else _context.context()
  _attr_T, (data,) = _execute.args_to_matching_eager([data], _ctx)
  _attr_Tidx, (indices,) = _execute.args_to_matching_eager([indices], _ctx, _dtypes.int32)
  _attr_Tnumsegments, (num_segments,) = _execute.args_to_matching_eager([num_segments], _ctx, _dtypes.int32)
  # segment_ids has a fixed int32 type in the op definition.
  segment_ids = _ops.convert_to_tensor(segment_ids, _dtypes.int32)
  _inputs_flat = [data, indices, segment_ids, num_segments]
  _attrs = ("T", _attr_T, "Tidx", _attr_Tidx, "Tnumsegments",
  _attr_Tnumsegments)
  _result = _execute.execute(b"SparseSegmentSqrtNWithNumSegments", 1,
                             inputs=_inputs_flat, attrs=_attrs, ctx=_ctx,
                             name=name)
  _execute.record_gradient(
      "SparseSegmentSqrtNWithNumSegments", _inputs_flat, _attrs, _result, name)
  _result, = _result
  return _result
def sparse_segment_sum(data, indices, segment_ids, name=None):
  r"""Computes the sum along sparse segments of a tensor.
  Read @{$math_ops#Segmentation$the section on segmentation} for an explanation of
  segments.
  Like `SegmentSum`, but `segment_ids` can have rank less than `data`'s first
  dimension, selecting a subset of dimension 0, specified by `indices`.
  For example:
  ```python
  c = tf.constant([[1,2,3,4], [-1,-2,-3,-4], [5,6,7,8]])
  # Select two rows, one segment.
  tf.sparse_segment_sum(c, tf.constant([0, 1]), tf.constant([0, 0]))
  # => [[0 0 0 0]]
  # Select two rows, two segments.
  tf.sparse_segment_sum(c, tf.constant([0, 1]), tf.constant([0, 1]))
  # => [[ 1  2  3  4]
  #     [-1 -2 -3 -4]]
  # Select all rows, two segments.
  tf.sparse_segment_sum(c, tf.constant([0, 1, 2]), tf.constant([0, 0, 1]))
  # => [[0 0 0 0]
  #     [5 6 7 8]]
  # Which is equivalent to:
  tf.segment_sum(c, tf.constant([0, 0, 1]))
  ```
  Args:
    data: A `Tensor`. Must be one of the following types: `float32`, `float64`, `int32`, `uint8`, `int16`, `int8`, `int64`, `bfloat16`, `uint16`, `half`, `uint32`, `uint64`.
    indices: A `Tensor`. Must be one of the following types: `int32`, `int64`.
      A 1-D tensor. Has same rank as `segment_ids`.
    segment_ids: A `Tensor` of type `int32`.
      A 1-D tensor. Values should be sorted and can be repeated.
    name: A name for the operation (optional).
  Returns:
    A `Tensor`. Has the same type as `data`.
  """
  _ctx = _context._context
  if _ctx is None or not _ctx._eager_context.is_eager:
    # Graph mode: add the op to the current graph via the op-def library.
    _, _, _op = _op_def_lib._apply_op_helper(
        "SparseSegmentSum", data=data, indices=indices,
        segment_ids=segment_ids, name=name)
    _result = _op.outputs[:]
    _inputs_flat = _op.inputs
    _attrs = ("T", _op.get_attr("T"), "Tidx", _op.get_attr("Tidx"))
    _execute.record_gradient(
      "SparseSegmentSum", _inputs_flat, _attrs, _result, name)
    _result, = _result
    return _result
  else:
    try:
      # Eager mode: try the C fast-path execution first.
      _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
        _ctx._context_handle, _ctx._eager_context.device_name,
        "SparseSegmentSum", name, _ctx._post_execution_callbacks, data,
        indices, segment_ids)
      return _result
    except _core._FallbackException:
      # Fast path rejected these inputs; fall back to the Python slow path.
      return sparse_segment_sum_eager_fallback(
          data, indices, segment_ids, name=name, ctx=_ctx)
    except _core._NotOkStatusException as e:
      # Re-raise the C++ status as the matching Python exception.
      if name is not None:
        message = e.message + " name: " + name
      else:
        message = e.message
      _six.raise_from(_core._status_to_exception(e.code, message), None)
def sparse_segment_sum_eager_fallback(data, indices, segment_ids, name=None, ctx=None):
  r"""Eager-mode slow path for `sparse_segment_sum`.
  Used when the C fast path raises `_FallbackException`; resolves attribute
  dtypes in Python and dispatches via `_execute.execute`.
  """
  _ctx = ctx if ctx else _context.context()
  _attr_T, (data,) = _execute.args_to_matching_eager([data], _ctx)
  _attr_Tidx, (indices,) = _execute.args_to_matching_eager([indices], _ctx, _dtypes.int32)
  # segment_ids has a fixed int32 type in the op definition.
  segment_ids = _ops.convert_to_tensor(segment_ids, _dtypes.int32)
  _inputs_flat = [data, indices, segment_ids]
  _attrs = ("T", _attr_T, "Tidx", _attr_Tidx)
  _result = _execute.execute(b"SparseSegmentSum", 1, inputs=_inputs_flat,
                             attrs=_attrs, ctx=_ctx, name=name)
  _execute.record_gradient(
      "SparseSegmentSum", _inputs_flat, _attrs, _result, name)
  _result, = _result
  return _result
def sparse_segment_sum_with_num_segments(data, indices, segment_ids, num_segments, name=None):
  r"""Computes the sum along sparse segments of a tensor.
  Like `SparseSegmentSum`, but allows missing ids in `segment_ids`. If an id is
  missing, the `output` tensor at that position will be zeroed.
  Read @{$math_ops#Segmentation$the section on segmentation} for an explanation of
  segments.
  For example:
  ```python
  c = tf.constant([[1,2,3,4], [-1,-2,-3,-4], [5,6,7,8]])
  tf.sparse_segment_sum_with_num_segments(
      c, tf.constant([0, 1]), tf.constant([0, 0]), num_segments=3)
  # => [[0 0 0 0]
  #     [0 0 0 0]
  #     [0 0 0 0]]
  tf.sparse_segment_sum_with_num_segments(c,
                                          tf.constant([0, 1]),
                                          tf.constant([0, 2]),
                                          num_segments=4)
  # => [[ 1  2  3  4]
  #     [ 0  0  0  0]
  #     [-1 -2 -3 -4]
  #     [ 0  0  0  0]]
  ```
  Args:
    data: A `Tensor`. Must be one of the following types: `float32`, `float64`, `int32`, `uint8`, `int16`, `int8`, `int64`, `bfloat16`, `uint16`, `half`, `uint32`, `uint64`.
    indices: A `Tensor`. Must be one of the following types: `int32`, `int64`.
      A 1-D tensor. Has same rank as `segment_ids`.
    segment_ids: A `Tensor` of type `int32`.
      A 1-D tensor. Values should be sorted and can be repeated.
    num_segments: A `Tensor`. Must be one of the following types: `int32`, `int64`.
      Should equal the number of distinct segment IDs.
    name: A name for the operation (optional).
  Returns:
    A `Tensor`. Has the same type as `data`.
  """
  _ctx = _context._context
  if _ctx is None or not _ctx._eager_context.is_eager:
    # Graph mode: add the op to the current graph via the op-def library.
    _, _, _op = _op_def_lib._apply_op_helper(
        "SparseSegmentSumWithNumSegments", data=data, indices=indices,
        segment_ids=segment_ids, num_segments=num_segments, name=name)
    _result = _op.outputs[:]
    _inputs_flat = _op.inputs
    _attrs = ("T", _op.get_attr("T"), "Tidx", _op.get_attr("Tidx"),
              "Tnumsegments", _op.get_attr("Tnumsegments"))
    _execute.record_gradient(
      "SparseSegmentSumWithNumSegments", _inputs_flat, _attrs, _result, name)
    _result, = _result
    return _result
  else:
    try:
      # Eager mode: try the C fast-path execution first.
      _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
        _ctx._context_handle, _ctx._eager_context.device_name,
        "SparseSegmentSumWithNumSegments", name,
        _ctx._post_execution_callbacks, data, indices, segment_ids,
        num_segments)
      return _result
    except _core._FallbackException:
      # Fast path rejected these inputs; fall back to the Python slow path.
      return sparse_segment_sum_with_num_segments_eager_fallback(
          data, indices, segment_ids, num_segments, name=name, ctx=_ctx)
    except _core._NotOkStatusException as e:
      # Re-raise the C++ status as the matching Python exception.
      if name is not None:
        message = e.message + " name: " + name
      else:
        message = e.message
      _six.raise_from(_core._status_to_exception(e.code, message), None)
def sparse_segment_sum_with_num_segments_eager_fallback(data, indices, segment_ids, num_segments, name=None, ctx=None):
  r"""Eager-mode slow path for `sparse_segment_sum_with_num_segments`.
  Used when the C fast path raises `_FallbackException`; resolves attribute
  dtypes in Python and dispatches via `_execute.execute`.
  """
  _ctx = ctx if ctx else _context.context()
  _attr_T, (data,) = _execute.args_to_matching_eager([data], _ctx)
  _attr_Tidx, (indices,) = _execute.args_to_matching_eager([indices], _ctx, _dtypes.int32)
  _attr_Tnumsegments, (num_segments,) = _execute.args_to_matching_eager([num_segments], _ctx, _dtypes.int32)
  # segment_ids has a fixed int32 type in the op definition.
  segment_ids = _ops.convert_to_tensor(segment_ids, _dtypes.int32)
  _inputs_flat = [data, indices, segment_ids, num_segments]
  _attrs = ("T", _attr_T, "Tidx", _attr_Tidx, "Tnumsegments",
  _attr_Tnumsegments)
  _result = _execute.execute(b"SparseSegmentSumWithNumSegments", 1,
                             inputs=_inputs_flat, attrs=_attrs, ctx=_ctx,
                             name=name)
  _execute.record_gradient(
      "SparseSegmentSumWithNumSegments", _inputs_flat, _attrs, _result, name)
  _result, = _result
  return _result
def sqrt(x, name=None):
  r"""Computes square root of x element-wise.
  I.e., \\(y = \sqrt{x} = x^{1/2}\\).
  Args:
    x: A `Tensor`. Must be one of the following types: `bfloat16`, `half`, `float32`, `float64`, `complex64`, `complex128`.
    name: A name for the operation (optional).
  Returns:
    A `Tensor`. Has the same type as `x`.
  """
  _ctx = _context._context
  if _ctx is None or not _ctx._eager_context.is_eager:
    # Graph mode: add the op to the current graph via the op-def library.
    _, _, _op = _op_def_lib._apply_op_helper(
        "Sqrt", x=x, name=name)
    _result = _op.outputs[:]
    _inputs_flat = _op.inputs
    _attrs = ("T", _op.get_attr("T"))
    _execute.record_gradient(
      "Sqrt", _inputs_flat, _attrs, _result, name)
    _result, = _result
    return _result
  else:
    try:
      # Eager mode: try the C fast-path execution first.
      _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
        _ctx._context_handle, _ctx._eager_context.device_name, "Sqrt", name,
        _ctx._post_execution_callbacks, x)
      return _result
    except _core._FallbackException:
      # Fast path rejected these inputs; fall back to the Python slow path.
      return sqrt_eager_fallback(
          x, name=name, ctx=_ctx)
    except _core._NotOkStatusException as e:
      # Re-raise the C++ status as the matching Python exception.
      if name is not None:
        message = e.message + " name: " + name
      else:
        message = e.message
      _six.raise_from(_core._status_to_exception(e.code, message), None)
def sqrt_eager_fallback(x, name=None, ctx=None):
  r"""Eager-mode slow path for `sqrt`.
  Used when the C fast path raises `_FallbackException`; resolves attribute
  dtypes in Python and dispatches via `_execute.execute`.
  """
  _ctx = ctx if ctx else _context.context()
  _attr_T, (x,) = _execute.args_to_matching_eager([x], _ctx)
  _inputs_flat = [x]
  _attrs = ("T", _attr_T)
  _result = _execute.execute(b"Sqrt", 1, inputs=_inputs_flat, attrs=_attrs,
                             ctx=_ctx, name=name)
  _execute.record_gradient(
      "Sqrt", _inputs_flat, _attrs, _result, name)
  _result, = _result
  return _result
def sqrt_grad(y, dy, name=None):
  r"""Computes the gradient for the sqrt of `x` wrt its input.
  Specifically, `grad = dy * 0.5 / y`, where `y = sqrt(x)`, and `dy`
  is the corresponding input gradient.
  Args:
    y: A `Tensor`. Must be one of the following types: `bfloat16`, `half`, `float32`, `float64`, `complex64`, `complex128`.
    dy: A `Tensor`. Must have the same type as `y`.
    name: A name for the operation (optional).
  Returns:
    A `Tensor`. Has the same type as `y`.
  """
  _ctx = _context._context
  if _ctx is None or not _ctx._eager_context.is_eager:
    # Graph mode: add the op to the current graph via the op-def library.
    _, _, _op = _op_def_lib._apply_op_helper(
        "SqrtGrad", y=y, dy=dy, name=name)
    _result = _op.outputs[:]
    _inputs_flat = _op.inputs
    _attrs = ("T", _op.get_attr("T"))
    _execute.record_gradient(
      "SqrtGrad", _inputs_flat, _attrs, _result, name)
    _result, = _result
    return _result
  else:
    try:
      # Eager mode: try the C fast-path execution first.
      _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
        _ctx._context_handle, _ctx._eager_context.device_name, "SqrtGrad",
        name, _ctx._post_execution_callbacks, y, dy)
      return _result
    except _core._FallbackException:
      # Fast path rejected these inputs; fall back to the Python slow path.
      return sqrt_grad_eager_fallback(
          y, dy, name=name, ctx=_ctx)
    except _core._NotOkStatusException as e:
      # Re-raise the C++ status as the matching Python exception.
      if name is not None:
        message = e.message + " name: " + name
      else:
        message = e.message
      _six.raise_from(_core._status_to_exception(e.code, message), None)
def sqrt_grad_eager_fallback(y, dy, name=None, ctx=None):
  r"""Eager-mode slow path for `sqrt_grad`.
  Used when the C fast path raises `_FallbackException`; resolves attribute
  dtypes in Python and dispatches via `_execute.execute`.
  """
  _ctx = ctx if ctx else _context.context()
  # y and dy must share a dtype; resolve them to a common T together.
  _attr_T, _inputs_T = _execute.args_to_matching_eager([y, dy], _ctx)
  (y, dy) = _inputs_T
  _inputs_flat = [y, dy]
  _attrs = ("T", _attr_T)
  _result = _execute.execute(b"SqrtGrad", 1, inputs=_inputs_flat,
                             attrs=_attrs, ctx=_ctx, name=name)
  _execute.record_gradient(
      "SqrtGrad", _inputs_flat, _attrs, _result, name)
  _result, = _result
  return _result
def square(x, name=None):
  r"""Computes square of x element-wise.
  I.e., \\(y = x * x = x^2\\).
  Args:
    x: A `Tensor`. Must be one of the following types: `bfloat16`, `half`, `float32`, `float64`, `int32`, `int64`, `complex64`, `complex128`.
    name: A name for the operation (optional).
  Returns:
    A `Tensor`. Has the same type as `x`.
  """
  _ctx = _context._context
  if _ctx is None or not _ctx._eager_context.is_eager:
    # Graph mode: add the op to the current graph via the op-def library.
    _, _, _op = _op_def_lib._apply_op_helper(
        "Square", x=x, name=name)
    _result = _op.outputs[:]
    _inputs_flat = _op.inputs
    _attrs = ("T", _op.get_attr("T"))
    _execute.record_gradient(
      "Square", _inputs_flat, _attrs, _result, name)
    _result, = _result
    return _result
  else:
    try:
      # Eager mode: try the C fast-path execution first.
      _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
        _ctx._context_handle, _ctx._eager_context.device_name, "Square", name,
        _ctx._post_execution_callbacks, x)
      return _result
    except _core._FallbackException:
      # Fast path rejected these inputs; fall back to the Python slow path.
      return square_eager_fallback(
          x, name=name, ctx=_ctx)
    except _core._NotOkStatusException as e:
      # Re-raise the C++ status as the matching Python exception.
      if name is not None:
        message = e.message + " name: " + name
      else:
        message = e.message
      _six.raise_from(_core._status_to_exception(e.code, message), None)
def square_eager_fallback(x, name=None, ctx=None):
  r"""Eager-mode slow path for `square`.
  Used when the C fast path raises `_FallbackException`; resolves attribute
  dtypes in Python and dispatches via `_execute.execute`.
  """
  _ctx = ctx if ctx else _context.context()
  _attr_T, (x,) = _execute.args_to_matching_eager([x], _ctx)
  _inputs_flat = [x]
  _attrs = ("T", _attr_T)
  _result = _execute.execute(b"Square", 1, inputs=_inputs_flat, attrs=_attrs,
                             ctx=_ctx, name=name)
  _execute.record_gradient(
      "Square", _inputs_flat, _attrs, _result, name)
  _result, = _result
  return _result
def squared_difference_eager_fallback(x, y, name=None, ctx=None):
  r"""Eager-mode slow path for `squared_difference`.
  Used when the C fast path raises `_FallbackException`; resolves attribute
  dtypes in Python and dispatches via `_execute.execute`.
  """
  _ctx = ctx if ctx else _context.context()
  # x and y must share a dtype; resolve them to a common T together.
  _attr_T, _inputs_T = _execute.args_to_matching_eager([x, y], _ctx)
  (x, y) = _inputs_T
  _inputs_flat = [x, y]
  _attrs = ("T", _attr_T)
  _result = _execute.execute(b"SquaredDifference", 1, inputs=_inputs_flat,
                             attrs=_attrs, ctx=_ctx, name=name)
  _execute.record_gradient(
      "SquaredDifference", _inputs_flat, _attrs, _result, name)
  _result, = _result
  return _result
def sub(x, y, name=None):
  r"""Returns x - y element-wise.
  *NOTE*: `Subtract` supports broadcasting. More about broadcasting
  [here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html)
  Args:
    x: A `Tensor`. Must be one of the following types: `bfloat16`, `half`, `float32`, `float64`, `uint8`, `int8`, `uint16`, `int16`, `int32`, `int64`, `complex64`, `complex128`.
    y: A `Tensor`. Must have the same type as `x`.
    name: A name for the operation (optional).
  Returns:
    A `Tensor`. Has the same type as `x`.
  """
  _ctx = _context._context
  if _ctx is None or not _ctx._eager_context.is_eager:
    # Graph mode: add the op to the current graph via the op-def library.
    _, _, _op = _op_def_lib._apply_op_helper(
        "Sub", x=x, y=y, name=name)
    _result = _op.outputs[:]
    _inputs_flat = _op.inputs
    _attrs = ("T", _op.get_attr("T"))
    _execute.record_gradient(
      "Sub", _inputs_flat, _attrs, _result, name)
    _result, = _result
    return _result
  else:
    try:
      # Eager mode: try the C fast-path execution first.
      _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
        _ctx._context_handle, _ctx._eager_context.device_name, "Sub", name,
        _ctx._post_execution_callbacks, x, y)
      return _result
    except _core._FallbackException:
      # Fast path rejected these inputs; fall back to the Python slow path.
      return sub_eager_fallback(
          x, y, name=name, ctx=_ctx)
    except _core._NotOkStatusException as e:
      # Re-raise the C++ status as the matching Python exception.
      if name is not None:
        message = e.message + " name: " + name
      else:
        message = e.message
      _six.raise_from(_core._status_to_exception(e.code, message), None)
def sub_eager_fallback(x, y, name=None, ctx=None):
  r"""Eager-mode slow path for `sub`.
  Used when the C fast path raises `_FallbackException`; resolves attribute
  dtypes in Python and dispatches via `_execute.execute`.
  """
  _ctx = ctx if ctx else _context.context()
  # x and y must share a dtype; resolve them to a common T together.
  _attr_T, _inputs_T = _execute.args_to_matching_eager([x, y], _ctx)
  (x, y) = _inputs_T
  _inputs_flat = [x, y]
  _attrs = ("T", _attr_T)
  _result = _execute.execute(b"Sub", 1, inputs=_inputs_flat, attrs=_attrs,
                             ctx=_ctx, name=name)
  _execute.record_gradient(
      "Sub", _inputs_flat, _attrs, _result, name)
  _result, = _result
  return _result
def _sum(input, axis, keep_dims=False, name=None):
  r"""Computes the sum of elements across dimensions of a tensor.
  Reduces `input` along the dimensions given in `axis`. Unless
  `keep_dims` is true, the rank of the tensor is reduced by 1 for each entry in
  `axis`. If `keep_dims` is true, the reduced dimensions are
  retained with length 1.
  Args:
    input: A `Tensor`. Must be one of the following types: `float32`, `float64`, `int32`, `uint8`, `int16`, `int8`, `complex64`, `int64`, `qint8`, `quint8`, `qint32`, `bfloat16`, `uint16`, `complex128`, `half`, `uint32`, `uint64`.
      The tensor to reduce.
    axis: A `Tensor`. Must be one of the following types: `int32`, `int64`.
      The dimensions to reduce. Must be in the range
      `[-rank(input), rank(input))`.
    keep_dims: An optional `bool`. Defaults to `False`.
      If true, retain reduced dimensions with length 1.
    name: A name for the operation (optional).
  Returns:
    A `Tensor`. Has the same type as `input`.
  """
  _ctx = _context._context
  if _ctx is None or not _ctx._eager_context.is_eager:
    # Graph mode: normalize keep_dims, then register the op on the graph.
    # (The eager fallback performs its own keep_dims normalization.)
    if keep_dims is None:
      keep_dims = False
    keep_dims = _execute.make_bool(keep_dims, "keep_dims")
    _, _, _op = _op_def_lib._apply_op_helper(
        "Sum", input=input, reduction_indices=axis, keep_dims=keep_dims,
        name=name)
    _result = _op.outputs[:]
    _inputs_flat = _op.inputs
    _attrs = ("keep_dims", _op.get_attr("keep_dims"), "T", _op.get_attr("T"),
              "Tidx", _op.get_attr("Tidx"))
    _execute.record_gradient(
      "Sum", _inputs_flat, _attrs, _result, name)
    _result, = _result
    return _result
  else:
    try:
      # Eager mode: try the C fast-path execution first.
      _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
        _ctx._context_handle, _ctx._eager_context.device_name, "Sum", name,
        _ctx._post_execution_callbacks, input, axis, "keep_dims", keep_dims)
      return _result
    except _core._FallbackException:
      # Fast path rejected these inputs; fall back to the Python slow path.
      return _sum_eager_fallback(
          input, axis, keep_dims=keep_dims, name=name, ctx=_ctx)
    except _core._NotOkStatusException as e:
      # Re-raise the C++ status as the matching Python exception.
      if name is not None:
        message = e.message + " name: " + name
      else:
        message = e.message
      _six.raise_from(_core._status_to_exception(e.code, message), None)
def _sum_eager_fallback(input, axis, keep_dims=False, name=None, ctx=None):
  r"""Eager-mode slow path for `_sum`.
  Used when the C fast path raises `_FallbackException`; resolves attribute
  dtypes in Python and dispatches via `_execute.execute`.
  """
  _ctx = ctx if ctx else _context.context()
  if keep_dims is None:
    keep_dims = False
  keep_dims = _execute.make_bool(keep_dims, "keep_dims")
  _attr_T, (input,) = _execute.args_to_matching_eager([input], _ctx)
  _attr_Tidx, (axis,) = _execute.args_to_matching_eager([axis], _ctx, _dtypes.int32)
  _inputs_flat = [input, axis]
  _attrs = ("keep_dims", keep_dims, "T", _attr_T, "Tidx", _attr_Tidx)
  _result = _execute.execute(b"Sum", 1, inputs=_inputs_flat, attrs=_attrs,
                             ctx=_ctx, name=name)
  _execute.record_gradient(
      "Sum", _inputs_flat, _attrs, _result, name)
  _result, = _result
  return _result
def tan_eager_fallback(x, name=None, ctx=None):
  r"""Eager-mode slow path for `tan`.
  Used when the C fast path raises `_FallbackException`; resolves attribute
  dtypes in Python and dispatches via `_execute.execute`.
  """
  _ctx = ctx if ctx else _context.context()
  _attr_T, (x,) = _execute.args_to_matching_eager([x], _ctx)
  _inputs_flat = [x]
  _attrs = ("T", _attr_T)
  _result = _execute.execute(b"Tan", 1, inputs=_inputs_flat, attrs=_attrs,
                             ctx=_ctx, name=name)
  _execute.record_gradient(
      "Tan", _inputs_flat, _attrs, _result, name)
  _result, = _result
  return _result
def tanh(x, name=None):
  r"""Computes hyperbolic tangent of `x` element-wise.
  Args:
    x: A `Tensor`. Must be one of the following types: `bfloat16`, `half`, `float32`, `float64`, `complex64`, `complex128`.
    name: A name for the operation (optional).
  Returns:
    A `Tensor`. Has the same type as `x`.
  """
  _ctx = _context._context
  if _ctx is None or not _ctx._eager_context.is_eager:
    # Graph mode: add the op to the current graph via the op-def library.
    _, _, _op = _op_def_lib._apply_op_helper(
        "Tanh", x=x, name=name)
    _result = _op.outputs[:]
    _inputs_flat = _op.inputs
    _attrs = ("T", _op.get_attr("T"))
    _execute.record_gradient(
      "Tanh", _inputs_flat, _attrs, _result, name)
    _result, = _result
    return _result
  else:
    try:
      # Eager mode: try the C fast-path execution first.
      _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
        _ctx._context_handle, _ctx._eager_context.device_name, "Tanh", name,
        _ctx._post_execution_callbacks, x)
      return _result
    except _core._FallbackException:
      # Fast path rejected these inputs; fall back to the Python slow path.
      return tanh_eager_fallback(
          x, name=name, ctx=_ctx)
    except _core._NotOkStatusException as e:
      # Re-raise the C++ status as the matching Python exception.
      if name is not None:
        message = e.message + " name: " + name
      else:
        message = e.message
      _six.raise_from(_core._status_to_exception(e.code, message), None)
def tanh_eager_fallback(x, name=None, ctx=None):
  r"""Eager-mode slow path for `tanh`.
  Used when the C fast path raises `_FallbackException`; resolves attribute
  dtypes in Python and dispatches via `_execute.execute`.
  """
  _ctx = ctx if ctx else _context.context()
  _attr_T, (x,) = _execute.args_to_matching_eager([x], _ctx)
  _inputs_flat = [x]
  _attrs = ("T", _attr_T)
  _result = _execute.execute(b"Tanh", 1, inputs=_inputs_flat, attrs=_attrs,
                             ctx=_ctx, name=name)
  _execute.record_gradient(
      "Tanh", _inputs_flat, _attrs, _result, name)
  _result, = _result
  return _result
def tanh_grad(y, dy, name=None):
  r"""Computes the gradient for the tanh of `x` wrt its input.
  Specifically, `grad = dy * (1 - y*y)`, where `y = tanh(x)`, and `dy`
  is the corresponding input gradient.
  Args:
    y: A `Tensor`. Must be one of the following types: `bfloat16`, `half`, `float32`, `float64`, `complex64`, `complex128`.
    dy: A `Tensor`. Must have the same type as `y`.
    name: A name for the operation (optional).
  Returns:
    A `Tensor`. Has the same type as `y`.
  """
  _ctx = _context._context
  if _ctx is None or not _ctx._eager_context.is_eager:
    # Graph mode: add the op to the current graph via the op-def library.
    _, _, _op = _op_def_lib._apply_op_helper(
        "TanhGrad", y=y, dy=dy, name=name)
    _result = _op.outputs[:]
    _inputs_flat = _op.inputs
    _attrs = ("T", _op.get_attr("T"))
    _execute.record_gradient(
      "TanhGrad", _inputs_flat, _attrs, _result, name)
    _result, = _result
    return _result
  else:
    try:
      # Eager mode: try the C fast-path execution first.
      _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
        _ctx._context_handle, _ctx._eager_context.device_name, "TanhGrad",
        name, _ctx._post_execution_callbacks, y, dy)
      return _result
    except _core._FallbackException:
      # Fast path rejected these inputs; fall back to the Python slow path.
      return tanh_grad_eager_fallback(
          y, dy, name=name, ctx=_ctx)
    except _core._NotOkStatusException as e:
      # Re-raise the C++ status as the matching Python exception.
      if name is not None:
        message = e.message + " name: " + name
      else:
        message = e.message
      _six.raise_from(_core._status_to_exception(e.code, message), None)
def tanh_grad_eager_fallback(y, dy, name=None, ctx=None):
  r"""Eager-mode slow path for `tanh_grad`.
  Used when the C fast path raises `_FallbackException`; resolves attribute
  dtypes in Python and dispatches via `_execute.execute`.
  """
  _ctx = ctx if ctx else _context.context()
  # y and dy must share a dtype; resolve them to a common T together.
  _attr_T, _inputs_T = _execute.args_to_matching_eager([y, dy], _ctx)
  (y, dy) = _inputs_T
  _inputs_flat = [y, dy]
  _attrs = ("T", _attr_T)
  _result = _execute.execute(b"TanhGrad", 1, inputs=_inputs_flat,
                             attrs=_attrs, ctx=_ctx, name=name)
  _execute.record_gradient(
      "TanhGrad", _inputs_flat, _attrs, _result, name)
  _result, = _result
  return _result
def truncate_div(x, y, name=None):
  r"""Returns x / y element-wise for integer types.
  Truncation designates that negative numbers will round fractional quantities
  toward zero. I.e. -7 / 5 = -1. This matches C semantics but it is different
  than Python semantics. See `FloorDiv` for a division function that matches
  Python Semantics.
  *NOTE*: `TruncateDiv` supports broadcasting. More about broadcasting
  [here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html)
  Args:
    x: A `Tensor`. Must be one of the following types: `bfloat16`, `half`, `float32`, `float64`, `uint8`, `int8`, `uint16`, `int16`, `int32`, `int64`, `complex64`, `complex128`.
    y: A `Tensor`. Must have the same type as `x`.
    name: A name for the operation (optional).
  Returns:
    A `Tensor`. Has the same type as `x`.
  """
  _ctx = _context._context
  if _ctx is None or not _ctx._eager_context.is_eager:
    # Graph mode: add the op to the current graph via the op-def library.
    _, _, _op = _op_def_lib._apply_op_helper(
        "TruncateDiv", x=x, y=y, name=name)
    _result = _op.outputs[:]
    _inputs_flat = _op.inputs
    _attrs = ("T", _op.get_attr("T"))
    _execute.record_gradient(
      "TruncateDiv", _inputs_flat, _attrs, _result, name)
    _result, = _result
    return _result
  else:
    try:
      # Eager mode: try the C fast-path execution first.
      _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
        _ctx._context_handle, _ctx._eager_context.device_name, "TruncateDiv",
        name, _ctx._post_execution_callbacks, x, y)
      return _result
    except _core._FallbackException:
      # Fast path rejected these inputs; fall back to the Python slow path.
      return truncate_div_eager_fallback(
          x, y, name=name, ctx=_ctx)
    except _core._NotOkStatusException as e:
      # Re-raise the C++ status as the matching Python exception.
      if name is not None:
        message = e.message + " name: " + name
      else:
        message = e.message
      _six.raise_from(_core._status_to_exception(e.code, message), None)
def truncate_div_eager_fallback(x, y, name=None, ctx=None):
  r"""Eager-mode slow path for `truncate_div`.
  Used when the C fast path raises `_FallbackException`; resolves attribute
  dtypes in Python and dispatches via `_execute.execute`.
  """
  _ctx = ctx if ctx else _context.context()
  # x and y must share a dtype; resolve them to a common T together.
  _attr_T, _inputs_T = _execute.args_to_matching_eager([x, y], _ctx)
  (x, y) = _inputs_T
  _inputs_flat = [x, y]
  _attrs = ("T", _attr_T)
  _result = _execute.execute(b"TruncateDiv", 1, inputs=_inputs_flat,
                             attrs=_attrs, ctx=_ctx, name=name)
  _execute.record_gradient(
      "TruncateDiv", _inputs_flat, _attrs, _result, name)
  _result, = _result
  return _result
def truncate_mod(x, y, name=None):
  r"""Returns element-wise remainder of division. This emulates C semantics in that
  the result here is consistent with a truncating divide. E.g. `truncate(x / y) *
  y + truncate_mod(x, y) = x`.
  *NOTE*: `TruncateMod` supports broadcasting. More about broadcasting
  [here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html)
  Args:
    x: A `Tensor`. Must be one of the following types: `int32`, `int64`, `bfloat16`, `half`, `float32`, `float64`.
    y: A `Tensor`. Must have the same type as `x`.
    name: A name for the operation (optional).
  Returns:
    A `Tensor`. Has the same type as `x`.
  """
  _ctx = _context._context
  if _ctx is None or not _ctx._eager_context.is_eager:
    # Graph mode: add the op to the current graph via the op-def library.
    _, _, _op = _op_def_lib._apply_op_helper(
        "TruncateMod", x=x, y=y, name=name)
    _result = _op.outputs[:]
    _inputs_flat = _op.inputs
    _attrs = ("T", _op.get_attr("T"))
    _execute.record_gradient(
      "TruncateMod", _inputs_flat, _attrs, _result, name)
    _result, = _result
    return _result
  else:
    try:
      # Eager mode: try the C fast-path execution first.
      _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
        _ctx._context_handle, _ctx._eager_context.device_name, "TruncateMod",
        name, _ctx._post_execution_callbacks, x, y)
      return _result
    except _core._FallbackException:
      # Fast path rejected these inputs; fall back to the Python slow path.
      return truncate_mod_eager_fallback(
          x, y, name=name, ctx=_ctx)
    except _core._NotOkStatusException as e:
      # Re-raise the C++ status as the matching Python exception.
      if name is not None:
        message = e.message + " name: " + name
      else:
        message = e.message
      _six.raise_from(_core._status_to_exception(e.code, message), None)
def truncate_mod_eager_fallback(x, y, name=None, ctx=None):
  r"""Eager-mode slow path for `truncate_mod`.
  Used when the C fast path raises `_FallbackException`; resolves attribute
  dtypes in Python and dispatches via `_execute.execute`.
  """
  _ctx = ctx if ctx else _context.context()
  # x and y must share a dtype; resolve them to a common T together.
  _attr_T, _inputs_T = _execute.args_to_matching_eager([x, y], _ctx)
  (x, y) = _inputs_T
  _inputs_flat = [x, y]
  _attrs = ("T", _attr_T)
  _result = _execute.execute(b"TruncateMod", 1, inputs=_inputs_flat,
                             attrs=_attrs, ctx=_ctx, name=name)
  _execute.record_gradient(
      "TruncateMod", _inputs_flat, _attrs, _result, name)
  _result, = _result
  return _result
def unsorted_segment_max_eager_fallback(data, segment_ids, num_segments, name=None, ctx=None):
  r"""Eager-mode slow path for `unsorted_segment_max`.
  Used when the C fast path raises `_FallbackException`; resolves attribute
  dtypes in Python and dispatches via `_execute.execute`.
  """
  _ctx = ctx if ctx else _context.context()
  _attr_T, (data,) = _execute.args_to_matching_eager([data], _ctx)
  _attr_Tindices, (segment_ids,) = _execute.args_to_matching_eager([segment_ids], _ctx)
  _attr_Tnumsegments, (num_segments,) = _execute.args_to_matching_eager([num_segments], _ctx, _dtypes.int32)
  _inputs_flat = [data, segment_ids, num_segments]
  _attrs = ("T", _attr_T, "Tindices", _attr_Tindices, "Tnumsegments",
  _attr_Tnumsegments)
  _result = _execute.execute(b"UnsortedSegmentMax", 1, inputs=_inputs_flat,
                             attrs=_attrs, ctx=_ctx, name=name)
  _execute.record_gradient(
      "UnsortedSegmentMax", _inputs_flat, _attrs, _result, name)
  _result, = _result
  return _result
def unsorted_segment_min_eager_fallback(data, segment_ids, num_segments, name=None, ctx=None):
  r"""Eager-mode slow path for `unsorted_segment_min`.
  Used when the C fast path raises `_FallbackException`; resolves attribute
  dtypes in Python and dispatches via `_execute.execute`.
  """
  _ctx = ctx if ctx else _context.context()
  _attr_T, (data,) = _execute.args_to_matching_eager([data], _ctx)
  _attr_Tindices, (segment_ids,) = _execute.args_to_matching_eager([segment_ids], _ctx)
  _attr_Tnumsegments, (num_segments,) = _execute.args_to_matching_eager([num_segments], _ctx, _dtypes.int32)
  _inputs_flat = [data, segment_ids, num_segments]
  _attrs = ("T", _attr_T, "Tindices", _attr_Tindices, "Tnumsegments",
  _attr_Tnumsegments)
  _result = _execute.execute(b"UnsortedSegmentMin", 1, inputs=_inputs_flat,
                             attrs=_attrs, ctx=_ctx, name=name)
  _execute.record_gradient(
      "UnsortedSegmentMin", _inputs_flat, _attrs, _result, name)
  _result, = _result
  return _result
def unsorted_segment_prod_eager_fallback(data, segment_ids, num_segments, name=None, ctx=None):
  r"""Eager-mode slow path for `unsorted_segment_prod`.
  Used when the C fast path raises `_FallbackException`; resolves attribute
  dtypes in Python and dispatches via `_execute.execute`.
  """
  _ctx = ctx if ctx else _context.context()
  _attr_T, (data,) = _execute.args_to_matching_eager([data], _ctx)
  _attr_Tindices, (segment_ids,) = _execute.args_to_matching_eager([segment_ids], _ctx)
  _attr_Tnumsegments, (num_segments,) = _execute.args_to_matching_eager([num_segments], _ctx, _dtypes.int32)
  _inputs_flat = [data, segment_ids, num_segments]
  _attrs = ("T", _attr_T, "Tindices", _attr_Tindices, "Tnumsegments",
  _attr_Tnumsegments)
  _result = _execute.execute(b"UnsortedSegmentProd", 1, inputs=_inputs_flat,
                             attrs=_attrs, ctx=_ctx, name=name)
  _execute.record_gradient(
      "UnsortedSegmentProd", _inputs_flat, _attrs, _result, name)
  _result, = _result
  return _result
def unsorted_segment_sum_eager_fallback(data, segment_ids, num_segments, name=None, ctx=None):
  r"""Eager-mode slow path for `unsorted_segment_sum`.
  Used when the C fast path raises `_FallbackException`; resolves attribute
  dtypes in Python and dispatches via `_execute.execute`.
  """
  _ctx = ctx if ctx else _context.context()
  _attr_T, (data,) = _execute.args_to_matching_eager([data], _ctx)
  _attr_Tindices, (segment_ids,) = _execute.args_to_matching_eager([segment_ids], _ctx)
  _attr_Tnumsegments, (num_segments,) = _execute.args_to_matching_eager([num_segments], _ctx, _dtypes.int32)
  _inputs_flat = [data, segment_ids, num_segments]
  _attrs = ("T", _attr_T, "Tindices", _attr_Tindices, "Tnumsegments",
  _attr_Tnumsegments)
  _result = _execute.execute(b"UnsortedSegmentSum", 1, inputs=_inputs_flat,
                             attrs=_attrs, ctx=_ctx, name=name)
  _execute.record_gradient(
      "UnsortedSegmentSum", _inputs_flat, _attrs, _result, name)
  _result, = _result
  return _result
def zeta_eager_fallback(x, q, name=None, ctx=None):
  r"""This is the slowpath function for Eager mode.
  This is for function zeta

  Coerces `x` and `q` to a common dtype, runs the Zeta op through the
  eager executor, and records the gradient for the call.
  """
  # Prefer the caller-supplied context; otherwise grab the ambient one.
  _ctx = _context.context() if not ctx else ctx
  # Both inputs share the "T" attribute, so match them together.
  _attr_T, _inputs_T = _execute.args_to_matching_eager([x, q], _ctx)
  (x, q) = _inputs_T
  _inputs_flat = [x, q]
  _attrs = ("T", _attr_T)
  _result = _execute.execute(b"Zeta", 1, inputs=_inputs_flat, attrs=_attrs,
                             ctx=_ctx, name=name)
  _execute.record_gradient(
      "Zeta", _inputs_flat, _attrs, _result, name)
  # Unwrap the single output tensor.
  _result, = _result
  return _result
# op {
# name: "Abs"
# input_arg {
# name: "x"
# type_attr: "T"
# }
# output_arg {
# name: "y"
# type_attr: "T"
# }
# attr {
# name: "T"
# type: "type"
# allowed_values {
# list {
# type: DT_BFLOAT16
# type: DT_HALF
# type: DT_FLOAT
# type: DT_DOUBLE
# type: DT_INT32
# type: DT_INT64
# }
# }
# }
# }
# op {
# name: "AccumulateNV2"
# input_arg {
# name: "inputs"
# type_attr: "T"
# number_attr: "N"
# }
# output_arg {
# name: "sum"
# type_attr: "T"
# }
# attr {
# name: "N"
# type: "int"
# has_minimum: true
# minimum: 1
# }
# attr {
# name: "T"
# type: "type"
# allowed_values {
# list {
# type: DT_FLOAT
# type: DT_DOUBLE
# type: DT_INT32
# type: DT_UINT8
# type: DT_INT16
# type: DT_INT8
# type: DT_COMPLEX64
# type: DT_INT64
# type: DT_QINT8
# type: DT_QUINT8
# type: DT_QINT32
# type: DT_BFLOAT16
# type: DT_UINT16
# type: DT_COMPLEX128
# type: DT_HALF
# type: DT_UINT32
# type: DT_UINT64
# }
# }
# }
# attr {
# name: "shape"
# type: "shape"
# }
# is_aggregate: true
# is_commutative: true
# }
# op {
# name: "Acos"
# input_arg {
# name: "x"
# type_attr: "T"
# }
# output_arg {
# name: "y"
# type_attr: "T"
# }
# attr {
# name: "T"
# type: "type"
# allowed_values {
# list {
# type: DT_BFLOAT16
# type: DT_HALF
# type: DT_FLOAT
# type: DT_DOUBLE
# type: DT_INT32
# type: DT_INT64
# type: DT_COMPLEX64
# type: DT_COMPLEX128
# }
# }
# }
# }
# op {
# name: "Acosh"
# input_arg {
# name: "x"
# type_attr: "T"
# }
# output_arg {
# name: "y"
# type_attr: "T"
# }
# attr {
# name: "T"
# type: "type"
# allowed_values {
# list {
# type: DT_BFLOAT16
# type: DT_HALF
# type: DT_FLOAT
# type: DT_DOUBLE
# type: DT_COMPLEX64
# type: DT_COMPLEX128
# }
# }
# }
# }
# op {
# name: "Add"
# input_arg {
# name: "x"
# type_attr: "T"
# }
# input_arg {
# name: "y"
# type_attr: "T"
# }
# output_arg {
# name: "z"
# type_attr: "T"
# }
# attr {
# name: "T"
# type: "type"
# allowed_values {
# list {
# type: DT_BFLOAT16
# type: DT_HALF
# type: DT_FLOAT
# type: DT_DOUBLE
# type: DT_UINT8
# type: DT_INT8
# type: DT_INT16
# type: DT_INT32
# type: DT_INT64
# type: DT_COMPLEX64
# type: DT_COMPLEX128
# type: DT_STRING
# }
# }
# }
# }
# op {
# name: "AddN"
# input_arg {
# name: "inputs"
# type_attr: "T"
# number_attr: "N"
# }
# output_arg {
# name: "sum"
# type_attr: "T"
# }
# attr {
# name: "N"
# type: "int"
# has_minimum: true
# minimum: 1
# }
# attr {
# name: "T"
# type: "type"
# allowed_values {
# list {
# type: DT_FLOAT
# type: DT_DOUBLE
# type: DT_INT32
# type: DT_UINT8
# type: DT_INT16
# type: DT_INT8
# type: DT_COMPLEX64
# type: DT_INT64
# type: DT_QINT8
# type: DT_QUINT8
# type: DT_QINT32
# type: DT_BFLOAT16
# type: DT_UINT16
# type: DT_COMPLEX128
# type: DT_HALF
# type: DT_UINT32
# type: DT_UINT64
# type: DT_VARIANT
# }
# }
# }
# is_aggregate: true
# is_commutative: true
# }
# op {
# name: "AddV2"
# input_arg {
# name: "x"
# type_attr: "T"
# }
# input_arg {
# name: "y"
# type_attr: "T"
# }
# output_arg {
# name: "z"
# type_attr: "T"
# }
# attr {
# name: "T"
# type: "type"
# allowed_values {
# list {
# type: DT_BFLOAT16
# type: DT_HALF
# type: DT_FLOAT
# type: DT_DOUBLE
# type: DT_UINT8
# type: DT_INT8
# type: DT_INT16
# type: DT_INT32
# type: DT_INT64
# type: DT_COMPLEX64
# type: DT_COMPLEX128
# }
# }
# }
# is_aggregate: true
# is_commutative: true
# }
# op {
# name: "All"
# input_arg {
# name: "input"
# type: DT_BOOL
# }
# input_arg {
# name: "reduction_indices"
# type_attr: "Tidx"
# }
# output_arg {
# name: "output"
# type: DT_BOOL
# }
# attr {
# name: "keep_dims"
# type: "bool"
# default_value {
# b: false
# }
# }
# attr {
# name: "Tidx"
# type: "type"
# default_value {
# type: DT_INT32
# }
# allowed_values {
# list {
# type: DT_INT32
# type: DT_INT64
# }
# }
# }
# }
# op {
# name: "Angle"
# input_arg {
# name: "input"
# type_attr: "T"
# }
# output_arg {
# name: "output"
# type_attr: "Tout"
# }
# attr {
# name: "T"
# type: "type"
# default_value {
# type: DT_COMPLEX64
# }
# allowed_values {
# list {
# type: DT_COMPLEX64
# type: DT_COMPLEX128
# }
# }
# }
# attr {
# name: "Tout"
# type: "type"
# default_value {
# type: DT_FLOAT
# }
# allowed_values {
# list {
# type: DT_FLOAT
# type: DT_DOUBLE
# }
# }
# }
# }
# op {
# name: "Any"
# input_arg {
# name: "input"
# type: DT_BOOL
# }
# input_arg {
# name: "reduction_indices"
# type_attr: "Tidx"
# }
# output_arg {
# name: "output"
# type: DT_BOOL
# }
# attr {
# name: "keep_dims"
# type: "bool"
# default_value {
# b: false
# }
# }
# attr {
# name: "Tidx"
# type: "type"
# default_value {
# type: DT_INT32
# }
# allowed_values {
# list {
# type: DT_INT32
# type: DT_INT64
# }
# }
# }
# }
# op {
# name: "ApproximateEqual"
# input_arg {
# name: "x"
# type_attr: "T"
# }
# input_arg {
# name: "y"
# type_attr: "T"
# }
# output_arg {
# name: "z"
# type: DT_BOOL
# }
# attr {
# name: "T"
# type: "type"
# allowed_values {
# list {
# type: DT_FLOAT
# type: DT_DOUBLE
# type: DT_INT32
# type: DT_UINT8
# type: DT_INT16
# type: DT_INT8
# type: DT_COMPLEX64
# type: DT_INT64
# type: DT_QINT8
# type: DT_QUINT8
# type: DT_QINT32
# type: DT_BFLOAT16
# type: DT_UINT16
# type: DT_COMPLEX128
# type: DT_HALF
# type: DT_UINT32
# type: DT_UINT64
# }
# }
# }
# attr {
# name: "tolerance"
# type: "float"
# default_value {
# f: 1e-05
# }
# }
# is_commutative: true
# }
# op {
# name: "ArgMax"
# input_arg {
# name: "input"
# type_attr: "T"
# }
# input_arg {
# name: "dimension"
# type_attr: "Tidx"
# }
# output_arg {
# name: "output"
# type_attr: "output_type"
# }
# attr {
# name: "T"
# type: "type"
# allowed_values {
# list {
# type: DT_FLOAT
# type: DT_DOUBLE
# type: DT_INT32
# type: DT_UINT8
# type: DT_INT16
# type: DT_INT8
# type: DT_COMPLEX64
# type: DT_INT64
# type: DT_QINT8
# type: DT_QUINT8
# type: DT_QINT32
# type: DT_BFLOAT16
# type: DT_UINT16
# type: DT_COMPLEX128
# type: DT_HALF
# type: DT_UINT32
# type: DT_UINT64
# }
# }
# }
# attr {
# name: "Tidx"
# type: "type"
# default_value {
# type: DT_INT32
# }
# allowed_values {
# list {
# type: DT_INT32
# type: DT_INT64
# }
# }
# }
# attr {
# name: "output_type"
# type: "type"
# default_value {
# type: DT_INT64
# }
# allowed_values {
# list {
# type: DT_INT32
# type: DT_INT64
# }
# }
# }
# }
# op {
# name: "ArgMin"
# input_arg {
# name: "input"
# type_attr: "T"
# }
# input_arg {
# name: "dimension"
# type_attr: "Tidx"
# }
# output_arg {
# name: "output"
# type_attr: "output_type"
# }
# attr {
# name: "T"
# type: "type"
# allowed_values {
# list {
# type: DT_FLOAT
# type: DT_DOUBLE
# type: DT_INT32
# type: DT_UINT8
# type: DT_INT16
# type: DT_INT8
# type: DT_COMPLEX64
# type: DT_INT64
# type: DT_QINT8
# type: DT_QUINT8
# type: DT_QINT32
# type: DT_BFLOAT16
# type: DT_UINT16
# type: DT_COMPLEX128
# type: DT_HALF
# type: DT_UINT32
# type: DT_UINT64
# }
# }
# }
# attr {
# name: "Tidx"
# type: "type"
# default_value {
# type: DT_INT32
# }
# allowed_values {
# list {
# type: DT_INT32
# type: DT_INT64
# }
# }
# }
# attr {
# name: "output_type"
# type: "type"
# default_value {
# type: DT_INT64
# }
# allowed_values {
# list {
# type: DT_INT32
# type: DT_INT64
# }
# }
# }
# }
# op {
# name: "Asin"
# input_arg {
# name: "x"
# type_attr: "T"
# }
# output_arg {
# name: "y"
# type_attr: "T"
# }
# attr {
# name: "T"
# type: "type"
# allowed_values {
# list {
# type: DT_BFLOAT16
# type: DT_HALF
# type: DT_FLOAT
# type: DT_DOUBLE
# type: DT_INT32
# type: DT_INT64
# type: DT_COMPLEX64
# type: DT_COMPLEX128
# }
# }
# }
# }
# op {
# name: "Asinh"
# input_arg {
# name: "x"
# type_attr: "T"
# }
# output_arg {
# name: "y"
# type_attr: "T"
# }
# attr {
# name: "T"
# type: "type"
# allowed_values {
# list {
# type: DT_BFLOAT16
# type: DT_HALF
# type: DT_FLOAT
# type: DT_DOUBLE
# type: DT_COMPLEX64
# type: DT_COMPLEX128
# }
# }
# }
# }
# op {
# name: "Atan"
# input_arg {
# name: "x"
# type_attr: "T"
# }
# output_arg {
# name: "y"
# type_attr: "T"
# }
# attr {
# name: "T"
# type: "type"
# allowed_values {
# list {
# type: DT_BFLOAT16
# type: DT_HALF
# type: DT_FLOAT
# type: DT_DOUBLE
# type: DT_INT32
# type: DT_INT64
# type: DT_COMPLEX64
# type: DT_COMPLEX128
# }
# }
# }
# }
# op {
# name: "Atan2"
# input_arg {
# name: "y"
# type_attr: "T"
# }
# input_arg {
# name: "x"
# type_attr: "T"
# }
# output_arg {
# name: "z"
# type_attr: "T"
# }
# attr {
# name: "T"
# type: "type"
# allowed_values {
# list {
# type: DT_BFLOAT16
# type: DT_HALF
# type: DT_FLOAT
# type: DT_DOUBLE
# }
# }
# }
# }
# op {
# name: "Atanh"
# input_arg {
# name: "x"
# type_attr: "T"
# }
# output_arg {
# name: "y"
# type_attr: "T"
# }
# attr {
# name: "T"
# type: "type"
# allowed_values {
# list {
# type: DT_BFLOAT16
# type: DT_HALF
# type: DT_FLOAT
# type: DT_DOUBLE
# type: DT_COMPLEX64
# type: DT_COMPLEX128
# }
# }
# }
# }
# op {
# name: "BatchMatMul"
# input_arg {
# name: "x"
# type_attr: "T"
# }
# input_arg {
# name: "y"
# type_attr: "T"
# }
# output_arg {
# name: "output"
# type_attr: "T"
# }
# attr {
# name: "T"
# type: "type"
# allowed_values {
# list {
# type: DT_BFLOAT16
# type: DT_HALF
# type: DT_FLOAT
# type: DT_DOUBLE
# type: DT_INT32
# type: DT_COMPLEX64
# type: DT_COMPLEX128
# }
# }
# }
# attr {
# name: "adj_x"
# type: "bool"
# default_value {
# b: false
# }
# }
# attr {
# name: "adj_y"
# type: "bool"
# default_value {
# b: false
# }
# }
# }
# op {
# name: "BesselI0e"
# input_arg {
# name: "x"
# type_attr: "T"
# }
# output_arg {
# name: "y"
# type_attr: "T"
# }
# attr {
# name: "T"
# type: "type"
# allowed_values {
# list {
# type: DT_BFLOAT16
# type: DT_HALF
# type: DT_FLOAT
# type: DT_DOUBLE
# }
# }
# }
# }
# op {
# name: "BesselI1e"
# input_arg {
# name: "x"
# type_attr: "T"
# }
# output_arg {
# name: "y"
# type_attr: "T"
# }
# attr {
# name: "T"
# type: "type"
# allowed_values {
# list {
# type: DT_BFLOAT16
# type: DT_HALF
# type: DT_FLOAT
# type: DT_DOUBLE
# }
# }
# }
# }
# op {
# name: "Betainc"
# input_arg {
# name: "a"
# type_attr: "T"
# }
# input_arg {
# name: "b"
# type_attr: "T"
# }
# input_arg {
# name: "x"
# type_attr: "T"
# }
# output_arg {
# name: "z"
# type_attr: "T"
# }
# attr {
# name: "T"
# type: "type"
# allowed_values {
# list {
# type: DT_FLOAT
# type: DT_DOUBLE
# }
# }
# }
# }
# op {
# name: "Bincount"
# input_arg {
# name: "arr"
# type: DT_INT32
# }
# input_arg {
# name: "size"
# type: DT_INT32
# }
# input_arg {
# name: "weights"
# type_attr: "T"
# }
# output_arg {
# name: "bins"
# type_attr: "T"
# }
# attr {
# name: "T"
# type: "type"
# allowed_values {
# list {
# type: DT_INT32
# type: DT_INT64
# type: DT_FLOAT
# type: DT_DOUBLE
# }
# }
# }
# }
# op {
# name: "Bucketize"
# input_arg {
# name: "input"
# type_attr: "T"
# }
# output_arg {
# name: "output"
# type: DT_INT32
# }
# attr {
# name: "T"
# type: "type"
# allowed_values {
# list {
# type: DT_INT32
# type: DT_INT64
# type: DT_FLOAT
# type: DT_DOUBLE
# }
# }
# }
# attr {
# name: "boundaries"
# type: "list(float)"
# }
# }
# op {
# name: "Cast"
# input_arg {
# name: "x"
# type_attr: "SrcT"
# }
# output_arg {
# name: "y"
# type_attr: "DstT"
# }
# attr {
# name: "SrcT"
# type: "type"
# }
# attr {
# name: "DstT"
# type: "type"
# }
# }
# op {
# name: "Ceil"
# input_arg {
# name: "x"
# type_attr: "T"
# }
# output_arg {
# name: "y"
# type_attr: "T"
# }
# attr {
# name: "T"
# type: "type"
# allowed_values {
# list {
# type: DT_BFLOAT16
# type: DT_HALF
# type: DT_FLOAT
# type: DT_DOUBLE
# }
# }
# }
# }
# op {
# name: "ClipByValue"
# input_arg {
# name: "t"
# type_attr: "T"
# }
# input_arg {
# name: "clip_value_min"
# type_attr: "T"
# }
# input_arg {
# name: "clip_value_max"
# type_attr: "T"
# }
# output_arg {
# name: "output"
# type_attr: "T"
# }
# attr {
# name: "T"
# type: "type"
# allowed_values {
# list {
# type: DT_FLOAT
# type: DT_DOUBLE
# type: DT_INT32
# type: DT_UINT8
# type: DT_INT16
# type: DT_INT8
# type: DT_COMPLEX64
# type: DT_INT64
# type: DT_QINT8
# type: DT_QUINT8
# type: DT_QINT32
# type: DT_BFLOAT16
# type: DT_UINT16
# type: DT_COMPLEX128
# type: DT_HALF
# type: DT_UINT32
# type: DT_UINT64
# }
# }
# }
# }
# op {
# name: "CompareAndBitpack"
# input_arg {
# name: "input"
# type_attr: "T"
# }
# input_arg {
# name: "threshold"
# type_attr: "T"
# }
# output_arg {
# name: "output"
# type: DT_UINT8
# }
# attr {
# name: "T"
# type: "type"
# allowed_values {
# list {
# type: DT_BOOL
# type: DT_HALF
# type: DT_FLOAT
# type: DT_DOUBLE
# type: DT_INT8
# type: DT_INT16
# type: DT_INT32
# type: DT_INT64
# }
# }
# }
# }
# op {
# name: "Complex"
# input_arg {
# name: "real"
# type_attr: "T"
# }
# input_arg {
# name: "imag"
# type_attr: "T"
# }
# output_arg {
# name: "out"
# type_attr: "Tout"
# }
# attr {
# name: "T"
# type: "type"
# default_value {
# type: DT_FLOAT
# }
# allowed_values {
# list {
# type: DT_FLOAT
# type: DT_DOUBLE
# }
# }
# }
# attr {
# name: "Tout"
# type: "type"
# default_value {
# type: DT_COMPLEX64
# }
# allowed_values {
# list {
# type: DT_COMPLEX64
# type: DT_COMPLEX128
# }
# }
# }
# }
# op {
# name: "ComplexAbs"
# input_arg {
# name: "x"
# type_attr: "T"
# }
# output_arg {
# name: "y"
# type_attr: "Tout"
# }
# attr {
# name: "T"
# type: "type"
# default_value {
# type: DT_COMPLEX64
# }
# allowed_values {
# list {
# type: DT_COMPLEX64
# type: DT_COMPLEX128
# }
# }
# }
# attr {
# name: "Tout"
# type: "type"
# default_value {
# type: DT_FLOAT
# }
# allowed_values {
# list {
# type: DT_FLOAT
# type: DT_DOUBLE
# }
# }
# }
# }
# op {
# name: "Conj"
# input_arg {
# name: "input"
# type_attr: "T"
# }
# output_arg {
# name: "output"
# type_attr: "T"
# }
# attr {
# name: "T"
# type: "type"
# default_value {
# type: DT_COMPLEX64
# }
# allowed_values {
# list {
# type: DT_COMPLEX64
# type: DT_COMPLEX128
# type: DT_VARIANT
# }
# }
# }
# }
# op {
# name: "Cos"
# input_arg {
# name: "x"
# type_attr: "T"
# }
# output_arg {
# name: "y"
# type_attr: "T"
# }
# attr {
# name: "T"
# type: "type"
# allowed_values {
# list {
# type: DT_BFLOAT16
# type: DT_HALF
# type: DT_FLOAT
# type: DT_DOUBLE
# type: DT_COMPLEX64
# type: DT_COMPLEX128
# }
# }
# }
# }
# op {
# name: "Cosh"
# input_arg {
# name: "x"
# type_attr: "T"
# }
# output_arg {
# name: "y"
# type_attr: "T"
# }
# attr {
# name: "T"
# type: "type"
# allowed_values {
# list {
# type: DT_BFLOAT16
# type: DT_HALF
# type: DT_FLOAT
# type: DT_DOUBLE
# type: DT_COMPLEX64
# type: DT_COMPLEX128
# }
# }
# }
# }
# op {
# name: "Cross"
# input_arg {
# name: "a"
# type_attr: "T"
# }
# input_arg {
# name: "b"
# type_attr: "T"
# }
# output_arg {
# name: "product"
# type_attr: "T"
# }
# attr {
# name: "T"
# type: "type"
# allowed_values {
# list {
# type: DT_FLOAT
# type: DT_DOUBLE
# type: DT_INT32
# type: DT_UINT8
# type: DT_INT16
# type: DT_INT8
# type: DT_INT64
# type: DT_BFLOAT16
# type: DT_UINT16
# type: DT_HALF
# type: DT_UINT32
# type: DT_UINT64
# }
# }
# }
# }
# op {
# name: "Cumprod"
# input_arg {
# name: "x"
# type_attr: "T"
# }
# input_arg {
# name: "axis"
# type_attr: "Tidx"
# }
# output_arg {
# name: "out"
# type_attr: "T"
# }
# attr {
# name: "exclusive"
# type: "bool"
# default_value {
# b: false
# }
# }
# attr {
# name: "reverse"
# type: "bool"
# default_value {
# b: false
# }
# }
# attr {
# name: "T"
# type: "type"
# allowed_values {
# list {
# type: DT_FLOAT
# type: DT_DOUBLE
# type: DT_INT32
# type: DT_UINT8
# type: DT_INT16
# type: DT_INT8
# type: DT_COMPLEX64
# type: DT_INT64
# type: DT_QINT8
# type: DT_QUINT8
# type: DT_QINT32
# type: DT_BFLOAT16
# type: DT_UINT16
# type: DT_COMPLEX128
# type: DT_HALF
# type: DT_UINT32
# type: DT_UINT64
# }
# }
# }
# attr {
# name: "Tidx"
# type: "type"
# default_value {
# type: DT_INT32
# }
# allowed_values {
# list {
# type: DT_INT32
# type: DT_INT64
# }
# }
# }
# }
# op {
# name: "Cumsum"
# input_arg {
# name: "x"
# type_attr: "T"
# }
# input_arg {
# name: "axis"
# type_attr: "Tidx"
# }
# output_arg {
# name: "out"
# type_attr: "T"
# }
# attr {
# name: "exclusive"
# type: "bool"
# default_value {
# b: false
# }
# }
# attr {
# name: "reverse"
# type: "bool"
# default_value {
# b: false
# }
# }
# attr {
# name: "T"
# type: "type"
# allowed_values {
# list {
# type: DT_FLOAT
# type: DT_DOUBLE
# type: DT_INT32
# type: DT_UINT8
# type: DT_INT16
# type: DT_INT8
# type: DT_COMPLEX64
# type: DT_INT64
# type: DT_QINT8
# type: DT_QUINT8
# type: DT_QINT32
# type: DT_BFLOAT16
# type: DT_UINT16
# type: DT_COMPLEX128
# type: DT_HALF
# type: DT_UINT32
# type: DT_UINT64
# }
# }
# }
# attr {
# name: "Tidx"
# type: "type"
# default_value {
# type: DT_INT32
# }
# allowed_values {
# list {
# type: DT_INT32
# type: DT_INT64
# }
# }
# }
# }
# op {
# name: "Digamma"
# input_arg {
# name: "x"
# type_attr: "T"
# }
# output_arg {
# name: "y"
# type_attr: "T"
# }
# attr {
# name: "T"
# type: "type"
# allowed_values {
# list {
# type: DT_BFLOAT16
# type: DT_HALF
# type: DT_FLOAT
# type: DT_DOUBLE
# }
# }
# }
# }
# op {
# name: "Div"
# input_arg {
# name: "x"
# type_attr: "T"
# }
# input_arg {
# name: "y"
# type_attr: "T"
# }
# output_arg {
# name: "z"
# type_attr: "T"
# }
# attr {
# name: "T"
# type: "type"
# allowed_values {
# list {
# type: DT_BFLOAT16
# type: DT_HALF
# type: DT_FLOAT
# type: DT_DOUBLE
# type: DT_UINT8
# type: DT_INT8
# type: DT_UINT16
# type: DT_INT16
# type: DT_INT32
# type: DT_INT64
# type: DT_COMPLEX64
# type: DT_COMPLEX128
# }
# }
# }
# }
# op {
# name: "Equal"
# input_arg {
# name: "x"
# type_attr: "T"
# }
# input_arg {
# name: "y"
# type_attr: "T"
# }
# output_arg {
# name: "z"
# type: DT_BOOL
# }
# attr {
# name: "T"
# type: "type"
# allowed_values {
# list {
# type: DT_BFLOAT16
# type: DT_HALF
# type: DT_FLOAT
# type: DT_DOUBLE
# type: DT_UINT8
# type: DT_INT8
# type: DT_INT16
# type: DT_INT32
# type: DT_INT64
# type: DT_COMPLEX64
# type: DT_QUINT8
# type: DT_QINT8
# type: DT_QINT32
# type: DT_STRING
# type: DT_BOOL
# type: DT_COMPLEX128
# }
# }
# }
# is_commutative: true
# }
# op {
# name: "Erf"
# input_arg {
# name: "x"
# type_attr: "T"
# }
# output_arg {
# name: "y"
# type_attr: "T"
# }
# attr {
# name: "T"
# type: "type"
# allowed_values {
# list {
# type: DT_BFLOAT16
# type: DT_HALF
# type: DT_FLOAT
# type: DT_DOUBLE
# }
# }
# }
# }
# op {
# name: "Erfc"
# input_arg {
# name: "x"
# type_attr: "T"
# }
# output_arg {
# name: "y"
# type_attr: "T"
# }
# attr {
# name: "T"
# type: "type"
# allowed_values {
# list {
# type: DT_BFLOAT16
# type: DT_HALF
# type: DT_FLOAT
# type: DT_DOUBLE
# }
# }
# }
# }
# op {
# name: "Exp"
# input_arg {
# name: "x"
# type_attr: "T"
# }
# output_arg {
# name: "y"
# type_attr: "T"
# }
# attr {
# name: "T"
# type: "type"
# allowed_values {
# list {
# type: DT_BFLOAT16
# type: DT_HALF
# type: DT_FLOAT
# type: DT_DOUBLE
# type: DT_COMPLEX64
# type: DT_COMPLEX128
# }
# }
# }
# }
# op {
# name: "Expm1"
# input_arg {
# name: "x"
# type_attr: "T"
# }
# output_arg {
# name: "y"
# type_attr: "T"
# }
# attr {
# name: "T"
# type: "type"
# allowed_values {
# list {
# type: DT_BFLOAT16
# type: DT_HALF
# type: DT_FLOAT
# type: DT_DOUBLE
# type: DT_COMPLEX64
# type: DT_COMPLEX128
# }
# }
# }
# }
# op {
# name: "Floor"
# input_arg {
# name: "x"
# type_attr: "T"
# }
# output_arg {
# name: "y"
# type_attr: "T"
# }
# attr {
# name: "T"
# type: "type"
# allowed_values {
# list {
# type: DT_BFLOAT16
# type: DT_HALF
# type: DT_FLOAT
# type: DT_DOUBLE
# }
# }
# }
# }
# op {
# name: "FloorDiv"
# input_arg {
# name: "x"
# type_attr: "T"
# }
# input_arg {
# name: "y"
# type_attr: "T"
# }
# output_arg {
# name: "z"
# type_attr: "T"
# }
# attr {
# name: "T"
# type: "type"
# allowed_values {
# list {
# type: DT_BFLOAT16
# type: DT_HALF
# type: DT_FLOAT
# type: DT_DOUBLE
# type: DT_UINT8
# type: DT_INT8
# type: DT_UINT16
# type: DT_INT16
# type: DT_INT32
# type: DT_INT64
# type: DT_COMPLEX64
# type: DT_COMPLEX128
# }
# }
# }
# }
# op {
# name: "FloorMod"
# input_arg {
# name: "x"
# type_attr: "T"
# }
# input_arg {
# name: "y"
# type_attr: "T"
# }
# output_arg {
# name: "z"
# type_attr: "T"
# }
# attr {
# name: "T"
# type: "type"
# allowed_values {
# list {
# type: DT_INT32
# type: DT_INT64
# type: DT_BFLOAT16
# type: DT_HALF
# type: DT_FLOAT
# type: DT_DOUBLE
# }
# }
# }
# }
# op {
# name: "Greater"
# input_arg {
# name: "x"
# type_attr: "T"
# }
# input_arg {
# name: "y"
# type_attr: "T"
# }
# output_arg {
# name: "z"
# type: DT_BOOL
# }
# attr {
# name: "T"
# type: "type"
# allowed_values {
# list {
# type: DT_FLOAT
# type: DT_DOUBLE
# type: DT_INT32
# type: DT_UINT8
# type: DT_INT16
# type: DT_INT8
# type: DT_INT64
# type: DT_BFLOAT16
# type: DT_UINT16
# type: DT_HALF
# type: DT_UINT32
# type: DT_UINT64
# }
# }
# }
# }
# op {
# name: "GreaterEqual"
# input_arg {
# name: "x"
# type_attr: "T"
# }
# input_arg {
# name: "y"
# type_attr: "T"
# }
# output_arg {
# name: "z"
# type: DT_BOOL
# }
# attr {
# name: "T"
# type: "type"
# allowed_values {
# list {
# type: DT_FLOAT
# type: DT_DOUBLE
# type: DT_INT32
# type: DT_UINT8
# type: DT_INT16
# type: DT_INT8
# type: DT_INT64
# type: DT_BFLOAT16
# type: DT_UINT16
# type: DT_HALF
# type: DT_UINT32
# type: DT_UINT64
# }
# }
# }
# }
# op {
# name: "HistogramFixedWidth"
# input_arg {
# name: "values"
# type_attr: "T"
# }
# input_arg {
# name: "value_range"
# type_attr: "T"
# }
# input_arg {
# name: "nbins"
# type: DT_INT32
# }
# output_arg {
# name: "out"
# type_attr: "dtype"
# }
# attr {
# name: "T"
# type: "type"
# allowed_values {
# list {
# type: DT_INT32
# type: DT_INT64
# type: DT_FLOAT
# type: DT_DOUBLE
# }
# }
# }
# attr {
# name: "dtype"
# type: "type"
# default_value {
# type: DT_INT32
# }
# allowed_values {
# list {
# type: DT_INT32
# type: DT_INT64
# }
# }
# }
# }
# op {
# name: "Igamma"
# input_arg {
# name: "a"
# type_attr: "T"
# }
# input_arg {
# name: "x"
# type_attr: "T"
# }
# output_arg {
# name: "z"
# type_attr: "T"
# }
# attr {
# name: "T"
# type: "type"
# allowed_values {
# list {
# type: DT_FLOAT
# type: DT_DOUBLE
# }
# }
# }
# }
# op {
# name: "IgammaGradA"
# input_arg {
# name: "a"
# type_attr: "T"
# }
# input_arg {
# name: "x"
# type_attr: "T"
# }
# output_arg {
# name: "z"
# type_attr: "T"
# }
# attr {
# name: "T"
# type: "type"
# allowed_values {
# list {
# type: DT_FLOAT
# type: DT_DOUBLE
# }
# }
# }
# }
# op {
# name: "Igammac"
# input_arg {
# name: "a"
# type_attr: "T"
# }
# input_arg {
# name: "x"
# type_attr: "T"
# }
# output_arg {
# name: "z"
# type_attr: "T"
# }
# attr {
# name: "T"
# type: "type"
# allowed_values {
# list {
# type: DT_FLOAT
# type: DT_DOUBLE
# }
# }
# }
# }
# op {
# name: "Imag"
# input_arg {
# name: "input"
# type_attr: "T"
# }
# output_arg {
# name: "output"
# type_attr: "Tout"
# }
# attr {
# name: "T"
# type: "type"
# default_value {
# type: DT_COMPLEX64
# }
# allowed_values {
# list {
# type: DT_COMPLEX64
# type: DT_COMPLEX128
# }
# }
# }
# attr {
# name: "Tout"
# type: "type"
# default_value {
# type: DT_FLOAT
# }
# allowed_values {
# list {
# type: DT_FLOAT
# type: DT_DOUBLE
# }
# }
# }
# }
# op {
# name: "Inv"
# input_arg {
# name: "x"
# type_attr: "T"
# }
# output_arg {
# name: "y"
# type_attr: "T"
# }
# attr {
# name: "T"
# type: "type"
# allowed_values {
# list {
# type: DT_BFLOAT16
# type: DT_HALF
# type: DT_FLOAT
# type: DT_DOUBLE
# type: DT_INT32
# type: DT_INT64
# type: DT_COMPLEX64
# type: DT_COMPLEX128
# }
# }
# }
# }
# op {
# name: "InvGrad"
# input_arg {
# name: "y"
# type_attr: "T"
# }
# input_arg {
# name: "dy"
# type_attr: "T"
# }
# output_arg {
# name: "z"
# type_attr: "T"
# }
# attr {
# name: "T"
# type: "type"
# allowed_values {
# list {
# type: DT_BFLOAT16
# type: DT_HALF
# type: DT_FLOAT
# type: DT_DOUBLE
# type: DT_COMPLEX64
# type: DT_COMPLEX128
# }
# }
# }
# }
# op {
# name: "IsFinite"
# input_arg {
# name: "x"
# type_attr: "T"
# }
# output_arg {
# name: "y"
# type: DT_BOOL
# }
# attr {
# name: "T"
# type: "type"
# allowed_values {
# list {
# type: DT_BFLOAT16
# type: DT_HALF
# type: DT_FLOAT
# type: DT_DOUBLE
# }
# }
# }
# }
# op {
# name: "IsInf"
# input_arg {
# name: "x"
# type_attr: "T"
# }
# output_arg {
# name: "y"
# type: DT_BOOL
# }
# attr {
# name: "T"
# type: "type"
# allowed_values {
# list {
# type: DT_BFLOAT16
# type: DT_HALF
# type: DT_FLOAT
# type: DT_DOUBLE
# }
# }
# }
# }
# op {
# name: "IsNan"
# input_arg {
# name: "x"
# type_attr: "T"
# }
# output_arg {
# name: "y"
# type: DT_BOOL
# }
# attr {
# name: "T"
# type: "type"
# allowed_values {
# list {
# type: DT_BFLOAT16
# type: DT_HALF
# type: DT_FLOAT
# type: DT_DOUBLE
# }
# }
# }
# }
# op {
# name: "Less"
# input_arg {
# name: "x"
# type_attr: "T"
# }
# input_arg {
# name: "y"
# type_attr: "T"
# }
# output_arg {
# name: "z"
# type: DT_BOOL
# }
# attr {
# name: "T"
# type: "type"
# allowed_values {
# list {
# type: DT_FLOAT
# type: DT_DOUBLE
# type: DT_INT32
# type: DT_UINT8
# type: DT_INT16
# type: DT_INT8
# type: DT_INT64
# type: DT_BFLOAT16
# type: DT_UINT16
# type: DT_HALF
# type: DT_UINT32
# type: DT_UINT64
# }
# }
# }
# }
# op {
# name: "LessEqual"
# input_arg {
# name: "x"
# type_attr: "T"
# }
# input_arg {
# name: "y"
# type_attr: "T"
# }
# output_arg {
# name: "z"
# type: DT_BOOL
# }
# attr {
# name: "T"
# type: "type"
# allowed_values {
# list {
# type: DT_FLOAT
# type: DT_DOUBLE
# type: DT_INT32
# type: DT_UINT8
# type: DT_INT16
# type: DT_INT8
# type: DT_INT64
# type: DT_BFLOAT16
# type: DT_UINT16
# type: DT_HALF
# type: DT_UINT32
# type: DT_UINT64
# }
# }
# }
# }
# op {
# name: "Lgamma"
# input_arg {
# name: "x"
# type_attr: "T"
# }
# output_arg {
# name: "y"
# type_attr: "T"
# }
# attr {
# name: "T"
# type: "type"
# allowed_values {
# list {
# type: DT_BFLOAT16
# type: DT_HALF
# type: DT_FLOAT
# type: DT_DOUBLE
# }
# }
# }
# }
# op {
# name: "LinSpace"
# input_arg {
# name: "start"
# type_attr: "T"
# }
# input_arg {
# name: "stop"
# type_attr: "T"
# }
# input_arg {
# name: "num"
# type_attr: "Tidx"
# }
# output_arg {
# name: "output"
# type_attr: "T"
# }
# attr {
# name: "T"
# type: "type"
# allowed_values {
# list {
# type: DT_BFLOAT16
# type: DT_FLOAT
# type: DT_DOUBLE
# }
# }
# }
# attr {
# name: "Tidx"
# type: "type"
# default_value {
# type: DT_INT32
# }
# allowed_values {
# list {
# type: DT_INT32
# type: DT_INT64
# }
# }
# }
# }
# op {
# name: "Log"
# input_arg {
# name: "x"
# type_attr: "T"
# }
# output_arg {
# name: "y"
# type_attr: "T"
# }
# attr {
# name: "T"
# type: "type"
# allowed_values {
# list {
# type: DT_BFLOAT16
# type: DT_HALF
# type: DT_FLOAT
# type: DT_DOUBLE
# type: DT_COMPLEX64
# type: DT_COMPLEX128
# }
# }
# }
# }
# op {
# name: "Log1p"
# input_arg {
# name: "x"
# type_attr: "T"
# }
# output_arg {
# name: "y"
# type_attr: "T"
# }
# attr {
# name: "T"
# type: "type"
# allowed_values {
# list {
# type: DT_BFLOAT16
# type: DT_HALF
# type: DT_FLOAT
# type: DT_DOUBLE
# type: DT_COMPLEX64
# type: DT_COMPLEX128
# }
# }
# }
# }
# op {
# name: "LogicalAnd"
# input_arg {
# name: "x"
# type: DT_BOOL
# }
# input_arg {
# name: "y"
# type: DT_BOOL
# }
# output_arg {
# name: "z"
# type: DT_BOOL
# }
# is_commutative: true
# }
# op {
# name: "LogicalNot"
# input_arg {
# name: "x"
# type: DT_BOOL
# }
# output_arg {
# name: "y"
# type: DT_BOOL
# }
# }
# op {
# name: "LogicalOr"
# input_arg {
# name: "x"
# type: DT_BOOL
# }
# input_arg {
# name: "y"
# type: DT_BOOL
# }
# output_arg {
# name: "z"
# type: DT_BOOL
# }
# is_commutative: true
# }
# op {
# name: "MatMul"
# input_arg {
# name: "a"
# type_attr: "T"
# }
# input_arg {
# name: "b"
# type_attr: "T"
# }
# output_arg {
# name: "product"
# type_attr: "T"
# }
# attr {
# name: "transpose_a"
# type: "bool"
# default_value {
# b: false
# }
# }
# attr {
# name: "transpose_b"
# type: "bool"
# default_value {
# b: false
# }
# }
# attr {
# name: "T"
# type: "type"
# allowed_values {
# list {
# type: DT_BFLOAT16
# type: DT_HALF
# type: DT_FLOAT
# type: DT_DOUBLE
# type: DT_INT32
# type: DT_COMPLEX64
# type: DT_COMPLEX128
# }
# }
# }
# }
# op {
# name: "Max"
# input_arg {
# name: "input"
# type_attr: "T"
# }
# input_arg {
# name: "reduction_indices"
# type_attr: "Tidx"
# }
# output_arg {
# name: "output"
# type_attr: "T"
# }
# attr {
# name: "keep_dims"
# type: "bool"
# default_value {
# b: false
# }
# }
# attr {
# name: "T"
# type: "type"
# allowed_values {
# list {
# type: DT_FLOAT
# type: DT_DOUBLE
# type: DT_INT32
# type: DT_UINT8
# type: DT_INT16
# type: DT_INT8
# type: DT_COMPLEX64
# type: DT_INT64
# type: DT_QINT8
# type: DT_QUINT8
# type: DT_QINT32
# type: DT_BFLOAT16
# type: DT_UINT16
# type: DT_COMPLEX128
# type: DT_HALF
# type: DT_UINT32
# type: DT_UINT64
# }
# }
# }
# attr {
# name: "Tidx"
# type: "type"
# default_value {
# type: DT_INT32
# }
# allowed_values {
# list {
# type: DT_INT32
# type: DT_INT64
# }
# }
# }
# }
# op {
# name: "Maximum"
# input_arg {
# name: "x"
# type_attr: "T"
# }
# input_arg {
# name: "y"
# type_attr: "T"
# }
# output_arg {
# name: "z"
# type_attr: "T"
# }
# attr {
# name: "T"
# type: "type"
# allowed_values {
# list {
# type: DT_BFLOAT16
# type: DT_HALF
# type: DT_FLOAT
# type: DT_DOUBLE
# type: DT_INT32
# type: DT_INT64
# }
# }
# }
# is_commutative: true
# }
# op {
# name: "Mean"
# input_arg {
# name: "input"
# type_attr: "T"
# }
# input_arg {
# name: "reduction_indices"
# type_attr: "Tidx"
# }
# output_arg {
# name: "output"
# type_attr: "T"
# }
# attr {
# name: "keep_dims"
# type: "bool"
# default_value {
# b: false
# }
# }
# attr {
# name: "T"
# type: "type"
# allowed_values {
# list {
# type: DT_FLOAT
# type: DT_DOUBLE
# type: DT_INT32
# type: DT_UINT8
# type: DT_INT16
# type: DT_INT8
# type: DT_COMPLEX64
# type: DT_INT64
# type: DT_QINT8
# type: DT_QUINT8
# type: DT_QINT32
# type: DT_BFLOAT16
# type: DT_UINT16
# type: DT_COMPLEX128
# type: DT_HALF
# type: DT_UINT32
# type: DT_UINT64
# }
# }
# }
# attr {
# name: "Tidx"
# type: "type"
# default_value {
# type: DT_INT32
# }
# allowed_values {
# list {
# type: DT_INT32
# type: DT_INT64
# }
# }
# }
# }
# op {
# name: "Min"
# input_arg {
# name: "input"
# type_attr: "T"
# }
# input_arg {
# name: "reduction_indices"
# type_attr: "Tidx"
# }
# output_arg {
# name: "output"
# type_attr: "T"
# }
# attr {
# name: "keep_dims"
# type: "bool"
# default_value {
# b: false
# }
# }
# attr {
# name: "T"
# type: "type"
# allowed_values {
# list {
# type: DT_FLOAT
# type: DT_DOUBLE
# type: DT_INT32
# type: DT_UINT8
# type: DT_INT16
# type: DT_INT8
# type: DT_COMPLEX64
# type: DT_INT64
# type: DT_QINT8
# type: DT_QUINT8
# type: DT_QINT32
# type: DT_BFLOAT16
# type: DT_UINT16
# type: DT_COMPLEX128
# type: DT_HALF
# type: DT_UINT32
# type: DT_UINT64
# }
# }
# }
# attr {
# name: "Tidx"
# type: "type"
# default_value {
# type: DT_INT32
# }
# allowed_values {
# list {
# type: DT_INT32
# type: DT_INT64
# }
# }
# }
# }
# op {
# name: "Minimum"
# input_arg {
# name: "x"
# type_attr: "T"
# }
# input_arg {
# name: "y"
# type_attr: "T"
# }
# output_arg {
# name: "z"
# type_attr: "T"
# }
# attr {
# name: "T"
# type: "type"
# allowed_values {
# list {
# type: DT_BFLOAT16
# type: DT_HALF
# type: DT_FLOAT
# type: DT_DOUBLE
# type: DT_INT32
# type: DT_INT64
# }
# }
# }
# is_commutative: true
# }
# op {
# name: "Mod"
# input_arg {
# name: "x"
# type_attr: "T"
# }
# input_arg {
# name: "y"
# type_attr: "T"
# }
# output_arg {
# name: "z"
# type_attr: "T"
# }
# attr {
# name: "T"
# type: "type"
# allowed_values {
# list {
# type: DT_INT32
# type: DT_INT64
# type: DT_HALF
# type: DT_HALF
# type: DT_BFLOAT16
# type: DT_FLOAT
# type: DT_DOUBLE
# }
# }
# }
# }
# op {
# name: "Mul"
# input_arg {
# name: "x"
# type_attr: "T"
# }
# input_arg {
# name: "y"
# type_attr: "T"
# }
# output_arg {
# name: "z"
# type_attr: "T"
# }
# attr {
# name: "T"
# type: "type"
# allowed_values {
# list {
# type: DT_BFLOAT16
# type: DT_HALF
# type: DT_FLOAT
# type: DT_DOUBLE
# type: DT_UINT8
# type: DT_INT8
# type: DT_UINT16
# type: DT_INT16
# type: DT_INT32
# type: DT_INT64
# type: DT_COMPLEX64
# type: DT_COMPLEX128
# }
# }
# }
# is_commutative: true
# }
# op {
# name: "Neg"
# input_arg {
# name: "x"
# type_attr: "T"
# }
# output_arg {
# name: "y"
# type_attr: "T"
# }
# attr {
# name: "T"
# type: "type"
# allowed_values {
# list {
# type: DT_BFLOAT16
# type: DT_HALF
# type: DT_FLOAT
# type: DT_DOUBLE
# type: DT_INT32
# type: DT_INT64
# type: DT_COMPLEX64
# type: DT_COMPLEX128
# }
# }
# }
# }
# op {
# name: "NotEqual"
# input_arg {
# name: "x"
# type_attr: "T"
# }
# input_arg {
# name: "y"
# type_attr: "T"
# }
# output_arg {
# name: "z"
# type: DT_BOOL
# }
# attr {
# name: "T"
# type: "type"
# allowed_values {
# list {
# type: DT_BFLOAT16
# type: DT_HALF
# type: DT_FLOAT
# type: DT_DOUBLE
# type: DT_UINT8
# type: DT_INT8
# type: DT_INT16
# type: DT_INT32
# type: DT_INT64
# type: DT_COMPLEX64
# type: DT_QUINT8
# type: DT_QINT8
# type: DT_QINT32
# type: DT_STRING
# type: DT_BOOL
# type: DT_COMPLEX128
# }
# }
# }
# is_commutative: true
# }
# op {
# name: "Polygamma"
# input_arg {
# name: "a"
# type_attr: "T"
# }
# input_arg {
# name: "x"
# type_attr: "T"
# }
# output_arg {
# name: "z"
# type_attr: "T"
# }
# attr {
# name: "T"
# type: "type"
# allowed_values {
# list {
# type: DT_FLOAT
# type: DT_DOUBLE
# }
# }
# }
# }
# op {
# name: "Pow"
# input_arg {
# name: "x"
# type_attr: "T"
# }
# input_arg {
# name: "y"
# type_attr: "T"
# }
# output_arg {
# name: "z"
# type_attr: "T"
# }
# attr {
# name: "T"
# type: "type"
# allowed_values {
# list {
# type: DT_BFLOAT16
# type: DT_FLOAT
# type: DT_HALF
# type: DT_DOUBLE
# type: DT_INT32
# type: DT_INT64
# type: DT_COMPLEX64
# type: DT_COMPLEX128
# }
# }
# }
# }
# op {
# name: "Prod"
# input_arg {
# name: "input"
# type_attr: "T"
# }
# input_arg {
# name: "reduction_indices"
# type_attr: "Tidx"
# }
# output_arg {
# name: "output"
# type_attr: "T"
# }
# attr {
# name: "keep_dims"
# type: "bool"
# default_value {
# b: false
# }
# }
# attr {
# name: "T"
# type: "type"
# allowed_values {
# list {
# type: DT_FLOAT
# type: DT_DOUBLE
# type: DT_INT32
# type: DT_UINT8
# type: DT_INT16
# type: DT_INT8
# type: DT_COMPLEX64
# type: DT_INT64
# type: DT_QINT8
# type: DT_QUINT8
# type: DT_QINT32
# type: DT_BFLOAT16
# type: DT_UINT16
# type: DT_COMPLEX128
# type: DT_HALF
# type: DT_UINT32
# type: DT_UINT64
# }
# }
# }
# attr {
# name: "Tidx"
# type: "type"
# default_value {
# type: DT_INT32
# }
# allowed_values {
# list {
# type: DT_INT32
# type: DT_INT64
# }
# }
# }
# }
# op {
# name: "QuantizeDownAndShrinkRange"
# input_arg {
# name: "input"
# type_attr: "Tinput"
# }
# input_arg {
# name: "input_min"
# type: DT_FLOAT
# }
# input_arg {
# name: "input_max"
# type: DT_FLOAT
# }
# output_arg {
# name: "output"
# type_attr: "out_type"
# }
# output_arg {
# name: "output_min"
# type: DT_FLOAT
# }
# output_arg {
# name: "output_max"
# type: DT_FLOAT
# }
# attr {
# name: "Tinput"
# type: "type"
# allowed_values {
# list {
# type: DT_QINT8
# type: DT_QUINT8
# type: DT_QINT32
# type: DT_QINT16
# type: DT_QUINT16
# }
# }
# }
# attr {
# name: "out_type"
# type: "type"
# allowed_values {
# list {
# type: DT_QINT8
# type: DT_QUINT8
# type: DT_QINT32
# type: DT_QINT16
# type: DT_QUINT16
# }
# }
# }
# }
# op {
# name: "QuantizedAdd"
# input_arg {
# name: "x"
# type_attr: "T1"
# }
# input_arg {
# name: "y"
# type_attr: "T2"
# }
# input_arg {
# name: "min_x"
# type: DT_FLOAT
# }
# input_arg {
# name: "max_x"
# type: DT_FLOAT
# }
# input_arg {
# name: "min_y"
# type: DT_FLOAT
# }
# input_arg {
# name: "max_y"
# type: DT_FLOAT
# }
# output_arg {
# name: "z"
# type_attr: "Toutput"
# }
# output_arg {
# name: "min_z"
# type: DT_FLOAT
# }
# output_arg {
# name: "max_z"
# type: DT_FLOAT
# }
# attr {
# name: "T1"
# type: "type"
# allowed_values {
# list {
# type: DT_QINT8
# type: DT_QUINT8
# type: DT_QINT32
# type: DT_QINT16
# type: DT_QUINT16
# }
# }
# }
# attr {
# name: "T2"
# type: "type"
# allowed_values {
# list {
# type: DT_QINT8
# type: DT_QUINT8
# type: DT_QINT32
# type: DT_QINT16
# type: DT_QUINT16
# }
# }
# }
# attr {
# name: "Toutput"
# type: "type"
# default_value {
# type: DT_QINT32
# }
# allowed_values {
# list {
# type: DT_QINT8
# type: DT_QUINT8
# type: DT_QINT32
# type: DT_QINT16
# type: DT_QUINT16
# }
# }
# }
# is_commutative: true
# }
# op {
# name: "QuantizedMatMul"
# input_arg {
# name: "a"
# type_attr: "T1"
# }
# input_arg {
# name: "b"
# type_attr: "T2"
# }
# input_arg {
# name: "min_a"
# type: DT_FLOAT
# }
# input_arg {
# name: "max_a"
# type: DT_FLOAT
# }
# input_arg {
# name: "min_b"
# type: DT_FLOAT
# }
# input_arg {
# name: "max_b"
# type: DT_FLOAT
# }
# output_arg {
# name: "out"
# type_attr: "Toutput"
# }
# output_arg {
# name: "min_out"
# type: DT_FLOAT
# }
# output_arg {
# name: "max_out"
# type: DT_FLOAT
# }
# attr {
# name: "T1"
# type: "type"
# allowed_values {
# list {
# type: DT_QINT8
# type: DT_QUINT8
# type: DT_QINT32
# type: DT_QINT16
# type: DT_QUINT16
# }
# }
# }
# attr {
# name: "T2"
# type: "type"
# allowed_values {
# list {
# type: DT_QINT8
# type: DT_QUINT8
# type: DT_QINT32
# type: DT_QINT16
# type: DT_QUINT16
# }
# }
# }
# attr {
# name: "Toutput"
# type: "type"
# default_value {
# type: DT_QINT32
# }
# allowed_values {
# list {
# type: DT_QINT8
# type: DT_QUINT8
# type: DT_QINT32
# type: DT_QINT16
# type: DT_QUINT16
# }
# }
# }
# attr {
# name: "transpose_a"
# type: "bool"
# default_value {
# b: false
# }
# }
# attr {
# name: "transpose_b"
# type: "bool"
# default_value {
# b: false
# }
# }
# attr {
# name: "Tactivation"
# type: "type"
# default_value {
# type: DT_QUINT8
# }
# allowed_values {
# list {
# type: DT_QINT8
# type: DT_QUINT8
# type: DT_QINT32
# type: DT_QINT16
# type: DT_QUINT16
# }
# }
# }
# }
# op {
# name: "QuantizedMul"
# input_arg {
# name: "x"
# type_attr: "T1"
# }
# input_arg {
# name: "y"
# type_attr: "T2"
# }
# input_arg {
# name: "min_x"
# type: DT_FLOAT
# }
# input_arg {
# name: "max_x"
# type: DT_FLOAT
# }
# input_arg {
# name: "min_y"
# type: DT_FLOAT
# }
# input_arg {
# name: "max_y"
# type: DT_FLOAT
# }
# output_arg {
# name: "z"
# type_attr: "Toutput"
# }
# output_arg {
# name: "min_z"
# type: DT_FLOAT
# }
# output_arg {
# name: "max_z"
# type: DT_FLOAT
# }
# attr {
# name: "T1"
# type: "type"
# allowed_values {
# list {
# type: DT_QINT8
# type: DT_QUINT8
# type: DT_QINT32
# type: DT_QINT16
# type: DT_QUINT16
# }
# }
# }
# attr {
# name: "T2"
# type: "type"
# allowed_values {
# list {
# type: DT_QINT8
# type: DT_QUINT8
# type: DT_QINT32
# type: DT_QINT16
# type: DT_QUINT16
# }
# }
# }
# attr {
# name: "Toutput"
# type: "type"
# default_value {
# type: DT_QINT32
# }
# allowed_values {
# list {
# type: DT_QINT8
# type: DT_QUINT8
# type: DT_QINT32
# type: DT_QINT16
# type: DT_QUINT16
# }
# }
# }
# is_commutative: true
# }
# op {
# name: "Range"
# input_arg {
# name: "start"
# type_attr: "Tidx"
# }
# input_arg {
# name: "limit"
# type_attr: "Tidx"
# }
# input_arg {
# name: "delta"
# type_attr: "Tidx"
# }
# output_arg {
# name: "output"
# type_attr: "Tidx"
# }
# attr {
# name: "Tidx"
# type: "type"
# default_value {
# type: DT_INT32
# }
# allowed_values {
# list {
# type: DT_BFLOAT16
# type: DT_FLOAT
# type: DT_DOUBLE
# type: DT_INT32
# type: DT_INT64
# }
# }
# }
# }
# op {
# name: "Real"
# input_arg {
# name: "input"
# type_attr: "T"
# }
# output_arg {
# name: "output"
# type_attr: "Tout"
# }
# attr {
# name: "T"
# type: "type"
# default_value {
# type: DT_COMPLEX64
# }
# allowed_values {
# list {
# type: DT_COMPLEX64
# type: DT_COMPLEX128
# }
# }
# }
# attr {
# name: "Tout"
# type: "type"
# default_value {
# type: DT_FLOAT
# }
# allowed_values {
# list {
# type: DT_FLOAT
# type: DT_DOUBLE
# }
# }
# }
# }
# op {
# name: "RealDiv"
# input_arg {
# name: "x"
# type_attr: "T"
# }
# input_arg {
# name: "y"
# type_attr: "T"
# }
# output_arg {
# name: "z"
# type_attr: "T"
# }
# attr {
# name: "T"
# type: "type"
# allowed_values {
# list {
# type: DT_BFLOAT16
# type: DT_HALF
# type: DT_FLOAT
# type: DT_DOUBLE
# type: DT_UINT8
# type: DT_INT8
# type: DT_UINT16
# type: DT_INT16
# type: DT_INT32
# type: DT_INT64
# type: DT_COMPLEX64
# type: DT_COMPLEX128
# }
# }
# }
# }
# op {
# name: "Reciprocal"
# input_arg {
# name: "x"
# type_attr: "T"
# }
# output_arg {
# name: "y"
# type_attr: "T"
# }
# attr {
# name: "T"
# type: "type"
# allowed_values {
# list {
# type: DT_BFLOAT16
# type: DT_HALF
# type: DT_FLOAT
# type: DT_DOUBLE
# type: DT_INT32
# type: DT_INT64
# type: DT_COMPLEX64
# type: DT_COMPLEX128
# }
# }
# }
# }
# op {
# name: "ReciprocalGrad"
# input_arg {
# name: "y"
# type_attr: "T"
# }
# input_arg {
# name: "dy"
# type_attr: "T"
# }
# output_arg {
# name: "z"
# type_attr: "T"
# }
# attr {
# name: "T"
# type: "type"
# allowed_values {
# list {
# type: DT_BFLOAT16
# type: DT_HALF
# type: DT_FLOAT
# type: DT_DOUBLE
# type: DT_COMPLEX64
# type: DT_COMPLEX128
# }
# }
# }
# }
# op {
# name: "RequantizationRange"
# input_arg {
# name: "input"
# type_attr: "Tinput"
# }
# input_arg {
# name: "input_min"
# type: DT_FLOAT
# }
# input_arg {
# name: "input_max"
# type: DT_FLOAT
# }
# output_arg {
# name: "output_min"
# type: DT_FLOAT
# }
# output_arg {
# name: "output_max"
# type: DT_FLOAT
# }
# attr {
# name: "Tinput"
# type: "type"
# allowed_values {
# list {
# type: DT_QINT8
# type: DT_QUINT8
# type: DT_QINT32
# type: DT_QINT16
# type: DT_QUINT16
# }
# }
# }
# }
# op {
# name: "Requantize"
# input_arg {
# name: "input"
# type_attr: "Tinput"
# }
# input_arg {
# name: "input_min"
# type: DT_FLOAT
# }
# input_arg {
# name: "input_max"
# type: DT_FLOAT
# }
# input_arg {
# name: "requested_output_min"
# type: DT_FLOAT
# }
# input_arg {
# name: "requested_output_max"
# type: DT_FLOAT
# }
# output_arg {
# name: "output"
# type_attr: "out_type"
# }
# output_arg {
# name: "output_min"
# type: DT_FLOAT
# }
# output_arg {
# name: "output_max"
# type: DT_FLOAT
# }
# attr {
# name: "Tinput"
# type: "type"
# allowed_values {
# list {
# type: DT_QINT8
# type: DT_QUINT8
# type: DT_QINT32
# type: DT_QINT16
# type: DT_QUINT16
# }
# }
# }
# attr {
# name: "out_type"
# type: "type"
# allowed_values {
# list {
# type: DT_QINT8
# type: DT_QUINT8
# type: DT_QINT32
# type: DT_QINT16
# type: DT_QUINT16
# }
# }
# }
# }
# op {
# name: "Rint"
# input_arg {
# name: "x"
# type_attr: "T"
# }
# output_arg {
# name: "y"
# type_attr: "T"
# }
# attr {
# name: "T"
# type: "type"
# allowed_values {
# list {
# type: DT_BFLOAT16
# type: DT_HALF
# type: DT_FLOAT
# type: DT_DOUBLE
# }
# }
# }
# }
# op {
# name: "Round"
# input_arg {
# name: "x"
# type_attr: "T"
# }
# output_arg {
# name: "y"
# type_attr: "T"
# }
# attr {
# name: "T"
# type: "type"
# allowed_values {
# list {
# type: DT_BFLOAT16
# type: DT_HALF
# type: DT_FLOAT
# type: DT_DOUBLE
# type: DT_INT32
# type: DT_INT64
# type: DT_COMPLEX64
# type: DT_COMPLEX128
# }
# }
# }
# }
# op {
# name: "Rsqrt"
# input_arg {
# name: "x"
# type_attr: "T"
# }
# output_arg {
# name: "y"
# type_attr: "T"
# }
# attr {
# name: "T"
# type: "type"
# allowed_values {
# list {
# type: DT_BFLOAT16
# type: DT_HALF
# type: DT_FLOAT
# type: DT_DOUBLE
# type: DT_COMPLEX64
# type: DT_COMPLEX128
# }
# }
# }
# }
# op {
# name: "RsqrtGrad"
# input_arg {
# name: "y"
# type_attr: "T"
# }
# input_arg {
# name: "dy"
# type_attr: "T"
# }
# output_arg {
# name: "z"
# type_attr: "T"
# }
# attr {
# name: "T"
# type: "type"
# allowed_values {
# list {
# type: DT_BFLOAT16
# type: DT_HALF
# type: DT_FLOAT
# type: DT_DOUBLE
# type: DT_COMPLEX64
# type: DT_COMPLEX128
# }
# }
# }
# }
# op {
# name: "SegmentMax"
# input_arg {
# name: "data"
# type_attr: "T"
# }
# input_arg {
# name: "segment_ids"
# type_attr: "Tindices"
# }
# output_arg {
# name: "output"
# type_attr: "T"
# }
# attr {
# name: "T"
# type: "type"
# allowed_values {
# list {
# type: DT_FLOAT
# type: DT_DOUBLE
# type: DT_INT32
# type: DT_UINT8
# type: DT_INT16
# type: DT_INT8
# type: DT_INT64
# type: DT_BFLOAT16
# type: DT_UINT16
# type: DT_HALF
# type: DT_UINT32
# type: DT_UINT64
# }
# }
# }
# attr {
# name: "Tindices"
# type: "type"
# allowed_values {
# list {
# type: DT_INT32
# type: DT_INT64
# }
# }
# }
# }
# op {
# name: "SegmentMean"
# input_arg {
# name: "data"
# type_attr: "T"
# }
# input_arg {
# name: "segment_ids"
# type_attr: "Tindices"
# }
# output_arg {
# name: "output"
# type_attr: "T"
# }
# attr {
# name: "T"
# type: "type"
# allowed_values {
# list {
# type: DT_FLOAT
# type: DT_DOUBLE
# type: DT_INT32
# type: DT_UINT8
# type: DT_INT16
# type: DT_INT8
# type: DT_COMPLEX64
# type: DT_INT64
# type: DT_QINT8
# type: DT_QUINT8
# type: DT_QINT32
# type: DT_BFLOAT16
# type: DT_UINT16
# type: DT_COMPLEX128
# type: DT_HALF
# type: DT_UINT32
# type: DT_UINT64
# }
# }
# }
# attr {
# name: "Tindices"
# type: "type"
# allowed_values {
# list {
# type: DT_INT32
# type: DT_INT64
# }
# }
# }
# }
# op {
# name: "SegmentMin"
# input_arg {
# name: "data"
# type_attr: "T"
# }
# input_arg {
# name: "segment_ids"
# type_attr: "Tindices"
# }
# output_arg {
# name: "output"
# type_attr: "T"
# }
# attr {
# name: "T"
# type: "type"
# allowed_values {
# list {
# type: DT_FLOAT
# type: DT_DOUBLE
# type: DT_INT32
# type: DT_UINT8
# type: DT_INT16
# type: DT_INT8
# type: DT_INT64
# type: DT_BFLOAT16
# type: DT_UINT16
# type: DT_HALF
# type: DT_UINT32
# type: DT_UINT64
# }
# }
# }
# attr {
# name: "Tindices"
# type: "type"
# allowed_values {
# list {
# type: DT_INT32
# type: DT_INT64
# }
# }
# }
# }
# op {
# name: "SegmentProd"
# input_arg {
# name: "data"
# type_attr: "T"
# }
# input_arg {
# name: "segment_ids"
# type_attr: "Tindices"
# }
# output_arg {
# name: "output"
# type_attr: "T"
# }
# attr {
# name: "T"
# type: "type"
# allowed_values {
# list {
# type: DT_FLOAT
# type: DT_DOUBLE
# type: DT_INT32
# type: DT_UINT8
# type: DT_INT16
# type: DT_INT8
# type: DT_COMPLEX64
# type: DT_INT64
# type: DT_QINT8
# type: DT_QUINT8
# type: DT_QINT32
# type: DT_BFLOAT16
# type: DT_UINT16
# type: DT_COMPLEX128
# type: DT_HALF
# type: DT_UINT32
# type: DT_UINT64
# }
# }
# }
# attr {
# name: "Tindices"
# type: "type"
# allowed_values {
# list {
# type: DT_INT32
# type: DT_INT64
# }
# }
# }
# }
# op {
# name: "SegmentSum"
# input_arg {
# name: "data"
# type_attr: "T"
# }
# input_arg {
# name: "segment_ids"
# type_attr: "Tindices"
# }
# output_arg {
# name: "output"
# type_attr: "T"
# }
# attr {
# name: "T"
# type: "type"
# allowed_values {
# list {
# type: DT_FLOAT
# type: DT_DOUBLE
# type: DT_INT32
# type: DT_UINT8
# type: DT_INT16
# type: DT_INT8
# type: DT_COMPLEX64
# type: DT_INT64
# type: DT_QINT8
# type: DT_QUINT8
# type: DT_QINT32
# type: DT_BFLOAT16
# type: DT_UINT16
# type: DT_COMPLEX128
# type: DT_HALF
# type: DT_UINT32
# type: DT_UINT64
# }
# }
# }
# attr {
# name: "Tindices"
# type: "type"
# allowed_values {
# list {
# type: DT_INT32
# type: DT_INT64
# }
# }
# }
# }
# op {
# name: "Select"
# input_arg {
# name: "condition"
# type: DT_BOOL
# }
# input_arg {
# name: "t"
# type_attr: "T"
# }
# input_arg {
# name: "e"
# type_attr: "T"
# }
# output_arg {
# name: "output"
# type_attr: "T"
# }
# attr {
# name: "T"
# type: "type"
# }
# }
# op {
# name: "Sigmoid"
# input_arg {
# name: "x"
# type_attr: "T"
# }
# output_arg {
# name: "y"
# type_attr: "T"
# }
# attr {
# name: "T"
# type: "type"
# allowed_values {
# list {
# type: DT_BFLOAT16
# type: DT_HALF
# type: DT_FLOAT
# type: DT_DOUBLE
# type: DT_COMPLEX64
# type: DT_COMPLEX128
# }
# }
# }
# }
# op {
# name: "SigmoidGrad"
# input_arg {
# name: "y"
# type_attr: "T"
# }
# input_arg {
# name: "dy"
# type_attr: "T"
# }
# output_arg {
# name: "z"
# type_attr: "T"
# }
# attr {
# name: "T"
# type: "type"
# allowed_values {
# list {
# type: DT_BFLOAT16
# type: DT_HALF
# type: DT_FLOAT
# type: DT_DOUBLE
# type: DT_COMPLEX64
# type: DT_COMPLEX128
# }
# }
# }
# }
# op {
# name: "Sign"
# input_arg {
# name: "x"
# type_attr: "T"
# }
# output_arg {
# name: "y"
# type_attr: "T"
# }
# attr {
# name: "T"
# type: "type"
# allowed_values {
# list {
# type: DT_BFLOAT16
# type: DT_HALF
# type: DT_FLOAT
# type: DT_DOUBLE
# type: DT_INT32
# type: DT_INT64
# type: DT_COMPLEX64
# type: DT_COMPLEX128
# }
# }
# }
# }
# op {
# name: "Sin"
# input_arg {
# name: "x"
# type_attr: "T"
# }
# output_arg {
# name: "y"
# type_attr: "T"
# }
# attr {
# name: "T"
# type: "type"
# allowed_values {
# list {
# type: DT_BFLOAT16
# type: DT_HALF
# type: DT_FLOAT
# type: DT_DOUBLE
# type: DT_COMPLEX64
# type: DT_COMPLEX128
# }
# }
# }
# }
# op {
# name: "Sinh"
# input_arg {
# name: "x"
# type_attr: "T"
# }
# output_arg {
# name: "y"
# type_attr: "T"
# }
# attr {
# name: "T"
# type: "type"
# allowed_values {
# list {
# type: DT_BFLOAT16
# type: DT_HALF
# type: DT_FLOAT
# type: DT_DOUBLE
# type: DT_COMPLEX64
# type: DT_COMPLEX128
# }
# }
# }
# }
# op {
# name: "SparseMatMul"
# input_arg {
# name: "a"
# type_attr: "Ta"
# }
# input_arg {
# name: "b"
# type_attr: "Tb"
# }
# output_arg {
# name: "product"
# type: DT_FLOAT
# }
# attr {
# name: "transpose_a"
# type: "bool"
# default_value {
# b: false
# }
# }
# attr {
# name: "transpose_b"
# type: "bool"
# default_value {
# b: false
# }
# }
# attr {
# name: "a_is_sparse"
# type: "bool"
# default_value {
# b: false
# }
# }
# attr {
# name: "b_is_sparse"
# type: "bool"
# default_value {
# b: false
# }
# }
# attr {
# name: "Ta"
# type: "type"
# default_value {
# type: DT_FLOAT
# }
# allowed_values {
# list {
# type: DT_FLOAT
# type: DT_BFLOAT16
# }
# }
# }
# attr {
# name: "Tb"
# type: "type"
# default_value {
# type: DT_FLOAT
# }
# allowed_values {
# list {
# type: DT_FLOAT
# type: DT_BFLOAT16
# }
# }
# }
# }
# op {
# name: "SparseSegmentMean"
# input_arg {
# name: "data"
# type_attr: "T"
# }
# input_arg {
# name: "indices"
# type_attr: "Tidx"
# }
# input_arg {
# name: "segment_ids"
# type: DT_INT32
# }
# output_arg {
# name: "output"
# type_attr: "T"
# }
# attr {
# name: "T"
# type: "type"
# allowed_values {
# list {
# type: DT_FLOAT
# type: DT_DOUBLE
# }
# }
# }
# attr {
# name: "Tidx"
# type: "type"
# default_value {
# type: DT_INT32
# }
# allowed_values {
# list {
# type: DT_INT32
# type: DT_INT64
# }
# }
# }
# }
# op {
# name: "SparseSegmentMeanGrad"
# input_arg {
# name: "grad"
# type_attr: "T"
# }
# input_arg {
# name: "indices"
# type_attr: "Tidx"
# }
# input_arg {
# name: "segment_ids"
# type: DT_INT32
# }
# input_arg {
# name: "output_dim0"
# type: DT_INT32
# }
# output_arg {
# name: "output"
# type_attr: "T"
# }
# attr {
# name: "T"
# type: "type"
# allowed_values {
# list {
# type: DT_FLOAT
# type: DT_DOUBLE
# }
# }
# }
# attr {
# name: "Tidx"
# type: "type"
# default_value {
# type: DT_INT32
# }
# allowed_values {
# list {
# type: DT_INT32
# type: DT_INT64
# }
# }
# }
# }
# op {
# name: "SparseSegmentMeanWithNumSegments"
# input_arg {
# name: "data"
# type_attr: "T"
# }
# input_arg {
# name: "indices"
# type_attr: "Tidx"
# }
# input_arg {
# name: "segment_ids"
# type: DT_INT32
# }
# input_arg {
# name: "num_segments"
# type_attr: "Tnumsegments"
# }
# output_arg {
# name: "output"
# type_attr: "T"
# }
# attr {
# name: "T"
# type: "type"
# allowed_values {
# list {
# type: DT_FLOAT
# type: DT_DOUBLE
# }
# }
# }
# attr {
# name: "Tidx"
# type: "type"
# default_value {
# type: DT_INT32
# }
# allowed_values {
# list {
# type: DT_INT32
# type: DT_INT64
# }
# }
# }
# attr {
# name: "Tnumsegments"
# type: "type"
# default_value {
# type: DT_INT32
# }
# allowed_values {
# list {
# type: DT_INT32
# type: DT_INT64
# }
# }
# }
# }
# op {
# name: "SparseSegmentSqrtN"
# input_arg {
# name: "data"
# type_attr: "T"
# }
# input_arg {
# name: "indices"
# type_attr: "Tidx"
# }
# input_arg {
# name: "segment_ids"
# type: DT_INT32
# }
# output_arg {
# name: "output"
# type_attr: "T"
# }
# attr {
# name: "T"
# type: "type"
# allowed_values {
# list {
# type: DT_FLOAT
# type: DT_DOUBLE
# }
# }
# }
# attr {
# name: "Tidx"
# type: "type"
# default_value {
# type: DT_INT32
# }
# allowed_values {
# list {
# type: DT_INT32
# type: DT_INT64
# }
# }
# }
# }
# op {
# name: "SparseSegmentSqrtNGrad"
# input_arg {
# name: "grad"
# type_attr: "T"
# }
# input_arg {
# name: "indices"
# type_attr: "Tidx"
# }
# input_arg {
# name: "segment_ids"
# type: DT_INT32
# }
# input_arg {
# name: "output_dim0"
# type: DT_INT32
# }
# output_arg {
# name: "output"
# type_attr: "T"
# }
# attr {
# name: "T"
# type: "type"
# allowed_values {
# list {
# type: DT_FLOAT
# type: DT_DOUBLE
# }
# }
# }
# attr {
# name: "Tidx"
# type: "type"
# default_value {
# type: DT_INT32
# }
# allowed_values {
# list {
# type: DT_INT32
# type: DT_INT64
# }
# }
# }
# }
# op {
# name: "SparseSegmentSqrtNWithNumSegments"
# input_arg {
# name: "data"
# type_attr: "T"
# }
# input_arg {
# name: "indices"
# type_attr: "Tidx"
# }
# input_arg {
# name: "segment_ids"
# type: DT_INT32
# }
# input_arg {
# name: "num_segments"
# type_attr: "Tnumsegments"
# }
# output_arg {
# name: "output"
# type_attr: "T"
# }
# attr {
# name: "T"
# type: "type"
# allowed_values {
# list {
# type: DT_FLOAT
# type: DT_DOUBLE
# }
# }
# }
# attr {
# name: "Tidx"
# type: "type"
# default_value {
# type: DT_INT32
# }
# allowed_values {
# list {
# type: DT_INT32
# type: DT_INT64
# }
# }
# }
# attr {
# name: "Tnumsegments"
# type: "type"
# default_value {
# type: DT_INT32
# }
# allowed_values {
# list {
# type: DT_INT32
# type: DT_INT64
# }
# }
# }
# }
# op {
# name: "SparseSegmentSum"
# input_arg {
# name: "data"
# type_attr: "T"
# }
# input_arg {
# name: "indices"
# type_attr: "Tidx"
# }
# input_arg {
# name: "segment_ids"
# type: DT_INT32
# }
# output_arg {
# name: "output"
# type_attr: "T"
# }
# attr {
# name: "T"
# type: "type"
# allowed_values {
# list {
# type: DT_FLOAT
# type: DT_DOUBLE
# type: DT_INT32
# type: DT_UINT8
# type: DT_INT16
# type: DT_INT8
# type: DT_INT64
# type: DT_BFLOAT16
# type: DT_UINT16
# type: DT_HALF
# type: DT_UINT32
# type: DT_UINT64
# }
# }
# }
# attr {
# name: "Tidx"
# type: "type"
# default_value {
# type: DT_INT32
# }
# allowed_values {
# list {
# type: DT_INT32
# type: DT_INT64
# }
# }
# }
# }
# op {
# name: "SparseSegmentSumWithNumSegments"
# input_arg {
# name: "data"
# type_attr: "T"
# }
# input_arg {
# name: "indices"
# type_attr: "Tidx"
# }
# input_arg {
# name: "segment_ids"
# type: DT_INT32
# }
# input_arg {
# name: "num_segments"
# type_attr: "Tnumsegments"
# }
# output_arg {
# name: "output"
# type_attr: "T"
# }
# attr {
# name: "T"
# type: "type"
# allowed_values {
# list {
# type: DT_FLOAT
# type: DT_DOUBLE
# type: DT_INT32
# type: DT_UINT8
# type: DT_INT16
# type: DT_INT8
# type: DT_INT64
# type: DT_BFLOAT16
# type: DT_UINT16
# type: DT_HALF
# type: DT_UINT32
# type: DT_UINT64
# }
# }
# }
# attr {
# name: "Tidx"
# type: "type"
# default_value {
# type: DT_INT32
# }
# allowed_values {
# list {
# type: DT_INT32
# type: DT_INT64
# }
# }
# }
# attr {
# name: "Tnumsegments"
# type: "type"
# default_value {
# type: DT_INT32
# }
# allowed_values {
# list {
# type: DT_INT32
# type: DT_INT64
# }
# }
# }
# }
# op {
# name: "Sqrt"
# input_arg {
# name: "x"
# type_attr: "T"
# }
# output_arg {
# name: "y"
# type_attr: "T"
# }
# attr {
# name: "T"
# type: "type"
# allowed_values {
# list {
# type: DT_BFLOAT16
# type: DT_HALF
# type: DT_FLOAT
# type: DT_DOUBLE
# type: DT_COMPLEX64
# type: DT_COMPLEX128
# }
# }
# }
# }
# op {
# name: "SqrtGrad"
# input_arg {
# name: "y"
# type_attr: "T"
# }
# input_arg {
# name: "dy"
# type_attr: "T"
# }
# output_arg {
# name: "z"
# type_attr: "T"
# }
# attr {
# name: "T"
# type: "type"
# allowed_values {
# list {
# type: DT_BFLOAT16
# type: DT_HALF
# type: DT_FLOAT
# type: DT_DOUBLE
# type: DT_COMPLEX64
# type: DT_COMPLEX128
# }
# }
# }
# }
# op {
# name: "Square"
# input_arg {
# name: "x"
# type_attr: "T"
# }
# output_arg {
# name: "y"
# type_attr: "T"
# }
# attr {
# name: "T"
# type: "type"
# allowed_values {
# list {
# type: DT_BFLOAT16
# type: DT_HALF
# type: DT_FLOAT
# type: DT_DOUBLE
# type: DT_INT32
# type: DT_INT64
# type: DT_COMPLEX64
# type: DT_COMPLEX128
# }
# }
# }
# }
# op {
# name: "SquaredDifference"
# input_arg {
# name: "x"
# type_attr: "T"
# }
# input_arg {
# name: "y"
# type_attr: "T"
# }
# output_arg {
# name: "z"
# type_attr: "T"
# }
# attr {
# name: "T"
# type: "type"
# allowed_values {
# list {
# type: DT_BFLOAT16
# type: DT_HALF
# type: DT_FLOAT
# type: DT_DOUBLE
# type: DT_INT32
# type: DT_INT64
# type: DT_COMPLEX64
# type: DT_COMPLEX128
# }
# }
# }
# is_commutative: true
# }
# op {
# name: "Sub"
# input_arg {
# name: "x"
# type_attr: "T"
# }
# input_arg {
# name: "y"
# type_attr: "T"
# }
# output_arg {
# name: "z"
# type_attr: "T"
# }
# attr {
# name: "T"
# type: "type"
# allowed_values {
# list {
# type: DT_BFLOAT16
# type: DT_HALF
# type: DT_FLOAT
# type: DT_DOUBLE
# type: DT_UINT8
# type: DT_INT8
# type: DT_UINT16
# type: DT_INT16
# type: DT_INT32
# type: DT_INT64
# type: DT_COMPLEX64
# type: DT_COMPLEX128
# }
# }
# }
# }
# op {
# name: "Sum"
# input_arg {
# name: "input"
# type_attr: "T"
# }
# input_arg {
# name: "reduction_indices"
# type_attr: "Tidx"
# }
# output_arg {
# name: "output"
# type_attr: "T"
# }
# attr {
# name: "keep_dims"
# type: "bool"
# default_value {
# b: false
# }
# }
# attr {
# name: "T"
# type: "type"
# allowed_values {
# list {
# type: DT_FLOAT
# type: DT_DOUBLE
# type: DT_INT32
# type: DT_UINT8
# type: DT_INT16
# type: DT_INT8
# type: DT_COMPLEX64
# type: DT_INT64
# type: DT_QINT8
# type: DT_QUINT8
# type: DT_QINT32
# type: DT_BFLOAT16
# type: DT_UINT16
# type: DT_COMPLEX128
# type: DT_HALF
# type: DT_UINT32
# type: DT_UINT64
# }
# }
# }
# attr {
# name: "Tidx"
# type: "type"
# default_value {
# type: DT_INT32
# }
# allowed_values {
# list {
# type: DT_INT32
# type: DT_INT64
# }
# }
# }
# }
# op {
# name: "Tan"
# input_arg {
# name: "x"
# type_attr: "T"
# }
# output_arg {
# name: "y"
# type_attr: "T"
# }
# attr {
# name: "T"
# type: "type"
# allowed_values {
# list {
# type: DT_BFLOAT16
# type: DT_HALF
# type: DT_FLOAT
# type: DT_DOUBLE
# type: DT_INT32
# type: DT_INT64
# type: DT_COMPLEX64
# type: DT_COMPLEX128
# }
# }
# }
# }
# op {
# name: "Tanh"
# input_arg {
# name: "x"
# type_attr: "T"
# }
# output_arg {
# name: "y"
# type_attr: "T"
# }
# attr {
# name: "T"
# type: "type"
# allowed_values {
# list {
# type: DT_BFLOAT16
# type: DT_HALF
# type: DT_FLOAT
# type: DT_DOUBLE
# type: DT_COMPLEX64
# type: DT_COMPLEX128
# }
# }
# }
# }
# op {
# name: "TanhGrad"
# input_arg {
# name: "y"
# type_attr: "T"
# }
# input_arg {
# name: "dy"
# type_attr: "T"
# }
# output_arg {
# name: "z"
# type_attr: "T"
# }
# attr {
# name: "T"
# type: "type"
# allowed_values {
# list {
# type: DT_BFLOAT16
# type: DT_HALF
# type: DT_FLOAT
# type: DT_DOUBLE
# type: DT_COMPLEX64
# type: DT_COMPLEX128
# }
# }
# }
# }
# op {
# name: "TruncateDiv"
# input_arg {
# name: "x"
# type_attr: "T"
# }
# input_arg {
# name: "y"
# type_attr: "T"
# }
# output_arg {
# name: "z"
# type_attr: "T"
# }
# attr {
# name: "T"
# type: "type"
# allowed_values {
# list {
# type: DT_BFLOAT16
# type: DT_HALF
# type: DT_FLOAT
# type: DT_DOUBLE
# type: DT_UINT8
# type: DT_INT8
# type: DT_UINT16
# type: DT_INT16
# type: DT_INT32
# type: DT_INT64
# type: DT_COMPLEX64
# type: DT_COMPLEX128
# }
# }
# }
# }
# op {
# name: "TruncateMod"
# input_arg {
# name: "x"
# type_attr: "T"
# }
# input_arg {
# name: "y"
# type_attr: "T"
# }
# output_arg {
# name: "z"
# type_attr: "T"
# }
# attr {
# name: "T"
# type: "type"
# allowed_values {
# list {
# type: DT_INT32
# type: DT_INT64
# type: DT_BFLOAT16
# type: DT_HALF
# type: DT_FLOAT
# type: DT_DOUBLE
# }
# }
# }
# }
# op {
# name: "UnsortedSegmentMax"
# input_arg {
# name: "data"
# type_attr: "T"
# }
# input_arg {
# name: "segment_ids"
# type_attr: "Tindices"
# }
# input_arg {
# name: "num_segments"
# type_attr: "Tnumsegments"
# }
# output_arg {
# name: "output"
# type_attr: "T"
# }
# attr {
# name: "T"
# type: "type"
# allowed_values {
# list {
# type: DT_FLOAT
# type: DT_DOUBLE
# type: DT_INT32
# type: DT_UINT8
# type: DT_INT16
# type: DT_INT8
# type: DT_INT64
# type: DT_BFLOAT16
# type: DT_UINT16
# type: DT_HALF
# type: DT_UINT32
# type: DT_UINT64
# }
# }
# }
# attr {
# name: "Tindices"
# type: "type"
# allowed_values {
# list {
# type: DT_INT32
# type: DT_INT64
# }
# }
# }
# attr {
# name: "Tnumsegments"
# type: "type"
# default_value {
# type: DT_INT32
# }
# allowed_values {
# list {
# type: DT_INT32
# type: DT_INT64
# }
# }
# }
# }
# op {
# name: "UnsortedSegmentMin"
# input_arg {
# name: "data"
# type_attr: "T"
# }
# input_arg {
# name: "segment_ids"
# type_attr: "Tindices"
# }
# input_arg {
# name: "num_segments"
# type_attr: "Tnumsegments"
# }
# output_arg {
# name: "output"
# type_attr: "T"
# }
# attr {
# name: "T"
# type: "type"
# allowed_values {
# list {
# type: DT_FLOAT
# type: DT_DOUBLE
# type: DT_INT32
# type: DT_UINT8
# type: DT_INT16
# type: DT_INT8
# type: DT_INT64
# type: DT_BFLOAT16
# type: DT_UINT16
# type: DT_HALF
# type: DT_UINT32
# type: DT_UINT64
# }
# }
# }
# attr {
# name: "Tindices"
# type: "type"
# allowed_values {
# list {
# type: DT_INT32
# type: DT_INT64
# }
# }
# }
# attr {
# name: "Tnumsegments"
# type: "type"
# default_value {
# type: DT_INT32
# }
# allowed_values {
# list {
# type: DT_INT32
# type: DT_INT64
# }
# }
# }
# }
# op {
# name: "UnsortedSegmentProd"
# input_arg {
# name: "data"
# type_attr: "T"
# }
# input_arg {
# name: "segment_ids"
# type_attr: "Tindices"
# }
# input_arg {
# name: "num_segments"
# type_attr: "Tnumsegments"
# }
# output_arg {
# name: "output"
# type_attr: "T"
# }
# attr {
# name: "T"
# type: "type"
# allowed_values {
# list {
# type: DT_FLOAT
# type: DT_DOUBLE
# type: DT_INT32
# type: DT_UINT8
# type: DT_INT16
# type: DT_INT8
# type: DT_COMPLEX64
# type: DT_INT64
# type: DT_QINT8
# type: DT_QUINT8
# type: DT_QINT32
# type: DT_BFLOAT16
# type: DT_UINT16
# type: DT_COMPLEX128
# type: DT_HALF
# type: DT_UINT32
# type: DT_UINT64
# }
# }
# }
# attr {
# name: "Tindices"
# type: "type"
# allowed_values {
# list {
# type: DT_INT32
# type: DT_INT64
# }
# }
# }
# attr {
# name: "Tnumsegments"
# type: "type"
# default_value {
# type: DT_INT32
# }
# allowed_values {
# list {
# type: DT_INT32
# type: DT_INT64
# }
# }
# }
# }
# op {
# name: "UnsortedSegmentSum"
# input_arg {
# name: "data"
# type_attr: "T"
# }
# input_arg {
# name: "segment_ids"
# type_attr: "Tindices"
# }
# input_arg {
# name: "num_segments"
# type_attr: "Tnumsegments"
# }
# output_arg {
# name: "output"
# type_attr: "T"
# }
# attr {
# name: "T"
# type: "type"
# allowed_values {
# list {
# type: DT_FLOAT
# type: DT_DOUBLE
# type: DT_INT32
# type: DT_UINT8
# type: DT_INT16
# type: DT_INT8
# type: DT_COMPLEX64
# type: DT_INT64
# type: DT_QINT8
# type: DT_QUINT8
# type: DT_QINT32
# type: DT_BFLOAT16
# type: DT_UINT16
# type: DT_COMPLEX128
# type: DT_HALF
# type: DT_UINT32
# type: DT_UINT64
# }
# }
# }
# attr {
# name: "Tindices"
# type: "type"
# allowed_values {
# list {
# type: DT_INT32
# type: DT_INT64
# }
# }
# }
# attr {
# name: "Tnumsegments"
# type: "type"
# default_value {
# type: DT_INT32
# }
# allowed_values {
# list {
# type: DT_INT32
# type: DT_INT64
# }
# }
# }
# }
# op {
# name: "Zeta"
# input_arg {
# name: "x"
# type_attr: "T"
# }
# input_arg {
# name: "q"
# type_attr: "T"
# }
# output_arg {
# name: "z"
# type_attr: "T"
# }
# attr {
# name: "T"
# type: "type"
# allowed_values {
# list {
# type: DT_FLOAT
# type: DT_DOUBLE
# }
# }
# }
# }
_op_def_lib = _InitOpDefLibrary(b"\n,\n\003Abs\022\006\n\001x\"\001T\032\006\n\001y\"\001T\"\025\n\001T\022\004type:\n\n\0102\006\016\023\001\002\003\t\no\n\rAccumulateNV2\022\016\n\006inputs\"\001T*\001N\032\010\n\003sum\"\001T\"\014\n\001N\022\003int(\0010\001\" \n\001T\022\004type:\025\n\0232\021\001\002\003\004\005\006\010\t\013\014\r\016\021\022\023\026\027\"\016\n\005shape\022\005shape\200\001\001\220\001\001\n/\n\004Acos\022\006\n\001x\"\001T\032\006\n\001y\"\001T\"\027\n\001T\022\004type:\014\n\n2\010\016\023\001\002\003\t\010\022\n.\n\005Acosh\022\006\n\001x\"\001T\032\006\n\001y\"\001T\"\025\n\001T\022\004type:\n\n\0102\006\016\023\001\002\010\022\n:\n\003Add\022\006\n\001x\"\001T\022\006\n\001y\"\001T\032\006\n\001z\"\001T\"\033\n\001T\022\004type:\020\n\0162\014\016\023\001\002\004\006\005\003\t\010\022\007\nW\n\004AddN\022\016\n\006inputs\"\001T*\001N\032\010\n\003sum\"\001T\"\014\n\001N\022\003int(\0010\001\"!\n\001T\022\004type:\026\n\0242\022\001\002\003\004\005\006\010\t\013\014\r\016\021\022\023\026\027\025\200\001\001\220\001\001\nA\n\005AddV2\022\006\n\001x\"\001T\022\006\n\001y\"\001T\032\006\n\001z\"\001T\"\032\n\001T\022\004type:\017\n\r2\013\016\023\001\002\004\006\005\003\t\010\022\200\001\001\220\001\001\nh\n\003All\022\t\n\005input\030\n\022\031\n\021reduction_indices\"\004Tidx\032\n\n\006output\030\n\"\025\n\tkeep_dims\022\004bool\032\002(\000\"\030\n\004Tidx\022\004type\032\0020\003:\006\n\0042\002\003\t\nT\n\005Angle\022\n\n\005input\"\001T\032\016\n\006output\"\004Tout\"\025\n\001T\022\004type\032\0020\010:\006\n\0042\002\010\022\"\030\n\004Tout\022\004type\032\0020\001:\006\n\0042\002\001\002\nh\n\003Any\022\t\n\005input\030\n\022\031\n\021reduction_indices\"\004Tidx\032\n\n\006output\030\n\"\025\n\tkeep_dims\022\004bool\032\002(\000\"\030\n\004Tidx\022\004type\032\0020\003:\006\n\0042\002\003\t\ni\n\020ApproximateEqual\022\006\n\001x\"\001T\022\006\n\001y\"\001T\032\005\n\001z\030\n\" 
\n\001T\022\004type:\025\n\0232\021\001\002\003\004\005\006\010\t\013\014\r\016\021\022\023\026\027\"\031\n\ttolerance\022\005float\032\005%\254\305\'7\220\001\001\n\233\001\n\006ArgMax\022\n\n\005input\"\001T\022\021\n\tdimension\"\004Tidx\032\025\n\006output\"\013output_type\" \n\001T\022\004type:\025\n\0232\021\001\002\003\004\005\006\010\t\013\014\r\016\021\022\023\026\027\"\030\n\004Tidx\022\004type\032\0020\003:\006\n\0042\002\003\t\"\037\n\013output_type\022\004type\032\0020\t:\006\n\0042\002\003\t\n\233\001\n\006ArgMin\022\n\n\005input\"\001T\022\021\n\tdimension\"\004Tidx\032\025\n\006output\"\013output_type\" \n\001T\022\004type:\025\n\0232\021\001\002\003\004\005\006\010\t\013\014\r\016\021\022\023\026\027\"\030\n\004Tidx\022\004type\032\0020\003:\006\n\0042\002\003\t\"\037\n\013output_type\022\004type\032\0020\t:\006\n\0042\002\003\t\n/\n\004Asin\022\006\n\001x\"\001T\032\006\n\001y\"\001T\"\027\n\001T\022\004type:\014\n\n2\010\016\023\001\002\003\t\010\022\n.\n\005Asinh\022\006\n\001x\"\001T\032\006\n\001y\"\001T\"\025\n\001T\022\004type:\n\n\0102\006\016\023\001\002\010\022\n/\n\004Atan\022\006\n\001x\"\001T\032\006\n\001y\"\001T\"\027\n\001T\022\004type:\014\n\n2\010\016\023\001\002\003\t\010\022\n4\n\005Atan2\022\006\n\001y\"\001T\022\006\n\001x\"\001T\032\006\n\001z\"\001T\"\023\n\001T\022\004type:\010\n\0062\004\016\023\001\002\n.\n\005Atanh\022\006\n\001x\"\001T\032\006\n\001y\"\001T\"\025\n\001T\022\004type:\n\n\0102\006\016\023\001\002\010\022\nh\n\013BatchMatMul\022\006\n\001x\"\001T\022\006\n\001y\"\001T\032\013\n\006output\"\001T\"\026\n\001T\022\004type:\013\n\t2\007\016\023\001\002\003\010\022\"\021\n\005adj_x\022\004bool\032\002(\000\"\021\n\005adj_y\022\004bool\032\002(\000\n0\n\tBesselI0e\022\006\n\001x\"\001T\032\006\n\001y\"\001T\"\023\n\001T\022\004type:\010\n\0062\004\016\023\001\002\n0\n\tBesselI1e\022\006\n\001x\"\001T\032\006\n\001y\"\001T\"\023\n\001T\022\004type:\010\n\0062\004\016\023\001\002\n<\n\007Betainc\022\006\n\001a\"\00
1T\022\006\n\001b\"\001T\022\006\n\001x\"\001T\032\006\n\001z\"\001T\"\021\n\001T\022\004type:\006\n\0042\002\001\002\nK\n\010Bincount\022\007\n\003arr\030\003\022\010\n\004size\030\003\022\014\n\007weights\"\001T\032\t\n\004bins\"\001T\"\023\n\001T\022\004type:\010\n\0062\004\003\t\001\002\nS\n\tBucketize\022\n\n\005input\"\001T\032\n\n\006output\030\003\"\023\n\001T\022\004type:\010\n\0062\004\003\t\001\002\"\031\n\nboundaries\022\013list(float)\n8\n\004Cast\022\t\n\001x\"\004SrcT\032\t\n\001y\"\004DstT\"\014\n\004SrcT\022\004type\"\014\n\004DstT\022\004type\n+\n\004Ceil\022\006\n\001x\"\001T\032\006\n\001y\"\001T\"\023\n\001T\022\004type:\010\n\0062\004\016\023\001\002\nn\n\013ClipByValue\022\006\n\001t\"\001T\022\023\n\016clip_value_min\"\001T\022\023\n\016clip_value_max\"\001T\032\013\n\006output\"\001T\" \n\001T\022\004type:\025\n\0232\021\001\002\003\004\005\006\010\t\013\014\r\016\021\022\023\026\027\nT\n\021CompareAndBitpack\022\n\n\005input\"\001T\022\016\n\tthreshold\"\001T\032\n\n\006output\030\004\"\027\n\001T\022\004type:\014\n\n2\010\n\023\001\002\006\005\003\t\n]\n\007Complex\022\t\n\004real\"\001T\022\t\n\004imag\"\001T\032\013\n\003out\"\004Tout\"\025\n\001T\022\004type\032\0020\001:\006\n\0042\002\001\002\"\030\n\004Tout\022\004type\032\0020\010:\006\n\0042\002\010\022\nP\n\nComplexAbs\022\006\n\001x\"\001T\032\t\n\001y\"\004Tout\"\025\n\001T\022\004type\032\0020\010:\006\n\0042\002\010\022\"\030\n\004Tout\022\004type\032\0020\001:\006\n\0042\002\001\002\n7\n\004Conj\022\n\n\005input\"\001T\032\013\n\006output\"\001T\"\026\n\001T\022\004type\032\0020\010:\007\n\0052\003\010\022\025\n,\n\003Cos\022\006\n\001x\"\001T\032\006\n\001y\"\001T\"\025\n\001T\022\004type:\n\n\0102\006\016\023\001\002\010\022\n-\n\004Cosh\022\006\n\001x\"\001T\032\006\n\001y\"\001T\"\025\n\001T\022\004type:\n\n\0102\006\016\023\001\002\010\022\nB\n\005Cross\022\006\n\001a\"\001T\022\006\n\001b\"\001T\032\014\n\007product\"\001T\"\033\n\001T\022\004type:\020\n\0162\014\001\00
2\003\004\005\006\t\016\021\023\026\027\n\221\001\n\007Cumprod\022\006\n\001x\"\001T\022\014\n\004axis\"\004Tidx\032\010\n\003out\"\001T\"\025\n\texclusive\022\004bool\032\002(\000\"\023\n\007reverse\022\004bool\032\002(\000\" \n\001T\022\004type:\025\n\0232\021\001\002\003\004\005\006\010\t\013\014\r\016\021\022\023\026\027\"\030\n\004Tidx\022\004type\032\0020\003:\006\n\0042\002\003\t\n\220\001\n\006Cumsum\022\006\n\001x\"\001T\022\014\n\004axis\"\004Tidx\032\010\n\003out\"\001T\"\025\n\texclusive\022\004bool\032\002(\000\"\023\n\007reverse\022\004bool\032\002(\000\" \n\001T\022\004type:\025\n\0232\021\001\002\003\004\005\006\010\t\013\014\r\016\021\022\023\026\027\"\030\n\004Tidx\022\004type\032\0020\003:\006\n\0042\002\003\t\n.\n\007Digamma\022\006\n\001x\"\001T\032\006\n\001y\"\001T\"\023\n\001T\022\004type:\010\n\0062\004\016\023\001\002\n:\n\003Div\022\006\n\001x\"\001T\022\006\n\001y\"\001T\032\006\n\001z\"\001T\"\033\n\001T\022\004type:\020\n\0162\014\016\023\001\002\004\006\021\005\003\t\010\022\nB\n\005Equal\022\006\n\001x\"\001T\022\006\n\001y\"\001T\032\005\n\001z\030\n\"\037\n\001T\022\004type:\024\n\0222\020\016\023\001\002\004\006\005\003\t\010\014\013\r\007\n\022\220\001\001\n*\n\003Erf\022\006\n\001x\"\001T\032\006\n\001y\"\001T\"\023\n\001T\022\004type:\010\n\0062\004\016\023\001\002\n+\n\004Erfc\022\006\n\001x\"\001T\032\006\n\001y\"\001T\"\023\n\001T\022\004type:\010\n\0062\004\016\023\001\002\n,\n\003Exp\022\006\n\001x\"\001T\032\006\n\001y\"\001T\"\025\n\001T\022\004type:\n\n\0102\006\016\023\001\002\010\022\n.\n\005Expm1\022\006\n\001x\"\001T\032\006\n\001y\"\001T\"\025\n\001T\022\004type:\n\n\0102\006\016\023\001\002\010\022\n,\n\005Floor\022\006\n\001x\"\001T\032\006\n\001y\"\001T\"\023\n\001T\022\004type:\010\n\0062\004\016\023\001\002\n?\n\010FloorDiv\022\006\n\001x\"\001T\022\006\n\001y\"\001T\032\006\n\001z\"\001T\"\033\n\001T\022\004type:\020\n\0162\014\016\023\001\002\004\006\021\005\003\t\010\022\n9\n\010FloorMod\022\006\n\001x\"\001
T\022\006\n\001y\"\001T\032\006\n\001z\"\001T\"\025\n\001T\022\004type:\n\n\0102\006\003\t\016\023\001\002\n=\n\007Greater\022\006\n\001x\"\001T\022\006\n\001y\"\001T\032\005\n\001z\030\n\"\033\n\001T\022\004type:\020\n\0162\014\001\002\003\004\005\006\t\016\021\023\026\027\nB\n\014GreaterEqual\022\006\n\001x\"\001T\022\006\n\001y\"\001T\032\005\n\001z\030\n\"\033\n\001T\022\004type:\020\n\0162\014\001\002\003\004\005\006\t\016\021\023\026\027\n}\n\023HistogramFixedWidth\022\013\n\006values\"\001T\022\020\n\013value_range\"\001T\022\t\n\005nbins\030\003\032\014\n\003out\"\005dtype\"\023\n\001T\022\004type:\010\n\0062\004\003\t\001\002\"\031\n\005dtype\022\004type\032\0020\003:\006\n\0042\002\003\t\n3\n\006Igamma\022\006\n\001a\"\001T\022\006\n\001x\"\001T\032\006\n\001z\"\001T\"\021\n\001T\022\004type:\006\n\0042\002\001\002\n8\n\013IgammaGradA\022\006\n\001a\"\001T\022\006\n\001x\"\001T\032\006\n\001z\"\001T\"\021\n\001T\022\004type:\006\n\0042\002\001\002\n4\n\007Igammac\022\006\n\001a\"\001T\022\006\n\001x\"\001T\032\006\n\001z\"\001T\"\021\n\001T\022\004type:\006\n\0042\002\001\002\nS\n\004Imag\022\n\n\005input\"\001T\032\016\n\006output\"\004Tout\"\025\n\001T\022\004type\032\0020\010:\006\n\0042\002\010\022\"\030\n\004Tout\022\004type\032\0020\001:\006\n\0042\002\001\002\n.\n\003Inv\022\006\n\001x\"\001T\032\006\n\001y\"\001T\"\027\n\001T\022\004type:\014\n\n2\010\016\023\001\002\003\t\010\022\n9\n\007InvGrad\022\006\n\001y\"\001T\022\007\n\002dy\"\001T\032\006\n\001z\"\001T\"\025\n\001T\022\004type:\n\n\0102\006\016\023\001\002\010\022\n.\n\010IsFinite\022\006\n\001x\"\001T\032\005\n\001y\030\n\"\023\n\001T\022\004type:\010\n\0062\004\016\023\001\002\n+\n\005IsInf\022\006\n\001x\"\001T\032\005\n\001y\030\n\"\023\n\001T\022\004type:\010\n\0062\004\016\023\001\002\n+\n\005IsNan\022\006\n\001x\"\001T\032\005\n\001y\030\n\"\023\n\001T\022\004type:\010\n\0062\004\016\023\001\002\n:\n\004Less\022\006\n\001x\"\001T\022\006\n\001y\"\001T\032\005\n\001z\030\n\"\033\n\0
01T\022\004type:\020\n\0162\014\001\002\003\004\005\006\t\016\021\023\026\027\n?\n\tLessEqual\022\006\n\001x\"\001T\022\006\n\001y\"\001T\032\005\n\001z\030\n\"\033\n\001T\022\004type:\020\n\0162\014\001\002\003\004\005\006\t\016\021\023\026\027\n-\n\006Lgamma\022\006\n\001x\"\001T\032\006\n\001y\"\001T\"\023\n\001T\022\004type:\010\n\0062\004\016\023\001\002\ni\n\010LinSpace\022\n\n\005start\"\001T\022\t\n\004stop\"\001T\022\013\n\003num\"\004Tidx\032\013\n\006output\"\001T\"\022\n\001T\022\004type:\007\n\0052\003\016\001\002\"\030\n\004Tidx\022\004type\032\0020\003:\006\n\0042\002\003\t\n,\n\003Log\022\006\n\001x\"\001T\032\006\n\001y\"\001T\"\025\n\001T\022\004type:\n\n\0102\006\016\023\001\002\010\022\n.\n\005Log1p\022\006\n\001x\"\001T\032\006\n\001y\"\001T\"\025\n\001T\022\004type:\n\n\0102\006\016\023\001\002\010\022\n$\n\nLogicalAnd\022\005\n\001x\030\n\022\005\n\001y\030\n\032\005\n\001z\030\n\220\001\001\n\032\n\nLogicalNot\022\005\n\001x\030\n\032\005\n\001y\030\n\n#\n\tLogicalOr\022\005\n\001x\030\n\022\005\n\001y\030\n\032\005\n\001z\030\n\220\001\001\np\n\006MatMul\022\006\n\001a\"\001T\022\006\n\001b\"\001T\032\014\n\007product\"\001T\"\027\n\013transpose_a\022\004bool\032\002(\000\"\027\n\013transpose_b\022\004bool\032\002(\000\"\026\n\001T\022\004type:\013\n\t2\007\016\023\001\002\003\010\022\n\214\001\n\003Max\022\n\n\005input\"\001T\022\031\n\021reduction_indices\"\004Tidx\032\013\n\006output\"\001T\"\025\n\tkeep_dims\022\004bool\032\002(\000\" \n\001T\022\004type:\025\n\0232\021\001\002\003\004\005\006\010\t\013\014\r\016\021\022\023\026\027\"\030\n\004Tidx\022\004type\032\0020\003:\006\n\0042\002\003\t\n;\n\007Maximum\022\006\n\001x\"\001T\022\006\n\001y\"\001T\032\006\n\001z\"\001T\"\025\n\001T\022\004type:\n\n\0102\006\016\023\001\002\003\t\220\001\001\n\215\001\n\004Mean\022\n\n\005input\"\001T\022\031\n\021reduction_indices\"\004Tidx\032\013\n\006output\"\001T\"\025\n\tkeep_dims\022\004bool\032\002(\000\" 
\n\001T\022\004type:\025\n\0232\021\001\002\003\004\005\006\010\t\013\014\r\016\021\022\023\026\027\"\030\n\004Tidx\022\004type\032\0020\003:\006\n\0042\002\003\t\n\214\001\n\003Min\022\n\n\005input\"\001T\022\031\n\021reduction_indices\"\004Tidx\032\013\n\006output\"\001T\"\025\n\tkeep_dims\022\004bool\032\002(\000\" \n\001T\022\004type:\025\n\0232\021\001\002\003\004\005\006\010\t\013\014\r\016\021\022\023\026\027\"\030\n\004Tidx\022\004type\032\0020\003:\006\n\0042\002\003\t\n;\n\007Minimum\022\006\n\001x\"\001T\022\006\n\001y\"\001T\032\006\n\001z\"\001T\"\025\n\001T\022\004type:\n\n\0102\006\016\023\001\002\003\t\220\001\001\n5\n\003Mod\022\006\n\001x\"\001T\022\006\n\001y\"\001T\032\006\n\001z\"\001T\"\026\n\001T\022\004type:\013\n\t2\007\003\t\023\023\016\001\002\n=\n\003Mul\022\006\n\001x\"\001T\022\006\n\001y\"\001T\032\006\n\001z\"\001T\"\033\n\001T\022\004type:\020\n\0162\014\016\023\001\002\004\006\021\005\003\t\010\022\220\001\001\n.\n\003Neg\022\006\n\001x\"\001T\032\006\n\001y\"\001T\"\027\n\001T\022\004type:\014\n\n2\010\016\023\001\002\003\t\010\022\nE\n\010NotEqual\022\006\n\001x\"\001T\022\006\n\001y\"\001T\032\005\n\001z\030\n\"\037\n\001T\022\004type:\024\n\0222\020\016\023\001\002\004\006\005\003\t\010\014\013\r\007\n\022\220\001\001\n6\n\tPolygamma\022\006\n\001a\"\001T\022\006\n\001x\"\001T\032\006\n\001z\"\001T\"\021\n\001T\022\004type:\006\n\0042\002\001\002\n6\n\003Pow\022\006\n\001x\"\001T\022\006\n\001y\"\001T\032\006\n\001z\"\001T\"\027\n\001T\022\004type:\014\n\n2\010\016\001\023\002\003\t\010\022\n\215\001\n\004Prod\022\n\n\005input\"\001T\022\031\n\021reduction_indices\"\004Tidx\032\013\n\006output\"\001T\"\025\n\tkeep_dims\022\004bool\032\002(\000\" 
\n\001T\022\004type:\025\n\0232\021\001\002\003\004\005\006\010\t\013\014\r\016\021\022\023\026\027\"\030\n\004Tidx\022\004type\032\0020\003:\006\n\0042\002\003\t\n\267\001\n\032QuantizeDownAndShrinkRange\022\017\n\005input\"\006Tinput\022\r\n\tinput_min\030\001\022\r\n\tinput_max\030\001\032\022\n\006output\"\010out_type\032\016\n\noutput_min\030\001\032\016\n\noutput_max\030\001\"\031\n\006Tinput\022\004type:\t\n\0072\005\013\014\r\017\020\"\033\n\010out_type\022\004type:\t\n\0072\005\013\014\r\017\020\n\301\001\n\014QuantizedAdd\022\007\n\001x\"\002T1\022\007\n\001y\"\002T2\022\t\n\005min_x\030\001\022\t\n\005max_x\030\001\022\t\n\005min_y\030\001\022\t\n\005max_y\030\001\032\014\n\001z\"\007Toutput\032\t\n\005min_z\030\001\032\t\n\005max_z\030\001\"\025\n\002T1\022\004type:\t\n\0072\005\013\014\r\017\020\"\025\n\002T2\022\004type:\t\n\0072\005\013\014\r\017\020\"\036\n\007Toutput\022\004type\032\0020\r:\t\n\0072\005\013\014\r\017\020\220\001\001\n\235\002\n\017QuantizedMatMul\022\007\n\001a\"\002T1\022\007\n\001b\"\002T2\022\t\n\005min_a\030\001\022\t\n\005max_a\030\001\022\t\n\005min_b\030\001\022\t\n\005max_b\030\001\032\016\n\003out\"\007Toutput\032\013\n\007min_out\030\001\032\013\n\007max_out\030\001\"\025\n\002T1\022\004type:\t\n\0072\005\013\014\r\017\020\"\025\n\002T2\022\004type:\t\n\0072\005\013\014\r\017\020\"\036\n\007Toutput\022\004type\032\0020\r:\t\n\0072\005\013\014\r\017\020\"\027\n\013transpose_a\022\004bool\032\002(\000\"\027\n\013transpose_b\022\004bool\032\002(\000\"\"\n\013Tactivation\022\004type\032\0020\014:\t\n\0072\005\013\014\r\017\020\n\301\001\n\014QuantizedMul\022\007\n\001x\"\002T1\022\007\n\001y\"\002T2\022\t\n\005min_x\030\001\022\t\n\005max_x\030\001\022\t\n\005min_y\030\001\022\t\n\005max_y\030\001\032\014\n\001z\"\007Toutput\032\t\n\005min_z\030\001\032\t\n\005max_z\030\001\"\025\n\002T1\022\004type:\t\n\0072\005\013\014\r\017\020\"\025\n\002T2\022\004type:\t\n\0072\005\013\014\r\017\020\"\036\n\007Toutput\022\004type\032\0020
\r:\t\n\0072\005\013\014\r\017\020\220\001\001\na\n\005Range\022\r\n\005start\"\004Tidx\022\r\n\005limit\"\004Tidx\022\r\n\005delta\"\004Tidx\032\016\n\006output\"\004Tidx\"\033\n\004Tidx\022\004type\032\0020\003:\t\n\0072\005\016\001\002\003\t\nS\n\004Real\022\n\n\005input\"\001T\032\016\n\006output\"\004Tout\"\025\n\001T\022\004type\032\0020\010:\006\n\0042\002\010\022\"\030\n\004Tout\022\004type\032\0020\001:\006\n\0042\002\001\002\n>\n\007RealDiv\022\006\n\001x\"\001T\022\006\n\001y\"\001T\032\006\n\001z\"\001T\"\033\n\001T\022\004type:\020\n\0162\014\016\023\001\002\004\006\021\005\003\t\010\022\n5\n\nReciprocal\022\006\n\001x\"\001T\032\006\n\001y\"\001T\"\027\n\001T\022\004type:\014\n\n2\010\016\023\001\002\003\t\010\022\n@\n\016ReciprocalGrad\022\006\n\001y\"\001T\022\007\n\002dy\"\001T\032\006\n\001z\"\001T\"\025\n\001T\022\004type:\n\n\0102\006\016\023\001\002\010\022\n\177\n\023RequantizationRange\022\017\n\005input\"\006Tinput\022\r\n\tinput_min\030\001\022\r\n\tinput_max\030\001\032\016\n\noutput_min\030\001\032\016\n\noutput_max\030\001\"\031\n\006Tinput\022\004type:\t\n\0072\005\013\014\r\017\020\n\333\001\n\nRequantize\022\017\n\005input\"\006Tinput\022\r\n\tinput_min\030\001\022\r\n\tinput_max\030\001\022\030\n\024requested_output_min\030\001\022\030\n\024requested_output_max\030\001\032\022\n\006output\"\010out_type\032\016\n\noutput_min\030\001\032\016\n\noutput_max\030\001\"\031\n\006Tinput\022\004type:\t\n\0072\005\013\014\r\017\020\"\033\n\010out_type\022\004type:\t\n\0072\005\013\014\r\017\020\n+\n\004Rint\022\006\n\001x\"\001T\032\006\n\001y\"\001T\"\023\n\001T\022\004type:\010\n\0062\004\016\023\001\002\n0\n\005Round\022\006\n\001x\"\001T\032\006\n\001y\"\001T\"\027\n\001T\022\004type:\014\n\n2\010\016\023\001\002\003\t\010\022\n.\n\005Rsqrt\022\006\n\001x\"\001T\032\006\n\001y\"\001T\"\025\n\001T\022\004type:\n\n\0102\006\016\023\001\002\010\022\n;\n\tRsqrtGrad\022\006\n\001y\"\001T\022\007\n\002dy\"\001T\032\006\n\001z\"\001T\"\025\n\001T\
022\004type:\n\n\0102\006\016\023\001\002\010\022\nt\n\nSegmentMax\022\t\n\004data\"\001T\022\027\n\013segment_ids\"\010Tindices\032\013\n\006output\"\001T\"\033\n\001T\022\004type:\020\n\0162\014\001\002\003\004\005\006\t\016\021\023\026\027\"\030\n\010Tindices\022\004type:\006\n\0042\002\003\t\nz\n\013SegmentMean\022\t\n\004data\"\001T\022\027\n\013segment_ids\"\010Tindices\032\013\n\006output\"\001T\" \n\001T\022\004type:\025\n\0232\021\001\002\003\004\005\006\010\t\013\014\r\016\021\022\023\026\027\"\030\n\010Tindices\022\004type:\006\n\0042\002\003\t\nt\n\nSegmentMin\022\t\n\004data\"\001T\022\027\n\013segment_ids\"\010Tindices\032\013\n\006output\"\001T\"\033\n\001T\022\004type:\020\n\0162\014\001\002\003\004\005\006\t\016\021\023\026\027\"\030\n\010Tindices\022\004type:\006\n\0042\002\003\t\nz\n\013SegmentProd\022\t\n\004data\"\001T\022\027\n\013segment_ids\"\010Tindices\032\013\n\006output\"\001T\" \n\001T\022\004type:\025\n\0232\021\001\002\003\004\005\006\010\t\013\014\r\016\021\022\023\026\027\"\030\n\010Tindices\022\004type:\006\n\0042\002\003\t\ny\n\nSegmentSum\022\t\n\004data\"\001T\022\027\n\013segment_ids\"\010Tindices\032\013\n\006output\"\001T\" 
\n\001T\022\004type:\025\n\0232\021\001\002\003\004\005\006\010\t\013\014\r\016\021\022\023\026\027\"\030\n\010Tindices\022\004type:\006\n\0042\002\003\t\n?\n\006Select\022\r\n\tcondition\030\n\022\006\n\001t\"\001T\022\006\n\001e\"\001T\032\013\n\006output\"\001T\"\t\n\001T\022\004type\n0\n\007Sigmoid\022\006\n\001x\"\001T\032\006\n\001y\"\001T\"\025\n\001T\022\004type:\n\n\0102\006\016\023\001\002\010\022\n=\n\013SigmoidGrad\022\006\n\001y\"\001T\022\007\n\002dy\"\001T\032\006\n\001z\"\001T\"\025\n\001T\022\004type:\n\n\0102\006\016\023\001\002\010\022\n/\n\004Sign\022\006\n\001x\"\001T\032\006\n\001y\"\001T\"\027\n\001T\022\004type:\014\n\n2\010\016\023\001\002\003\t\010\022\n,\n\003Sin\022\006\n\001x\"\001T\032\006\n\001y\"\001T\"\025\n\001T\022\004type:\n\n\0102\006\016\023\001\002\010\022\n-\n\004Sinh\022\006\n\001x\"\001T\032\006\n\001y\"\001T\"\025\n\001T\022\004type:\n\n\0102\006\016\023\001\002\010\022\n\301\001\n\014SparseMatMul\022\007\n\001a\"\002Ta\022\007\n\001b\"\002Tb\032\013\n\007product\030\001\"\027\n\013transpose_a\022\004bool\032\002(\000\"\027\n\013transpose_b\022\004bool\032\002(\000\"\027\n\013a_is_sparse\022\004bool\032\002(\000\"\027\n\013b_is_sparse\022\004bool\032\002(\000\"\026\n\002Ta\022\004type\032\0020\001:\006\n\0042\002\001\016\"\026\n\002Tb\022\004type\032\0020\001:\006\n\0042\002\001\016\nz\n\021SparseSegmentMean\022\t\n\004data\"\001T\022\017\n\007indices\"\004Tidx\022\017\n\013segment_ids\030\003\032\013\n\006output\"\001T\"\021\n\001T\022\004type:\006\n\0042\002\001\002\"\030\n\004Tidx\022\004type\032\0020\003:\006\n\0042\002\003\t\n\217\001\n\025SparseSegmentMeanGrad\022\t\n\004grad\"\001T\022\017\n\007indices\"\004Tidx\022\017\n\013segment_ids\030\003\022\017\n\013output_dim0\030\003\032\013\n\006output\"\001T\"\021\n\001T\022\004type:\006\n\0042\002\001\002\"\030\n\004Tidx\022\004type\032\0020\003:\006\n\0042\002\003\t\n\311\001\n 
SparseSegmentMeanWithNumSegments\022\t\n\004data\"\001T\022\017\n\007indices\"\004Tidx\022\017\n\013segment_ids\030\003\022\034\n\014num_segments\"\014Tnumsegments\032\013\n\006output\"\001T\"\021\n\001T\022\004type:\006\n\0042\002\001\002\"\030\n\004Tidx\022\004type\032\0020\003:\006\n\0042\002\003\t\" \n\014Tnumsegments\022\004type\032\0020\003:\006\n\0042\002\003\t\n{\n\022SparseSegmentSqrtN\022\t\n\004data\"\001T\022\017\n\007indices\"\004Tidx\022\017\n\013segment_ids\030\003\032\013\n\006output\"\001T\"\021\n\001T\022\004type:\006\n\0042\002\001\002\"\030\n\004Tidx\022\004type\032\0020\003:\006\n\0042\002\003\t\n\220\001\n\026SparseSegmentSqrtNGrad\022\t\n\004grad\"\001T\022\017\n\007indices\"\004Tidx\022\017\n\013segment_ids\030\003\022\017\n\013output_dim0\030\003\032\013\n\006output\"\001T\"\021\n\001T\022\004type:\006\n\0042\002\001\002\"\030\n\004Tidx\022\004type\032\0020\003:\006\n\0042\002\003\t\n\312\001\n!SparseSegmentSqrtNWithNumSegments\022\t\n\004data\"\001T\022\017\n\007indices\"\004Tidx\022\017\n\013segment_ids\030\003\022\034\n\014num_segments\"\014Tnumsegments\032\013\n\006output\"\001T\"\021\n\001T\022\004type:\006\n\0042\002\001\002\"\030\n\004Tidx\022\004type\032\0020\003:\006\n\0042\002\003\t\" \n\014Tnumsegments\022\004type\032\0020\003:\006\n\0042\002\003\t\n\203\001\n\020SparseSegmentSum\022\t\n\004data\"\001T\022\017\n\007indices\"\004Tidx\022\017\n\013segment_ids\030\003\032\013\n\006output\"\001T\"\033\n\001T\022\004type:\020\n\0162\014\001\002\003\004\005\006\t\016\021\023\026\027\"\030\n\004Tidx\022\004type\032\0020\003:\006\n\0042\002\003\t\n\322\001\n\037SparseSegmentSumWithNumSegments\022\t\n\004data\"\001T\022\017\n\007indices\"\004Tidx\022\017\n\013segment_ids\030\003\022\034\n\014num_segments\"\014Tnumsegments\032\013\n\006output\"\001T\"\033\n\001T\022\004type:\020\n\0162\014\001\002\003\004\005\006\t\016\021\023\026\027\"\030\n\004Tidx\022\004type\032\0020\003:\006\n\0042\002\003\t\" 
\n\014Tnumsegments\022\004type\032\0020\003:\006\n\0042\002\003\t\n-\n\004Sqrt\022\006\n\001x\"\001T\032\006\n\001y\"\001T\"\025\n\001T\022\004type:\n\n\0102\006\016\023\001\002\010\022\n:\n\010SqrtGrad\022\006\n\001y\"\001T\022\007\n\002dy\"\001T\032\006\n\001z\"\001T\"\025\n\001T\022\004type:\n\n\0102\006\016\023\001\002\010\022\n1\n\006Square\022\006\n\001x\"\001T\032\006\n\001y\"\001T\"\027\n\001T\022\004type:\014\n\n2\010\016\023\001\002\003\t\010\022\nG\n\021SquaredDifference\022\006\n\001x\"\001T\022\006\n\001y\"\001T\032\006\n\001z\"\001T\"\027\n\001T\022\004type:\014\n\n2\010\016\023\001\002\003\t\010\022\220\001\001\n:\n\003Sub\022\006\n\001x\"\001T\022\006\n\001y\"\001T\032\006\n\001z\"\001T\"\033\n\001T\022\004type:\020\n\0162\014\016\023\001\002\004\006\021\005\003\t\010\022\n\214\001\n\003Sum\022\n\n\005input\"\001T\022\031\n\021reduction_indices\"\004Tidx\032\013\n\006output\"\001T\"\025\n\tkeep_dims\022\004bool\032\002(\000\" \n\001T\022\004type:\025\n\0232\021\001\002\003\004\005\006\010\t\013\014\r\016\021\022\023\026\027\"\030\n\004Tidx\022\004type\032\0020\003:\006\n\0042\002\003\t\n.\n\003Tan\022\006\n\001x\"\001T\032\006\n\001y\"\001T\"\027\n\001T\022\004type:\014\n\n2\010\016\023\001\002\003\t\010\022\n-\n\004Tanh\022\006\n\001x\"\001T\032\006\n\001y\"\001T\"\025\n\001T\022\004type:\n\n\0102\006\016\023\001\002\010\022\n:\n\010TanhGrad\022\006\n\001y\"\001T\022\007\n\002dy\"\001T\032\006\n\001z\"\001T\"\025\n\001T\022\004type:\n\n\0102\006\016\023\001\002\010\022\nB\n\013TruncateDiv\022\006\n\001x\"\001T\022\006\n\001y\"\001T\032\006\n\001z\"\001T\"\033\n\001T\022\004type:\020\n\0162\014\016\023\001\002\004\006\021\005\003\t\010\022\n<\n\013TruncateMod\022\006\n\001x\"\001T\022\006\n\001y\"\001T\032\006\n\001z\"\001T\"\025\n\001T\022\004type:\n\n\0102\006\003\t\016\023\001\002\n\274\001\n\022UnsortedSegmentMax\022\t\n\004data\"\001T\022\027\n\013segment_ids\"\010Tindices\022\034\n\014num_segments\"\014Tnumsegments\032\013\n\006output\"\001T\"\
033\n\001T\022\004type:\020\n\0162\014\001\002\003\004\005\006\t\016\021\023\026\027\"\030\n\010Tindices\022\004type:\006\n\0042\002\003\t\" \n\014Tnumsegments\022\004type\032\0020\003:\006\n\0042\002\003\t\n\274\001\n\022UnsortedSegmentMin\022\t\n\004data\"\001T\022\027\n\013segment_ids\"\010Tindices\022\034\n\014num_segments\"\014Tnumsegments\032\013\n\006output\"\001T\"\033\n\001T\022\004type:\020\n\0162\014\001\002\003\004\005\006\t\016\021\023\026\027\"\030\n\010Tindices\022\004type:\006\n\0042\002\003\t\" \n\014Tnumsegments\022\004type\032\0020\003:\006\n\0042\002\003\t\n\302\001\n\023UnsortedSegmentProd\022\t\n\004data\"\001T\022\027\n\013segment_ids\"\010Tindices\022\034\n\014num_segments\"\014Tnumsegments\032\013\n\006output\"\001T\" \n\001T\022\004type:\025\n\0232\021\001\002\003\004\005\006\010\t\013\014\r\016\021\022\023\026\027\"\030\n\010Tindices\022\004type:\006\n\0042\002\003\t\" \n\014Tnumsegments\022\004type\032\0020\003:\006\n\0042\002\003\t\n\301\001\n\022UnsortedSegmentSum\022\t\n\004data\"\001T\022\027\n\013segment_ids\"\010Tindices\022\034\n\014num_segments\"\014Tnumsegments\032\013\n\006output\"\001T\" \n\001T\022\004type:\025\n\0232\021\001\002\003\004\005\006\010\t\013\014\r\016\021\022\023\026\027\"\030\n\010Tindices\022\004type:\006\n\0042\002\003\t\" \n\014Tnumsegments\022\004type\032\0020\003:\006\n\0042\002\003\t\n1\n\004Zeta\022\006\n\001x\"\001T\022\006\n\001q\"\001T\032\006\n\001z\"\001T\"\021\n\001T\022\004type:\006\n\0042\002\001\002")
| 31.749766 | 26,169 | 0.632458 |
53090ea45ea4a45cdc2f0069622a80742e35321e | 5,215 | py | Python | indico/modules/oauth/models/applications.py | yamiacat/indico | 754c02cd7cd25bf1eab0ca5f497eb24b135dd51c | [
"MIT"
] | null | null | null | indico/modules/oauth/models/applications.py | yamiacat/indico | 754c02cd7cd25bf1eab0ca5f497eb24b135dd51c | [
"MIT"
] | null | null | null | indico/modules/oauth/models/applications.py | yamiacat/indico | 754c02cd7cd25bf1eab0ca5f497eb24b135dd51c | [
"MIT"
] | null | null | null | # This file is part of Indico.
# Copyright (C) 2002 - 2021 CERN
#
# Indico is free software; you can redistribute it and/or
# modify it under the terms of the MIT License; see the
# LICENSE file for more details.
from uuid import uuid4
from sqlalchemy.dialects.postgresql import ARRAY, UUID
from sqlalchemy.ext.declarative import declared_attr
from werkzeug.urls import url_parse
from indico.core.db import db
from indico.core.db.sqlalchemy import PyIntEnum
from indico.modules.oauth import logger
from indico.util.i18n import _
from indico.util.struct.enum import IndicoEnum
# OAuth scopes an application may request, mapped to their translated
# human-readable descriptions (shown in the application management UI).
SCOPES = {'read:user': _("User information (read only)"),
          'read:legacy_api': _('Legacy API (read only)'),
          'write:legacy_api': _('Legacy API (write only)'),
          'registrants': _('Event registrants')}
def reset_client_secret(self):
    """Replace the application's ``client_secret`` with a fresh UUID.

    The previous secret becomes invalid immediately; the change is
    logged for auditing.
    """
    self.client_secret = str(uuid4())
    logger.info("Client secret for %s has been reset.", self)
def validate_redirect_uri(self, redirect_uri):
    """Called by flask-oauthlib to validate the redirect_uri.

    Mirrors GitHub's matching rules: the scheme and the host/port must
    match a whitelisted URI exactly, while the whitelisted path only
    needs to be a prefix of the redirect_uri's path.
    """
    candidate = url_parse(redirect_uri)
    return any(
        candidate.scheme == allowed.scheme
        and candidate.netloc == allowed.netloc
        and candidate.path.startswith(allowed.path)
        for allowed in map(url_parse, self.redirect_uris)
    )
| 31.227545 | 117 | 0.623011 |
530c6ba5f7b617f99321342102c64a175ed1a651 | 6,257 | py | Python | PaddleNLP/unarchived/deep_attention_matching_net/utils/layers.py | FrancisLiang/models-1 | e14d5bc1ab36d0dd11977f27cff54605bf99c945 | [
"Apache-2.0"
] | 3 | 2019-09-05T14:03:42.000Z | 2019-09-09T10:34:35.000Z | PaddleNLP/unarchived/deep_attention_matching_net/utils/layers.py | FrancisLiang/models-1 | e14d5bc1ab36d0dd11977f27cff54605bf99c945 | [
"Apache-2.0"
] | 2 | 2019-06-26T03:21:49.000Z | 2019-09-19T09:43:42.000Z | PaddleNLP/unarchived/deep_attention_matching_net/utils/layers.py | FrancisLiang/models-1 | e14d5bc1ab36d0dd11977f27cff54605bf99c945 | [
"Apache-2.0"
] | 2 | 2018-06-14T13:59:36.000Z | 2018-11-14T12:34:47.000Z | import paddle.fluid as fluid
def loss(x, y, clip_value=10.0):
    """Sigmoid cross-entropy loss (with logits) for the input ``x``.

    Args:
        x: Variable with shape [batch, dim].
        y: Input label.
        clip_value: per-example losses are clipped to [-clip_value, clip_value]
            before averaging.

    Returns:
        loss: mean clipped cross entropy (scalar).
        logits: raw prediction logits, shape [batch, 1].
    """
    # Single-unit FC layer producing one logit per example; bias starts at 0.
    zero_bias = fluid.ParamAttr(initializer=fluid.initializer.Constant(0.))
    logits = fluid.layers.fc(input=x, size=1, bias_attr=zero_bias)
    per_example = fluid.layers.sigmoid_cross_entropy_with_logits(x=logits, label=y)
    clipped = fluid.layers.clip(per_example, min=-clip_value, max=clip_value)
    mean_loss = fluid.layers.reduce_mean(clipped)
    return mean_loss, logits
def ffn(input, d_inner_hid, d_hid, name=None):
    """Position-wise Feed-Forward Network.

    Two fully-connected layers applied to the last dimension: an inner
    relu projection to ``d_inner_hid`` followed by a linear projection
    back to ``d_hid``.  Parameter names are derived from ``name``.
    """
    def _bias(param_name):
        # Zero-initialized bias parameter with an explicit name.
        return fluid.ParamAttr(
            name=param_name,
            initializer=fluid.initializer.Constant(0.))

    hidden = fluid.layers.fc(input=input,
                             size=d_inner_hid,
                             num_flatten_dims=2,
                             param_attr=fluid.ParamAttr(name=name + '_fc.w_0'),
                             bias_attr=_bias(name + '_fc.b_0'),
                             act="relu")
    return fluid.layers.fc(input=hidden,
                           size=d_hid,
                           num_flatten_dims=2,
                           param_attr=fluid.ParamAttr(name=name + '_fc.w_1'),
                           bias_attr=_bias(name + '_fc.b_1'))
def dot_product_attention(query,
                          key,
                          value,
                          d_key,
                          q_mask=None,
                          k_mask=None,
                          dropout_rate=None,
                          mask_cache=None):
    """Scaled dot-product attention layer.

    Args:
        query: a tensor with shape [batch, Q_time, Q_dimension]
        key: a tensor with shape [batch, time, K_dimension]
        value: a tensor with shape [batch, time, V_dimension]
        d_key: scaling denominator; attention logits are multiplied
            by d_key ** -0.5
        q_mask: optional 0/1 mask over query positions (used only when
            k_mask is also given)
        k_mask: optional 0/1 mask over key positions
        dropout_rate: if truthy, dropout applied to the attention weights
        mask_cache: optional dict memoizing the (mask, additive-bias) pair,
            keyed by the Variable names of q_mask and k_mask

    Returns:
        a tensor with shape [batch, Q_time, V_dimension]
    """
    # Similarity scores scaled by 1/sqrt(d_key): Q·K^T * d_key**-0.5.
    logits = fluid.layers.matmul(
        x=query, y=key, transpose_y=True, alpha=d_key**(-0.5))
    if (q_mask is not None) and (k_mask is not None):
        # Reuse a previously built mask pair when cached; the cache is a
        # two-level dict keyed by the mask Variables' names.
        if mask_cache is not None and q_mask.name in mask_cache and k_mask.name in mask_cache[
                q_mask.name]:
            mask, another_mask = mask_cache[q_mask.name][k_mask.name]
        else:
            # Outer product of the 0/1 masks: 1 where both positions are valid.
            mask = fluid.layers.matmul(x=q_mask, y=k_mask, transpose_y=True)
            # (mask - 1) * (2**32 - 1): 0 for valid pairs, a large negative
            # value for masked pairs, so softmax drives them to ~0.
            another_mask = fluid.layers.scale(
                mask,
                scale=float(2**32 - 1),
                bias=float(-1),
                bias_after_scale=False)
            if mask_cache is not None:
                if q_mask.name not in mask_cache:
                    mask_cache[q_mask.name] = dict()
                mask_cache[q_mask.name][k_mask.name] = [mask, another_mask]
        # Zero out masked logits multiplicatively, then push them to -inf-ish.
        logits = mask * logits + another_mask
    attention = fluid.layers.softmax(logits)
    if dropout_rate:
        # Fixed seed keeps the dropout pattern reproducible across runs.
        attention = fluid.layers.dropout(
            input=attention, dropout_prob=dropout_rate, is_test=False, seed=2)
    atten_out = fluid.layers.matmul(x=attention, y=value)
    return atten_out
def block(name,
          query,
          key,
          value,
          d_key,
          q_mask=None,
          k_mask=None,
          is_layer_norm=True,
          dropout_rate=None,
          mask_cache=None):
    """One attention block: dot-product attention with a residual
    connection (optionally layer-normed), followed by a position-wise
    FFN with a second residual connection (optionally layer-normed).
    Parameter names are derived from ``name``.
    """
    def _residual_norm(tensor, suffix):
        # Apply layer norm over the last axis when enabled; no-op otherwise.
        if not is_layer_norm:
            return tensor
        return fluid.layers.layer_norm(
            input=tensor,
            begin_norm_axis=len(tensor.shape) - 1,
            param_attr=fluid.ParamAttr(
                initializer=fluid.initializer.Constant(1.),
                name=name + '_layer_norm.w_' + suffix),
            bias_attr=fluid.ParamAttr(
                initializer=fluid.initializer.Constant(0.),
                name=name + '_layer_norm.b_' + suffix))

    att_out = dot_product_attention(
        query,
        key,
        value,
        d_key,
        q_mask,
        k_mask,
        dropout_rate,
        mask_cache=mask_cache)
    attended = _residual_norm(query + att_out, '0')
    return _residual_norm(attended + ffn(attended, d_key, d_key, name), '1')
| 31.129353 | 94 | 0.546588 |
530c6de530c859b58a3a007a91c54314cf276d8d | 6,896 | py | Python | plugin.video.saltsrd.lite/js2py/translators/jsregexps.py | TheWardoctor/wardoctors-repo | 893f646d9e27251ffc00ca5f918e4eb859a5c8f0 | [
"Apache-2.0"
] | 1 | 2019-03-05T09:38:10.000Z | 2019-03-05T09:38:10.000Z | plugin.video.saltsrd.lite/js2py/translators/jsregexps.py | TheWardoctor/wardoctors-repo | 893f646d9e27251ffc00ca5f918e4eb859a5c8f0 | [
"Apache-2.0"
] | null | null | null | plugin.video.saltsrd.lite/js2py/translators/jsregexps.py | TheWardoctor/wardoctors-repo | 893f646d9e27251ffc00ca5f918e4eb859a5c8f0 | [
"Apache-2.0"
] | 1 | 2021-11-05T20:48:09.000Z | 2021-11-05T20:48:09.000Z | from salts_lib.pyjsparser.pyjsparserdata import *
# Single characters that are special on their own inside a pattern and
# therefore need escaping when matched literally.
REGEXP_SPECIAL_SINGLE = {'\\', '^', '$', '*', '+', '?', '.'}
# Characters that may not appear as a bare PatternCharacter.
NOT_PATTERN_CHARS = {'^', '$', '\\', '.', '*', '+', '?', '(', ')', '[', ']', '|'} # what about '{', '}', ???
# CharacterClassEscape letters: \d \D \s \S \w \W.
CHAR_CLASS_ESCAPE = {'d', 'D', 's', 'S', 'w', 'W'}
# ControlEscape letters: \f \n \r \t \v.
CONTROL_ESCAPE_CHARS = {'f', 'n', 'r', 't', 'v'}
# Letters valid after \c (ControlLetter): a-z and A-Z.
CONTROL_LETTERS = {'a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l', 'm', 'n', 'o', 'p', 'q', 'r', 's', 't',
                   'u', 'v', 'w', 'x', 'y', 'z', 'A', 'B', 'C', 'D', 'E', 'F', 'G', 'H', 'I', 'J', 'K', 'L', 'M', 'N',
                   'O', 'P', 'Q', 'R', 'S', 'T', 'U', 'V', 'W', 'X', 'Y', 'Z'}
a = JsRegExpParser('a(?=x)', '')
print(a.parsePattern()) | 31.488584 | 118 | 0.484049 |
530d6b117d510322f8ed51ead9a1be94d7c692f1 | 2,388 | py | Python | connectomics/model/block/squeeze_excitation.py | yixinliao/pytorch_connectomics | 0f6de546e6da1e0f3258b2c84f7e16b3a993c70c | [
"MIT"
] | 1 | 2020-05-17T08:01:56.000Z | 2020-05-17T08:01:56.000Z | connectomics/model/block/squeeze_excitation.py | yixinliao/pytorch_connectomics | 0f6de546e6da1e0f3258b2c84f7e16b3a993c70c | [
"MIT"
] | null | null | null | connectomics/model/block/squeeze_excitation.py | yixinliao/pytorch_connectomics | 0f6de546e6da1e0f3258b2c84f7e16b3a993c70c | [
"MIT"
] | 3 | 2020-03-31T21:40:12.000Z | 2021-06-09T02:26:43.000Z | import torch.nn as nn
from .basic import *
| 46.823529 | 156 | 0.68258 |
530dbebdee877862aa38a08b696073a86196141c | 4,282 | py | Python | duckdown/handlers/site_handler.py | blueshed/duckdown | e6d0e62d378bd2d9ed0cd5ce4bc7ab3476b86020 | [
"MIT"
] | null | null | null | duckdown/handlers/site_handler.py | blueshed/duckdown | e6d0e62d378bd2d9ed0cd5ce4bc7ab3476b86020 | [
"MIT"
] | null | null | null | duckdown/handlers/site_handler.py | blueshed/duckdown | e6d0e62d378bd2d9ed0cd5ce4bc7ab3476b86020 | [
"MIT"
] | null | null | null | # pylint: disable=W0201, E1101
""" handle request for markdown pages """
import logging
import os
import importlib
from tornado.web import RequestHandler, HTTPError
from tornado.escape import url_escape
from ..utils.converter_mixin import ConverterMixin
from .access_control import UserMixin
from ..utils.nav import nav
LOGGER = logging.getLogger(__name__)
EMPTY_TOC = '<div class="toc">\n<ul></ul>\n</div>\n'
| 33.193798 | 70 | 0.578001 |
530e6c72942083800f06be0b1704afe86e8b9dd0 | 627 | py | Python | Problemset/binary-search-tree-to-greater-sum-tree/binary-search-tree-to-greater-sum-tree.py | KivenCkl/LeetCode | fcc97c66f8154a5d20c2aca86120cb37b9d2d83d | [
"MIT"
] | 7 | 2019-05-08T03:41:05.000Z | 2020-12-22T12:39:43.000Z | Problemset/binary-search-tree-to-greater-sum-tree/binary-search-tree-to-greater-sum-tree.py | Yuziquan/LeetCode | 303fc1c8af847f783c4020bd731b28b72ed92a35 | [
"MIT"
] | 1 | 2021-07-19T03:48:35.000Z | 2021-07-19T03:48:35.000Z | Problemset/binary-search-tree-to-greater-sum-tree/binary-search-tree-to-greater-sum-tree.py | Yuziquan/LeetCode | 303fc1c8af847f783c4020bd731b28b72ed92a35 | [
"MIT"
] | 7 | 2019-05-10T20:43:20.000Z | 2021-02-22T03:47:35.000Z |
# @Title: (Binary Search Tree to Greater Sum Tree)
# @Author: KivenC
# @Date: 2019-05-15 19:52:08
# @Runtime: 48 ms
# @Memory: 13 MB
# Definition for a binary tree node.
# class TreeNode:
# def __init__(self, x):
# self.val = x
# self.left = None
# self.right = None
| 24.115385 | 62 | 0.593301 |
530ebd58aea0c6b33d05245813f2f54d1c4a046b | 2,058 | py | Python | vine/clone.py | robinson96/GRAPE | f6404ae6ee2933647e515a9480077ab01fb2c430 | [
"BSD-3-Clause"
] | 4 | 2017-04-30T17:08:42.000Z | 2019-11-15T04:44:09.000Z | vine/clone.py | robinson96/GRAPE | f6404ae6ee2933647e515a9480077ab01fb2c430 | [
"BSD-3-Clause"
] | 1 | 2016-02-12T07:51:30.000Z | 2016-02-12T07:51:30.000Z | vine/clone.py | robinson96/GRAPE | f6404ae6ee2933647e515a9480077ab01fb2c430 | [
"BSD-3-Clause"
] | null | null | null | import os
import option
import utility
import grapeMenu
import grapeGit as git
import grapeConfig
| 30.716418 | 120 | 0.59378 |
530f17168f0cbb129e06d1280fd5322946f49710 | 45,589 | py | Python | neo/test/iotest/test_nixio.py | pearsonlab/python-neo | 8915dfe9e55fd3a36be83d820bdd83ab085e9402 | [
"BSD-3-Clause"
] | null | null | null | neo/test/iotest/test_nixio.py | pearsonlab/python-neo | 8915dfe9e55fd3a36be83d820bdd83ab085e9402 | [
"BSD-3-Clause"
] | null | null | null | neo/test/iotest/test_nixio.py | pearsonlab/python-neo | 8915dfe9e55fd3a36be83d820bdd83ab085e9402 | [
"BSD-3-Clause"
] | 1 | 2018-04-13T04:48:48.000Z | 2018-04-13T04:48:48.000Z | # -*- coding: utf-8 -*-
# Copyright (c) 2016, German Neuroinformatics Node (G-Node)
# Achilleas Koutsou <achilleas.k@gmail.com>
#
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted under the terms of the BSD License. See
# LICENSE file in the root of the Project.
"""
Tests for neo.io.nixio
"""
import os
from datetime import datetime
try:
import unittest2 as unittest
except ImportError:
import unittest
try:
from unittest import mock
except ImportError:
import mock
import string
import itertools
from six import string_types
import numpy as np
import quantities as pq
from neo.core import (Block, Segment, ChannelIndex, AnalogSignal,
IrregularlySampledSignal, Unit, SpikeTrain, Event, Epoch)
from neo.test.iotest.common_io_test import BaseTestIO
try:
import nixio
HAVE_NIX = True
except ImportError:
HAVE_NIX = False
from neo.io.nixio import NixIO
from neo.io.nixio import nixtypes
| 39.132189 | 81 | 0.564873 |
530f7b706908016c5f54e7ff0367363c422ad2e4 | 5,686 | py | Python | lib/taudataNlpTm.py | taudata-indonesia/elearning | 6f9db8b829357cde1ae678255cc251629dfc25d2 | [
"Apache-2.0"
] | 3 | 2020-08-29T04:54:25.000Z | 2021-12-12T08:25:48.000Z | lib/taudataNlpTm.py | taudataid/eLearning | 6f9db8b829357cde1ae678255cc251629dfc25d2 | [
"Apache-2.0"
] | null | null | null | lib/taudataNlpTm.py | taudataid/eLearning | 6f9db8b829357cde1ae678255cc251629dfc25d2 | [
"Apache-2.0"
] | 6 | 2020-07-28T23:46:57.000Z | 2021-09-27T02:22:01.000Z | # -*- coding: utf-8 -*-
"""
Created on Wed Mar 28 11:25:43 2019
@author: Taufik Sutanto
taufik@tau-data.id
https://tau-data.id
~~Perjanjian Penggunaan Materi & Codes (PPMC) - License:~~
* Modul Python dan gambar-gambar (images) yang digunakan adalah milik dari berbagai sumber sebagaimana yang telah dicantumkan dalam masing-masing license modul, caption atau watermark.
* Materi & Codes diluar point (1) (i.e. code ini & semua slide ".ipynb)) yang digunakan di tau-data dapat digunakan untuk keperluan akademis dan kegiatan non-komersil lainnya.
* Untuk keperluan diluar point (2), maka dibutuhkan izin tertulis dari Taufik Edy Sutanto (selanjutnya disebut sebagai pengarang).
* Materi & Codes tidak boleh dipublikasikan tanpa izin dari pengarang.
* Materi & codes diberikan "as-is", tanpa warranty. Pengarang tidak bertanggung jawab atas penggunaannya diluar kegiatan resmi yang dilaksanakan pengarang.
* Dengan menggunakan materi dan codes ini berarti pengguna telah menyetujui PPMC ini.
"""
import re, numpy as np
import itertools, nltk
from collections import Counter
from nltk.corpus import wordnet as wn
from nltk.stem import PorterStemmer;ps = PorterStemmer()
from itertools import chain
import warnings; warnings.simplefilter('ignore')
# Path to the word-frequency corpus used to build the WORDS counter below.
corpus = 'data/corpus_sederhana.txt'
# NOTE(review): `words` (presumably a tokenizer) is not defined in this
# excerpt, and the handle returned by open() is never closed — confirm
# against the full upstream module before relying on this line.
WORDS = Counter(words(open(corpus).read()))
def P(word):
    """Relative frequency of `word` among all corpus word counts."""
    total = sum(WORDS.values())
    return WORDS[word] / total
def correction(word):
    """Return the most probable spelling correction for `word`."""
    options = candidates(word)
    return max(options, key=P)
def candidates(word):
    """Possible spelling corrections for `word`.

    Prefers, in order: the word itself if known, known words one edit away,
    known words two edits away, and finally the word unchanged. Later (and
    much larger) edit sets are only computed when the earlier ones are empty.
    """
    result = known([word])
    if not result:
        result = known(edits1(word))
    if not result:
        result = known(edits2(word))
    return result or [word]
def known(words):
    """The subset of `words` that appear in the corpus dictionary WORDS."""
    return {candidate for candidate in words if candidate in WORDS}
def edits1(word):
    """All strings exactly one edit away from `word`.

    An edit is a single-character deletion, adjacent transposition,
    replacement, or insertion (lowercase a-z alphabet).
    """
    alphabet = 'abcdefghijklmnopqrstuvwxyz'
    results = set()
    for cut in range(len(word) + 1):
        left, right = word[:cut], word[cut:]
        if right:
            results.add(left + right[1:])                        # deletion
            for ch in alphabet:
                results.add(left + ch + right[1:])               # replacement
        if len(right) > 1:
            results.add(left + right[1] + right[0] + right[2:])  # transposition
        for ch in alphabet:
            results.add(left + ch + right)                       # insertion
    return results
def edits2(word):
    """Lazily yield all strings that are two edits away from `word`."""
    for once in edits1(word):
        for twice in edits1(once):
            yield twice
def lDistance(firstString, secondString):
    """Return the Levenshtein (edit) distance between two strings.

    Uses the classic two-row dynamic-programming formulation, keeping only
    the previous row of the distance matrix.
    """
    # Iterate over the longer string so the kept row is the shorter one.
    if len(firstString) > len(secondString):
        firstString, secondString = secondString, firstString
    previous = list(range(len(firstString) + 1))
    for col, ch2 in enumerate(secondString):
        current = [col + 1]
        for row, ch1 in enumerate(firstString):
            if ch1 == ch2:
                current.append(previous[row])
            else:
                cost = min(previous[row], previous[row + 1], current[-1])
                current.append(1 + cost)
        previous = current
    return previous[-1]
530fcd223daa28d413c36696ab6a481ec8c169e5 | 5,205 | py | Python | reamber/base/MapSet.py | Eve-ning/reamber_base_py | 6d19c84f2c110b60e633b82b73e0516396466f56 | [
"MIT"
] | 10 | 2020-06-28T11:16:36.000Z | 2021-08-09T21:41:43.000Z | reamber/base/MapSet.py | Eve-ning/reamberPy | 6d19c84f2c110b60e633b82b73e0516396466f56 | [
"MIT"
] | 35 | 2020-06-18T13:05:50.000Z | 2022-02-18T10:13:35.000Z | reamber/base/MapSet.py | Eve-ning/reamber_base_py | 6d19c84f2c110b60e633b82b73e0516396466f56 | [
"MIT"
] | 2 | 2021-05-26T17:05:06.000Z | 2021-06-12T18:42:13.000Z | from __future__ import annotations
from copy import deepcopy
from dataclasses import dataclass, field
from typing import List, Iterator, TypeVar, Union, Any, Generic
import pandas as pd
from pandas.core.indexing import _LocIndexer
from reamber.base.Map import Map
from reamber.base.Property import stack_props
NoteListT = TypeVar('NoteListT')
HitListT = TypeVar('HitListT')
HoldListT = TypeVar('HoldListT')
BpmListT = TypeVar('BpmListT')
MapT = TypeVar('MapT')
| 34.019608 | 102 | 0.615562 |
53104f17c4720a21e638155abf65cadc6cce2788 | 24,765 | py | Python | src/poretitioner/utils/filtering.py | uwmisl/poretitioner | 0ff9f67a3b25fdcb460b11c970b2ed366da07da7 | [
"MIT"
] | 2 | 2021-03-11T21:27:16.000Z | 2021-03-18T00:58:22.000Z | src/poretitioner/utils/filtering.py | uwmisl/poretitioner | 0ff9f67a3b25fdcb460b11c970b2ed366da07da7 | [
"MIT"
] | 12 | 2021-02-19T19:36:05.000Z | 2021-03-24T15:38:02.000Z | src/poretitioner/utils/filtering.py | uwmisl/poretitioner | 0ff9f67a3b25fdcb460b11c970b2ed366da07da7 | [
"MIT"
] | null | null | null | """
=========
filtering.py
=========
This module provides more granular filtering for captures.
You can customize your own filters too.
"""
from __future__ import annotations
import re
from abc import ABC, ABCMeta, abstractmethod
from dataclasses import dataclass
from json import JSONEncoder
from pathlib import PosixPath
from typing import (
Any,
Dict,
Iterable,
Mapping,
NewType,
Optional,
Protocol,
Type,
TypedDict,
Union,
)
import h5py
import numpy as np
from h5py import File as Fast5File
from ..hdf5 import (
HasFast5,
HDF5_Group,
HDF5_GroupSerialableDataclass,
HDF5_GroupSerializable,
HDF5_GroupSerializing,
IsAttr,
)
from ..logger import Logger, getLogger
from ..signals import Capture
from .core import NumpyArrayLike, PathLikeOrString, ReadId, stripped_by_keys
from .plugin import Plugin
CaptureOrTimeSeries = Union[Capture, NumpyArrayLike]
# Unique identifier for a collection of filters (e.g. "ProfJeffsAwesomeFilters")
FilterSetId = NewType("FilterSetId", str)
# Unique identifier for an individual filter (e.g. "min_frac")
FilterName = NewType("FilterName", str)
__all__ = [
"does_pass_filters",
"get_filters",
"FilterName",
"FilterSetId",
"FilterConfig",
"Filter",
"Filters",
"DEFAULT_FILTER_PLUGINS",
"FilterSet",
"FilterConfigs",
"FilterPlugin",
"PATH",
]
# Mapping of a FilterName to filter configurations.
# NOTE(review): FilterConfig is referenced here but defined elsewhere in the
# full module (stripped from this excerpt) — confirm upstream.
FilterConfigs = NewType("FilterConfigs", Dict[FilterName, FilterConfig])
# TODO: Filter Plugin should check that name is unique. https://github.com/uwmisl/poretitioner/issues/91
# Open-ended defaults for range-based filters: accept every value until a
# concrete minimum/maximum is configured.
RANGE_FILTER_DEFAULT_MINIMUM: float = -np.inf
RANGE_FILTER_DEFAULT_MAXIMUM: float = np.inf
"""
How to Create Your Own Custom Filter:
Need more advanced filtering than what we provide out of the box? No problem.
Create your own custom filter by inheriting from the FilterPlugin class.
For this example, let's do something complex. Say you only want to examine captures
that have more than 5 samples with a hyperbolic tangent greater than some threshold.
That means our custom filter's `apply` function should return True if and only if
the signal has more than 5 samples greater than the threshold, after taking the hyperbolic tangent in `extract`.
"""
def apply_feature_filters(capture: CaptureOrTimeSeries, filters: List[FilterPlugin]) -> bool:
    """
    Check whether an array of current values (i.e. a single nanopore capture)
    passes a set of filters. Filters can be based on summary statistics
    (e.g., mean) and/or a range of allowed values.

    Notes on filter behavior: If the filters list is empty (or None), there
    are no filters and the capture passes.

    Parameters
    ----------
    capture : CaptureOrTimeSeries | NumpyArrayLike
        Capture containing time series of nanopore current values for a single capture, or the signal itself.
    filters : List[FilterPlugin]
        List of FilterPlugin instances. Write your own filter by subclassing FilterPlugin.

    Returns
    -------
    boolean
        True if capture passes all filters; False otherwise.
    """
    if filters is None:
        filters = []
    # TODO: Parallelize? https://github.com/uwmisl/poretitioner/issues/67
    # BUG FIX: removed a stray debug print() of the per-filter results.
    # all() also short-circuits on the first failing filter instead of
    # eagerly evaluating every filter into a list first.
    return all(filter_out(capture) for filter_out in filters)
def check_capture_ejection_by_read(f5, read_id):
    """Return whether the capture for `read_id` was in the pore until the
    voltage was reversed.

    Parameters
    ----------
    f5 : h5py.File object (open for reading or more)
        Capture fast5 file.
    read_id : str
        Identifier of the read whose "ejected" attribute is looked up.

    Returns
    -------
    boolean
        True if the end of the capture coincides with the end of a voltage window.

    Raises
    ------
    ValueError
        If the /read_{read_id} path is absent from the file.
    """
    signal_group = f5.get(f"/read_{read_id}/Signal")
    try:
        # f5.get() yields None for a missing path; the attribute access then
        # raises AttributeError, which is translated into a ValueError.
        ejected = signal_group.attrs["ejected"]
    except AttributeError:
        raise ValueError(f"path /read_{read_id} does not exist in the fast5 file.")
    return ejected
def check_capture_ejection(end_capture, voltage_ends, tol_obs=20):
    """Checks whether the current capture was in the pore until the voltage
    was reversed.

    Essentially checks whether a value (end_capture) is close enough (within
    a margin of tol_obs) to any value in voltage_ends.

    Parameters
    ----------
    end_capture : numeric
        The end time of the capture.
    voltage_ends : list of numeric
        List of times when the standard voltage ends.
    tol_obs : int, optional
        Tolerance for defining when the end of the capture = voltage end, by default 20

    Returns
    -------
    boolean
        True if the end of the capture coincides with the end of a voltage window.
    """
    # Idiom: any() states the "exists a close enough voltage end" search
    # directly and short-circuits exactly like the original explicit loop.
    return any(
        np.abs(end_capture - voltage_end) < tol_obs for voltage_end in voltage_ends
    )
# Built-in filter plugins shipped with poretitioner. The classes are defined
# earlier in the full module (not shown in this excerpt).
__DEFAULT_FILTER_PLUGINS = [
    MeanFilter,
    StandardDeviationFilter,
    MedianFilter,
    MinimumFilter,
    MaximumFilter,
    LengthFilter,
]
# Public registry mapping each plugin's declared name() to its class; used by
# filter_from_config() to instantiate default filters by name.
DEFAULT_FILTER_PLUGINS = {
    filter_plugin_class.name(): filter_plugin_class
    for filter_plugin_class in __DEFAULT_FILTER_PLUGINS
}
# NOTE(review): mid-module import; json appears unused in this excerpt.
import json
# class Filters(HDF5_GroupSerialableDataclass):
#     filters:
# Filters maps each filter's unique name to its applyable Filter object.
Filters = Dict[FilterName, Filter]
def get_filters(filter_configs: Optional[FilterConfigs] = None) -> Filters:
    """Creates Filters from a list of filter configurations.

    Parameters
    ----------
    filter_configs : Optional[FilterConfigs]
        A mapping of filter names to their configurations, None by default (i.e. no filtering).

    Returns
    -------
    Filters
        A set of callable/applyable filters.
    """
    if filter_configs is None:
        filter_configs = FilterConfigs({})
    return {
        name: filter_from_config(name, config)
        for name, config in filter_configs.items()
    }
def does_pass_filters(capture: CaptureOrTimeSeries, filters: Iterable[Filter]) -> bool:
    """
    Check whether an array of values (e.g. a single nanopore capture)
    passes a set of filters. Filters can be based on summary statistics
    (e.g., mean) and/or a range of allowed values.

    Parameters
    ----------
    capture : CaptureOrTimeSeries | NumpyArrayLike
        Capture containing time series of nanopore current values for a single capture, or the signal itself.
    filters : Iterable[Filter]
        The set of filters to apply. Write your own filter by subclassing FilterPlugin.

    Returns
    -------
    boolean
        True if capture passes all filters; False otherwise.
    """
    # Idiom: all() removes the dead `all_passed` accumulator and preserves
    # the original early-return on the first failing filter.
    return all(some_filter(capture) for some_filter in filters)
# NOTE(review): as excerpted, this class body contained only comments, which
# is not valid Python on its own — the original presumably had members here
# that were stripped from this extract. Confirm against the upstream file.
class HDF5_FilterSet(FilterSet, HDF5_GroupSerialableDataclass):
    """FilterSet variant that can be serialized to/from an HDF5 group."""
    ############################
    #
    # HDF5_GroupSerializable
    #
    ############################

    # @classmethod
    # def from_group(
    #     cls, group: HDF5_Group, log: Optional[Logger] = None
    # ) -> HDF5_GroupSerializable:
    #     raise NotImplementedError(
    #         f"from_group not implemented for {cls.__name__}. Make sure you write a method that returns a serialzied version of this object."
    #     )
def filter_from_config(name: str, config: FilterConfig, log: Logger = getLogger()) -> Filter:
    """Creates a Filter from a config spefication. If no "filename" is present in the FilterConfig, it's
    assumed to be one of the default filtesr

    Parameters
    ----------
    name : str
        The unique name of a filter.
    config : FilterConfig
        Filter configuration to build the plugin.
    log : Logger, optional
        Logger to use for information/warnings/debug, by default getLogger()

    Returns
    -------
    Filter
        A filter that can be applied to some data.

    Raises
    ------
    AttributeError
        A filter plugin could not be built from the configuration description. If this error is raised, be sure to check
        1) A plugin class with the name in the configuration is defined at the filepath described in the configuration
        2) The plugin class inherits from the `FilterPlugin` abstract base class.
    """
    filepath = config.get("filepath", None)
    if name in DEFAULT_FILTER_PLUGINS:
        plugin = DEFAULT_FILTER_PLUGINS[name]()
    else:
        # TODO: For non-default FilterPlugins, load the class from the filepath. https://github.com/uwmisl/poretitioner/issues/91
        plugin = plugin_from_file(name, filepath)

    # Copy every configured attribute onto the plugin instance.
    # object.__setattr__ is used so frozen/immutable plugin dataclasses can
    # still be populated.
    try:
        for key, value in config.items():
            object.__setattr__(plugin, key, value)
    except AttributeError:
        # BUG FIX: this message was a plain string literal, so '{name}' and
        # '{filepath}' were never interpolated; it is now an f-string. A bare
        # `raise` also preserves the original traceback (was `raise e`).
        log.warning(
            f"""
            Uh oh, couldn't find plugin '{name}'. Are you sure:
            1) A plugin class with the name '{name}' is defined in the file {filepath}?
            2) That plugin class inherits from `FilterPlugin`?
            """
        )
        raise

    my_filter = Filter(config, plugin)
    return my_filter
def plugin_from_file(name: str, filepath: PathLikeOrString):
    """Load a custom FilterPlugin class named `name` from the file at `filepath`.

    Parameters
    ----------
    name : str
        Name of the plugin class to load.
    filepath : PathLikeOrString
        Path of the file that defines the plugin class.

    Raises
    ------
    NotImplementedError
        Always — loading non-default plugins is not implemented yet.
    """
    # TODO: For non-default FilterPlugins, load/unpickle the class from the filepath. https://github.com/uwmisl/poretitioner/issues/91
    message = (
        "Plugin from file has not been implemented! This method should take in "
        "a filepath and filter name, and return a runnable FilterPlugin!"
    )
    raise NotImplementedError(message)
| 31.190176 | 171 | 0.666828 |
53106f8214d7d4213892a247bedd084271aa2866 | 344 | py | Python | Chapter 11/wrong_type.py | nescience8/starting-out-with-python-global-4th-edition | c16f93b7cbb4c7ae7b57653a7190bf192fe6b472 | [
"MIT"
] | 35 | 2019-05-03T00:30:31.000Z | 2022-01-20T06:57:25.000Z | Chapter 11/wrong_type.py | nescience8/starting-out-with-python-global-4th-edition | c16f93b7cbb4c7ae7b57653a7190bf192fe6b472 | [
"MIT"
] | 1 | 2020-09-04T02:04:33.000Z | 2020-09-04T02:04:33.000Z | Chapter 11/wrong_type.py | nescience8/starting-out-with-python-global-4th-edition | c16f93b7cbb4c7ae7b57653a7190bf192fe6b472 | [
"MIT"
] | 22 | 2020-05-13T21:20:02.000Z | 2021-12-21T08:35:59.000Z |
# The show_mammal_info function accepts an object
# as an argument, and calls its show_species
# and make_sound methods.
# Call the main function.
main()
| 22.933333 | 49 | 0.718023 |
531171031428111b90a898b1687d659af83a08e1 | 1,307 | py | Python | fase2-exercicios/cap2/lista-de-exercicios/RM94336_EX04.py | Leodf/FIAP | e2acf897017c4f49357112f67702070b7dcbfe9d | [
"MIT"
] | null | null | null | fase2-exercicios/cap2/lista-de-exercicios/RM94336_EX04.py | Leodf/FIAP | e2acf897017c4f49357112f67702070b7dcbfe9d | [
"MIT"
] | null | null | null | fase2-exercicios/cap2/lista-de-exercicios/RM94336_EX04.py | Leodf/FIAP | e2acf897017c4f49357112f67702070b7dcbfe9d | [
"MIT"
] | null | null | null | """
4 Um grande cliente seu sofreu um ataque hacker: o servidor foi sequestrado por um software malicioso, que criptografou todos os discos e pede a digitao de uma senha para a liberao da mquina. E claro que os criminosos exigem um pagamento para informar a senha.
Ao analisar o cdigo do programa deles, porm, voc descobre que a senha composta da palavra LIBERDADE seguida do fatorial dos minutos que a mquina estiver marcando no momento da digitao da senha (se a mquina estiver marcando 5 minutos, a senha ser LIBERDADE120). Crie um programa que receba do usurio os minutos atuais e exiba na tela a senha necessria para desbloqueio. ATENO: seu programa no pode utilizar funes prontas para o clculo do fatorial. Ele deve obrigatoriamente utilizar loop.
"""
# Banner explaining the unlock-password scheme to the operator.
print('\nPrograma para gerar de desbloqueio do servidor do ataque Hacker!!!\n')
print('Descobrimos que a senha  a palavra LIBERDADE + o calculo de fatorial dos minutos no seu computador.\n')
# Read the current minutes and compute their factorial with a plain loop,
# as the exercise forbids ready-made factorial functions. Counting upward
# from 2 yields the same product as the original downward loop (and 1 for
# minuto <= 1).
minuto = int(input('Digite os minutos que aparecem neste computador: '))
fatorial = 1
for i in range(2, minuto + 1):
    fatorial *= i
print(f'\nA senha que voc precisa digitar  LIBERDADE{fatorial} para desbloquear o servidor.\nAteno!!!: voc tem 60 segundos validos at que a senha mude novamente!!!\n')
53118be4670acbc222a105780122139b5d7bef92 | 1,555 | py | Python | dosagelib/helpers.py | yasen-m/dosage | 81fe088621ad335cac2a53fcbc7b9b37f49ddce2 | [
"MIT"
] | null | null | null | dosagelib/helpers.py | yasen-m/dosage | 81fe088621ad335cac2a53fcbc7b9b37f49ddce2 | [
"MIT"
] | null | null | null | dosagelib/helpers.py | yasen-m/dosage | 81fe088621ad335cac2a53fcbc7b9b37f49ddce2 | [
"MIT"
] | null | null | null | # -*- coding: iso-8859-1 -*-
# Copyright (C) 2004-2005 Tristan Seligmann and Jonathan Jacobs
# Copyright (C) 2012-2014 Bastian Kleineidam
from .util import fetchUrl, getPageContent, getQueryParams
# NOTE(review): these are factory functions from dosage's helpers module.
# Each one originally defined a local closure (`_namer` / `_starter`) just
# before its `return`; those inner definitions appear to have been stripped
# from this extract, so as shown the returns reference undefined names —
# confirm against the upstream file before relying on this code.
def queryNamer(paramName, usePageUrl=False):
    """Get name from URL query part."""
    return _namer
def regexNamer(regex, usePageUrl=False):
    """Get name from regular expression."""
    return _namer
def bounceStarter(url, nextSearch):
    """Get start URL by "bouncing" back and forth one time."""
    return _starter
def indirectStarter(url, latestSearch):
    """Get start URL by indirection."""
    return _starter
| 32.395833 | 63 | 0.659164 |
5311bdf4dfe6e2813dcf2c28b40dad10195c1693 | 66,098 | py | Python | research/object_detection/data_decoders/tf_example_decoder_test.py | akshit-protonn/models | 38c8c6fe4144c93d6aadd19981c2b90570c29eba | [
"Apache-2.0"
] | 18 | 2022-01-14T09:58:27.000Z | 2022-01-14T09:58:37.000Z | research/object_detection/data_decoders/tf_example_decoder_test.py | akshit-protonn/models | 38c8c6fe4144c93d6aadd19981c2b90570c29eba | [
"Apache-2.0"
] | 62 | 2021-06-09T00:47:27.000Z | 2021-09-24T09:06:58.000Z | research/object_detection/data_decoders/tf_example_decoder_test.py | akshit-protonn/models | 38c8c6fe4144c93d6aadd19981c2b90570c29eba | [
"Apache-2.0"
] | 2 | 2021-02-17T06:59:57.000Z | 2021-03-18T10:12:30.000Z | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for object_detection.data_decoders.tf_example_decoder."""
import os
import numpy as np
import six
import tensorflow.compat.v1 as tf
from object_detection.core import standard_fields as fields
from object_detection.data_decoders import tf_example_decoder
from object_detection.protos import input_reader_pb2
from object_detection.utils import dataset_util
from object_detection.utils import test_case
# Standard TensorFlow test entry point when this module is run directly.
if __name__ == '__main__':
  tf.test.main()
| 40.03513 | 80 | 0.603089 |
53120b489f06aa51d07ee8b517ab0cf190f8e1f9 | 807 | py | Python | counting_capitals.py | m10singh94/Python-programs | a83083044b4a85afcf70c4b7024287a808b01fee | [
"Apache-2.0"
] | null | null | null | counting_capitals.py | m10singh94/Python-programs | a83083044b4a85afcf70c4b7024287a808b01fee | [
"Apache-2.0"
] | null | null | null | counting_capitals.py | m10singh94/Python-programs | a83083044b4a85afcf70c4b7024287a808b01fee | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
"""
Created on Tue Apr 21 08:09:31 2020
@author: Shivadhar SIngh
"""
def remove_substring_everywhere(string, substring):
    '''
    Remove all occurrences of substring from string, and return
    the resulting string. Both arguments must be strings.
    '''
    # BUG FIX: the original returned inside the first loop iteration and
    # appended the empty slice string[p + lsub : p], so it effectively
    # returned only the text before the first occurrence. str.replace
    # removes every non-overlapping occurrence, left to right, in one pass
    # (and returns the string unchanged when substring is absent or empty).
    return string.replace(substring, "")
5312250f481274756afa3e391b2f3f60164d3ff0 | 12,695 | py | Python | admit/at/GenerateSpectrum_AT.py | astroumd/admit | bbf3d79bb6e1a6f7523553ed8ede0d358d106f2c | [
"MIT"
] | 4 | 2017-03-01T17:26:28.000Z | 2022-03-03T19:23:06.000Z | admit/at/GenerateSpectrum_AT.py | teuben/admit | 1cae54d1937c9af3f719102838df716e7e6d655c | [
"MIT"
] | 48 | 2016-10-04T01:25:33.000Z | 2021-09-08T14:51:10.000Z | admit/at/GenerateSpectrum_AT.py | teuben/admit | 1cae54d1937c9af3f719102838df716e7e6d655c | [
"MIT"
] | 2 | 2016-11-10T14:10:22.000Z | 2017-03-30T18:58:05.000Z | """ .. _GenerateSpectrum-at-api:
**GenerateSpectrum_AT** --- Generates synthetic test spectra.
-------------------------------------------------------------
This module defines the GenerateSpectrum_AT class.
"""
from admit.AT import AT
import admit.util.bdp_types as bt
from admit.bdp.CubeSpectrum_BDP import CubeSpectrum_BDP
import admit.util.filter.Filter1D as Filter1D
import admit.util.Table as Table
import admit.util.utils as utils
from admit.util import APlot
import admit.util.Image as Image
from admit.util import SpectralLineSearch
from admit.Summary import SummaryEntry
import os
import numpy as np
from copy import deepcopy
# @todo this could go as a very generic routine in utils
#
def getspec(file, xcol=0, ycol=1):
    """Read a spectrum/table from two columns of an ASCII file.

    Lines starting with '#' and lines with fewer columns than needed are
    skipped.

    Parameters
    ----------
    file : str
        Path of the ASCII table to read.
    xcol : int, optional
        Column index of the x (frequency) values, by default 0.
    ycol : int, optional
        Column index of the y (spectrum) values, by default 1.

    Returns
    -------
    (numpy.ndarray, numpy.ndarray)
        The (freq, spec) arrays.
    """
    x = []
    y = []
    mincol = max(xcol, ycol) + 1
    # BUG FIX: use a context manager so the file handle is closed
    # deterministically (the original leaked the handle from open()), and
    # iterate the file lazily instead of materializing readlines().
    with open(file) as fh:
        for line in fh:
            # startswith is also safe on empty lines (line[0] would raise).
            if line.startswith('#'):
                continue
            w = line.split()
            if len(w) < mincol:
                continue
            x.append(float(w[xcol]))
            y.append(float(w[ycol]))
    return (np.array(x), np.array(y))
| 37.338235 | 132 | 0.530996 |
53122c71e26ce2a712524601aa3f1353a6ea1b32 | 11,576 | py | Python | lib/flows/general/discovery_test.py | nahidupa/grr | 100a9d85ef2abb234e12e3ac2623caffb4116be7 | [
"Apache-2.0"
] | 1 | 2015-02-22T16:05:06.000Z | 2015-02-22T16:05:06.000Z | lib/flows/general/discovery_test.py | nahidupa/grr | 100a9d85ef2abb234e12e3ac2623caffb4116be7 | [
"Apache-2.0"
] | 3 | 2020-09-11T12:54:50.000Z | 2020-09-11T12:55:01.000Z | lib/flows/general/discovery_test.py | nahidupa/grr | 100a9d85ef2abb234e12e3ac2623caffb4116be7 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python
# -*- mode: python; encoding: utf-8 -*-
"""Tests for Interrogate."""
import socket
from grr.client import vfs
from grr.lib import action_mocks
from grr.lib import aff4
from grr.lib import artifact_test
from grr.lib import client_index
from grr.lib import config_lib
from grr.lib import flags
from grr.lib import flow
from grr.lib import rdfvalue
from grr.lib import search
from grr.lib import test_lib
def main(argv):
  """Entry point: run this module's full test suite under GRR's test runner."""
  # Run the full test suite
  test_lib.GrrTestProgram(argv=argv)
# GRR's flag-parsing wrapper around the entry point (2-space GRR style).
if __name__ == "__main__":
  flags.StartMain(main)
| 40.194444 | 80 | 0.666379 |
5312934109f52156adbe6f8ebda77f7b1fb3121e | 3,595 | py | Python | practices/20210112/GraphicsView.py | liff-engineer/articles | ad3386ef9cda5083793f485e309a9f85ab36f664 | [
"MIT"
] | 2 | 2020-12-01T06:44:41.000Z | 2021-11-22T06:07:52.000Z | practices/20210112/GraphicsView.py | liff-engineer/articles | ad3386ef9cda5083793f485e309a9f85ab36f664 | [
"MIT"
] | null | null | null | practices/20210112/GraphicsView.py | liff-engineer/articles | ad3386ef9cda5083793f485e309a9f85ab36f664 | [
"MIT"
] | null | null | null | import sys
from PySide2.QtWidgets import QGraphicsView, QGraphicsScene, QApplication
from PySide2.QtCore import *
from PySide2.QtGui import *
# Script entry point: build a Qt application showing the custom GraphicsView
# (defined earlier in this file — not visible in this excerpt) with demo
# content: a text item and a 400x300 rectangle centered on the scene origin.
if __name__ == "__main__":
    app = QApplication(sys.argv)
    appView = GraphicsView()
    appView.scene().addSimpleText('liff.engineer@gmail.com')
    appView.scene().addRect(-200, -150, 400, 300)
    appView.show()
    # Enter the Qt event loop and exit the process with its return code.
    sys.exit(app.exec_())
| 36.313131 | 99 | 0.638387 |
5312cd144bf0324fa0ae682f9f5f4f2654ceadda | 781 | py | Python | armstrong/hatband/tests/_utils.py | joncotton/armstrong.hatband | c71aed4bd0b03a78d68a6b306e8a0ba9cd92324e | [
"Apache-2.0"
] | null | null | null | armstrong/hatband/tests/_utils.py | joncotton/armstrong.hatband | c71aed4bd0b03a78d68a6b306e8a0ba9cd92324e | [
"Apache-2.0"
] | 3 | 2015-05-29T05:07:09.000Z | 2018-07-18T13:53:36.000Z | armstrong/hatband/tests/_utils.py | joncotton/armstrong.hatband | c71aed4bd0b03a78d68a6b306e8a0ba9cd92324e | [
"Apache-2.0"
] | 2 | 2015-07-29T20:58:29.000Z | 2015-08-07T02:59:37.000Z | from armstrong.dev.tests.utils import ArmstrongTestCase
import random
| 27.892857 | 79 | 0.729834 |
5313e0d9c7ffb25cacea29febc7679af1ef4f1a0 | 7,997 | py | Python | tests/propositional/test_natural_deduction.py | ariroffe/logics | fb918ae8cf243a452e5b030f0df17add83f47f8b | [
"MIT"
] | 12 | 2021-03-31T08:12:09.000Z | 2022-03-15T21:36:59.000Z | tests/propositional/test_natural_deduction.py | ariroffe/logics | fb918ae8cf243a452e5b030f0df17add83f47f8b | [
"MIT"
] | null | null | null | tests/propositional/test_natural_deduction.py | ariroffe/logics | fb918ae8cf243a452e5b030f0df17add83f47f8b | [
"MIT"
] | 1 | 2021-03-31T15:14:26.000Z | 2021-03-31T15:14:26.000Z | import unittest
from logics.classes.propositional import Inference, Formula
from logics.classes.propositional.proof_theories import NaturalDeductionStep, NaturalDeductionRule
from logics.utils.parsers import classical_parser
from logics.instances.propositional.natural_deduction import classical_natural_deduction_system as nd_system
# Run this test module directly with the stdlib unittest runner.
if __name__ == '__main__':
    unittest.main()
| 43.227027 | 108 | 0.549456 |
5314e40633c46116c596429cdd1af4edda4e5856 | 10,244 | py | Python | src/trusted/validator_arm/dgen_decoder_output.py | cohortfsllc/cohort-cocl2-sandbox | 0ac6669d1a459d65a52007b80d5cffa4ef330287 | [
"BSD-3-Clause"
] | 2,151 | 2020-04-18T07:31:17.000Z | 2022-03-31T08:39:18.000Z | src/trusted/validator_arm/dgen_decoder_output.py | cohortfsllc/cohort-cocl2-sandbox | 0ac6669d1a459d65a52007b80d5cffa4ef330287 | [
"BSD-3-Clause"
] | 395 | 2020-04-18T08:22:18.000Z | 2021-12-08T13:04:49.000Z | src/trusted/validator_arm/dgen_decoder_output.py | cohortfsllc/cohort-cocl2-sandbox | 0ac6669d1a459d65a52007b80d5cffa4ef330287 | [
"BSD-3-Clause"
] | 338 | 2020-04-18T08:03:10.000Z | 2022-03-29T12:33:22.000Z | #!/usr/bin/python
#
# Copyright (c) 2012 The Native Client Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
#
"""
Responsible for generating the decoder based on parsed
table representations.
"""
import dgen_opt
import dgen_output
import dgen_actuals
# This file generates the class decoder Decoder as defined by the
# decoder tables. The code is specifically written to minimize the
# number of decoder classes needed to parse valid ARM
# instructions. Many rows in the table use the same decoder class. In
# addition, we optimize tables by merging, so long as the same decoder
# class is built.
#
# The following files are generated:
#
# decoder.h
# decoder.cc
#
# decoder.h declares the generated decoder parser class while
# decoder.cc contains the implementation of that decoder class.
#
# For testing purposes (see dgen_test_output.py) different rules are
# applied. Note: It may be worth reading dgen_test_output.py preamble
# to get a better understanding of decoder actions, and why we need
# the "action_filter" methods.
"""The current command line arguments to use"""
_cl_args = {}
NEWLINE_STR="""
"""
COMMENTED_NEWLINE_STR="""
//"""
# Defines the header for decoder.h
H_HEADER="""%(FILE_HEADER)s
#ifndef %(IFDEF_NAME)s
#define %(IFDEF_NAME)s
#include "native_client/src/trusted/validator_arm/decode.h"
#include "%(FILENAME_BASE)s_actuals.h"
namespace nacl_arm_dec {
"""
DECODER_DECLARE_HEADER="""
// Defines a decoder class selector for instructions.
class %(decoder_name)s : DecoderState {
public:
explicit %(decoder_name)s();
// Parses the given instruction, returning the decoder to use.
virtual const ClassDecoder& decode(const Instruction) const;
// Returns the class decoder to use to process the fictitious instruction
// that is inserted before the first instruction in the code block by
// the validator.
const ClassDecoder &fictitious_decoder() const {
return %(fictitious_decoder)s_instance_;
}
private:
"""
DECODER_DECLARE_METHOD_COMMENTS="""
// The following list of methods correspond to each decoder table,
// and implements the pattern matching of the corresponding bit
// patterns. After matching the corresponding bit patterns, they
// either call other methods in this list (corresponding to another
// decoder table), or they return the instance field that implements
// the class decoder that should be used to decode the particular
// instruction.
"""
DECODER_DECLARE_METHOD="""
inline const ClassDecoder& decode_%(table_name)s(
const Instruction inst) const;
"""
DECODER_DECLARE_FIELD_COMMENTS="""
// The following fields define the set of class decoders
// that can be returned by the API function "decode". They
// are created once as instance fields, and then returned
// by the table methods above. This speeds up the code since
// the class decoders need to only be built once (and reused
// for each call to "decode")."""
DECODER_DECLARE_FIELD="""
const %(decoder)s %(decoder)s_instance_;"""
DECODER_DECLARE_FOOTER="""
};
"""
H_FOOTER="""
} // namespace nacl_arm_dec
#endif // %(IFDEF_NAME)s
"""
def generate_h(decoder, decoder_name, filename, out, cl_args):
"""Entry point to the decoder for .h file.
Args:
decoder: The decoder defined by the list of Table objects to
process.
decoder_name: The name of the decoder state to build.
filename: The (localized) name for the .h file.
named_decoders: If true, generate a decoder state with named
instances.
out: a COutput object to write to.
cl_args: A dictionary of additional command line arguments.
"""
global _cl_args
assert filename.endswith('.h')
_cl_args = cl_args
# Before starting, remove all testing information from the parsed tables.
decoder = decoder.action_filter(['actual'])
values = {
'FILE_HEADER': dgen_output.HEADER_BOILERPLATE,
'IFDEF_NAME': dgen_output.ifdef_name(filename),
'FILENAME_BASE': filename[:-len('.h')],
'decoder_name': decoder_name,
}
out.write(H_HEADER % values)
values['fictitious_decoder'] = (
decoder.get_value('FictitiousFirst').actual())
out.write(DECODER_DECLARE_HEADER % values)
out.write(DECODER_DECLARE_METHOD_COMMENTS)
for table in decoder.tables():
values['table_name'] = table.name
out.write(DECODER_DECLARE_METHOD % values)
out.write(DECODER_DECLARE_FIELD_COMMENTS)
for action in decoder.action_filter(['actual']).decoders():
values['decoder'] = action.actual()
out.write(DECODER_DECLARE_FIELD % values)
out.write(DECODER_DECLARE_FOOTER % values)
out.write(H_FOOTER % values)
# Defines the header for DECODER.h
CC_HEADER="""%(FILE_HEADER)s
#include "%(header_filename)s"
namespace nacl_arm_dec {
"""
CONSTRUCTOR_HEADER="""
%(decoder_name)s::%(decoder_name)s() : DecoderState()"""
CONSTRUCTOR_FIELD_INIT="""
, %(decoder)s_instance_()"""
CONSTRUCTOR_FOOTER="""
{}
"""
METHOD_HEADER="""
// Implementation of table: %(table_name)s.
// Specified by: %(citation)s
const ClassDecoder& %(decoder_name)s::decode_%(table_name)s(
const Instruction inst) const
{"""
METHOD_HEADER_TRACE="""
fprintf(stderr, "decode %(table_name)s\\n");
"""
METHOD_DISPATCH_BEGIN="""
if (%s"""
METHOD_DISPATCH_CONTINUE=""" &&
%s"""
METHOD_DISPATCH_END=") {"""
METHOD_DISPATCH_TRACE="""
fprintf(stderr, "count = %s\\n");"""
METHOD_DISPATCH_CLASS_DECODER="""
return %(decoder)s_instance_;"""
METHOD_DISPATCH_SUBMETHOD="""
return decode_%(subtable_name)s(inst);"""
METHOD_DISPATCH_CLOSE="""
}
"""
METHOD_FOOTER="""
// Catch any attempt to fall though ...
return %(not_implemented)s_instance_;
}
"""
DECODER_METHOD_HEADER="""
const ClassDecoder& %(decoder_name)s::decode(const Instruction inst) const {"""
DECODER_METHOD_TRACE="""
fprintf(stderr, "Parsing %%08x\\n", inst.Bits());"""
DECODER_METHOD_FOOTER="""
return decode_%(entry_table_name)s(inst);
}
"""
CC_FOOTER="""
} // namespace nacl_arm_dec
"""
def generate_cc(decoder, decoder_name, filename, out, cl_args):
"""Implementation of the decoder in .cc file
Args:
decoder: The decoder defined by the list of Table objects to
process.
decoder_name: The name of the decoder state to build.
filename: The (localized) name for the .h file.
named_decoders: If true, generate a decoder state with named
instances.
out: a COutput object to write to.
cl_args: A dictionary of additional command line arguments.
"""
global _cl_args
assert filename.endswith('.cc')
_cl_args = cl_args
# Before starting, remove all testing information from the parsed
# tables.
decoder = decoder.action_filter(['actual'])
values = {
'FILE_HEADER': dgen_output.HEADER_BOILERPLATE,
'header_filename': filename[:-2] + 'h',
'decoder_name': decoder_name,
'entry_table_name': decoder.primary.name,
}
out.write(CC_HEADER % values)
_generate_constructors(decoder, values, out)
_generate_methods(decoder, values, out)
out.write(DECODER_METHOD_HEADER % values)
if _cl_args.get('trace') == 'True':
out.write(DECODER_METHOD_TRACE % values)
out.write(DECODER_METHOD_FOOTER % values)
out.write(CC_FOOTER % values)
| 31.327217 | 79 | 0.695334 |
5314f89fdb3bc7bd293961169b273d8fa69fd14c | 2,985 | py | Python | compose/progress_stream.py | ilinum/compose | d1633d8e9df3c2dd4fa6f562c6b037cfe1af8ddb | [
"Apache-2.0"
] | 1 | 2019-03-06T08:03:18.000Z | 2019-03-06T08:03:18.000Z | compose/progress_stream.py | SeppPenner/compose | 87b25363a385b108066f87570aa5396567585324 | [
"Apache-2.0"
] | 2 | 2021-03-25T21:27:44.000Z | 2021-06-01T21:41:30.000Z | compose/progress_stream.py | SeppPenner/compose | 87b25363a385b108066f87570aa5396567585324 | [
"Apache-2.0"
] | 2 | 2018-07-20T15:52:21.000Z | 2018-12-14T11:54:03.000Z | from __future__ import absolute_import
from __future__ import unicode_literals
from compose import utils
| 26.651786 | 89 | 0.58794 |
5315716313b67d48f560713c76b83b311d4a39e6 | 13,110 | py | Python | tests/test_db.py | beloglazov/openstack-neat | a5a853ae2affb0cdc582e3ab641737f5ebd3d0a7 | [
"Apache-2.0"
] | 34 | 2015-01-04T08:02:37.000Z | 2022-02-19T14:43:47.000Z | tests/test_db.py | beloglazov/openstack-neat | a5a853ae2affb0cdc582e3ab641737f5ebd3d0a7 | [
"Apache-2.0"
] | 3 | 2015-01-23T07:45:15.000Z | 2019-07-03T11:16:27.000Z | tests/test_db.py | beloglazov/openstack-neat | a5a853ae2affb0cdc582e3ab641737f5ebd3d0a7 | [
"Apache-2.0"
] | 22 | 2015-01-14T17:54:46.000Z | 2021-08-09T06:09:17.000Z | # Copyright 2012 Anton Beloglazov
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from mocktest import *
from pyqcy import *
import datetime
import neat.db_utils as db_utils
import logging
logging.disable(logging.CRITICAL)
def test_insert_host_overload(self):
db = db_utils.init_db('sqlite:///:memory:')
hosts = {}
hosts['host1'] = db.update_host('host1', 1, 1, 1)
hosts['host2'] = db.update_host('host2', 1, 1, 1)
db.insert_host_overload('host2', False)
db.insert_host_overload('host1', True)
db.insert_host_overload('host1', False)
db.insert_host_overload('host2', True)
result = db.host_overload.select().execute().fetchall()
host1 = [x[3] for x in sorted(filter(
lambda x: x[1] == hosts['host1'],
result), key=lambda x: x[0])]
self.assertEqual(host1, [1, 0])
host2 = [x[3] for x in sorted(filter(
lambda x: x[1] == hosts['host2'],
result), key=lambda x: x[0])]
self.assertEqual(host2, [0, 1])
| 36.825843 | 94 | 0.555683 |
53180c720a6c468e5a33fc70c5bb3a7936430339 | 808 | py | Python | libs/BIDS.py | GuillermoPerez32/EE2BIDS_backend | 2cab240840e11e227ad60e4c8e17ac9ac87defd4 | [
"MIT"
] | null | null | null | libs/BIDS.py | GuillermoPerez32/EE2BIDS_backend | 2cab240840e11e227ad60e4c8e17ac9ac87defd4 | [
"MIT"
] | null | null | null | libs/BIDS.py | GuillermoPerez32/EE2BIDS_backend | 2cab240840e11e227ad60e4c8e17ac9ac87defd4 | [
"MIT"
] | null | null | null | import os
from bids_validator import BIDSValidator
| 29.925926 | 81 | 0.613861 |
53186bfab87cf033e365cec1f3ce308e9a8c439b | 32,090 | py | Python | src/python/triangula/chassis.py | peterbrazil/brazil | 3823dca6f05b6946251800125d45069048d1bca1 | [
"Apache-2.0"
] | null | null | null | src/python/triangula/chassis.py | peterbrazil/brazil | 3823dca6f05b6946251800125d45069048d1bca1 | [
"Apache-2.0"
] | null | null | null | src/python/triangula/chassis.py | peterbrazil/brazil | 3823dca6f05b6946251800125d45069048d1bca1 | [
"Apache-2.0"
] | null | null | null | from math import cos, sin, degrees, radians, pi
from time import time
from euclid import Vector2, Point2
from numpy import array as np_array
from numpy.linalg import solve as np_solve
__author__ = 'tom'
def rotate_point(point, angle, origin=None):
"""
Rotate a Point2 around another Point2
:param euclid.Point2 point:
The point to rotate
:param float angle:
Angle in radians, clockwise rotation
:param euclid.Point2 origin:
Origin of the rotation, defaults to (0,0) if not specified
:return:
A new :class:`euclid.Point2` containing the rotated input point
"""
if origin is None:
origin = Point2(0, 0)
s = sin(-angle)
c = cos(-angle)
return Point2(c * (point.x - origin.x) - s * (point.y - origin.y) + origin.x,
s * (point.x - origin.x) + c * (point.y - origin.y) + origin.y)
def rotate_vector(vector, angle, origin=None):
"""
Rotate a :class:`euclid.Vector2` around a :class:`euclid.Point2`
:param euclid.Vector2 vector:
The vector to rotate
:param float angle:
Angle in radians, clockwise rotation
:param euclid.Point2 origin:
Origin of the rotation, defaults to (0,0) if not specified
:return:
A new :class:`euclid.Point2` containing the rotated input point
"""
if origin is None:
origin = Point2(0, 0)
s = sin(-angle)
c = cos(-angle)
return Vector2(c * (vector.x - origin.x) - s * (vector.y - origin.y) + origin.x,
s * (vector.x - origin.x) + c * (vector.y - origin.y) + origin.y)
def smallest_difference(a, b, max_value=2 * pi):
"""
Given two floats, a and b, and a maximum possible value for both a and b, calculate the smallest delta from a to b.
For example, if a=1.0, b=2.5 and max_value=2.6, this should return -1.1, as subtracting 1.1 from a would result in
-0.1, which will then be transformed to 2.5 after taking its modulus with 2.6. If max_value was 10, it would return
+1.5, as this is the lower magnitude delta needed to go from 1.0 to 2.5. This function is used when calculating the
shortest delta between two pose orientations, for this reason the max_value defaults to 2*pi for use when working
in radians.
If either a or b are less than zero or greater than the maximum value they will be treated as a % max_value or b %
max_value respectively for the purposes of this calculation.
:param float a:
First value (see above)
:param b:
Second value (see above)
:param max_value:
Modulus, defaults to 2*pi if not specified
:return:
A value d such that (a + d) % max_value == b, and abs(d) is minimal (as there would be an infinite number of
possible d that satisfy this relationship).
"""
mod_a = a % max_value
mod_b = b % max_value
if abs(mod_a - mod_b) <= max_value / 2:
return mod_b - mod_a
elif mod_a >= mod_b:
return mod_b + (max_value - mod_a)
else:
return -(mod_a + (max_value - mod_b))
def get_regular_triangular_chassis(wheel_distance, wheel_radius, max_rotations_per_second):
"""
Build a HoloChassis object with three wheels, each identical in size and maximum speed. Each wheel is positioned
at the corner of a regular triangle, and with direction perpendicular to the normal vector at that corner.
:param wheel_distance:
Distance in millimetres between the contact points of each pair of wheels (i.e. the length of each edge of the
regular triangle)
:param wheel_radius:
Wheel radius in millimetres
:param max_rotations_per_second:
Maximum wheel speed in revolutions per second
:return:
An appropriately configured HoloChassis
"""
point = Point2(0, cos(radians(30)) * wheel_distance / 2.0)
vector = Vector2(-2 * pi * wheel_radius, 0)
# Pink
wheel_a = HoloChassis.OmniWheel(
position=point,
vector=vector,
max_speed=max_rotations_per_second)
# Yellow
wheel_b = HoloChassis.OmniWheel(
position=rotate_point(point, pi * 2 / 3),
vector=rotate_vector(vector, pi * 2 / 3),
max_speed=max_rotations_per_second)
# Green
wheel_c = HoloChassis.OmniWheel(
position=rotate_point(point, pi * 4 / 3),
vector=rotate_vector(vector, pi * 4 / 3),
max_speed=max_rotations_per_second)
return HoloChassis(wheels=[wheel_a, wheel_b, wheel_c])
| 51.508828 | 120 | 0.67186 |
5319f9beb8c0372d2483c2292e3473295821dc00 | 12,467 | py | Python | libs/PureCloudPlatformClientV2/models/management_unit.py | rocketbot-cl/genesysCloud | dd9d9b5ebb90a82bab98c0d88b9585c22c91f333 | [
"MIT"
] | 1 | 2021-10-08T20:46:45.000Z | 2021-10-08T20:46:45.000Z | libs/PureCloudPlatformClientV2/models/management_unit.py | rocketbot-cl/genesysCloud | dd9d9b5ebb90a82bab98c0d88b9585c22c91f333 | [
"MIT"
] | null | null | null | libs/PureCloudPlatformClientV2/models/management_unit.py | rocketbot-cl/genesysCloud | dd9d9b5ebb90a82bab98c0d88b9585c22c91f333 | [
"MIT"
] | null | null | null | # coding: utf-8
"""
Copyright 2016 SmartBear Software
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
Ref: https://github.com/swagger-api/swagger-codegen
"""
from pprint import pformat
from six import iteritems
import re
import json
from ..utils import sanitize_for_serialization
def to_json(self):
"""
Returns the model as raw JSON
"""
return json.dumps(sanitize_for_serialization(self.to_dict()))
def to_str(self):
"""
Returns the string representation of the model
"""
return pformat(self.to_dict())
def __repr__(self):
"""
For `print` and `pprint`
"""
return self.to_str()
def __eq__(self, other):
"""
Returns true if both objects are equal
"""
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""
Returns true if both objects are not equal
"""
return not self == other
| 29.612827 | 208 | 0.599422 |
531bf28ae62e80e65e0bf53d63aae0164d547e7c | 696 | py | Python | harmony_tools/core/colors.py | a1fred/guitar_gammas | 9933fe899af7a8e7f490f61d58004bb59f03271c | [
"MIT"
] | 1 | 2021-02-26T03:52:26.000Z | 2021-02-26T03:52:26.000Z | harmony_tools/core/colors.py | a1fred/harmony_tools | 9933fe899af7a8e7f490f61d58004bb59f03271c | [
"MIT"
] | null | null | null | harmony_tools/core/colors.py | a1fred/harmony_tools | 9933fe899af7a8e7f490f61d58004bb59f03271c | [
"MIT"
] | null | null | null | COLOR_BLUE = '\033[0;34m'
COLOR_GREEN = '\033[0;32m'
COLOR_CYAN = '\033[0;36m'
COLOR_RED = '\033[0;31m'
COLOR_PURPLE = '\033[0;35m'
COLOR_BROWN = '\033[0;33m'
COLOR_YELLOW = '\033[1;33m'
COLOR_GRAY = '\033[1;30m'
COLOR_RESET = '\033[0m'
FG_COLORS = [
# COLOR_BLUE,
COLOR_GREEN,
# COLOR_CYAN,
# COLOR_RED,
# COLOR_PURPLE,
# COLOR_BROWN,
# COLOR_YELLOW,
]
| 17.4 | 42 | 0.627874 |
531ccd14367ef1d863f40816ee6edf521bc6c3f6 | 712 | py | Python | Common_Questions/TextBookQuestions/PythonCrashCourse/Chapter_8/8_5.py | tegamax/ProjectCode | 0ed86e227fba50b453c5c4a2596afbadc39a167e | [
"MIT"
] | null | null | null | Common_Questions/TextBookQuestions/PythonCrashCourse/Chapter_8/8_5.py | tegamax/ProjectCode | 0ed86e227fba50b453c5c4a2596afbadc39a167e | [
"MIT"
] | null | null | null | Common_Questions/TextBookQuestions/PythonCrashCourse/Chapter_8/8_5.py | tegamax/ProjectCode | 0ed86e227fba50b453c5c4a2596afbadc39a167e | [
"MIT"
] | null | null | null | '''
8-5. Cities: Write a function called describe_city() that accepts the name of a city and its country.
The function should print a simple sentence, such as Reykjavik is in Iceland. Give the parameter for the country a default value.
Call your function for three different cities, at least one of which is not in the default country.
'''
describe_city('Garabr','country')
'''
rborg
Akureyri
''' | 28.48 | 130 | 0.685393 |
531ce1da9e2ab397f8a8222a28bef7e919e9c968 | 12,383 | py | Python | tests/test_geometry_loader.py | trnielsen/nexus-constructor | 65efb6eedca30250b75f142dd29a46bc909958df | [
"BSD-2-Clause"
] | null | null | null | tests/test_geometry_loader.py | trnielsen/nexus-constructor | 65efb6eedca30250b75f142dd29a46bc909958df | [
"BSD-2-Clause"
] | null | null | null | tests/test_geometry_loader.py | trnielsen/nexus-constructor | 65efb6eedca30250b75f142dd29a46bc909958df | [
"BSD-2-Clause"
] | null | null | null | from nexus_constructor.geometry import OFFGeometryNoNexus
from nexus_constructor.geometry.geometry_loader import load_geometry_from_file_object
from nexus_constructor.off_renderer import repeat_shape_over_positions
from PySide2.QtGui import QVector3D
from io import StringIO
| 34.112948 | 132 | 0.622305 |
531dc0c210eb864fa15db98132f5b9dc46d4e0b4 | 3,140 | py | Python | verteste/ui/ui_about.py | Chum4k3r/Verteste | 216c04468ff14c392ee3c6aebe12a0fa0e98767c | [
"MIT"
] | null | null | null | verteste/ui/ui_about.py | Chum4k3r/Verteste | 216c04468ff14c392ee3c6aebe12a0fa0e98767c | [
"MIT"
] | null | null | null | verteste/ui/ui_about.py | Chum4k3r/Verteste | 216c04468ff14c392ee3c6aebe12a0fa0e98767c | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
################################################################################
## Form generated from reading UI file 'aboutdialog.ui'
##
## Created by: Qt User Interface Compiler version 6.1.1
##
## WARNING! All changes made in this file will be lost when recompiling UI file!
################################################################################
from PySide6.QtCore import * # type: ignore
from PySide6.QtGui import * # type: ignore
from PySide6.QtWidgets import * # type: ignore
| 36.941176 | 117 | 0.653503 |
531e55e6be488ba1586f078680847b9d77b065ff | 4,416 | py | Python | tests/test_base_protocol.py | Qix-/aiohttp | aee067dccad3dc0e79778a1b213105f20bf39baf | [
"Apache-2.0"
] | 3 | 2019-01-15T04:17:33.000Z | 2019-03-13T13:12:15.000Z | tests/test_base_protocol.py | Qix-/aiohttp | aee067dccad3dc0e79778a1b213105f20bf39baf | [
"Apache-2.0"
] | 309 | 2019-08-20T21:49:50.000Z | 2021-07-31T13:27:18.000Z | tests/test_base_protocol.py | amenezes/aiohttp | e8049814a2161278bae178cb96334ce0c98e66f3 | [
"Apache-2.0"
] | 1 | 2020-12-02T16:06:16.000Z | 2020-12-02T16:06:16.000Z | import asyncio
from contextlib import suppress
from unittest import mock
import pytest
from aiohttp.base_protocol import BaseProtocol
| 24.131148 | 63 | 0.688406 |
531e5848bd2aa1d173ccaded9dac7c7007b60544 | 1,599 | py | Python | main.py | marcusviniciusteixeira/RPAPython | 8055e7283e6a8dd8910139cbbaa914761e2924f2 | [
"MIT"
] | 1 | 2022-01-23T00:17:05.000Z | 2022-01-23T00:17:05.000Z | main.py | marcusviniciusteixeira/RPAPython | 8055e7283e6a8dd8910139cbbaa914761e2924f2 | [
"MIT"
] | null | null | null | main.py | marcusviniciusteixeira/RPAPython | 8055e7283e6a8dd8910139cbbaa914761e2924f2 | [
"MIT"
] | null | null | null | import PySimpleGUI as sg
import os
import time
import pyautogui
tela = TelaPython()
tela.Iniciar() | 32.632653 | 127 | 0.579112 |
531fa48589e0156d08a9e55a80a6582cdc603310 | 810 | py | Python | logistic-regression/plot_binary_losses.py | eliben/deep-learning-samples | d5ca86c5db664fabfb302cbbc231c50ec3d6a103 | [
"Unlicense"
] | 183 | 2015-12-29T07:21:24.000Z | 2022-01-18T01:19:23.000Z | logistic-regression/plot_binary_losses.py | eliben/deep-learning-samples | d5ca86c5db664fabfb302cbbc231c50ec3d6a103 | [
"Unlicense"
] | null | null | null | logistic-regression/plot_binary_losses.py | eliben/deep-learning-samples | d5ca86c5db664fabfb302cbbc231c50ec3d6a103 | [
"Unlicense"
] | 68 | 2016-06-02T15:31:51.000Z | 2021-09-08T19:58:10.000Z | # Helper code to plot binary losses.
#
# Eli Bendersky (http://eli.thegreenplace.net)
# This code is in the public domain
from __future__ import print_function
import matplotlib.pyplot as plt
import numpy as np
if __name__ == '__main__':
fig, ax = plt.subplots()
fig.set_tight_layout(True)
xs = np.linspace(-2, 2, 500)
# plot L0/1 loss
ax.plot(xs, np.where(xs < 0, np.ones_like(xs), np.zeros_like(xs)),
color='r', linewidth=2.0, label='$L_{01}$')
# plot square loss
ax.plot(xs, (xs - 1) ** 2, linestyle='-.', label='$L_2$')
# plot hinge loss
ax.plot(xs, np.maximum(np.zeros_like(xs), 1 - xs),
color='g', linewidth=2.0, label='$L_h$')
ax.grid(True)
plt.ylim((-1, 4))
ax.legend()
fig.savefig('loss.png', dpi=80)
plt.show()
| 23.823529 | 70 | 0.608642 |
531fbba3b5ee2b6bc1a9dd51c2d5b53732a600be | 1,243 | py | Python | utils/watch-less.py | K-Fitzpatrick/crop_planner | 2605c0886fd3b4681c2ea3ac5e88e1d8555178f5 | [
"MIT"
] | 91 | 2016-03-15T16:41:41.000Z | 2022-03-25T16:30:09.000Z | utils/watch-less.py | SoaringDragon42/crop_planner | 2605c0886fd3b4681c2ea3ac5e88e1d8555178f5 | [
"MIT"
] | 18 | 2016-03-30T15:01:25.000Z | 2020-03-09T06:17:08.000Z | utils/watch-less.py | SoaringDragon42/crop_planner | 2605c0886fd3b4681c2ea3ac5e88e1d8555178f5 | [
"MIT"
] | 48 | 2016-03-15T16:41:44.000Z | 2022-03-09T21:28:05.000Z | #!/usr/bin/env python3
################################
# Development tool
# Auto-compiles style.less to style.css
#
# Requires lessc and less clean css to be installed:
# npm install -g less
# npm install -g less-plugin-clean-css
################################
import os, time
from os import path
from math import floor
from _helper import *
# Main application
# Run application
if __name__ == "__main__":
try:
app = Main()
except KeyboardInterrupt:
print("Exiting") | 23.45283 | 79 | 0.636364 |
5320daf74e5189735d626582356010934114572d | 10,522 | py | Python | testapp/app/app/tests/test_export_action.py | instituciones-abiertas/django-admin-export-action | bb089180e418915e1bba31927554537249fbec78 | [
"MIT"
] | 5 | 2020-12-15T11:38:42.000Z | 2022-01-06T02:33:59.000Z | testapp/app/app/tests/test_export_action.py | instituciones-abiertas/django-admin-export-action | bb089180e418915e1bba31927554537249fbec78 | [
"MIT"
] | 2 | 2021-09-14T19:25:29.000Z | 2021-11-26T14:16:50.000Z | testapp/app/app/tests/test_export_action.py | instituciones-abiertas/django-admin-export-action | bb089180e418915e1bba31927554537249fbec78 | [
"MIT"
] | 2 | 2021-09-14T19:19:05.000Z | 2021-09-14T19:19:18.000Z | # -- encoding: UTF-8 --
import json
import uuid
from admin_export_action import report
from admin_export_action.admin import export_selected_objects
from admin_export_action.config import default_config, get_config
from django.contrib.admin.sites import AdminSite
from django.contrib.auth.models import User
from django.contrib.contenttypes.models import ContentType
from django.test import TestCase, RequestFactory
from django.urls import reverse
from django.utils.http import urlencode
from news.models import Attachment, Category, News, NewsTag, Video
from news.admin import NewsAdmin
| 36.79021 | 129 | 0.560255 |
5321da160f808c8f4dfdd8dfa49b6f548aebb482 | 367 | py | Python | shapeshifter/tests/conftest.py | martinogden/django-shapeshifter | dbcba74c0a6914af181c1e8f0ba23369d4c3c94b | [
"Apache-2.0"
] | 164 | 2018-08-27T16:48:15.000Z | 2022-03-01T06:43:35.000Z | shapeshifter/tests/conftest.py | FlipperPA/django-shapeshifter | 0ce9cc2796164a529749a6b2208642d819f706bd | [
"Apache-2.0"
] | 2 | 2018-09-04T14:26:34.000Z | 2021-07-23T21:02:29.000Z | shapeshifter/tests/conftest.py | FlipperPA/django-shapeshifter | 0ce9cc2796164a529749a6b2208642d819f706bd | [
"Apache-2.0"
] | 12 | 2018-08-27T20:55:12.000Z | 2022-02-07T23:41:33.000Z | from pytest_djangoapp import configure_djangoapp_plugin
pytest_plugins = configure_djangoapp_plugin(
extend_INSTALLED_APPS=[
'django.contrib.sessions',
'django.contrib.messages',
],
extend_MIDDLEWARE=[
'django.contrib.sessions.middleware.SessionMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
]
)
| 28.230769 | 63 | 0.730245 |
5322235447e97ac9d03d73498451ad6c8b6d4fab | 10,434 | py | Python | face_attribute_verification.py | seymayucer/FacialPhenotypes | 043f3ecf956cad53095d93f19383c4c94e033692 | [
"MIT"
] | 2 | 2021-03-02T22:25:32.000Z | 2021-03-06T23:53:13.000Z | face_attribute_verification.py | seymayucer/FacialPhenotypes | 043f3ecf956cad53095d93f19383c4c94e033692 | [
"MIT"
] | null | null | null | face_attribute_verification.py | seymayucer/FacialPhenotypes | 043f3ecf956cad53095d93f19383c4c94e033692 | [
"MIT"
] | 1 | 2021-03-22T02:05:32.000Z | 2021-03-22T02:05:32.000Z | import argparse
import numpy as np
from sklearn.model_selection import StratifiedKFold
import sklearn
import cv2
import datetime
import mxnet as mx
from mxnet import ndarray as nd
import pandas as pd
from numpy import linalg as line
import logging
logging.basicConfig(
format="%(asctime)s %(message)s", datefmt="%m/%d/%Y %I:%M:%S %p", level=logging.INFO
)
if __name__ == "__main__":
parser = argparse.ArgumentParser(description="Face Verification for RFW")
parser.add_argument(
"--data_dir", type=str, default="RFW/test/aligned_data", help="dataset root"
)
parser.add_argument(
"--pair_file",
type=str,
default="./AttributePairs/eye_narrow_pairs_6000_selected.csv",
help="pair file to test",
)
parser.add_argument(
"--model_dir", type=str, default="/model/", help="pre-trained model directory"
)
parser.add_argument("--batch_size", type=int, default="32", help="batch_size")
args = parser.parse_args()
validation = FaceVerification(
batch_size=args.batch_size, model=None, data_dir=args.data_dir
)
validation.load_model(model_dir=args.model_dir)
_, _, _shape = validation.load_images(args.pair_file)
tpr, fpr, acc, std = validation.verify()
logging.info(
"Testing Accuracy {} for {} in shape {}".format(acc, args.pair_file, _shape[0])
)
| 35.610922 | 88 | 0.568047 |
5322c20dd329a34737e71921f9eef02bff3f4b61 | 691 | py | Python | pkgs/applications/virtualization/virt-manager/custom_runner.py | mornfall/nixpkgs | 0eb6f056b9ce3e32dbc3297f298472aef19f8c73 | [
"MIT"
] | 1 | 2015-03-10T08:51:43.000Z | 2015-03-10T08:51:43.000Z | pkgs/applications/virtualization/virt-manager/custom_runner.py | mornfall/nixpkgs | 0eb6f056b9ce3e32dbc3297f298472aef19f8c73 | [
"MIT"
] | null | null | null | pkgs/applications/virtualization/virt-manager/custom_runner.py | mornfall/nixpkgs | 0eb6f056b9ce3e32dbc3297f298472aef19f8c73 | [
"MIT"
] | null | null | null | #!/usr/bin/python -t
# this script was written to use /etc/nixos/nixpkgs/pkgs/development/python-modules/generic/wrap.sh
# which already automates python executable wrapping by extending the PATH/pythonPath
# from http://docs.python.org/library/subprocess.html
# Warning Invoking the system shell with shell=True can be a security hazard if combined with untrusted input. See the warning under Frequently Used Arguments for details.
from subprocess import Popen, PIPE, STDOUT
cmd = 'PYTHON_EXECUTABLE_PATH -t THE_CUSTOM_PATH/share/virt-manager/THE_CUSTOM_PROGRAM.py'
p = Popen(cmd, shell=True, stdin=PIPE, stdout=PIPE, stderr=STDOUT, close_fds=True)
output = p.stdout.read()
print output
| 49.357143 | 171 | 0.798842 |
5322e645a2731e419b7aab530efd6f637ecbe5b3 | 6,645 | py | Python | jsonform/fields.py | Pix-00/jsonform | d62543474d96b258606ec38dd427693232daeda3 | [
"Apache-2.0"
] | null | null | null | jsonform/fields.py | Pix-00/jsonform | d62543474d96b258606ec38dd427693232daeda3 | [
"Apache-2.0"
] | null | null | null | jsonform/fields.py | Pix-00/jsonform | d62543474d96b258606ec38dd427693232daeda3 | [
"Apache-2.0"
] | null | null | null | import base64
import datetime
from abc import ABC, abstractmethod
from .conditions import AnyValue
from .errors import FieldError, FormError
__all__ = [
'Field', 'StringField', 'IntegerField', 'FloatField', 'BooleanField',
'DateTimeField', 'DateField', 'TimeField', 'ListField','SetField', 'EnumField', 'BytesField'
]
| 27.233607 | 96 | 0.582844 |
53233f3a8dea82db62a5a19f4e770dd7fed41add | 3,331 | py | Python | napari/layers/_source.py | napari/napari-gui | 9beb1a0b797890718e1c4f372cbd6256747f9101 | [
"BSD-3-Clause"
] | 7 | 2018-07-03T17:35:46.000Z | 2018-11-07T15:48:58.000Z | napari/layers/_source.py | guiwitz/napari | 1546f18ecc13364d5415623a9c11ed760ff043e2 | [
"BSD-3-Clause"
] | 120 | 2018-09-04T22:05:13.000Z | 2019-03-02T01:13:57.000Z | napari/layers/_source.py | napari/napari-gui | 9beb1a0b797890718e1c4f372cbd6256747f9101 | [
"BSD-3-Clause"
] | 8 | 2018-09-04T21:48:26.000Z | 2019-01-29T04:48:30.000Z | from __future__ import annotations
from contextlib import contextmanager
from contextvars import ContextVar
from typing import Optional, Tuple
from magicgui.widgets import FunctionGui
from pydantic import BaseModel
# layer source context management
_LAYER_SOURCE: ContextVar[dict] = ContextVar('_LAYER_SOURCE', default={})
def current_source():
"""Get the current layer :class:`Source` (inferred from context).
The main place this function is used is in :meth:`Layer.__init__`.
"""
return Source(**_LAYER_SOURCE.get())
| 33.31 | 97 | 0.691684 |
532376f847c7965500c6f9f56d9f6308f976ea4f | 1,599 | py | Python | tests/unit/test_BaseDirection.py | vpalex999/project-mars | 6e21c5acfe6105a7b7c87a79770e7420bda46f26 | [
"Apache-2.0"
] | null | null | null | tests/unit/test_BaseDirection.py | vpalex999/project-mars | 6e21c5acfe6105a7b7c87a79770e7420bda46f26 | [
"Apache-2.0"
] | null | null | null | tests/unit/test_BaseDirection.py | vpalex999/project-mars | 6e21c5acfe6105a7b7c87a79770e7420bda46f26 | [
"Apache-2.0"
] | null | null | null | import pytest
import src.constants as cnst
from src.directions import BaseDirection
| 35.533333 | 94 | 0.707942 |
5323be56fb9f52f802238cdfb9a7b782de3d3c6b | 1,220 | py | Python | OpenCV-Computer-Vision-Examples-with-Python-A-Complete-Guide-for-Dummies-master/Source Code/opencv_operations/draw-circles.py | Payal197bhadra/ComputerVision | d66b5037ece99b6189dd4306b2c9be67cffd14af | [
"MIT"
] | 6 | 2019-06-30T09:08:03.000Z | 2021-10-11T17:51:16.000Z | OpenCV-Computer-Vision-Examples-with-Python-A-Complete-Guide-for-Dummies-master/Source Code/opencv_operations/draw-circles.py | Payal197bhadra/ComputerVision | d66b5037ece99b6189dd4306b2c9be67cffd14af | [
"MIT"
] | null | null | null | OpenCV-Computer-Vision-Examples-with-Python-A-Complete-Guide-for-Dummies-master/Source Code/opencv_operations/draw-circles.py | Payal197bhadra/ComputerVision | d66b5037ece99b6189dd4306b2c9be67cffd14af | [
"MIT"
] | 3 | 2020-01-01T17:41:10.000Z | 2021-04-22T22:21:56.000Z | import numpy as np
import cv2
# Create a 300x300 px canvas with 3 channels and 8-bit unsigned pixels.
# NOTE: OpenCV orders channels as BGR, so color tuples below are (B, G, R).
canvas = np.zeros((300,300,3), dtype ="uint8")
# Draw a single circle.
# cv2.circle arguments: image, center point, radius, color, thickness (optional).
# imshow displays the canvas; waitKey(0) blocks until a key is pressed.
green = (0,255,0)
cv2.circle(canvas,(100,100), 10, green)
cv2.imshow("Single circle", canvas)
cv2.waitKey(0)
# Draw concentric white circles:
# clear the canvas, compute its center point, then draw circles of
# increasing radius (step 25 px) in a loop.
canvas = np.zeros((300,300,3), dtype ="uint8")
white = (255,255,255)
(centerX, centerY) = (canvas.shape[1]//2, canvas.shape[0]//2)
for r in range(0,175,25):
    cv2.circle(canvas, (centerX,centerY), r, white)
cv2.imshow("concentric circles", canvas)
cv2.waitKey(0)
# Draw 25 filled circles with random radius, center point, and color.
# Thickness -1 fills the circle.
canvas = np.zeros((300,300,3), dtype ="uint8")
for i in range(0, 25):
    radius = np.random.randint(5, high = 200)
    color = np.random.randint(0, high = 256, size = (3,)).tolist()
    pt = np.random.randint(0, high = 300, size = (2,))
    cv2.circle(canvas, tuple(pt), radius, color, -1)
cv2.imshow("Canvas", canvas)
cv2.waitKey(0)
5323c4b7d5f3632bee1bd22e2b1ceebf3d070d46 | 1,648 | py | Python | tmux_cssh/main.py | cscutcher/tmux_cssh | bfbb7eb26d5f5864c0888fa8e614122401ed4f5f | [
"Unlicense"
] | null | null | null | tmux_cssh/main.py | cscutcher/tmux_cssh | bfbb7eb26d5f5864c0888fa8e614122401ed4f5f | [
"Unlicense"
] | null | null | null | tmux_cssh/main.py | cscutcher/tmux_cssh | bfbb7eb26d5f5864c0888fa8e614122401ed4f5f | [
"Unlicense"
] | null | null | null | # -*- coding: utf-8 -*-
"""
Main Script
"""
import logging
import argh
import sarge
import tmuxp
DEV_LOGGER = logging.getLogger(__name__)
def get_current_session(server=None):
    """Return the tmux session the caller is currently attached to.

    tmuxp offers no direct accessor for the attached session, so this asks
    the ``tmux`` binary for the current session name and looks it up on the
    server.
    """
    if server is None:
        server = tmuxp.Server()
    session_name = sarge.get_stdout('tmux display-message -p "#S"').strip()
    return server.findWhere({"session_name": session_name})
| 26.580645 | 78 | 0.68932 |
53249bb622e69bc18a3497cdf35b1da555bf7b6c | 87 | py | Python | nautobot_capacity_metrics/management/commands/__init__.py | david-kn/nautobot-plugin-capacity-metrics | df2a635257a464b8340b788d8723b5f00e3e238f | [
"Apache-2.0"
] | 6 | 2021-02-24T22:17:11.000Z | 2022-01-25T18:36:29.000Z | nautobot_capacity_metrics/management/commands/__init__.py | david-kn/nautobot-plugin-capacity-metrics | df2a635257a464b8340b788d8723b5f00e3e238f | [
"Apache-2.0"
] | 12 | 2021-04-26T17:23:53.000Z | 2021-09-09T16:23:56.000Z | nautobot_capacity_metrics/management/commands/__init__.py | david-kn/nautobot-plugin-capacity-metrics | df2a635257a464b8340b788d8723b5f00e3e238f | [
"Apache-2.0"
] | 3 | 2021-06-28T12:44:45.000Z | 2021-08-02T18:00:22.000Z | """Additional Django management commands added by nautobot_capacity_metrics plugin."""
| 43.5 | 86 | 0.827586 |
5324fa73c034a05cd172d09f6d03e2153b7f495e | 35 | py | Python | nptweak/__init__.py | kmedian/nptweak | 222f46b8abb9b00f1ae8065d38d0514193aa8a4b | [
"MIT"
] | null | null | null | nptweak/__init__.py | kmedian/nptweak | 222f46b8abb9b00f1ae8065d38d0514193aa8a4b | [
"MIT"
] | 2 | 2019-12-03T12:37:17.000Z | 2019-12-03T12:37:45.000Z | nptweak/__init__.py | kmedian/nptweak | 222f46b8abb9b00f1ae8065d38d0514193aa8a4b | [
"MIT"
] | null | null | null | from .to_2darray import to_2darray
| 17.5 | 34 | 0.857143 |
5326222cf04cc16e106a9b078150b32472ee3fb7 | 1,520 | py | Python | resources/models/Image.py | sphildreth/roadie-python | 1465ac0f4282356ab5a074020b4f0a9f28058a86 | [
"MIT"
] | null | null | null | resources/models/Image.py | sphildreth/roadie-python | 1465ac0f4282356ab5a074020b4f0a9f28058a86 | [
"MIT"
] | null | null | null | resources/models/Image.py | sphildreth/roadie-python | 1465ac0f4282356ab5a074020b4f0a9f28058a86 | [
"MIT"
] | null | null | null | import io
from PIL import Image as PILImage
from sqlalchemy import Column, ForeignKey, LargeBinary, Index, Integer, String
from resources.models.ModelBase import Base
| 38 | 117 | 0.647368 |
5326a402c4dd86ad307f49de1d63c62b7a921bd6 | 10,288 | py | Python | arch2vec/search_methods/reinforce_darts.py | gabrielasuchopar/arch2vec | 1fc47d2cc7d63832e0d6337b8482669366b4aef2 | [
"Apache-2.0"
] | null | null | null | arch2vec/search_methods/reinforce_darts.py | gabrielasuchopar/arch2vec | 1fc47d2cc7d63832e0d6337b8482669366b4aef2 | [
"Apache-2.0"
] | null | null | null | arch2vec/search_methods/reinforce_darts.py | gabrielasuchopar/arch2vec | 1fc47d2cc7d63832e0d6337b8482669366b4aef2 | [
"Apache-2.0"
] | null | null | null | import os
import sys
import argparse
import json
import random
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from arch2vec.models.pretraining_nasbench101 import configs
from arch2vec.utils import load_json, preprocessing, one_hot_darts
from arch2vec.preprocessing.gen_isomorphism_graphs import process
from arch2vec.models.model import Model
from torch.distributions import MultivariateNormal
from arch2vec.darts.cnn.train_search import Train
def select_action(state, policy):
    """
    MVN based action selection.

    Samples an action from a multivariate normal whose mean is the policy
    output and whose covariance is the identity, and records the mean
    log-probability of the sample on ``policy.saved_log_probs`` for the
    later REINFORCE update.

    :param state: 1 x dim
    :param policy: policy network
    :return: selected action: 1 x dim
    """
    mean = policy(state.view(1, state.shape[0]))
    # Build the identity covariance on the same device as the policy output
    # instead of hard-coding .cuda(), so this also works on CPU-only hosts.
    mvn = MultivariateNormal(mean, torch.eye(state.shape[0], device=mean.device))
    action = mvn.sample()
    policy.saved_log_probs.append(torch.mean(mvn.log_prob(action)))
    return action
def finish_episode(policy, optimizer):
    """Runs one REINFORCE policy-gradient update.

    Consumes the rewards and log-probabilities buffered on ``policy`` during
    the episode, takes a single optimizer step, then clears the buffers and
    the policy's recurrent state. Reads the module-level ``args`` for
    ``gamma`` and ``objective``.
    """
    R = 0
    policy_loss = []
    returns = []
    # Accumulate discounted returns.
    # NOTE(review): `returns` built in this loop is immediately overwritten
    # by the raw rewards below, so the discounting has no effect on the
    # update -- confirm whether this is intentional.
    for r in policy.rewards:
        R = r + args.gamma * R
        returns.append(R)
    returns = torch.Tensor(policy.rewards)
    val, indices = torch.sort(returns)
    print("sorted validation reward:", val)
    # Use (reward - objective) as the baseline-adjusted advantage.
    returns = returns - args.objective
    for log_prob, R in zip(policy.saved_log_probs, returns):
        policy_loss.append(-log_prob * R)
    optimizer.zero_grad()
    policy_loss = torch.mean(torch.stack(policy_loss, dim=0))
    print("average reward: {}, policy loss: {}".format(sum(policy.rewards)/len(policy.rewards), policy_loss.item()))
    policy_loss.backward()
    optimizer.step()
    # Reset episode buffers and LSTM hidden/cell state for the next episode.
    del policy.rewards[:]
    del policy.saved_log_probs[:]
    policy.hx = None
    policy.cx = None
def query(counter, seed, genotype, epochs):
    """Trains `genotype` and reports its accuracy.

    Returns a ``(mean validation accuracy, final test accuracy)`` pair, both
    rescaled from percentages to fractions in [0, 1].
    """
    trainer = Train()
    rewards, rewards_test = trainer.main(counter, seed, genotype, epochs=epochs,
                                         train_portion=args.train_portion,
                                         save=args.logging_path)
    val_avg = sum(val_acc for _, val_acc in rewards) / len(rewards)
    return val_avg / 100., rewards_test[-1][-1] / 100.
def reinforce_search(env):
    """ implementation of arch2vec-RL on DARTS Search Space

    Repeatedly samples architectures with an LSTM policy, trains each one via
    `query`, updates the policy with REINFORCE after every batch of `args.bs`
    samples, and writes the best-so-far traces to a JSON file. Stops after
    `args.max_budgets` architecture evaluations.
    """
    policy = Policy_LSTM(args.dim, 128).cuda()
    optimizer = optim.Adam(policy.parameters(), lr=1e-2)
    counter = 0
    MAX_BUDGET = args.max_budgets
    state, genotype = env.get_init_state()
    CURR_BEST_VALID = 0
    CURR_BEST_TEST = 0
    CURR_BEST_GENOTYPE = None
    test_trace = []
    valid_trace = []
    genotype_trace = []
    counter_trace = []
    while counter < MAX_BUDGET:
        # One REINFORCE episode: sample and evaluate a batch of architectures.
        for c in range(args.bs):
            state = state.cuda()
            action = select_action(state, policy)
            state, genotype = env.step(action)
            reward, reward_test = query(counter=counter, seed=args.seed, genotype=genotype, epochs=args.inner_epochs)
            policy.rewards.append(reward)
            counter += 1
            print('counter: {}, validation reward: {}, test reward: {}, genotype: {}'.format(counter, reward, reward_test, genotype))
            # Track the running best by validation reward.
            if reward > CURR_BEST_VALID:
                CURR_BEST_VALID = reward
                CURR_BEST_TEST = reward_test
                CURR_BEST_GENOTYPE = genotype
            valid_trace.append(float(CURR_BEST_VALID))
            test_trace.append(float(CURR_BEST_TEST))
            genotype_trace.append(CURR_BEST_GENOTYPE)
            counter_trace.append(counter)
            if counter >= MAX_BUDGET:
                break
        # Policy-gradient update from this episode's rewards/log-probs.
        finish_episode(policy, optimizer)
    # Persist best-so-far traces for later analysis.
    res = dict()
    res['validation_acc'] = valid_trace
    res['test_acc'] = test_trace
    res['genotype'] = genotype_trace
    res['counter'] = counter_trace
    save_path = os.path.join(args.output_path, 'dim{}'.format(args.dim))
    if not os.path.exists(save_path):
        os.mkdir(save_path)
    print('save to {}'.format(save_path))
    fh = open(os.path.join(save_path, 'run_{}_arch2vec_model_darts.json'.format(args.seed)), 'w')
    json.dump(res, fh)
    fh.close()
if __name__ == '__main__':
    parser = argparse.ArgumentParser(description="arch2vec-REINFORCE")
    # NOTE(review): the help text below says "default 0.99" but the actual
    # default is 0.8 -- the help string is out of date.
    parser.add_argument("--gamma", type=float, default=0.8, help="discount factor (default 0.99)")
    parser.add_argument("--seed", type=int, default=3, help="random seed")
    parser.add_argument('--cfg', type=int, default=4, help='configuration (default: 4)')
    parser.add_argument('--bs', type=int, default=16, help='batch size')
    parser.add_argument('--objective', type=float, default=0.95, help='rl baseline')
    parser.add_argument('--max_budgets', type=int, default=100, help='number of queries')
    parser.add_argument('--inner_epochs', type=int, default=50, help='inner loop epochs')
    parser.add_argument('--train_portion', type=float, default=0.9, help='train/validation split portion')
    parser.add_argument('--output_path', type=str, default='rl', help='rl/bo (default: rl)')
    parser.add_argument('--logging_path', type=str, default='', help='search logging path')
    parser.add_argument('--saved_arch2vec', action="store_true", default=False)
    parser.add_argument('--input_dim', type=int, default=11)
    parser.add_argument('--hidden_dim', type=int, default=128)
    parser.add_argument('--dim', type=int, default=16,
                        help='feature dimension (default: 16)')
    parser.add_argument('--hops', type=int, default=5)
    parser.add_argument('--mlps', type=int, default=2)
    parser.add_argument('--dropout', type=float, default=0.3)
    args = parser.parse_args()
    cfg = configs[args.cfg]
    # Build the search environment over the pre-extracted arch2vec embeddings.
    env = Env('REINFORCE', args.seed, cfg, data_path='data/data_darts_counter600000.json', save=args.saved_arch2vec)
    torch.manual_seed(args.seed)
    reinforce_search(env)
| 39.722008 | 138 | 0.629374 |
53285c4fc141853fde6ba747fb42c02369b8ef62 | 2,326 | py | Python | setup.py | mentaal/r_map | 42986e90b31018b1e7fc992a53b0f5f6e559253f | [
"MIT"
] | null | null | null | setup.py | mentaal/r_map | 42986e90b31018b1e7fc992a53b0f5f6e559253f | [
"MIT"
] | null | null | null | setup.py | mentaal/r_map | 42986e90b31018b1e7fc992a53b0f5f6e559253f | [
"MIT"
] | null | null | null | """A setuptools based setup module.
See:
https://packaging.python.org/en/latest/distributing.html
https://github.com/pypa/sampleproject
"""
# Always prefer setuptools over distutils
from setuptools import setup, find_packages
# To use a consistent encoding
from codecs import open
from os import path
here = path.abspath(path.dirname(__file__))
# Get the long description from the README file
with open(path.join(here, 'README.md'), encoding='utf-8') as f:
long_description = f.read()
# Arguments marked as "Required" below must be included for upload to PyPI.
# Fields marked as "Optional" may be commented out.
setup(
name='r_map', # Required
version='0.9.0', # Required
description='A data structure for working with register map information', # Required
long_description=long_description, # Optional
long_description_content_type='text/markdown', # Optional (see note above)
url='https://github.com/mentaal/r_map', # Optional
# This should be your name or the name of the organization which owns the
# project.
author='Gregory Kuhn', # Optional
# This should be a valid email address corresponding to the author listed
# above.
author_email='gregorykuhn@gmail.com', # Optional
# Classifiers help users find your project by categorizing it.
#
# For a list of valid classifiers, see https://pypi.org/classifiers/
classifiers=[ # Optional
# How mature is this project? Common values are
# 3 - Alpha
# 4 - Beta
# 5 - Production/Stable
'Development Status :: 4 - Beta',
# Indicate who your project is intended for
'Intended Audience :: Developers',
'Topic :: Software Development :: Build Tools',
# Pick your license as you wish
'License :: OSI Approved :: MIT License',
# Specify the Python versions you support here. In particular, ensure
# that you indicate whether you support Python 2, Python 3 or both.
'Programming Language :: Python :: 3.6',
],
keywords='register bitfield registermap', # Optional
packages=['r_map'],
python_requires='>=3.6',
project_urls={ # Optional
'Bug Reports': 'https://github.com/mentaal/r_map/issues',
'Source': 'https://github.com/mentaal/r_map',
},
)
| 34.205882 | 89 | 0.676698 |
53288d2b29e82fc8c4f0e83a7806673cbfd64265 | 538 | py | Python | dont_worry.py | karianjahi/fahrer_minijob | 020a9de27b77f8e0bcdec198a37cfb7f1d4736ed | [
"MIT"
] | null | null | null | dont_worry.py | karianjahi/fahrer_minijob | 020a9de27b77f8e0bcdec198a37cfb7f1d4736ed | [
"MIT"
] | null | null | null | dont_worry.py | karianjahi/fahrer_minijob | 020a9de27b77f8e0bcdec198a37cfb7f1d4736ed | [
"MIT"
] | null | null | null |
if __name__ == "__main__":
    # Manual smoke test. `Teenager` (and `Hey`) are presumably defined earlier
    # in this module -- not visible here; TODO confirm constructor signature.
    #print(Hey().get_name())
    teen = Teenager("Joseph Njeri", 924, "www.fowr.gd")
    print(teen.website)
532b482d23a5934d3f01f1f60135af259bfe9eb5 | 449 | py | Python | tests/zoo/tree.py | dynalz/odmantic | f20f08f8ab1768534c1e743f7539bfe4f8c73bdd | [
"0BSD"
] | 486 | 2020-10-19T05:33:53.000Z | 2022-03-30T12:54:57.000Z | tests/zoo/tree.py | dynalz/odmantic | f20f08f8ab1768534c1e743f7539bfe4f8c73bdd | [
"0BSD"
] | 183 | 2020-10-19T18:15:25.000Z | 2022-03-31T04:59:21.000Z | tests/zoo/tree.py | dynalz/odmantic | f20f08f8ab1768534c1e743f7539bfe4f8c73bdd | [
"0BSD"
] | 53 | 2020-10-19T09:35:01.000Z | 2022-03-31T20:39:51.000Z | import enum
from typing import Dict, List
from odmantic.field import Field
from odmantic.model import Model
| 22.45 | 71 | 0.721604 |
532c5feeb5220f24428fb820adb0794dc7c4ef05 | 115,103 | py | Python | adanet/core/estimator_test.py | eustomaqua/adanet | 9c1de82428a4e661768af8e764041afebfec2e6f | [
"Apache-2.0"
] | null | null | null | adanet/core/estimator_test.py | eustomaqua/adanet | 9c1de82428a4e661768af8e764041afebfec2e6f | [
"Apache-2.0"
] | null | null | null | adanet/core/estimator_test.py | eustomaqua/adanet | 9c1de82428a4e661768af8e764041afebfec2e6f | [
"Apache-2.0"
] | null | null | null | """Test AdaNet estimator single graph implementation.
Copyright 2018 The AdaNet Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
https://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import json
import os
import time
from absl import logging
from absl.testing import parameterized
from adanet import replay
from adanet import tf_compat
from adanet.core import testing_utils as tu
from adanet.core.estimator import Estimator
from adanet.core.evaluator import Evaluator
from adanet.core.report_materializer import ReportMaterializer
from adanet.distributed.placement import RoundRobinStrategy
from adanet.ensemble import AllStrategy
from adanet.ensemble import ComplexityRegularizedEnsembler
from adanet.ensemble import GrowStrategy
from adanet.ensemble import MixtureWeightType
from adanet.ensemble import SoloStrategy
from adanet.subnetwork import Builder
from adanet.subnetwork import Generator
from adanet.subnetwork import MaterializedReport
from adanet.subnetwork import Report
from adanet.subnetwork import SimpleGenerator
from adanet.subnetwork import Subnetwork
from adanet.subnetwork import TrainOpSpec
import numpy as np
import tensorflow as tf
# pylint: disable=g-direct-tensorflow-import
from tensorflow.python.eager import context
from tensorflow.python.framework import test_util
from tensorflow.python.tools import saved_model_utils
# pylint: enable=g-direct-tensorflow-import
from tensorflow_estimator.python.estimator.canned.head import _binary_logistic_head_with_sigmoid_cross_entropy_loss as binary_class_head_v1
from tensorflow_estimator.python.estimator.export import export
from tensorflow_estimator.python.estimator.head import binary_class_head
from tensorflow_estimator.python.estimator.head import multi_head as multi_head_lib
from tensorflow_estimator.python.estimator.head import regression_head
logging.set_verbosity(logging.INFO)
# XOR truth table used as a tiny toy dataset throughout these tests.
XOR_FEATURES = [[1., 0.], [0., 0], [0., 1.], [1., 1.]]
XOR_LABELS = [[1.], [0.], [1.], [0.]]
class _AlwaysLastEvaluator(_FakeEvaluator):
  """Fake evaluator whose final candidate always wins."""

  def evaluate(self, sess, ensemble_metrics):
    """Always makes the last loss the smallest."""
    del sess  # Unused.
    losses = [np.inf for _ in ensemble_metrics]
    losses[-1] = 0.
    return losses
class _AlwaysSecondToLastEvaluator(_FakeEvaluator):
  """Fake evaluator whose second-to-last candidate always wins."""

  def evaluate(self, sess, ensemble_metrics):
    """Always makes the second to last loss the smallest."""
    del sess  # Unused.
    losses = [np.inf for _ in ensemble_metrics]
    losses[-2] = 0.
    return losses
class _EarlyStoppingHook(tf_compat.SessionRunHook):
"""Hook that immediately requests training to stop."""
class EstimatorTest(tu.AdanetTestCase):

  def test_binary_head_asserts_are_disabled(self):
    """Tests b/140267630."""
    # Generator yields one healthy subnetwork plus one that (judging by its
    # name) produces a NaN loss -- TODO confirm against _NanLossBuilder.
    subnetwork_generator = SimpleGenerator([
        _DNNBuilder("dnn"),
        _NanLossBuilder(),
    ])
    estimator = Estimator(
        head=binary_class_head_v1(),
        subnetwork_generator=subnetwork_generator,
        max_iteration_steps=10,
        model_dir=self.test_subdirectory)
    eval_input_fn = tu.dummy_input_fn([[1., 0.]], [[1.]])
    # Should complete without raising, i.e. the head's internal asserts must
    # not fire during evaluation.
    estimator.evaluate(input_fn=eval_input_fn, steps=1)
class KerasCNNBuilder(Builder):
  """Builds a CNN subnetwork for AdaNet."""

  def __init__(self, learning_rate, seed=42):
    """Initializes a `KerasCNNBuilder`.

    Args:
      learning_rate: The float learning rate to use.
      seed: The random seed.

    Returns:
      An instance of `KerasCNNBuilder`.
    """
    self._learning_rate = learning_rate
    self._seed = seed

  def build_subnetwork(self,
                       features,
                       logits_dimension,
                       training,
                       iteration_step,
                       summary,
                       previous_ensemble=None):
    """See `adanet.subnetwork.Builder`."""
    seed = self._seed
    # Vary the seed with the ensemble size so each new subnetwork is
    # initialized differently from its predecessors.
    if previous_ensemble:
      seed += len(previous_ensemble.weighted_subnetworks)
    images = list(features.values())[0]
    # Reshape each 4-element feature vector into a 2x2 single-channel image.
    images = tf.reshape(images, [-1, 2, 2, 1])
    kernel_initializer = tf_compat.v1.keras.initializers.he_normal(seed=seed)
    x = tf.keras.layers.Conv2D(
        filters=3,
        kernel_size=1,
        padding="same",
        activation="relu",
        kernel_initializer=kernel_initializer)(
            images)
    x = tf.keras.layers.MaxPool2D(pool_size=2, strides=1)(x)
    x = tf.keras.layers.Flatten()(x)
    x = tf.keras.layers.Dense(
        units=3, activation="relu", kernel_initializer=kernel_initializer)(
            x)
    logits = tf_compat.v1.layers.Dense(
        units=1, activation=None, kernel_initializer=kernel_initializer)(
            x)
    # Constant complexity: every candidate is treated as equally complex.
    complexity = tf.constant(1)
    return Subnetwork(
        last_layer=x,
        logits=logits,
        complexity=complexity,
        persisted_tensors={})
def _check_eventfile_for_keyword(keyword, dir_):
  """Checks event files for the keyword.

  Scans the most recent TF events file in `dir_` and returns the first
  summary value whose tag equals `keyword`:
    * simple_value summaries -> the float value.
    * image summaries        -> a (height, width, colorspace) tuple.
    * tensor summaries       -> the tensor's string values.

  Raises:
    ValueError: If the directory, the events file, or the keyword cannot be
      found.
  """
  tf_compat.v1.summary.FileWriterCache.clear()
  if not tf.io.gfile.exists(dir_):
    raise ValueError("Directory '{}' not found.".format(dir_))
  # Get last `Event` written.
  filenames = os.path.join(dir_, "events*")
  event_paths = tf.io.gfile.glob(filenames)
  if not event_paths:
    raise ValueError("Path '{}' not found.".format(filenames))
  for last_event in tf_compat.v1.train.summary_iterator(event_paths[-1]):
    if last_event.summary is not None:
      for value in last_event.summary.value:
        if keyword == value.tag:
          if value.HasField("simple_value"):
            return value.simple_value
          if value.HasField("image"):
            return (value.image.height, value.image.width,
                    value.image.colorspace)
          if value.HasField("tensor"):
            return value.tensor.string_val
  raise ValueError("Keyword '{}' not found in path '{}'.".format(
      keyword, filenames))
def _mean_keras_metric(value):
  """Returns the mean of given value as a Keras metric."""
  # Fold the single value into a fresh Mean metric and hand it back.
  metric = tf.keras.metrics.Mean()
  metric.update_state(value)
  return metric
def _dummy_feature_dict_input_fn(features, labels):
"""Returns an input_fn that returns feature and labels `Tensors`."""
return _input_fn
if __name__ == "__main__":
  # Run every test case defined in this module.
  tf.test.main()
| 34.83747 | 139 | 0.57326 |
532c9a004feae83c4d1b5c9bdf050c58af603c9f | 1,473 | py | Python | ua_roomseeker/uploader.py | nyg1/classroom-finder | 13b6332187c2afb9833a1acd82bdf31ab81af5c8 | [
"MIT"
] | 1 | 2020-08-29T22:04:17.000Z | 2020-08-29T22:04:17.000Z | ua_roomseeker/uploader.py | nyg1/classroom-finder | 13b6332187c2afb9833a1acd82bdf31ab81af5c8 | [
"MIT"
] | 1 | 2020-02-17T05:18:36.000Z | 2020-02-17T05:18:36.000Z | ua_roomseeker/uploader.py | nyg1/UAroomseeker | 13b6332187c2afb9833a1acd82bdf31ab81af5c8 | [
"MIT"
] | 2 | 2020-08-29T22:04:22.000Z | 2020-09-07T18:01:46.000Z | from seeker.models import Building, Classroom, Time
import json
import os
# Bulk-load building/classroom/time records from the JSON files in ../data
# into the Django models. Must run inside `python manage.py shell` (see the
# note at the bottom of this file).
os.chdir('../data')
fileList = os.listdir()
#loops through each json file
for jsonfile in fileList:
    #opens the jsonfile and loads the data
    f = open(jsonfile, 'r')
    data = f.read()
    jsondata = json.loads(data)
    #create the building, named after the file (extension stripped)
    building = Building(BuildingName=os.path.splitext(jsonfile)[0])
    building.save()
    for day in jsondata:
        for room in jsondata[day].keys():
            #creates each classroom, adding one only if one doesn't exist
            #NOTE(review): get_or_create returns a (object, created) tuple;
            #only the DB side effect is used, the tuple itself is discarded.
            classroom = Classroom.objects.get_or_create(building = Building.objects.get(BuildingName = os.path.splitext(jsonfile)[0]), ClassroomName = os.path.splitext(jsonfile)[0] + ' - ' + room)
            for time in jsondata[day][room]:
                #creates each time record
                #NOTE(review): the loop variable `time` is rebound to the
                #model instance here -- harmless, but confirm intentional.
                time = Time(building=Building.objects.get(BuildingName = os.path.splitext(jsonfile)[0]), classroom=Classroom.objects.get(ClassroomName = os.path.splitext(jsonfile)[0] + ' - ' + room), DayofWeek=day, TimeValue=time)
                time.save()
# This program must be run inside a python manage.py shell for it to work, in the future a fix may be found,
# but for the time being, follow these steps:
# 1. open powershell and navigate to the folder that contains this file
# 2. type in "python manage.py shell"
# 3. copy and paste the code into the shell and press enter
# 4. wait time is around 5 minutes
| 43.323529 | 230 | 0.681602 |
532d64eb017b0350df305fd05c57bebebc901080 | 6,729 | py | Python | dash_daq/Slider.py | luiztauffer/dash-daq | 4975093449bdc4d7ff4cd366ac82a847cdf24c34 | [
"MIT"
] | null | null | null | dash_daq/Slider.py | luiztauffer/dash-daq | 4975093449bdc4d7ff4cd366ac82a847cdf24c34 | [
"MIT"
] | null | null | null | dash_daq/Slider.py | luiztauffer/dash-daq | 4975093449bdc4d7ff4cd366ac82a847cdf24c34 | [
"MIT"
] | null | null | null | # AUTO GENERATED FILE - DO NOT EDIT
from dash.development.base_component import Component, _explicitize_args
| 38.895954 | 651 | 0.693862 |
532e406ed64b6d506b797cd81b981fdce20419b3 | 353 | py | Python | src/util/__init__.py | ooshyun/filterdesign | 59dbea191b8cd44aa9f2d02d3787b5805d486ae2 | [
"MIT"
] | 1 | 2021-12-27T00:38:32.000Z | 2021-12-27T00:38:32.000Z | src/util/__init__.py | ooshyun/FilterDesign | 7162ccad8e1ae8aebca370da56be56603b9e8b24 | [
"MIT"
] | null | null | null | src/util/__init__.py | ooshyun/FilterDesign | 7162ccad8e1ae8aebca370da56be56603b9e8b24 | [
"MIT"
] | null | null | null | """Utility function for process to raw data
"""
from .util import (
cvt_pcm2wav,
cvt_float2fixed,
cvt_char2num,
plot_frequency_response,
plot_pole_zero_analysis,
)
from .fi import fi
__all__ = [
"fi",
"cvt_pcm2wav",
"cvt_float2fixed",
"cvt_char2num",
"plot_frequency_response",
"plot_pole_zero_analysis",
]
| 16.809524 | 43 | 0.677054 |
532f2625149c93f751d40a68a85af35c606a9f80 | 1,192 | py | Python | infra/apps/catalog/tests/views/distribution_upload_tests.py | datosgobar/infra.datos.gob.ar | 9f6ae7f0fc741aad79d074e7b2eb2a7dddf8b2cf | [
"MIT"
] | 1 | 2019-07-01T19:38:52.000Z | 2019-07-01T19:38:52.000Z | infra/apps/catalog/tests/views/distribution_upload_tests.py | datosgobar/infra.datos.gob.ar | 9f6ae7f0fc741aad79d074e7b2eb2a7dddf8b2cf | [
"MIT"
] | 77 | 2019-05-27T18:16:30.000Z | 2021-09-20T21:25:24.000Z | infra/apps/catalog/tests/views/distribution_upload_tests.py | datosgobar/infra.datos.gob.ar | 9f6ae7f0fc741aad79d074e7b2eb2a7dddf8b2cf | [
"MIT"
] | 3 | 2019-12-09T16:38:18.000Z | 2020-10-30T02:10:20.000Z | import pytest
from django.core.files import File
from django.urls import reverse
from freezegun import freeze_time
from infra.apps.catalog.tests.helpers.open_catalog import open_catalog
pytestmark = pytest.mark.django_db
| 34.057143 | 78 | 0.72651 |
5330d3c71a4dca71ef0aca045f8b4a15a601bd18 | 3,494 | py | Python | examples/model_zoo/build_binaries.py | Embracing/unrealcv | 19305da8554c3a0e683a5e27a1e487cc2cf42776 | [
"MIT"
] | 1,617 | 2016-09-10T04:41:33.000Z | 2022-03-31T20:03:28.000Z | examples/model_zoo/build_binaries.py | Embracing/unrealcv | 19305da8554c3a0e683a5e27a1e487cc2cf42776 | [
"MIT"
] | 199 | 2016-09-13T09:40:59.000Z | 2022-03-16T02:37:23.000Z | examples/model_zoo/build_binaries.py | Embracing/unrealcv | 19305da8554c3a0e683a5e27a1e487cc2cf42776 | [
"MIT"
] | 431 | 2016-09-10T03:20:35.000Z | 2022-03-19T13:44:21.000Z | import subprocess, os
# Per-platform UE4 engine install locations (hard-coded for the author's
# machines).
ue4_win = r"C:\Program Files\Epic Games\UE_4.16"
ue4_linux = "/home/qiuwch/workspace/UE416"
ue4_mac = '/Users/Shared/Epic Games/UE_4.16'

# .uproject files to package, one list per platform.
win_uprojects = [
    r'C:\qiuwch\workspace\uprojects\UE4RealisticRendering\RealisticRendering.uproject',
    r'C:\qiuwch\workspace\uprojects\UE4ArchinteriorsVol2Scene1\ArchinteriorsVol2Scene1.uproject',
    r'C:\qiuwch\workspace\uprojects\UE4ArchinteriorsVol2Scene2\ArchinteriorsVol2Scene2.uproject',
    r'C:\qiuwch\workspace\uprojects\UE4ArchinteriorsVol2Scene3\ArchinteriorsVol2Scene3.uproject',
    r'C:\qiuwch\workspace\uprojects\UE4UrbanCity\UrbanCity.uproject',
    r'D:\workspace\uprojects\Matinee\Matinee.uproject',
    r'D:\workspace\uprojects\PhotorealisticCharacter\PhotorealisticCharacter2.uproject',
]

linux_uprojects = [
    os.path.expanduser('~/workspace/uprojects/UE4RealisticRendering/RealisticRendering.uproject'),
    os.path.expanduser('~/workspace/uprojects/UE4ArchinteriorsVol2Scene1/ArchinteriorsVol2Scene1.uproject'),
    os.path.expanduser('~/workspace/uprojects/UE4ArchinteriorsVol2Scene2/ArchinteriorsVol2Scene2.uproject'),
    os.path.expanduser('~/workspace/uprojects/UE4ArchinteriorsVol2Scene3/ArchinteriorsVol2Scene3.uproject'),
    os.path.expanduser("~/workspace/uprojects/UE4UrbanCity/UrbanCity.uproject"),
]

mac_uprojects = [
    os.path.expanduser('~/workspace/UnrealEngine/Templates/FP_FirstPerson/FP_FirstPerson.uproject'),
    os.path.expanduser('~/uprojects/RealisticRendering/RealisticRendering.uproject'),
    os.path.expanduser('~/uprojects/UE4ArchinteriorsVol2Scene1/ArchinteriorsVol2Scene1.uproject'),
    os.path.expanduser('~/uprojects/UE4ArchinteriorsVol2Scene2/ArchinteriorsVol2Scene2.uproject'),
    os.path.expanduser('~/uprojects/UE4ArchinteriorsVol2Scene3/ArchinteriorsVol2Scene3.uproject'),
    os.path.expanduser('~/uprojects/UE4UrbanCity/UrbanCity.uproject'),
]
uprojects = []
# Build one job record per project for each platform. The per-platform log
# filename patterns match the originals exactly.
for project_paths, engine_path, log_fmt in (
        (win_uprojects, ue4_win, 'log/win_%s.log'),
        (linux_uprojects, ue4_linux, 'log/linux_%s.log'),
        (mac_uprojects, ue4_mac, 'log/mac_%s.log'),
):
    for uproject_path in project_paths:
        uproject_name = os.path.basename(uproject_path).split('.')[0]
        uprojects.append(
            dict(
                uproject_path = uproject_path,
                ue4_path = engine_path,
                log_file = log_fmt % uproject_name
            ),
        )
if __name__ == '__main__':
    # Invoke build.py once per project, capturing its output in a per-project
    # log file and echoing the tail of that log to the console.
    for uproject in uprojects:
        uproject_path = uproject['uproject_path']
        if not os.path.isfile(uproject_path):
            print("Can not find uproject file %s, skip this project" % uproject_path)
            continue
        cmd = [
            'python', 'build.py',
            '--UE4', uproject['ue4_path'],
            # '--output', uproject['output_folder'],
            uproject['uproject_path']
        ]
        print(cmd)
        # Fix: the original passed `open(..., 'w')` directly to subprocess and
        # never closed it, leaking a file descriptor per project.
        with open(uproject['log_file'], 'w') as log_file:
            subprocess.call(cmd, stdout=log_file)
        with open(uproject['log_file']) as f:
            lines = f.readlines()
        print(''.join(lines[-10:]))  # Print the last few lines
53315defe5a40f6e5f9bc740259ebb1dfe1b3225 | 3,515 | py | Python | __init__.py | NeonJarbas/skill-ddg | 48476ad650e72f68ee7e96dd92c6d18f841ce6ec | [
"Apache-2.0"
] | null | null | null | __init__.py | NeonJarbas/skill-ddg | 48476ad650e72f68ee7e96dd92c6d18f841ce6ec | [
"Apache-2.0"
] | null | null | null | __init__.py | NeonJarbas/skill-ddg | 48476ad650e72f68ee7e96dd92c6d18f841ce6ec | [
"Apache-2.0"
] | null | null | null | # Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from ovos_utils.gui import can_use_gui
from adapt.intent import IntentBuilder
from mycroft.skills.common_query_skill import CommonQuerySkill, CQSMatchLevel
from mycroft.skills.core import intent_handler
from neon_solver_ddg_plugin import DDGSolver
    def CQS_action(self, phrase, data):
        """ If selected show gui

        Called by the common-query framework after this skill's answer wins;
        renders the cached DuckDuckGo answer and image on the GUI.
        """
        self.display_ddg(data["answer"], data["image"])
# duck duck go api
def create_skill():
    """Entry point used by the skill loader to instantiate this skill."""
    return DuckDuckGoSkill()
| 35.867347 | 82 | 0.62276 |
533191e519d8de6668af0108951a5877a4213bac | 6,639 | py | Python | openstack_lease_it/openstack_lease_it/settings.py | LAL/openstack-lease-it | 4ff983911825eac886fa6f76d6efc25225a698b7 | [
"Apache-2.0"
] | null | null | null | openstack_lease_it/openstack_lease_it/settings.py | LAL/openstack-lease-it | 4ff983911825eac886fa6f76d6efc25225a698b7 | [
"Apache-2.0"
] | 11 | 2017-04-13T16:48:16.000Z | 2017-11-22T08:13:39.000Z | openstack_lease_it/openstack_lease_it/settings.py | LAL/openstack-lease-it | 4ff983911825eac886fa6f76d6efc25225a698b7 | [
"Apache-2.0"
] | 3 | 2017-04-06T09:08:40.000Z | 2021-05-25T08:15:00.000Z | """
Django settings for openstack_lease_it project.
Generated by 'django-admin startproject' using Django 1.8.7.
For more information on this file, see
https://docs.djangoproject.com/en/1.8/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.8/ref/settings/
"""
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
import os
import ast
import logging
from openstack_lease_it.config import GLOBAL_CONFIG, load_config

# Absolute path of the repository root (two directory levels above this file).
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))

# Populate GLOBAL_CONFIG from the on-disk configuration before any of the
# settings below read from it.
load_config()

# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = GLOBAL_CONFIG['DJANGO_SECRET_KEY']

# SECURITY WARNING: don't run with debug turned on in production!
# The configured value is a string such as "True"; literal_eval converts it
# to the corresponding Python boolean.
DEBUG = ast.literal_eval(GLOBAL_CONFIG['DJANGO_DEBUG'])

# Hosts/domains this Django site may serve; ALLOWED_HOSTS restricts access.
ALLOWED_HOSTS = []

# An e-mail address must match this regular expression to be considered valid.
EMAIL_REGEXP = r"^[A-Za-z0-9\.\+_-]+@[A-Za-z0-9\.-]+\.[A-Za-z]*$"
# Application definition

# Installed applications: openstack_auth provides Keystone-backed login,
# lease_it is this project's main application.
INSTALLED_APPS = (
    'openstack_auth',
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',
    'openstack_lease_it',
    'lease_it',
)

# Request/response middleware, applied in order (pre-Django-1.10 setting name).
MIDDLEWARE_CLASSES = (
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.locale.LocaleMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
    'django.middleware.security.SecurityMiddleware',
)
# Root URL configuration module for the project.
ROOT_URLCONF = 'openstack_lease_it.urls'

# Standard Django template engine; templates are loaded from each
# application's "templates" directory (APP_DIRS=True).
TEMPLATES = [
    {
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        'DIRS': [],
        'APP_DIRS': True,
        'OPTIONS': {
            'context_processors': [
                'django.template.context_processors.debug',
                'django.template.context_processors.request',
                'django.contrib.auth.context_processors.auth',
                'django.contrib.messages.context_processors.messages',
            ],
        },
    },
]

WSGI_APPLICATION = 'openstack_lease_it.wsgi.application'

# Database
# https://docs.djangoproject.com/en/1.8/ref/settings/#databases
# SQLite file stored alongside the project sources.
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.sqlite3',
        'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
    }
}
# Internationalization
# https://docs.djangoproject.com/en/1.8/topics/i18n/
LANGUAGE_CODE = 'en'
TIME_ZONE = 'Europe/Paris'
USE_I18N = True
USE_L10N = True
USE_TZ = True
DEFAULT_CHARSET = 'utf-8'

# Sessions live in the cache backend configured below (memcached).
SESSION_ENGINE = 'django.contrib.sessions.backends.cache'
CACHES = {
    'default': {
        'BACKEND': 'django.core.cache.backends.memcached.MemcachedCache',
        'LOCATION': '{MEMCACHED_HOST}:{MEMCACHED_PORT}'.format(**GLOBAL_CONFIG),
    }
}

# NOTE(review): session cookies are also sent over plain HTTP; set to True
# once the service is only reachable over HTTPS.
SESSION_COOKIE_SECURE = False
# Session lifetime, in seconds.
SESSION_TIMEOUT = 1800

# A token can be near the end of validity when a page starts loading, and
# invalid during the rendering which can cause errors when a page load.
# TOKEN_TIMEOUT_MARGIN defines a time in seconds we retrieve from token
# validity to avoid this issue. You can adjust this time depending on the
# performance of the infrastructure.
TOKEN_TIMEOUT_MARGIN = 100

# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.8/howto/static-files/
STATIC_URL = '/static/'
LOGIN_URL = 'login'
LOGOUT_URL = 'logout'
LOGIN_REDIRECT_URL = '/'

# NOTE(review): PickleSerializer can execute arbitrary code when unpickling
# session data, so SECRET_KEY must stay private; presumably required by the
# session objects django-openstack_auth stores — confirm before changing.
SESSION_SERIALIZER = 'django.contrib.sessions.serializers.PickleSerializer'
if GLOBAL_CONFIG['BACKEND_PLUGIN'] == 'Openstack':
    # django-openstack_auth needs its own User model.
    AUTH_USER_MODEL = 'openstack_auth.User'
    # Keystone endpoint used for authentication.
    OPENSTACK_KEYSTONE_URL = GLOBAL_CONFIG['OS_AUTH_URL']
    # Identity (Keystone) API version, taken from the configuration (v3).
    OPENSTACK_API_VERSIONS = {
        "identity": GLOBAL_CONFIG['OS_IDENTITY_API_VERSION'],
    }
    # Enable multi-domain Keystone support.
    OPENSTACK_KEYSTONE_MULTIDOMAIN_SUPPORT = True
    # Authenticate against Keystone first, then the local Django user table.
    AUTHENTICATION_BACKENDS = (
        'openstack_auth.backend.KeystoneBackend',
        'django.contrib.auth.backends.ModelBackend',
    )
else:
    # No Openstack backend configured: plain Django model authentication.
    AUTHENTICATION_BACKENDS = (
        'django.contrib.auth.backends.ModelBackend',
    )
# Configure logging: one FileHandler and one logger per subsystem, every one
# at the level configured in GLOBAL_CONFIG['DJANGO_LOGLEVEL'] and writing to
# <DJANGO_LOGDIR>/<name>.log with the same "simple" formatter.
_LOG_NAMES = ('django', 'main', 'notification', 'instances')

LOGGING = {
    'version': 1,
    'disable_existing_loggers': False,
    'formatters': {
        'simple': {
            'format': '%(levelname)s %(asctime)s: %(message)s'
        },
    },
    'handlers': {
        name: {
            'level': GLOBAL_CONFIG['DJANGO_LOGLEVEL'],
            'class': 'logging.FileHandler',
            'filename': os.path.join(GLOBAL_CONFIG['DJANGO_LOGDIR'],
                                     '{0}.log'.format(name)),
            'formatter': 'simple',
        }
        for name in _LOG_NAMES
    },
    'loggers': {
        name: {
            'handlers': [name],
            'level': GLOBAL_CONFIG['DJANGO_LOGLEVEL'],
            'propagate': True,
        }
        for name in _LOG_NAMES
    },
}

# Module-level loggers used throughout the project.
LOGGER = logging.getLogger('main')
LOGGER_NOTIFICATION = logging.getLogger('notification')
LOGGER_INSTANCES = logging.getLogger('instances')
| 31.023364 | 89 | 0.658081 |
533284cf96b1c69f9f29a622772bb5c570e08d44 | 3,619 | py | Python | rigl/experimental/jax/pruning/pruning.py | vishalbelsare/rigl | f18abc7d82ae3acc6736068408a0186c9efa575c | [
"Apache-2.0"
] | 276 | 2019-11-25T22:05:45.000Z | 2022-03-30T11:55:34.000Z | rigl/experimental/jax/pruning/pruning.py | vishalbelsare/rigl | f18abc7d82ae3acc6736068408a0186c9efa575c | [
"Apache-2.0"
] | 10 | 2020-02-26T14:53:50.000Z | 2021-09-08T16:27:28.000Z | rigl/experimental/jax/pruning/pruning.py | vishalbelsare/rigl | f18abc7d82ae3acc6736068408a0186c9efa575c | [
"Apache-2.0"
] | 54 | 2019-11-26T18:50:33.000Z | 2022-03-29T20:08:08.000Z | # coding=utf-8
# Copyright 2021 RigL Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
"""Functions for pruning FLAX masked models."""
import collections
from typing import Any, Callable, Mapping, Optional, Union
import flax
import jax.numpy as jnp
from rigl.experimental.jax.pruning import masked
def weight_magnitude(weights):
  """Returns the absolute value of each weight as its saliency score."""
  return jnp.abs(weights)
def prune(
    model,
    pruning_rate,
    saliency_fn = weight_magnitude,
    mask = None,
    compare_fn = jnp.greater):
  """Returns a mask for a model where the params in each layer are pruned using a saliency function.

  Args:
    model: The model to create a pruning mask for.
    pruning_rate: The fraction of lowest magnitude saliency weights that are
      pruned. If a float, the same rate is used for all layers, otherwise if it
      is a mapping, it must contain a rate for all masked layers in the model.
    saliency_fn: A function that returns a float number used to rank
      the importance of individual weights in the layer.
    mask: If the model has an existing mask, the mask will be applied before
      pruning the model.
    compare_fn: A pairwise operator to compare saliency with threshold, and
      return True if the saliency indicates the value should not be masked.

  Returns:
    A pruned mask for the given model.
  """
  if not mask:
    mask = masked.simple_mask(model, jnp.ones, masked.WEIGHT_PARAM_NAMES)

  # Normalize a scalar rate into a per-layer mapping so the loop below can
  # treat both calling conventions uniformly.
  # NOTE: collections.Mapping was removed in Python 3.10; collections.abc
  # is the correct location of the ABC on all supported versions.
  if not isinstance(pruning_rate, collections.abc.Mapping):
    pruning_rate = {
        # The layer name is the second-to-last component of the param path.
        param_name.split('/')[-2]: pruning_rate
        for param_name, _ in masked.iterate_mask(mask)
    }

  for param_path, param_mask in masked.iterate_mask(mask):
    split_param_path = param_path.split('/')
    layer_name = split_param_path[-2]
    param_name = split_param_path[-1]

    # If we don't have a pruning rate for the given layer, don't mask it.
    if layer_name in pruning_rate and mask[layer_name][param_name] is not None:
      param_value = model.params[layer_name][
          masked.MaskedModule.UNMASKED][param_name]

      # Here any existing mask is first applied to weight matrix, so weights
      # that are already pruned score zero saliency and stay pruned.
      # Note: need to check explicitly is not None for np array.
      if param_mask is not None:
        saliencies = saliency_fn(param_mask * param_value)
      else:
        saliencies = saliency_fn(param_value)

      # TODO: Use partition here (partial sort) instead of sort,
      # since it's O(N), not O(N log N), however JAX doesn't support it.
      sorted_param = jnp.sort(jnp.abs(saliencies.flatten()))

      # Index of the smallest saliency that survives pruning; clamp so a
      # pruning rate of 1.0 cannot index one past the end of the array.
      threshold_index = jnp.round(pruning_rate[layer_name] *
                                  sorted_param.size).astype(jnp.int32)
      threshold_index = jnp.minimum(threshold_index, sorted_param.size - 1)
      threshold = sorted_param[threshold_index]

      mask[layer_name][param_name] = jnp.array(
          compare_fn(saliencies, threshold), dtype=jnp.int32)

  return mask
| 37.697917 | 100 | 0.716773 |
5333f0221e4e3679432478592dc87f3e97d8ca99 | 3,643 | py | Python | venv/Lib/site-packages/dash_bootstrap_components/_components/CardLink.py | hanzzhu/chadle | ac1d63b0410bb43f3fab362bb00abfc2e8790b9d | [
"Apache-2.0"
] | null | null | null | venv/Lib/site-packages/dash_bootstrap_components/_components/CardLink.py | hanzzhu/chadle | ac1d63b0410bb43f3fab362bb00abfc2e8790b9d | [
"Apache-2.0"
] | null | null | null | venv/Lib/site-packages/dash_bootstrap_components/_components/CardLink.py | hanzzhu/chadle | ac1d63b0410bb43f3fab362bb00abfc2e8790b9d | [
"Apache-2.0"
] | null | null | null | # AUTO GENERATED FILE - DO NOT EDIT
from dash.development.base_component import Component, _explicitize_args
| 42.360465 | 349 | 0.705188 |
53341c91d6109f552f8886886b2f526f32484d2e | 731 | py | Python | plugins/hanlp_demo/hanlp_demo/zh/tf/train/train_ctb9_pos_electra.py | antfootAlex/HanLP | e8044b27ae1de54b9070db08549853d3ca8271e2 | [
"Apache-2.0"
] | 3 | 2022-03-07T08:33:16.000Z | 2022-03-07T08:38:08.000Z | plugins/hanlp_demo/hanlp_demo/zh/tf/train/train_ctb9_pos_electra.py | hushaoyun/HanLP | 967b52404c9d0adbc0cff2699690c127ecfca36e | [
"Apache-2.0"
] | null | null | null | plugins/hanlp_demo/hanlp_demo/zh/tf/train/train_ctb9_pos_electra.py | hushaoyun/HanLP | 967b52404c9d0adbc0cff2699690c127ecfca36e | [
"Apache-2.0"
] | null | null | null | # -*- coding:utf-8 -*-
# Author: hankcs
# Date: 2019-12-28 23:15
from hanlp.components.taggers.transformers.transformer_tagger_tf import TransformerTaggerTF
from tests import cdroot
cdroot()
tagger = TransformerTaggerTF()
save_dir = 'data/model/pos/ctb9_electra_small_zh_epoch_20'
tagger.fit('data/pos/ctb9/train.tsv',
'data/pos/ctb9/test.tsv',
save_dir,
transformer='hfl/chinese-electra-small-discriminator',
max_seq_length=130,
warmup_steps_ratio=0.1,
epochs=20,
learning_rate=5e-5)
tagger.load(save_dir)
print(tagger(['', '', '', '', '', '']))
tagger.evaluate('data/pos/ctb9/test.tsv', save_dir=save_dir)
print(f'Model saved in {save_dir}')
| 33.227273 | 91 | 0.679891 |
533456ef85893ecb35c41dc38df64614c652cb8f | 768 | py | Python | src/app/main.py | Wedding-APIs-System/Backend-APi | 5a03be5f36ce8ca7e3abba2d64b63c55752697f3 | [
"MIT"
] | null | null | null | src/app/main.py | Wedding-APIs-System/Backend-APi | 5a03be5f36ce8ca7e3abba2d64b63c55752697f3 | [
"MIT"
] | null | null | null | src/app/main.py | Wedding-APIs-System/Backend-APi | 5a03be5f36ce8ca7e3abba2d64b63c55752697f3 | [
"MIT"
] | null | null | null | from fastapi import FastAPI
from fastapi.middleware.cors import CORSMiddleware
from app.api import landing, login, attendance_confirmation
from sql_app.database import orm_connection
app = FastAPI(title="Sergio's wedding backend API",
description="REST API which serves login, attendance confirmation and other features",
version="1.0",)
origins = [
"*"
# "http://190.96.140.12:5500",
# "68.251.63.208"
]
app.add_middleware(
CORSMiddleware,
allow_origins=origins,
allow_credentials=True,
allow_methods=["*"],
allow_headers=["*"],
)
app.include_router(landing.router)
app.include_router(login.router)
app.include_router(attendance_confirmation.router)
| 20.210526 | 90 | 0.71875 |
5334ff647ed3cefd61f9291666af5ce5a96e862e | 1,800 | py | Python | tests/test_pydora/test_utils.py | NextGenTechBar/twandora | f626717a5580f82250bbe66d4ebc357e0882382c | [
"MIT"
] | null | null | null | tests/test_pydora/test_utils.py | NextGenTechBar/twandora | f626717a5580f82250bbe66d4ebc357e0882382c | [
"MIT"
] | null | null | null | tests/test_pydora/test_utils.py | NextGenTechBar/twandora | f626717a5580f82250bbe66d4ebc357e0882382c | [
"MIT"
] | null | null | null | from unittest import TestCase
from pandora.client import APIClient
from pandora.errors import InvalidAuthToken, ParameterMissing
from pandora.models.pandora import Station, AdItem, PlaylistItem
from pandora.py2compat import Mock, patch
from pydora.utils import iterate_forever
| 45 | 124 | 0.693333 |