blob_id
stringlengths
40
40
directory_id
stringlengths
40
40
path
stringlengths
3
281
content_id
stringlengths
40
40
detected_licenses
listlengths
0
57
license_type
stringclasses
2 values
repo_name
stringlengths
6
116
snapshot_id
stringlengths
40
40
revision_id
stringlengths
40
40
branch_name
stringclasses
313 values
visit_date
timestamp[us]
revision_date
timestamp[us]
committer_date
timestamp[us]
github_id
int64
18.2k
668M
star_events_count
int64
0
102k
fork_events_count
int64
0
38.2k
gha_license_id
stringclasses
17 values
gha_event_created_at
timestamp[us]
gha_created_at
timestamp[us]
gha_language
stringclasses
107 values
src_encoding
stringclasses
20 values
language
stringclasses
1 value
is_vendor
bool
2 classes
is_generated
bool
2 classes
length_bytes
int64
4
6.02M
extension
stringclasses
78 values
content
stringlengths
2
6.02M
authors
listlengths
1
1
author
stringlengths
0
175
6e3180bc027c26aa13a8dc0a405036ae94a748b9
a2220e9509e3a0ed1b5435aeac0130eb09377aae
/tools/repostats/stats.py
8aa8815a039581f8e7c90a89cfc2c23b182ad198
[]
no_license
librefonts/collection
fe1f3ec9064c5b01137732fdc543dff977be4119
817817628b388b621a699c1d4623201583836438
refs/heads/master
2016-09-05T21:46:04.909016
2015-01-27T19:51:34
2015-01-27T19:51:34
13,711,616
6
0
null
null
null
null
UTF-8
Python
false
false
984
py
# stats - get stats on the given repo # From # http://www.selenic.com/repo/python-hglib/file/tip/examples/stats.py # # Copyright (c) Matt Mackall <mpm@selenic.com> # Copyright (c) Dave Crossland <dave@understandinglimited.com> # MIT license @ http://www.selenic.com/repo/python-hglib/file/tip/LICENSE import sys import hglib # figure out what repo path to use repo = '.' if len(sys.argv) > 1: repo = sys.argv[1] # connect to hg client = hglib.open(repo) # gather some stats revs = int(client.tip().rev) files = len(list(client.manifest())) heads = len(client.heads()) branches = len(client.branches()) tags = len(client.tags()) - 1 # don't count tip authors = {} for e in client.log(): authors[e.author] = True merges = 0 for e in client.log(onlymerges=True): merges += 1 print "%d revisions" % revs print "%d merges" % merges print "%d files" % files print "%d heads" % heads print "%d branches" % branches print "%d tags" % tags print "%d authors" % len(authors)
[ "m@xen.ru" ]
m@xen.ru
43762e6631bb0431b80bd2656e2d2522d44b3bed
3d228d5eac44b31d460dd81767b43309b7356577
/extra/graph/company_tree.py
f1637b2f1a04bcbe1902f64e15364798ff383c47
[ "BSD-3-Clause" ]
permissive
lsbardel/mathfun
da65a6f09faacdb4815111dae287c9b974acf928
98e7c210409c2b5777e91059c3651cef4f3045dd
refs/heads/master
2021-05-02T08:56:05.565539
2020-07-30T09:14:04
2020-07-30T09:14:04
26,242,622
0
0
null
null
null
null
UTF-8
Python
false
false
41
py
from mathfun.graph.template import Graph
[ "luca@quantmind.com" ]
luca@quantmind.com
1cd64aeac36e0dc10aa204505f3bf8ccab4a83ad
8cb27c172442021165fa386106bf733959c8be71
/src/Splunkmath/mathinsplunk.py
a6a387dd07621475fd5cb9d4c34616b2a92e2abd
[]
no_license
dbreddyAI/SplunkML
15da618bdd33368fec022f10e286ff570037a8e4
d0147caf0f666b9c87c4c17f275ba52d4ec32e8c
refs/heads/master
2020-12-07T17:28:34.691184
2014-09-06T17:47:31
2014-09-06T17:47:31
232,760,661
1
0
null
2020-01-09T08:33:11
2020-01-09T08:33:10
null
UTF-8
Python
false
false
5,392
py
''' mathinsplunk module: functions to pull math out of splunk (i.e, after one of these functions are called, no more splunkvector operations can be done) uses things like stats avg,count,etc. Meant to be numpy-like. ''' import splunklib.client as client import splunklib.results as results import numpy as np # from ..classes import SplunkArray from utils.strings import * from numpyfuncs import * def case_mapping(mapping, index_field, output_field): ''' adds a string output_field which is equal to mapping[index_field] assumes index_field contains a single element that can be indexed by mapping ''' string = 'eval %s = case(%s)' % (output_field, ','.join(['%s == %s,"%s"' % (index_field, elem, mapping[elem]) for elem in mapping])) return string def search_to_numpy_reps(splunk_search, feature_mapping, class_field, type_tuple): ''' turns a search that returns multiple events into an X, y numpy representation type_tuple must have, in both indices, 'continouous' or 'discrete' returns X, y ''' search_kwargs = {'timeout':1000, 'exec_mode':'blocking'} job = self.jobs.create(splunk_search, **search_kwargs) return events_to_numpy_reps(job, feature_mapping, class_field, type_tuple) def job_to_numpy_reps(job, feature_mapping, class_field, type_tuple, bias=False): ''' turns a job that returns multiple events into an X, y numpy representation. 
type_tuple must have, in both indices, 'continouous' or 'discrete' returns X, y note: if only X is required (no y), for now just pass one of the X values into class field and disregard it ''' # find correct numpy reps func to_numpy_reps_func = find_correct_to_numpy_reps_func(type_tuple) # iterate through events in the job, filling "X" and "y" X = [] y = [] offset = 0 result_count = int(job["resultCount"]) count = 50 while (offset < int(result_count)): kwargs_paginate = {'count': count, 'offset':offset} search_results = job.results(**kwargs_paginate) for result in results.ResultsReader(search_results): try: x, curr_y = to_numpy_reps_func(result, feature_mapping, class_field, bias=bias) except: print "couldn't find something in" print result X.append(x) y.append(curr_y) offset += count X = np.array(X, dtype=np.float) y = np.array(y, dtype=np.float) return X, y def find_correct_to_numpy_reps_func(type_tuple): if type_tuple == ('continuous', 'continuous'): return event_to_numpy_reps_continuous_continuous else: raise NotImplementedError def event_to_numpy_reps_continuous_continuous(event, feature_mapping, class_field, bias=False): ''' turns an event into a numpy rep X, y, where it is assumed that all teh values in both X and y are event_to_numpy_reps_continuous_continuous returns x, y ''' if bias: # the last x term is the bias term, always 1 x = np.zeros(len(feature_mapping)+1) x[len(feature_mapping)] = 1 else: x = np.zeros(len(feature_mapping)) for feature in feature_mapping: x[feature_mapping[feature]] = event[feature] y = event[class_field] return x, y #WIP# def pull_sa_out(search_string, sa, jobs): ''' pulls the given splunk array out of splunk and into a numpy array params: - search_string: search string that returns correct events in splunk - sa: splunk array to pull into numpy array - jobs: a splunk 'jobs' object to run the job returns: - numpy array corresponding to the splunk array contents after the search is return notes: - splunk array is assumed to have 
finished with some form of a stats command, so that 'events' no longer exist in the search ''' # initialize the splunk search splunk_search = 'search %s' % search_string # add the splunk array's string splunk_search = splunk_concat(splunk_search, sa.string) # run the search search_kwargs = {'timeout':1000, 'exec_mode':'blocking'} job = self.jobs.create(splunk_search, **search_kwargs) # read the results #WIP# def avg_over_events_by_field(sa, field): ''' finds the average over events of the elements of sa, by field 'field'. params: - sa: splunk array to find average of (average is done elementwise, across events) - field: what to average by returns: - splunk vector ready to be pulled out of splunk (elements are averages over events) notes: - WARNING: this function uses stats, so no more mathinsplunk operations can be used ''' avg_string = 'stats ' new_elems = zeros(sa.shape) for i,j in sa.iterable(): field = sa.elems[i][j] avg_string += 'avg(%s) as %s_avg, ' % (field, field) new_elems[i][j] = field + '_avg' new_sa = SplunkArray(time_hash(), sa.shape) new_sa.string = splunk_concat(sa.string, avg_string) new_sa.elems = new_elems return new_sa def avg_over_events(sa): ''' finds the average over events of the elements of sa. params: - sa: splunk array to find average of (average is done elementwise, across events) returns: - splunk vector ready to be pulled out of splunk (elements are averages over events) notes: - WARNING: this function uses stats, so no more mathinsplunk operations can be used ''' avg_string = 'stats ' new_elems = zeros(sa.shape) for i,j in sa.iterable(): field = sa.elems[i][j] avg_string += 'avg(%s) as %s_avg ' % (field, field) new_elems[i][j] = field + '_avg' new_sa = SplunkArray(time_hash(), sa.shape) new_sa.string = splunk_concat(sa.string, avg_string) new_sa.elems = new_elems return new_sa
[ "ankitk@stanford.edu" ]
ankitk@stanford.edu
3ffc5f6b56cf38ad702bf0fbbf6902e8b656686c
aec5c21025e849ca85c60130ee027dc76d04d8ba
/hola.py
1107ff1a1f1a61fef442614613429a6e02c9df6e
[]
no_license
chacar3/curso
2d7f3ac0578bd20cdfae1cbed57317ace54ecc5b
61f45cc5710152083368ae341cee9fc55723fd96
refs/heads/main
2023-08-22T22:08:53.802558
2021-10-19T21:46:43
2021-10-19T21:46:43
418,994,809
0
0
null
null
null
null
UTF-8
Python
false
false
135
py
print("Hola Mundo") def fib(n): a, b = 0, 1 while a < n: print(a, end=' ') a, b = b, a+b fib(100)
[ "noreply@github.com" ]
noreply@github.com
1a8961c8a1590e97a75c83f4bdef0ec6c3a48893
4abda661304ee18724e28a0243ccbb7c9e359871
/gasoline/models/comment.py
f476c6a625e222dc9a71e9738fc0f51f443d5e79
[]
no_license
sfirmery/gasoline
62ee0c6a4c6c36db64abecbac2f6702b141e6d80
7ff5410cb677ecf213280dc4752c9a2e61a513f9
refs/heads/master
2020-04-21T04:20:08.523056
2014-04-01T22:13:10
2014-04-01T22:13:10
null
0
0
null
null
null
null
UTF-8
Python
false
false
484
py
# -*- coding: utf-8 -*- from datetime import datetime import markdown2 from gasoline.core.extensions import db from gasoline.core.signals import event, activity from .user import User class Comment(db.EmbeddedDocument): author = db.ReferenceField(User) date = db.DateTimeField(default=datetime.utcnow) content = db.StringField() reply = db.ListField(db.EmbeddedDocumentField("Comment")) def __repr__(self): return '<Comment author=%s>' % self.author
[ "sylvain@firmery.fr" ]
sylvain@firmery.fr
057695d4910d814affa1cef49fbca93b9b520c88
df690ac0484ff04cb63f71f528a9d0a0e557d6a3
/.history/ws_20210608130810.py
59216ed4c38672800e718b0909e4e451e853a45b
[]
no_license
khanhdk0000/Mqtt-Web-Socket
437777c740c68d4197353e334f6fe6a629094afd
4f9e49a3817baa9ebc4e4f8dcffc21b6ea9d0134
refs/heads/master
2023-06-20T17:08:09.447381
2021-06-08T17:42:37
2021-06-08T17:42:37
375,090,458
0
0
null
null
null
null
UTF-8
Python
false
false
3,612
py
from flask import Flask, jsonify, request from flask_sock import Sock import time app = Flask(__name__) sock = Sock(app) import threading BROKER = 'io.adafruit.com' USER = 'khanhdk0000' PASSWORD = 'aio_FfID10QWNVSKUC2j15nLtOSeckin' TOPIC = 'khanhdk0000/feeds/' LIGHT = 'light' SOUND = 'sound' TEMP = 'temp' LCD = 'iot_led' BUZZER = 'buzzer' ######## # USER = 'CSE_BBC' # PASSWORD = 'aio_FfID10QWNVSKUC2j15nLtOSeckin' # TOPIC = 'CSE_BBC/feeds/' # USER1 = 'CSE_BBC1' # PASSWORD1 = 'aio_FfID10QWNVSKUC2j15nLtOSeckin' # TOPIC1 = 'CSE_BBC1/feeds/' # LIGHT = 'bk-iot-light' # SOUND = 'bk-iot-sound' # TEMP = 'bk-iot-temp-humid' # LCD = 'bk-iot-lcd' # BUZZER = 'bk-iot-speaker' resLight = '"id":"13","name":"LIGHT","data":"0","unit":""' prevLight = resLight resTemp = '"id":"7","name":"SOUND","data":"0","unit":""' prevTemp = resTemp resSound = '"id":"12","name":"TEMP-HUMID","data":"0","unit":""' prevSound = resSound def mqttGet(user, password,topic,device): import paho.mqtt.client as mqtt def on_connect(client, userdata, flags, rc): print("Connected with result code "+str(rc)) if rc == 0: print('good') else: print('no good') def on_disconnect(client, userdata, flags, rc=0): print("Disconnected result code " + str(rc)) def on_message(client, userdata, message): if device == LIGHT: global resLight message = str(message.payload.decode("utf-8")) print(message) resLight = message elif device == TEMP: global resTemp message = str(message.payload.decode("utf-8")) print(message) resTemp = message elif device == SOUND: global resSound message = str(message.payload.decode("utf-8")) print(message) resSound = message client = mqtt.Client() client.username_pw_set(username=user,password=password) client.on_connect = on_connect client.on_disconnect = on_disconnect client.on_message = on_message client.connect(BROKER, 1883, 60) client.subscribe(topic) client.loop_forever() t1 = threading.Thread(target=mqttGet, name=mqttGet, args=(USER, PASSWORD,TOPIC + LIGHT, LIGHT)) t1.start() t2 = 
threading.Thread(target=mqttGet, name=mqttGet, args=(USER, PASSWORD,TOPIC + TEMP, TEMP)) t2.start() t3 = threading.Thread(target=mqttGet, name=mqttGet, args=(USER, PASSWORD,TOPIC + SOUND, SOUND)) t3.start() def mqttPost(topic, user,pass,payload): import paho.mqtt.publish as publish publish.single(topic,hostname="io.adafruit.com",auth={"username":user, "password":pass},payload = payload) @sock.route('/light') def light(ws): global resLight, prevLight while True: if prevLight == resLight: continue else: ws.send(resLight) prevLight = resLight @sock.route('/sound') def sound(ws): global resSound, prevSound while True: if prevSound == resSound: continue else: ws.send(resSound) prevSound = resSound @sock.route('/temp') def temp(ws): global resTemp, prevTemp while True: if prevTemp == resTemp: continue else: ws.send(resTemp) prevTemp = resTemp @app.route('/postlcd', methods=["POST"]) def testpost(): input_json = request.get_json(force=True) domain = input_json['data'] print('receive data', domain) mqttPost(TOPIC+LCD, U) return 'yea:' + domain if __name__ == '__main__': app.run(debug=True)
[ "khanhtran28092000@gmail.com" ]
khanhtran28092000@gmail.com
6eb49e8db32d4611726004ff87a58fdd4e774e68
f2a625cefb799e08bb984cb131e98c3ea2920ec8
/OctoPrintHttpClient.py
5f6346ad12d471ef7c8b119dd6d325a2964d0b3f
[ "Apache-2.0" ]
permissive
AllenMcAfee/octoprint-printer-connection-plugin-for-cura
f576c78978b719ef8c5efab8ff3f9328bc68944a
5212d1153615aa608ac0a763ceb955e3e2bb25bf
refs/heads/master
2020-12-29T03:07:49.115524
2015-03-04T06:28:35
2015-03-04T06:28:35
null
0
0
null
null
null
null
UTF-8
Python
false
false
5,065
py
# Copyright 2015 Scott Hraban # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. __copyright__ = "Copyright (C) 2015 Scott Hraban - Released under terms of the Apache License, Version 2.0" import time import urllib import httplib as httpclient import json import xml.etree.ElementTree as ET import cStringIO as StringIO import traceback class OctoPrintHttpClientResponse(object): def __init__(self, statusCode, statusMessage, body): self.statusCode = statusCode self.statusMessage = statusMessage self.body = body def isOk(self): return self.statusCode >= 200 and self.statusCode < 300 def isUnauthorized(self): return self.statusCode == 401 class OctoPrintHttpClient(object): def __init__(self, scheme, host, port, rootPath, contentType): super(OctoPrintHttpClient, self).__init__() if scheme != "http" and scheme != "https": raise IllegalArgument("scheme", "scheme must be http or https") if host is None or host == "": raise IllegalArgument("host", "host must not be empty") if host is None or host == "": raise IllegalArgument("port", "port must not be empty") if contentType != "json" and contentType != "xml": raise IllegalArgument("contentType", "contentType must be json or xml") self._scheme = scheme self._host = host self._port = port self._rootPath = rootPath or "" self._contentType = contentType self._headers = {} while self._rootPath and self._rootPath[-1] == '/': self._rootPath = self._rootPath[:-1] def addHeader(self, key, value): self._headers[key] = value def request(self, method, 
path, postData = None, filename = None): if self._scheme == "https": http = httpclient.HTTPSConnection(self._host, self._port or 443, timeout=5) else: http = httpclient.HTTPConnection(self._host, self._port or 80, timeout=5) fullPath = "{}{}".format(self._rootPath, path) try: headers = {"User-Agent": "Cura OctoPrint Printer Connection"} for key in self._headers.keys(): headers[key] = self._headers[key] if filename is not None and postData is not None: boundary = "OctoPrintConnectBoundary" headers["Content-type"] = "multipart/form-data; boundary={}".format(boundary) prefix = StringIO.StringIO() prefix.write('--{}\r\nContent-Disposition: form-data; name="file"; filename="{}"\r\nContent-Type: application/octet-stream\r\n\r\n'.format(boundary, filename)) postData._list.insert(0, prefix) suffix = StringIO.StringIO() suffix.write("\r\n--{}--".format(boundary)) postData._list.append(suffix) postData.seekStart() http.request(method, fullPath, postData, headers = headers) elif postData is not None: if self._contentType == "json": headers["Content-type"] = "application/json" http.request(method, fullPath, json.dumps(postData), headers = headers) else: headers["Content-type"] = "application/xml" http.request(method, fullPath, ET.tostring(postData), headers = headers) else: http.request(method, fullPath, headers = headers) except: print "Request Error: ({}:{})".format(method, fullPath) traceback.print_exc() http.close() return OctoPrintHttpClientResponse(0, "error making request", None) try: response = http.getresponse() responseText = response.read() except: print "Response Error: ({}:{})".format(method, fullPath) traceback.print_exc() http.close() return OctoPrintHttpClientResponse(0, "error getting response", None) http.close() if str(response.status)[0] != '2': print "Responded with error: ({}:{}) ({}) {}".format(method, fullPath, response.status, response.reason) return OctoPrintHttpClientResponse(response.status, response.reason, None) if responseText: try: if 
self._contentType == "json": return OctoPrintHttpClientResponse(response.status, response.reason, json.loads(responseText)) else: return OctoPrintHttpClientResponse(response.status, response.reason, ET.fromstring(responseText)) except ValueError: print "Error: ({}) ({}:{})".format(self._contentType, method, fullPath) traceback.print_exc() return OctoPrintHttpClientResponse(0, "Error parsing response body", None) class Error(Exception): """Base class for exceptions in this module.""" pass class IllegalArgument(Error): """Exception raised for errors in the arguments. Attributes: expr -- input expression in which the error occurred msg -- explanation of the error """ def __init__(self, expr, msg): self.expr = expr self.msg = msg
[ "scotthraban@gmail.com" ]
scotthraban@gmail.com
56749342e68294136dbbbacb342a3d9b2f01f30b
18b3ad3b0e1f7f10969738251e1201d01dfbc6bf
/backup_files/samplepy/passbyvalue.py
26689727f2944f32dee1688daef3ff1dc4632725
[]
no_license
sahthi/backup2
11d509b980e731c73733b1399a8143780779e75a
16bed38f0867fd7c766c2a008c8d43b0660f0cb0
refs/heads/master
2020-03-21T12:39:56.890129
2018-07-09T08:12:46
2018-07-09T08:12:46
138,565,151
0
0
null
null
null
null
UTF-8
Python
false
false
180
py
def changeme(mylist): mylist = [1,2,3,4 ] print "values inside the function",mylist return mylist = [10,20,30] changeme(mylist) print"values outside the function ",mylist
[ "siddamsetty.sahithi@votarytech.com" ]
siddamsetty.sahithi@votarytech.com
f8fd9391978c42645ffab97a26b62801717c3913
c4cb645799539089f89b3203a7ce88ba85cde199
/src/development/vortex/development/utils/data/loader/pytorch_loader.py
f970047939254b08d5fea11996c34433d50b6e90
[]
no_license
jesslynsepthiaa/vortex
2533d245f3003c649205af51f7692d6022d6638f
1532db8447d03e75d5ec26f93111270a4ccb7a7e
refs/heads/master
2023-01-10T19:20:25.190350
2020-11-13T07:20:39
2020-11-13T07:20:39
null
0
0
null
null
null
null
UTF-8
Python
false
false
239
py
from torch.utils.data.dataloader import DataLoader supported_loaders = [('PytorchDataLoader','default')] # (Supported data loader, Supported dataset wrapper format) def create_loader(*args,**kwargs): return DataLoader(*args,**kwargs)
[ "alvinprayuda@nodeflux.io" ]
alvinprayuda@nodeflux.io
e5b76034e7c6036b7c81ca4e1246520c552d8e69
4232db80ce44e91aa249c5f640a4e8fe4b75a471
/bushy/_pivotal.py
c6bc07ab7f8cbc13131780417b23e8dd5ced1f55
[]
no_license
junkafarian/bushy
97a8d215501b2424722ba37c3e6fe8a1b3d1ed5b
3d50bee8c6d6fc64939ae4e19f1e62ab17ad6154
refs/heads/master
2021-01-23T15:57:27.352579
2011-07-08T17:07:48
2011-07-08T17:07:48
1,221,588
1
1
null
null
null
null
UTF-8
Python
false
false
11,205
py
""" Utilities for interfacing with a Pivotal Tracker project """ import sys import optparse import httplib2 from datetime import datetime from commands import getoutput from pivotal import Pivotal, anyetree from bushy.base import Base __all__ = ['Bug', 'Feature', 'Finish', ] class PivotalBase(Base): def init_parser(self): parser = optparse.OptionParser(description=__doc__) parser.add_option('-k', '--api-key', dest='api_token', help='Pivotal Tracker API key') parser.add_option('-p', '--project-id', dest='project_id', help='Pivotal Tracker project id') parser.add_option('-n', '--full-name', dest='full_name', help='Pivotal Tracker full name') parser.add_option('-b', '--integration-branch', dest='integration_branch', default='master', help='The branch to merge finished stories back down onto') parser.add_option('-m', '--only-mine', dest='only_mine', help='Only select Pivotal Tracker stories assigned to you') parser.add_option('-s', '--story', dest='target_story', help='Specify a story to work on (if applicable)') parser.add_option('-q', '--quiet', action="store_true", dest='quiet', help='Quiet, no-interaction mode') parser.add_option('-v', '--verbose', action="store_true", dest='verbose', help='Run verbosely') return parser def parse_gitconfig(self): config = {} keys = [ 'api-token', 'project-id', 'full-name', 'integration-branch', 'only-mine' ] for key in keys: val = getoutput('git config --get bushy-pivotal.%s' % key) if val: config[key.replace('-', '_')] = val.strip() if 'only_mine' in config: config['only_mine'] = bool(config['only_mine']) return config _api = None @property def api(self): if self._api is None: api = Pivotal(self.options['api_token']) self._api = api return self._api _project = None @property def project(self): if self._project is None: project = self.api.projects(self.options['project_id']) self._project = project return self._project def format_filter(qs): filters = ['%s:%s' % (k,v) for k,v in qs.items()] return ' '.join(filters) def 
etree_text(etree, element): if etree.find(element) is not None: return etree.find(element).text return '' def etree_int(etree, element): if etree.find(element) is not None: return int(etree.find(element).text) return 0 def etree_datetime(etree, element): if etree.find(element) is not None: return datetime.strptime(etree.find(element).text, '%Y/%m/%d %H:%M:%S UTC') return None class Story(PivotalBase): def __init__(self, etree, input=sys.stdin, output=sys.stdout, args=sys.argv): super(Story, self).__init__(input, output, args) self._update(etree) self.h = httplib2.Http() self.h.force_exception_to_status_code = True def _update(self, etree): self.id = etree_int(etree, 'id') self.project_id = etree_int(etree, 'project_id') self.story_type = etree_text(etree, 'story_type') self.url = etree_text(etree, 'url') self.estimate = etree_int(etree, 'estimate') self.current_state = etree_text(etree, 'current_state') self.description = etree_text(etree, 'description') self.name = etree_text(etree, 'name') self.requested_by = etree_text(etree, 'requested_by') self.owned_by = etree_text(etree, 'owned_by') self.created_at = etree_datetime(etree, 'created_at') self.updated_at = etree_datetime(etree, 'updated_at') def update_status(self, status): h = self.h url = 'http://www.pivotaltracker.com/services/v3/projects/%s/stories/%s' % (self.project_id, self.id) headers = {'X-TrackerToken': self.api.token, 'Content-type': 'application/xml'} body = '<story><current_state>%s</current_state></story>' % status resp, content = h.request(url, 'PUT', headers=headers, body=body) etree = anyetree.etree.fromstring(content) self._update(etree) def update_owner(self, owner): h = self.h url = 'http://www.pivotaltracker.com/services/v3/projects/%s/stories/%s' % (self.project_id, self.id) headers = {'X-TrackerToken': self.api.token, 'Content-type': 'application/xml'} body = '<story><owned_by>%s</owned_by></story>' % owner resp, content = h.request(url, 'PUT', headers=headers, body=body) etree = 
anyetree.etree.fromstring(content) self._update(etree) def comment(self, comment): h = self.h url = 'http://www.pivotaltracker.com/services/v3/projects/%s/stories/%s/notes' % (self.project_id, self.id) headers = {'X-TrackerToken': self.api.token, 'Content-type': 'application/xml'} body = '<note><text>%s</text></note>' % comment resp, content = h.request(url, 'POST', headers=headers, body=body) return content def start(self): self.update_status('started') self.update_owner(self.options['full_name']) self.comment('Story started by %s' % self.options['full_name']) class Pick(PivotalBase): @property def type(self): raise NotImplementedError('Must define in subclass') @property def plural_type(self): raise NotImplementedError('Must define in subclass') @property def branch_suffix(self): raise NotImplementedError('Must define in subclass') _story = None def get_story(self, story_id=None): if not self._story: qs = {'state': 'unstarted', 'type': self.type, } if story_id is not None: qs['id'] = story_id elif self.options.get('only_mine'): qs['owned_by'] = self.options['full_name'] stories = self.project.stories(filter=format_filter(qs)).get_etree() story = stories.find('story') if story: # pragma: no cover self._story = Story(story) return self._story def __call__(self, raw_input=raw_input): super(Pick, self).__call__() if self.options['target_story']: # the -s / --story flag was specified target_story = self.options['target_story'] self.put('Retrieving story %s from Pivotal Tracker' % target_story) story = self.get_story(story_id=target_story) if story is None: self.put('Story %s is unavailable!' % target_story) return else: # there was no story number provided so just pick the first one msg = 'Retrieving latest %s from Pivotal Tracker' % self.plural_type if self.options['only_mine']: msg += " for " + self.options['full_name'] self.put(msg) story = self.get_story() if story is None: self.put('No %s available!' 
% self.plural_type) return self.put('Story: %s' % story.name) self.put('URL: %s' % story.url) self.put('Updating %s status in Pivotal Tracker...' % self.type) story.start() if story.owned_by == self.options['full_name']: suffix = default = self.branch_suffix if not self.options['quiet']: suffix = raw_input('Enter branch name (will be prepended by %s) [%s]: ' % (story.id, default)) if suffix == '': suffix = default branch = '%s-%s' % (story.id, suffix) branches = self.sys('git branch') if branch not in branches: self.put('Creating new branch: ', False) self.put(branch) self.sys('git checkout -b %s' % branch) else: self.put('Switching to branch %s' % branch) self.sys('git checkout %s' % branch) else: self.put('Unable to update ', False) self.put(story.id) ## Command line API ## class Feature(Pick): type = 'feature' plural_type = 'features' branch_suffix = 'feature' class Bug(Pick): type = 'bug' plural_type = 'bugs' branch_suffix = 'bug' class Finish(PivotalBase): @property def current_branch(self): branches = self.sys('git branch') branches = branches.split('\n') for b in branches: if b.startswith('* '): return b.strip('* ') return '' @property def story_id(self): current_branch = self.current_branch if '-' not in current_branch: return '' story_id,_ = current_branch.split('-', 1) return story_id _story = None @property def story(self): if self._story is None: qs = {} qs['owned_by'] = self.options['full_name'] stories = self.project.stories(filter=format_filter(qs)).get_etree() for story in stories.getchildren(): if hasattr(story.find('id'), 'text') and story.find('id').text == self.story_id: # pragma: no cover self._story = Story(story) break return self._story def __call__(self): super(Finish, self).__call__() if self.story_id == '': self.put('The current branch name (%s) does not follow the ' 'correct format, please checkout the correct ' 'branch then re-run this command' % self.current_branch) return story = self.story self.put('Marking Story %s as finished...' 
% story.id) story.update_status('finished') if story.current_state == 'finished': integration_branch = self.options['integration_branch'] current_branch = self.current_branch self.put('Merging %s into %s' % (current_branch, integration_branch)) out = self.sys('git checkout %s' % integration_branch) if 'error: ' in out: # TODO: error handling for each command (or before running commands) self.put('There was an error checking out master:\n%s' % out) return self.sys('git merge --no-ff %s' % current_branch) self.put('Removing %s branch' % current_branch) self.sys('git branch -d %s' % current_branch) story.comment('Development work for this story has been merged into the trunk') self.put('Merged code into trunk. Please push upstream and notify the release manager if necessary') else: self.put('Unable to mark Story %s as finished' % story.id)
[ "fergus.doyle@largeblue.com" ]
fergus.doyle@largeblue.com
1f83f2fd3f72b375690b8c90d78f0eae7f16cfe2
8507fe7cecb7db369fee27451109fa0c6d885d4c
/Classes/carrs.py
4ef83e9bb84233e9e01a9a1593c71244090f7086
[]
no_license
savanmin/pythonfreq
e7e781b2e4fb2f5d434bf9274ee02df1b35deec7
e3eeb0b15b24ce59a49a534e5760cf8dff7761b0
refs/heads/master
2021-09-04T03:48:51.318040
2018-01-15T13:58:09
2018-01-15T13:58:09
114,525,629
0
0
null
null
null
null
UTF-8
Python
false
false
729
py
class Car: def __init__(self, year, price): self.year = year self.price = price def about(self): print("Year of the car is %d and price is %d " % (self.year, self.price), end=" ") class Honda(Car): def __init__(self, year, price, model): Car.__init__(self, year, price) self.model = model def about(self): Car.about(self) print("Model is %s " % self.model) class Civic(Honda): class Toyota(Car): def __init__(self, year, price, model): Car.__init__(self, year, price) self.model = model def about(self): Car.about(self) print("Model is %s " % self.model) class Innova(Toyota): while True: print()
[ "savanminsariya18@gmail.com" ]
savanminsariya18@gmail.com
b16eae335b0a0379a9d997a92554ec34481cb9c4
27076e3ab905ff9494c159fb53d5ad5e8ecd29c8
/笔记代码/python/test/test3.py
a10aee82a95f7c3878c5b3ea1126180052a40392
[]
no_license
windkidtrh/learn_text
598fee4a29382098d578a1dd616ca5da0f269f42
fda65095be8c9c5d81beb8513d479b6f6846701d
refs/heads/master
2020-03-10T17:04:25.005353
2018-12-12T12:10:26
2018-12-12T12:10:26
129,490,658
0
0
null
null
null
null
UTF-8
Python
false
false
740
py
#coding:utf -8 import os def alter(file,old_str,new_str): """ 将替换的字符串写到一个新的文件中,然后将原文件删除,新文件改为原来文件的名字 :param file: 文件路径 :param old_str: 需要替换的字符串 :param new_str: 替换的字符串 :return: None """ with open(file, "r+") as f1,open("%s.doc" % file, "w") as f2: for x in f1: line = x.decode('utf-16').encode('utf-8') if old_str in line: line = line.replace(old_str, new_str) print line f2.write(line) f2.close() os.remove(file) os.rename("%s.doc" % file, file) File_path='1.doc' alter(File_path, "。”," ,"。”")
[ "1442983474@qq.com" ]
1442983474@qq.com
bee05cd03c18b9bfb9bd86c0782b1244d48c34aa
df0ac2efe2037c03f47e8f5b0352d2072e49c315
/paytrek/client.py
eaa42551a8974b9716be3d473ad27832a43c4a04
[ "MIT" ]
permissive
erkanay/paytrek-python-client
59f2b155597ed6f4b1313bbdc10946f62d24abe3
ab02f0c1843647e590667acf161dba7f51c82622
refs/heads/master
2021-05-14T11:13:09.846087
2018-01-05T10:57:30
2018-01-05T10:57:30
116,364,748
0
0
null
null
null
null
UTF-8
Python
false
false
6,232
py
import requests from requests.packages.urllib3.exceptions import InsecureRequestWarning requests.packages.urllib3.disable_warnings(InsecureRequestWarning) class Paytrek(object): BASE_URLS = { 'sandbox': 'https://sandbox.paytrek.com', 'worldwide_live': 'https://secure.paytrek.com', 'turkey_live': 'https://secure.paytrek.com.tr' } def __init__(self, username, password, environment='sandbox'): self.basic_auth = (username, password) self.base_url = self.BASE_URLS.get(environment) self.endpoints = { 'sale': ''.join([self.base_url, '/api/v1/sale/']), 'charge': ''.join([self.base_url, '/api/v1/charge/']), 'capture': ''.join([self.base_url, '/api/v1/capture/']), 'refund': ''.join([self.base_url, '/api/v1/refund/']), 'fraudcheck': ''.join([self.base_url, '/api/v1/fraudcheck/']), 'charge_with_token': ''.join([self.base_url, '/api/v1/charge_with_token/']), 'tokenization': ''.join([self.base_url, '/payment/tokenization/']), 'options': ''.join([self.base_url, '/payment/options/']), 'vault': ''.join([self.base_url, '/payment/vault/']), } self.headers = {'Content-type': 'application/json'} def _request(self, url, query={}): """ Returns json response according to defined endpoint :param url: :param query: :return: """ response = requests.post(url=url, auth=self.basic_auth, json=query, headers=self.headers, verify=False) if not response.ok: raise Exception(response.text) return response.json() def sale(self, payload=None, sale_token=None): """ Returns json response within sale result to create sale resource payload is required to get created sale resource sale_token is required :param payload: https://sandbox.paytrek.com/docs/integration/saleresource.html#list-of-parameters :param sale_token: :return: """ if payload: response = requests.post(url=self.endpoints.get('sale'), json=payload, headers=self.headers, auth=self.basic_auth, verify=False) elif sale_token: url = ''.join([self.endpoints.get('sale'), sale_token]) response = requests.get(url=url, headers=self.headers, 
auth=self.basic_auth, verify=False) if not response.ok: raise Exception(response.text) return response.json() def charge(self, payload): """ Returns json response within charge result :param payload: https://sandbox.paytrek.com/docs/integration/chargeresource.html#list-of-parameters :return: """ return self._request(self.endpoints.get('charge'), query=payload) def charge_with_token(self, sale_token, payment_token, dcc_currency=None, secure_charge=False): """ Returns json response within succeeded or failed result :param sale_token: :param payment_token: :param secure_charge: redirects to 3D secure :param dcc_currency: the DCC currency symbol that you have obtained :return: """ payload = { 'sale': '/api/v1/sale/{}/'.format(sale_token), 'payment_token': payment_token, 'dcc_currency': dcc_currency, 'secure_charge': secure_charge } return self._request(self.endpoints.get('charge_with_token'), query=payload) def fraudcheck(self, sale_token, payment_token): """ Returns json response within sale risk result :param sale_token: :param payment_token: :return: """ payload = { 'sale': '/api/v1/sale/{}/'.format(sale_token), 'payment_token': payment_token, } return self._request(self.endpoints.get('fraudcheck'), query=payload) def capture(self, sale_token, comments=None): """ Returns json response within succeeded or fail result :param comments: comments for accepting the fraud review decision :param sale_token: :return: """ params = {'comments': comments} url = ''.join([self.endpoints.get('capture'), sale_token, '/']) return self._request(url, query=params) def refund(self, sale_token, amount=None, comments=None): """ Returns json response within refund or fail result :param sale_token: :param amount: amount to refund :param comments: comments for reject decision :return: """ params = { 'amount': amount, 'comments': comments } url = ''.join([self.endpoints.get('refund'), sale_token, '/']) return self._request(url, query=params) def tokenization(self, payload): """ Returns json 
response representing payment token and strict card information such as bin number, bin country, card currency and card issuer. :param payload: https://sandbox.paytrek.com/docs/integration/tokenization.html#list-of-parameters :return: """ return self._request(self.endpoints.get('tokenization'), query=payload) def vault(self, payload): """ Returns json response representing payment token and strict card information such as bin number, bin country, card currency and card issuer. :param payload: https://sandbox.paytrek.com/docs/integration/vault.html#list-of-parameters :return: """ return self._request(self.endpoints.get('vault'), query=payload) def options(self, payload): """ Returns json response representing payment token and strict card information such as bin number, bin country, card currency and card issuer. :param payload: https://sandbox.paytrek.com/docs/integration/vault.html#list-of-parameters :return: """ return self._request(self.endpoints.get('options'), query=payload)
[ "ytu-erkan-aydin@ytu-erkan-ay.local" ]
ytu-erkan-aydin@ytu-erkan-ay.local
3da8f7b040ba3e4364324d6671fd5826cd2494b7
1dacbf90eeb384455ab84a8cf63d16e2c9680a90
/pkgs/odo-0.4.2-py27_0/lib/python2.7/site-packages/odo/backends/sparksql.py
80e27ad318d1981f6194e3e20d84b782da1089e7
[ "Apache-2.0", "BSD-3-Clause", "LicenseRef-scancode-unknown" ]
permissive
wangyum/Anaconda
ac7229b21815dd92b0bd1c8b7ec4e85c013b8994
2c9002f16bb5c265e0d14f4a2314c86eeaa35cb6
refs/heads/master
2022-10-21T15:14:23.464126
2022-10-05T12:10:31
2022-10-05T12:10:31
76,526,728
11
10
Apache-2.0
2022-10-05T12:10:32
2016-12-15T05:26:12
Python
UTF-8
Python
false
false
9,639
py
from __future__ import division, print_function, absolute_import import os import glob import itertools import tempfile import shutil from functools import partial from collections import Iterator from datetime import datetime, date import pandas as pd import toolz from toolz.curried import get, map, memoize from toolz import pipe, concat, curry from pyspark import RDD, SQLContext, HiveContext from pyspark.sql import SchemaRDD from pyspark.rdd import PipelinedRDD import datashape from datashape import dshape, Record, DataShape, Option, Tuple from datashape.predicates import isdimension, isrecord, iscollection from .. import append, discover, convert from ..core import ooc_types from ..directory import Directory from ..temp import Temp from ..chunks import chunks from .json import JSONLines, JSON from .csv import CSV from pyspark.sql import DataFrame as SparkDataFrame from pyspark.sql.types import ( ByteType, ShortType, IntegerType, LongType, FloatType, DoubleType, StringType, BinaryType, BooleanType, TimestampType, DateType, ArrayType, StructType, StructField ) base = int, float, datetime, date, bool, str _names = ('tmp%d' % i for i in itertools.count()) @append.register(SQLContext, object) def iterable_to_sql_context(ctx, seq, **kwargs): return append(ctx, append(ctx._sc, seq, **kwargs), **kwargs) def register_table(ctx, srdd, name=None): if name is None: name = next(_names) ctx.registerDataFrameAsTable(srdd, name) @append.register(SQLContext, (JSONLines, Directory(JSONLines))) def jsonlines_to_sparksql(ctx, json, dshape=None, name=None, schema=None, samplingRatio=0.25, **kwargs): # if we're passing in schema, assume that we know what we're doing and # bypass any automated dshape inference if dshape is not None and schema is None: schema = dshape_to_schema(dshape.measure if isrecord(dshape.measure) else dshape) srdd = ctx.jsonFile(json.path, schema=schema, samplingRatio=samplingRatio) register_table(ctx, srdd, name=name) return srdd @convert.register(list, 
(SparkDataFrame, SchemaRDD), cost=200.0) def sparksql_dataframe_to_list(df, dshape=None, **kwargs): result = df.collect() if (dshape is not None and iscollection(dshape) and not isrecord(dshape.measure)): return list(map(get(0), result)) return result @convert.register(base, (SparkDataFrame, SchemaRDD), cost=200.0) def spark_df_to_base(df, **kwargs): return df.collect()[0][0] @append.register(SQLContext, RDD) def rdd_to_sqlcontext(ctx, rdd, name=None, dshape=None, **kwargs): """ Convert a normal PySpark RDD to a SparkSQL RDD or Spark DataFrame Schema inferred by ds_to_sparksql. Can also specify it explicitly with schema keyword argument. """ # TODO: assumes that we don't have e.g., 10 * 10 * {x: int, y: int} if isdimension(dshape.parameters[0]): dshape = dshape.measure sql_schema = dshape_to_schema(dshape) sdf = ctx.applySchema(rdd, sql_schema) if name is None: name = next(_names) register_table(ctx, sdf, name=name) ctx.cacheTable(name) return sdf def scala_set_to_set(ctx, x): from py4j.java_gateway import java_import # import scala java_import(ctx._jvm, 'scala') # grab Scala's set converter and convert to a Python set return set(ctx._jvm.scala.collection.JavaConversions.setAsJavaSet(x)) @discover.register(SQLContext) def discover_sqlcontext(ctx): table_names = sorted(map(str, ctx.tableNames())) dshapes = zip(table_names, map(discover, map(ctx.table, table_names))) return datashape.DataShape(datashape.Record(dshapes)) @discover.register((SparkDataFrame, SchemaRDD)) def discover_spark_data_frame(df): schema = df.schema() if callable(df.schema) else df.schema return datashape.var * schema_to_dshape(schema) def chunk_file(filename, chunksize): """Stream `filename` in chunks of size `chunksize`. 
Parameters ---------- filename : str File to chunk chunksize : int Number of bytes to hold in memory at a single time """ with open(filename, mode='rb') as f: for chunk in iter(partial(f.read, chunksize), b''): yield chunk @append.register(JSONLines, (SparkDataFrame, SchemaRDD)) def spark_df_to_jsonlines(js, df, pattern='part-*', chunksize=1 << 23, # 8MB **kwargs): tmpd = tempfile.mkdtemp() try: try: df.save(tmpd, source='org.apache.spark.sql.json', mode='overwrite') except AttributeError: shutil.rmtree(tmpd) df.toJSON().saveAsTextFile(tmpd) except: raise else: files = glob.glob(os.path.join(tmpd, pattern)) with open(js.path, mode='ab') as f: pipe(files, map(curry(chunk_file, chunksize=chunksize)), concat, map(f.write), toolz.count) finally: shutil.rmtree(tmpd) return js @convert.register((SparkDataFrame, SchemaRDD), (RDD, PipelinedRDD)) def rdd_to_spark_df_or_srdd(rdd, **kwargs): return append(HiveContext(rdd.context), rdd, **kwargs) try: from .hdfs import HDFS except ImportError: pass else: @append.register(HDFS(JSONLines), (Iterator, object, SparkDataFrame, SchemaRDD)) @append.register(HDFS(JSON), (list, object)) @append.register(HDFS(CSV), (chunks(pd.DataFrame), pd.DataFrame, object)) def append_spark_to_hdfs(target, source, **kwargs): tmp = convert(Temp(target.subtype), source, **kwargs) return append(target, tmp, **kwargs) def dshape_to_schema(ds): """Convert datashape to SparkSQL type system. 
Examples -------- >>> print(dshape_to_schema('int32')) # doctest: +SKIP IntegerType >>> print(dshape_to_schema('5 * int32') # doctest: +SKIP ArrayType(IntegerType,false) >>> print(dshape_to_schema('5 * ?int32')) # doctest: +SKIP ArrayType(IntegerType,true) >>> print(dshape_to_schema('{name: string, amount: int32}')) # doctest: +SKIP StructType(List(StructField(name,StringType,false),StructField(amount,IntegerType,false) # doctest: +SKIP)) >>> print(dshape_to_schema('10 * {name: string, amount: ?int32}')) # doctest: +SKIP ArrayType(StructType(List(StructField(name,StringType,false),StructField(amount,IntegerType,true))),false) """ if isinstance(ds, str): return dshape_to_schema(dshape(ds)) if isinstance(ds, Tuple): raise TypeError('Please provide a Record dshape for these column ' 'types: %s' % (ds.dshapes,)) if isinstance(ds, Record): return StructType([ StructField(name, dshape_to_schema(deoption(typ)), isinstance(typ, datashape.Option)) for name, typ in ds.fields]) if isinstance(ds, DataShape): if isdimension(ds[0]): elem = ds.subshape[0] if isinstance(elem, DataShape) and len(elem) == 1: elem = elem[0] return ArrayType(dshape_to_schema(deoption(elem)), isinstance(elem, Option)) else: return dshape_to_schema(ds[0]) if ds in dshape_to_sparksql: return dshape_to_sparksql[ds] raise NotImplementedError() def schema_to_dshape(schema): if type(schema) in sparksql_to_dshape: return sparksql_to_dshape[type(schema)] if isinstance(schema, ArrayType): dshape = schema_to_dshape(schema.elementType) return datashape.var * (Option(dshape) if schema.containsNull else dshape) if isinstance(schema, StructType): fields = [(field.name, Option(schema_to_dshape(field.dataType)) if field.nullable else schema_to_dshape(field.dataType)) for field in schema.fields] return datashape.dshape(Record(fields)) raise NotImplementedError('SparkSQL type not known %r' % type(schema).__name__) def deoption(ds): """ >>> deoption('int32') ctype("int32") >>> deoption('?int32') ctype("int32") """ if 
isinstance(ds, str): ds = dshape(ds) if isinstance(ds, DataShape) and not isdimension(ds[0]): return deoption(ds[0]) if isinstance(ds, Option): return ds.ty else: return ds # see http://spark.apache.org/docs/latest/sql-programming-guide.html#spark-sql-datatype-reference sparksql_to_dshape = { ByteType: datashape.int8, ShortType: datashape.int16, IntegerType: datashape.int32, LongType: datashape.int64, FloatType: datashape.float32, DoubleType: datashape.float64, StringType: datashape.string, BinaryType: datashape.bytes_, BooleanType: datashape.bool_, TimestampType: datashape.datetime_, DateType: datashape.date_, # sql.ArrayType: ?, # sql.MapTYpe: ?, # sql.StructType: ? } dshape_to_sparksql = { datashape.int16: ShortType(), datashape.int32: IntegerType(), datashape.int64: LongType(), datashape.float32: FloatType(), datashape.float64: DoubleType(), datashape.real: DoubleType(), datashape.time_: TimestampType(), datashape.date_: DateType(), datashape.datetime_: TimestampType(), datashape.bool_: BooleanType(), datashape.string: StringType() } ooc_types |= set([SparkDataFrame, SchemaRDD]) SQLContext = memoize(SQLContext) HiveContext = memoize(HiveContext)
[ "wgyumg@mgail.com" ]
wgyumg@mgail.com
81ff78d1540f5df07d51f5caa5b6559fe46bde09
7e0da49bbe69e3924828bb269e69307e0f41913d
/accounting_scrap.py
02d421242146999d9fbd097d4176f071fe2b903d
[]
no_license
mjaleman/social_network_py
2c06ffcc3f6a7c78e6146d659242ad2571ca9caa
691aacef77b2468f844c6258e6100f28fd94b774
refs/heads/main
2022-12-27T02:09:27.136538
2020-10-09T05:03:21
2020-10-09T05:03:21
302,537,287
0
0
null
null
null
null
UTF-8
Python
false
false
180
py
#filename = sys[1] open_file = open('/Users/marlonaleman/Desktop/Work/GitHub/bookkeeping/business/inbox/todo/fireman_checking/149575-S12.CSV', 'r') print(open_file.readlines())
[ "mjaleman1997@gmail.com" ]
mjaleman1997@gmail.com
6298f8909b10c1ef31600f83005f206f19655241
5478476360fa51930ae1c3c8b42959152ffe038b
/factor calculation scripts/23.rvol.py
336678d12011a2c0d66a5bc1fea80efbb94d79d3
[ "MIT" ]
permissive
cagdemir/equity-index-predictors
ebffa54dcd9151a916b48fb9cdb3c3beffc82476
2546e72328de848222cb6a1c744ababab2058477
refs/heads/master
2020-12-20T23:06:25.296145
2020-03-01T12:48:16
2020-03-01T12:48:16
236,235,003
2
2
null
null
null
null
UTF-8
Python
false
false
6,182
py
# -*- coding: utf-8 -*- """ Created on Mon Dec 2 17:03:06 2019 @author: Administrator """ import pdblp import pandas as pd import numpy as np import matplotlib.pyplot as plt #import seaborn as sns plt.style.use('seaborn') #con = pdblp.BCon(debug=True, port=8194, timeout=5000) con = pdblp.BCon(debug=False, port=8194, timeout=5000) con.start() index_tickers = ['NYA Index', 'SPX Index', 'CCMP Index' ,'CDAX Index' , 'ASX Index', 'TPX Index', 'SHCOMP Index' , 'SZCOMP Index', 'XUTUM Index', 'MEXBOL Index', 'IBOV Index', 'IMOEX Index' , 'JALSH Index'] from datetime import date start = '2004-1-1' today = date.today().strftime('%Y%m%d') firstday = '19991230' window_long = 52 window_short = 13 ohlc_tickers = ['OPEN','HIGH','LOW', 'LAST'] prices_open = con.bdh(index_tickers, 'PX OPEN',firstday , today) prices_open.columns = [i[0] for i in prices_open.columns] prices_open_int = prices_open.interpolate(method='linear')[index_tickers] prices_open_w = prices_open_int.groupby(pd.Grouper(freq='W')).first() prices_high = con.bdh(index_tickers, 'PX HIGH',firstday , today) prices_high.columns = [i[0] for i in prices_high.columns] prices_high_int = prices_high.interpolate(method='linear')[index_tickers] prices_high_w = prices_high_int.groupby(pd.Grouper(freq='W')).max() prices_low = con.bdh(index_tickers, 'PX LOW',firstday , today) prices_low.columns = [i[0] for i in prices_low.columns] prices_low_int = prices_low.interpolate(method='linear')[index_tickers] prices_low_w = prices_low_int.groupby(pd.Grouper(freq='W')).min() prices_close = con.bdh(index_tickers, 'PX LAST',firstday , today) prices_close.columns = [i[0] for i in prices_close.columns] prices_close_int = prices_close.interpolate(method='linear')[index_tickers] prices_close_w = prices_close_int.groupby(pd.Grouper(freq='W')).last() returns_open = prices_open_w / prices_close_w.shift(1) - 1 returns_high = prices_high_w / prices_close_w.shift(1) - 1 returns_low = prices_low_w / prices_close_w.shift(1) - 1 returns_close = 
prices_close_w / prices_close_w.shift(1) - 1 returns_fromClose_ohlc = pd.concat([returns_open, returns_high, returns_low, returns_close],axis=1) returns_fromClose_ohlc.columns = [('_').join(i) for i in zip(returns_fromClose_ohlc.columns,np.repeat(ohlc_tickers,len(index_tickers)))] std_fromClose_ohlc_long = returns_fromClose_ohlc.rolling(window_long).std()[returns_fromClose_ohlc.index>=start] std_fromClose_ohlc_long.columns = ['23-2_'+i for i in std_fromClose_ohlc_long.columns] #std_fromClose_ohlc_long.columns = ['23_1_US_NY','23_1_US_SPX','23_1_US_CCMP','23_1_DE','23_1_UK','23_1_JP','23_1_CH_SH','23_1_SZ','23_1_TR', # '23_1_MX','23_1_BR','23_1_RU','23_1_ZA'] mean_fromClose_ohlc_long = returns_fromClose_ohlc.rolling(window_long).mean()[returns_fromClose_ohlc.index>=start] mean_fromClose_ohlc_long.columns = ['23-1_'+i for i in mean_fromClose_ohlc_long.columns] std_fromClose_ohlc_short= returns_fromClose_ohlc.rolling(window_short).std()[returns_fromClose_ohlc.index>=start] std_fromClose_ohlc_short.columns = ['23-4_'+i for i in std_fromClose_ohlc_short.columns] mean_fromClose_ohlc_short= returns_fromClose_ohlc.rolling(window_short).mean()[returns_fromClose_ohlc.index>=start] mean_fromClose_ohlc_short.columns = ['23-3_'+i for i in mean_fromClose_ohlc_short.columns] #std_fromClose_ohlc_short.columns = ['23_2_US_NY','23_2_US_SPX','23_2_US_CCMP','23_2_DE','23_2_UK','23_2_JP','23_2_CH_SH','23_2_SZ','23_2_TR', # '23_2_MX','23_2_BR','23_2_RU','23_2_ZA'] ############################################################################## prices_ohlc = pd.concat([prices_open_w, prices_high_w, prices_low_w, prices_close_w],axis=1) prices_ohlc.columns = [('_').join(i) for i in zip(prices_ohlc.columns,np.repeat(ohlc_tickers,len(index_tickers)))] std_delta_ohlc_long = prices_ohlc.pct_change().rolling(window_long).std() mean_delta_ohlc_long = prices_ohlc.pct_change().rolling(window_long).mean() std_ohlc_long = std_delta_ohlc_long[std_delta_ohlc_long.index>=start] std_ohlc_long.columns = 
['23-6_'+i for i in std_ohlc_long.columns] mean_ohlc_long = mean_delta_ohlc_long[mean_delta_ohlc_long.index>=start] mean_ohlc_long.columns = ['23-5_'+i for i in mean_ohlc_long.columns] #std_ohlc_long.columns = ['23_3_US_NY','23_3_US_SPX','23_3_US_CCMP','23_3_DE','23_3_UK','23_3_JP','23_3_CH_SH','23_3_SZ','23_3_TR', # '23_3_MX','23_3_BR','23_3_RU','23_3_ZA'] std_delta_ohlc_short = prices_ohlc.pct_change().rolling(window_short).std() mean_delta_ohlc_short = prices_ohlc.pct_change().rolling(window_short).mean() std_ohlc_short = std_delta_ohlc_short[std_delta_ohlc_short.index>=start] std_ohlc_short.columns = ['23-8_'+i for i in std_ohlc_short.columns] mean_ohlc_short = mean_delta_ohlc_short[mean_delta_ohlc_short.index>=start] mean_ohlc_short.columns = ['23-7_'+i for i in mean_ohlc_short.columns] #std_ohlc_short.columns = ['23_4_US_NY','23_4_US_SPX','23_4_US_CCMP','23_4_DE','23_4_UK','23_4_JP','23_4_CH_SH','23_4_SZ','23_4_TR', # '23_4_MX','23_4_BR','23_4_RU','23_4_ZA'] mean_fromClose_ohlc_long.to_excel('C:/Users/sb0538/Desktop/15022020/excels/23-1_rvolmeanfromCloseohlclong.xlsx') std_fromClose_ohlc_long.to_excel('C:/Users/sb0538/Desktop/15022020/excels/23-2_rvolstdfromcloseohlclong.xlsx') mean_fromClose_ohlc_short.to_excel('C:/Users/sb0538/Desktop/15022020/excels/23-3_rvolmeanfromcloseohlcshort.xlsx') std_fromClose_ohlc_short.to_excel('C:/Users/sb0538/Desktop/15022020/excels/23-4_rvolstdfromcloseohlcshort.xlsx') mean_ohlc_long.to_excel('C:/Users/sb0538/Desktop/15022020/excels/23-5_rvolmeanohlcdeltalong.xlsx') std_ohlc_long.to_excel('C:/Users/sb0538/Desktop/15022020/excels/23-6_rvolstdohlcdeltalong.xlsx') mean_ohlc_short.to_excel('C:/Users/sb0538/Desktop/15022020/excels/23-7_rvolmeanohlcdeltashort.xlsx') std_ohlc_short.to_excel('C:/Users/sb0538/Desktop/15022020/excels/23-8_rvolstdohlcdeltashort.xlsx')
[ "54540791+cagdemir@users.noreply.github.com" ]
54540791+cagdemir@users.noreply.github.com
18bc999bf22341e382cdddb4ddac6fa6549a0a16
e265e25633c583c5b0b1381238dd0e880d469f22
/blog/models.py
015260753321b75aa80dc91a3fa1eb53ede439dc
[ "MIT" ]
permissive
samuira/TutionMastor
ba59b8b963a761b55d8fedfc2b4d881beef812d3
5b6d89efc90a9ebb54766530554d7dc9d5ee8298
refs/heads/master
2023-01-12T12:26:35.935698
2020-01-04T13:17:18
2020-01-04T13:17:18
205,794,187
1
1
MIT
2023-01-07T09:18:05
2019-09-02T06:50:04
CSS
UTF-8
Python
false
false
633
py
from django.db import models from custom_admin.models import User class BlogPost(models.Model): published_on = models.DateTimeField(auto_now_add=False, blank=True, null=True) created_on = models.DateTimeField(auto_now_add=True, blank=True, null=True) created_by = models.ForeignKey(User, on_delete=models.CASCADE, default=None) title_image = models.ImageField(upload_to='blog/title_images/%Y/%m/%d', blank=True, null=True) title = models.CharField(max_length=300) description = models.TextField() slug = models.SlugField(max_length=300, unique=True) is_verified = models.BooleanField(default=False)
[ "rajesh.samui@digitalaptech.com" ]
rajesh.samui@digitalaptech.com
db9c4a3e84a6f07991c42aaf777fce3637f14486
4f1923a2b4e42e806a2b29709b9aad2464f8831f
/dataloaders/dataset_mnist.py
077cf549bc1a1b27bbfb6b3bf638ecf7a69ddc80
[ "MIT" ]
permissive
ParkLabML/Dirichlet_Pruning
5b6f92d71d671da3b3a4a49c64e9541ef1bdd96e
0cc0406b7034e865dfb63188452fce3d67a0b881
refs/heads/master
2023-04-12T21:45:49.637771
2023-03-30T20:16:54
2023-03-30T20:16:54
340,360,892
2
0
null
null
null
null
UTF-8
Python
false
false
1,426
py
from torchvision import datasets, transforms import torch def load_mnist(BATCH_SIZE, trainval_perc=1): trainval_dataset = datasets.MNIST('data/MNIST', train=True, download=True, # transform=transforms.Compose([transforms.ToTensor(), # transforms.Normalize((0.1307,), (0.3081,))]), transform=transforms.ToTensor()) train_size = int(trainval_perc * len(trainval_dataset)) val_size = len(trainval_dataset) - train_size torch.manual_seed(0) train_dataset, val_dataset = torch.utils.data.random_split(trainval_dataset, [train_size, val_size]) test_dataset = datasets.MNIST('data/MNIST', train=False, transform=transforms.ToTensor()) print("Loading MNIST") # Load datasets train_loader = torch.utils.data.DataLoader(train_dataset, batch_size=BATCH_SIZE, shuffle=True) if len(val_dataset) >0: val_loader = torch.utils.data.DataLoader(val_dataset, batch_size=BATCH_SIZE, shuffle=True) else: val_loader = None test_loader = torch.utils.data.DataLoader( # datasets.MNIST('data', train=False, transform=transforms.Compose([transforms.ToTensor(), transforms.Normalize((0.1307,), (0.3081,))])), test_dataset, batch_size=BATCH_SIZE, shuffle=False) return train_loader, test_loader, val_loader
[ "kamil.m.adamczewski@gmail.com" ]
kamil.m.adamczewski@gmail.com
c7462e99e7ed1e7b6c8e80a420ef23d76c534ce7
684007b4f55c6e967bbd36ec6982342950c4e8b9
/user_story/Homework05_Zhonghua_Bao.py
d9967616ee1ae12666d881c005c94f6a621f1f1a
[]
no_license
cyberKnight17/SSW-555-GEDCOM
93066de08bd48de0f562e1d8b9996ff50e33c44d
07a5aa0a79d5fa3df0e2f4f1d1d1eef93c1a9788
refs/heads/master
2021-10-28T08:10:17.499895
2019-04-23T00:15:43
2019-04-23T00:15:43
null
0
0
null
null
null
null
UTF-8
Python
false
false
3,014
py
from datetime import datetime import unittest def periodCompare(dt1, dt2, dt3, dt4): """Compare two time period dt1 to dt2, dt3 to dt4 whether they have any intersection, include the condition where some date is not given """ result = None # convert string date into the date object if dt1 is not None: dt1 = datetime.strptime(dt1, '%d%b%Y') if dt2 is not None: dt2 = datetime.strptime(dt2, '%d%b%Y') if dt3 is not None: dt3 = datetime.strptime(dt3, '%d%b%Y') if dt4 is not None: dt4 = datetime.strptime(dt4, '%d%b%Y') # two points comparison if dt2 is not None and dt3 is not None: if dt2 <= dt3: result = True # another case of two points comparison if dt1 is not None and dt4 is not None: if dt4 <= dt1: result = True # compare three points, when one time period is given if dt1 is not None and dt2 is not None: if dt3 is not None: if dt3 > dt1 and dt3 < dt2: return False if dt4 is not None: if dt4 > dt1 and dt4 < dt2: return False # compare three points, when another time period is given if dt3 is not None and dt4 is not None: if dt1 is not None: if dt1 > dt3 and dt1 < dt4: return False if dt2 is not None: if dt2 > dt3 and dt2 < dt4: return False return result class Homework05Test(unittest.TestCase): def test_periodCompare(self): self.assertTrue(periodCompare('1JAN2019', '3JAN2019', '1FEB2019', '3FEB2019')) self.assertTrue(periodCompare('1FEB2019', '3FEB2019', '1JAN2019', '3JAN2019')) self.assertTrue(periodCompare('18MAY1984', '11NOV2011', '4MAY2013', None)) self.assertTrue(periodCompare(None, '11NOV2011', '4MAY2013', None)) self.assertTrue(periodCompare('10OCT1976', None, None, '8AUG1966')) self.assertTrue(periodCompare('18MAY1984', '11NOV2011', '11NOV2011', None)) self.assertTrue(periodCompare('28FEB2019', '28FEB2019', '28FEB2019', '28FEB2019')) self.assertFalse(periodCompare('16FEB1952', '30APR1999', '3JUN1969', '3JAN2019')) self.assertFalse(periodCompare('3JUN1969', '3JAN2019', '16FEB1952', '30APR1999')) self.assertFalse(periodCompare(None, '15SEP2007', '1AUG2007', 
None)) self.assertFalse(periodCompare('8MAY1945', '11NOV1990', '25APR1968', None)) self.assertFalse(periodCompare(None, '4APR2013', '12DEC2009', '20JAN2017')) self.assertIsNone(periodCompare(None, None, None, None)) self.assertIsNone(periodCompare('18MAY1984', '11NOV2011', None, '10OCT2016')) self.assertIsNone(periodCompare('18MAY1984', None, '4MAY2013', '10OCT2016')) self.assertIsNone(periodCompare('18MAY2014', '11NOV2017', '4MAY2013', None)) self.assertIsNone(periodCompare(None, None, '4MAY2013', '10OCT2016')) if __name__ == '__main__': unittest.main(verbosity=2)
[ "dingtongwang@outlook.com" ]
dingtongwang@outlook.com
1eaab63b4751bf14ca331840769f850341508d9f
92a8591afa785eba467e0f31d825b3f588d4d7f6
/rendezvous/env/Scripts/django-admin.py
1c2717319af41d31bb3a2d44faa3ab959f68a5c9
[ "MIT" ]
permissive
gazh1987/Rendezvous
1c9c44d5146961d2cfae918c9377f2b041a0e711
4e58a18b6599f9255252f86cb2c36338d39e9780
refs/heads/master
2020-12-26T01:01:48.461903
2016-09-12T15:54:28
2016-09-12T15:54:28
47,191,831
0
0
null
null
null
null
UTF-8
Python
false
false
181
py
#!C:\Users\G\Documents\GitHub\Rendezvous\rendezvous\env\Scripts\python.exe from django.core import management if __name__ == "__main__": management.execute_from_command_line()
[ "gary.hely2@student.dit.ie" ]
gary.hely2@student.dit.ie
6452090ca100845c839848f14ac2d04f85352f4d
934235f70a390a3ba0d7b464cddd10872f31cda3
/rango/server/.history/tango_with_django/rango/admin_20210103130028.py
361f6ca167ae05dc1771706293718383039c718e
[]
no_license
deji100/Projects
6919041ba23e77a5c74e5ab7692bfcee38ececcb
17e64d954d1d7805be57ec5d8d4344e4944889e6
refs/heads/master
2023-04-30T05:25:03.143303
2021-05-20T15:00:43
2021-05-20T15:00:43
338,844,691
0
0
null
null
null
null
UTF-8
Python
false
false
521
py
from django.contrib import admin from .models import Category, Page, User # Register your models here. class PageInline(admin.StackedInline): list_display = ('title', 'category', 'url') # fields = ('title', 'url', 'category') model class CategoryAdmin(admin.ModelAdmin): list_display = ('name', 'views', 'likes') # prepopulated_fields = {'slug': ('name',)} inlines = [PageInline] admin.site.register(Category, CategoryAdmin) admin.site.register(Page, PageAdmin) admin.site.register(User)
[ "68882568+deji100@users.noreply.github.com" ]
68882568+deji100@users.noreply.github.com
8926982881ac5435534f5f4f05f69881544cfdf3
6412ca05fa4efc7dac8de48a2b48783bacadd31b
/RUNTriggerStudies/test/crab3_TrigVal.py
f872abe510b33fda522da1167aef687e2247741b
[]
no_license
davidsheffield/RUNA
9f218261d022ec6ef4e22d673e3e519edb7b8cdb
f2ac360844b67777c689a226b92bbe7eb1cb0f6c
refs/heads/master
2021-01-18T10:56:44.233289
2015-11-04T09:24:38
2015-11-04T09:24:38
32,084,290
0
0
null
2015-03-12T15:22:26
2015-03-12T15:22:26
null
UTF-8
Python
false
false
1,960
py
################################################################## ######## TO RUN THIS: python crab3_QCD.py ######## DO NOT DO: crab submit crab3_QCD.py ################################################################## from CRABClient.UserUtilities import config from CRABAPI.RawCommand import crabCommand from httplib import HTTPException from multiprocessing import Process config = config() name = 'RUNTriggerValidation' version = 'v1' config.General.requestName = '' config.General.workArea = 'crab_projects' config.JobType.pluginName = 'Analysis' config.JobType.psetName = 'RUNTriggerValidation_cfg.py' config.JobType.allowUndistributedCMSSW = True config.Data.inputDataset = '' config.Data.splitting = 'LumiBased' config.Data.unitsPerJob = 2 config.Data.ignoreLocality = True config.Site.storageSite = 'T3_US_FNALLPC' def submit(config): try: crabCommand('submit', config = config) except HTTPException, hte: print 'Cannot execute commend' print hte.headers if __name__ == '__main__': Samples = [ #'/JetHT/Run2015B-PromptReco-v1/MINIAOD', '/JetHT/Run2015C-PromptReco-v1/MINIAOD', ##'/MET/Run2015B-PromptReco-v1/MINIAOD', ##'/SingleMu/Run2015B-PromptReco-v1/MINIAOD', ] for dataset in Samples: config.Data.inputDataset = dataset config.General.requestName = dataset.split('/')[1]+"_"+dataset.split('/')[2]+'_'+name+'_'+version if 'Run2015' in dataset: config.JobType.pyCfgParams = [ 'RUN='+dataset.split('/')[1]+"_"+dataset.split('/')[2], 'local=0' ] if 'Run2015B' in dataset: config.Data.lumiMask = '/afs/cern.ch/cms/CAF/CMSCOMM/COMM_DQM/certification/Collisions15/13TeV/Cert_246908-255031_13TeV_PromptReco_Collisions15_50ns_JSON.txt' elif 'Run2015C' in dataset: config.Data.lumiMask = '/afs/cern.ch/cms/CAF/CMSCOMM/COMM_DQM/certification/Collisions15/13TeV/Cert_246908-255031_13TeV_PromptReco_Collisions15_25ns_JSON_v2.txt' p = Process(target=submit, args=(config,)) p.start() p.join()
[ "gomez@physics.rutgers.edu" ]
gomez@physics.rutgers.edu
0f455d7b176d86b7bb8f1da032242070f9f64c87
c57286a76d184f6b67dfba7faa111ad89e18963c
/backend/website/migrations/0001_initial.py
372cc7e2f389bd23cb665100690acd025ea78b35
[]
no_license
aabele/kartona-strops.lv
3f1b7cde387ad23a40716de7380beac3f4802dba
b56e4b28ed4b9866329922afa4b117a6411ae73f
refs/heads/master
2022-05-05T18:42:42.731670
2019-12-09T19:13:49
2019-12-09T19:13:49
125,759,279
0
0
null
2022-04-22T20:51:48
2018-03-18T19:19:18
Python
UTF-8
Python
false
false
764
py
# Generated by Django 2.0.3 on 2018-03-23 18:13 from django.db import migrations, models class Migration(migrations.Migration): initial = True dependencies = [ ] operations = [ migrations.CreateModel( name='Feature', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('title', models.CharField(max_length=300)), ('description', models.CharField(max_length=300)), ('slug', models.SlugField(blank=True, null=True)), ('picture', models.ImageField(upload_to='features')), ], options={ 'abstract': False, }, ), ]
[ "aabele@gmail.com" ]
aabele@gmail.com
3d40546c359e6df7d0b2d8d22227284dda05fc65
a826d39ef808eb79b25723fe20caf029d2f4d6b8
/ex1.py
24c1c77191482c05d8d4b7389b460eafa347ef42
[]
no_license
paulvstheworld/functional_programming_exercises
e6f5bc12a17afbbb52249d87478840b8a8a550fa
70d97e90deb38ce6ab60989f6d6ac53351e2256c
refs/heads/master
2020-05-17T03:24:03.350354
2013-11-15T20:11:45
2013-11-15T20:11:45
14,433,478
1
0
null
null
null
null
UTF-8
Python
false
false
485
py
# Unfunctional version:
names = ['Mary', 'Isla', 'Sam']

# Hash every name in one pass.  Bug fix: the original used Python-2-only
# `print names` statements (a SyntaxError on Python 3); list() is also
# needed because Python 3's map() returns a lazy iterator, not a list.
names = list(map(lambda x: hash(x), names))
print(names)

'''
for i in range(len(names)):
    names[i] = hash(names[i])

print names
# => [6306819796133686941, 8135353348168144921, -1228887169324443034]
'''

# Rewrite the code above as a map
# Expected answer: [6306819796133686941, 8135353348168144921, -1228887169324443034]


def my_map(func, l):
    """Return a list with *func* applied to every item of iterable *l*."""
    return [func(item) for item in l]


print(my_map(lambda x: hash(x), names))
[ "paulvstheworld@gmail.com" ]
paulvstheworld@gmail.com
c879960cd4dc775c8db1ba284199a7d0b0825f72
d2d33fe343707afcfdc18c06547e16f7eb166983
/pretopologyx/__init__.py
058ea4059b8b017a99cdb835b4f28a6c0333a8a1
[]
no_license
jalabord/pretopologyx
4db2c8732cc51322f4001b444a33246ea585e8da
03cf8d3d2086773be2e1c1086e71d6cb97a4e528
refs/heads/master
2020-05-14T17:40:17.621699
2019-04-17T13:27:38
2019-04-17T13:27:38
181,896,219
2
0
null
null
null
null
UTF-8
Python
false
false
22
py
# Package marker: exposes the distribution name for introspection.
name = "pretopologyx"
[ "jlabordebar@gmail.com" ]
jlabordebar@gmail.com
14c625f012bd3934c0d225cf3b8a13d4edd5e01b
03aaee1451f921f37a3e59801f169086465a3788
/core/urls/v1.py
3f4f8a8fdd75c6390aba3d73aa5cf4bcdd5b79c2
[]
no_license
ChristianCastilloPY/backend_newsletter
197e1e93dad893edfa9a190fb97a08fcb5ba1c8a
43c3055b612a243746930a6d2d9537275870045d
refs/heads/master
2020-11-25T04:58:27.899484
2019-12-17T01:58:25
2019-12-17T01:58:25
228,511,366
0
0
null
null
null
null
UTF-8
Python
false
false
389
py
from django.urls import include, path
from rest_framework import routers
from core.views import UserViewSet
from newsletters.views import NewsletterViewSets, TagsViewSets

# API v1 router: each viewset is mounted under its own URL prefix.
router = routers.DefaultRouter()
for prefix, viewset in (
    (r'users', UserViewSet),
    (r'newsletters', NewsletterViewSets),
    (r'tag', TagsViewSets),
):
    router.register(prefix, viewset)

# Every v1 endpoint hangs off the router at the root of this URLconf.
urlpatterns = [path('', include(router.urls))]
[ "cdcb91@hotmail.com" ]
cdcb91@hotmail.com
6c28b314b4dcc23be0144989a21fad8e4a426699
792bf6c694f519ac4fd7a3ac343d4c3e17e55d9d
/a6_merge_sort.py
e2b4dbc0405a76801bd7d5c2ecee338906687597
[]
no_license
atm1992/algorithm_interview
fb96b65f5ed2d0f3d6b6c5b430e9d33476d38aa2
ac1c2b041595e9689d775c69af373ec743299e3b
refs/heads/master
2022-12-13T06:56:02.056105
2020-08-18T15:58:11
2020-08-18T15:58:11
275,198,470
0
1
null
2020-08-31T13:10:09
2020-06-26T16:18:40
Python
UTF-8
Python
false
false
1,313
py
# -*- coding: UTF-8 -*-
"""
Merge sort.

Classic divide-and-conquer: recursively split the list in half, sort each
half, then merge the two sorted halves.  The input list is never mutated;
a freshly built sorted list is returned.
"""


def merge_sort(alist):
    """Return a new list with the items of *alist* in ascending order."""
    # Empty/None input and single-element lists are already sorted;
    # they are returned as-is (no copy is made).
    if not alist or len(alist) < 2:
        return alist
    middle = len(alist) // 2
    # Sort each half independently, then combine them.
    sorted_left = merge_sort(alist[:middle])
    sorted_right = merge_sort(alist[middle:])
    return merge(sorted_left, sorted_right)


def merge(left_li, right_li):
    """Merge two sorted lists into one new sorted list (stable)."""
    merged = []
    i = j = 0
    while i < len(left_li) and j < len(right_li):
        # '<=' keeps equal items in their original order (stability).
        if left_li[i] <= right_li[j]:
            merged.append(left_li[i])
            i += 1
        else:
            merged.append(right_li[j])
            j += 1
    # At most one of these two extends with anything.
    merged.extend(left_li[i:])
    merged.extend(right_li[j:])
    return merged


if __name__ == '__main__':
    alist = [2, 1, 45, 1, 21, 4, 2, 6, 9]
    print("归并排序前:", alist)
    # Unlike the in-place sorts, a new sorted list is returned here and
    # the original list is left untouched.
    res = merge_sort(alist)
    print("归并排序后:", res)
[ "atm1992@126.com" ]
atm1992@126.com
4e9a441e005deba519bdcadeb7ea62dcdca17ba3
6c491c91b9d99a7b0a8d39bbc1a7c785cf9bba1d
/erehwon/profiles/forms.py
319004e783acd54776ba941df175729fd4e43072
[]
no_license
lucyrose93/Erehwon
7daa4e7a5c1bc5b395de54df9b6cdd86496b4f3f
4afc65305b8e4ff1e069fa444d3012ffeaa54e03
refs/heads/master
2020-05-29T08:50:53.152262
2016-10-06T18:36:10
2016-10-06T18:36:10
70,182,174
0
0
null
2016-10-06T18:56:46
2016-10-06T18:37:43
JavaScript
UTF-8
Python
false
false
380
py
from django import forms
from profiles.models import Project, ErehwonUser
from registration.forms import RegistrationForm


class ProjectForm(forms.ModelForm):
    """Form for creating/editing a Project (title and synopsis only)."""

    class Meta:
        model = Project
        fields = ('title', 'synopsis')


class ErehwonUserSignUpForm(RegistrationForm):
    """Sign-up form for ErehwonUser accounts.

    Bug fix: the fields tuple listed 'password1' twice; the second entry
    should be the confirmation field 'password2'.
    """

    model = ErehwonUser
    fields = ('username', 'email', 'password1', 'password2')
[ "marizadima@yahoo.com" ]
marizadima@yahoo.com
ae6b3ab92e623b9b4eb6f6b440f136d3376dda32
c50a814aace937205c925b44b57cf3078d2416af
/face_detector.py
07c32bfbb992542f88636c7e9f70c252ace9bb18
[]
no_license
JamiKazmi/face_detection
f02bc018e7dbad405f5825093e06ee937c18b52a
434c7dfa6937aa2439922e0520d7fc3b26043026
refs/heads/master
2020-12-18T14:45:03.397862
2020-01-21T19:38:14
2020-01-21T19:38:14
235,424,437
0
0
null
null
null
null
UTF-8
Python
false
false
1,881
py
# import the necessary packages import cv2 as cv import numpy as np # load our serialized model from disk print('[INFO] loading model...') net = cv.dnn.readNetFromCaffe( 'files/deploy.prototxt.txt', 'files/res10_300x300_ssd_iter_140000.caffemodel' ) # load the input image and construct an input blob for the image # by resizing to a fixed 300x300 pixels and then normalizing it image = cv.imread('images/6.jpg') (h, w) = image.shape[:2] blob = cv.dnn.blobFromImage( cv.resize(image, (300, 300)), 1.0, (300, 300), (104.0, 177.0, 123.0)) # pass the blob through the network and obtain the detections and # predictions print("[INFO] computing object detections...") net.setInput(blob) detections = net.forward() # loop over the detections for i in range(0, detections.shape[2]): # extract the confidence (i.e., probability) associated with the # prediction confidence = detections[0, 0, i, 2] # filter out weak detections by ensuring the `confidence` is # greater than the minimum confidence if confidence > 0.5: # compute the (x, y)-coordinates of the bounding box for the # object box = detections[0, 0, i, 3:7] * np.array([w, h, w, h]) (startX, startY, endX, endY) = box.astype('int') # draw the bounding box of the face along with the associated # probability text = '{:.2f}%'.format(confidence * 100) y = startY - 10 if startY - 10 > 10 else startY + 10 cv.rectangle( image, (startX, startY), (endX, endY), (0, 0, 255), 2 ) cv.putText( image, text, (startX, y), cv.FONT_HERSHEY_SIMPLEX, 0.50, (0, 0, 255), 2 ) # show the output image cv.imshow("Output", image) cv.waitKey(0) cv.destroyAllWindows()
[ "noreply@github.com" ]
noreply@github.com
6ecbe9483dab0157b3744392d025a5dedadd6fb3
945f32b99fd1fb2a37dcdca3d0c8d2a434fc898d
/chap01/print_stars2.py
0063d53a065799752c49bd8d784801294865a83e
[]
no_license
MadJangE00/Algoritom_py
aa80084d21b9d6d7bcdfbc648e484d3170cbe262
890197acd5f727a6122f44e70b029e59d21a05ab
refs/heads/master
2023-08-24T19:53:28.993443
2021-09-28T08:00:49
2021-09-28T08:00:49
401,956,136
0
0
null
null
null
null
UTF-8
Python
false
false
277
py
# Print n stars, wrapping the line every w stars (version 2).
print('*를 출력합니다.')
n = int(input('몇 개를 출력할까요?: '))
w = int(input('몇 개마다 줄바꿈할까요?: '))

# Emit the full rows of w stars, then one shorter row for the remainder
# (only when n is not an exact multiple of w).
full_rows, rest = divmod(n, w)
for _ in range(full_rows):
    print('*' * w)
if rest:
    print('*' * rest)
[ "jiniixz@naver.com" ]
jiniixz@naver.com
b8059810cfb4b4b8f554743338573e95cf4247f5
e31c411b82bee8d1ce1d45887e979fd95a4bb814
/venv/test.py
42e66d2150d310e879a63798cbc51bc05f9b6422
[]
no_license
ZTianle/reselm
9c1994e4b7907bdd4a304a4f64df00a4f956b59f
94e2732bfd3a62218ba247f41a495f0686f4239a
refs/heads/master
2020-05-03T23:18:44.398788
2019-04-02T14:13:07
2019-04-02T14:13:07
178,862,728
3
0
null
null
null
null
UTF-8
Python
false
false
783
py
import pandas as pd
import numpy as np
import matplotlib
import matplotlib.pyplot as plt
from elm_confidence import HiddenLayer
import math

# Build toy data: y = sin(x), observed with Gaussian noise (y_real).
x = np.arange(0, 2*np.pi, 2*np.pi/100)
y = np.sin(x)
x = x.reshape(100, 1)
y = y.reshape(100, 1)
y_real = y + np.random.randn(100, 1)/5
plt.plot(x, y)
plt.scatter(x, y_real)
plt.show()

# 80/20 sequential train/test split on the inputs.
x_train = x[0:80, -1]
x_test = x[80:, -1]
x_train = x_train.reshape(-1, 1)
x_test = x_test.reshape(-1, 1)

# Bug fix: the targets were sliced from x (the inputs) instead of the
# noisy observations y_real, so the model was being trained to map x to x.
y_train = y_real[0:80, -1]
y_test = y_real[80:, -1]
y_train = y_train.reshape(-1, 1)
y_test = y_test.reshape(-1, 1)

print(x_train.shape)
print(y_train.shape)
print(x_test.shape)
print(y_test.shape)

# Train a 5-neuron ELM regressor and predict on the held-out points
# (regressor_test also returns a confidence estimate).
my_ELM = HiddenLayer(x_train, 5, 0.0000001)
my_ELM.regressor_train(y_train)
y_test_ELM, y_test_ELM_c = my_ELM.regressor_test(x_test)
[ "tianlecsu@126.com" ]
tianlecsu@126.com
09f47ffa874febc1dd80bb23531d909ac281739b
694c187c8a00bee8c670c1690170099bad9b16b3
/hindex.py
edded2784cbd958ce569e1997c2a49c5589810d0
[]
no_license
ajayvenkat10/Competitive
301f220b6d296f7e34328f192c43c4d7ef208cb1
14f2ecebe10eb19f72cc412dd0c414b3b1de9b4d
refs/heads/master
2022-11-20T14:31:33.590099
2020-07-23T15:39:14
2020-07-23T15:39:14
281,599,951
0
0
null
null
null
null
UTF-8
Python
false
false
450
py
# For each test case, print the running h-index after each new paper:
# starting from the second paper, the h-index can grow by at most one
# whenever enough of the papers seen so far have that many citations.
t = int(input())
for case_no in range(t):
    n = int(input())
    citations = list(map(int, input().split()))

    running = [1]
    candidate = 2
    for i in range(1, len(citations)):
        # Count how many of the first i+1 papers have >= candidate citations.
        enough = sum(1 for c in citations[:i + 1] if c >= candidate)
        if enough >= candidate:
            running.append(candidate)
            candidate += 1
        else:
            running.append(candidate - 1)

    print("Case #%d: " % (case_no + 1), end="")
    print(*running)
[ "37923623+ajayvenkat10@users.noreply.github.com" ]
37923623+ajayvenkat10@users.noreply.github.com
44b98e1c1ec0e2b236ab9eaf250583dc65a3c145
8a69bc0f671d19213d22d42e193ad9aaf157a060
/server/apps/products/urls.py
40b3826a41d05320c5757b3616f5a3353b52bc7b
[]
no_license
seanli9018/ecommerce_site
1d11741caad7f7f183b7df18d6920f9302790f73
bbe20afea850eaf44f2eec3a2b5fa5a6c4a0d82b
refs/heads/master
2023-03-07T22:56:32.514787
2021-02-19T05:15:16
2021-02-19T05:15:16
340,264,502
0
0
null
null
null
null
UTF-8
Python
false
false
225
py
from django.urls import path

from .views import ProductCategoryView

app_name = 'products'

# Category listing, scoped by the parent category's primary key.
urlpatterns = [
    path(
        'product-categories/<int:parent_category_id>/',
        ProductCategoryView.as_view(),
        name='product-categories',
    ),
]
[ "seanli9018@gmail.com" ]
seanli9018@gmail.com
bb53c9e191f0e6ad1a21914ca66f769ef49f3c88
ac8c31b5971161adf0b7e66d85effd0ec8f5b7dd
/Day 033/kanye/main.py
a023e6eb5e6d84c625ee4856c063de221ad7422a
[]
no_license
satuhyva/100daysOfPython
5f7e7e1959358bcb370f73abe0e5b9627acf01a2
bdd93c290434aa05bcd52cd96d602bd72acc4a41
refs/heads/master
2023-08-17T01:10:35.218447
2021-09-20T05:59:15
2021-09-20T05:59:15
394,317,321
0
0
null
null
null
null
UTF-8
Python
false
false
754
py
from tkinter import *
import requests

KANYE_URL = "https://api.kanye.rest"


def get_quote():
    """Fetch a fresh Kanye quote from the API and show it on the canvas."""
    payload = requests.get(url=KANYE_URL).json()
    canvas.itemconfig(quote_text, text=payload["quote"])


# --- window / canvas setup -------------------------------------------------
window = Tk()
window.title("Kanye Says...")
window.config(padx=50, pady=50)

canvas = Canvas(width=300, height=414)
background_img = PhotoImage(file="background.png")
canvas.create_image(150, 207, image=background_img)
quote_text = canvas.create_text(
    150, 207, text="", width=250, font=("Arial", 30, "bold"), fill="white"
)
canvas.grid(row=0, column=0)

# Clicking Kanye's face requests a new quote.
kanye_img = PhotoImage(file="kanye.png")
kanye_button = Button(image=kanye_img, highlightthickness=0, command=get_quote)
kanye_button.grid(row=1, column=0)

window.mainloop()
[ "hyvarinen.satu.i@gmail.com" ]
hyvarinen.satu.i@gmail.com
27c40c41352325b166a5a71d7ac5b3512c460b63
58df77a3bf48213326872a878a15a737c56fad0d
/lists/migrations/0004_auto_20191120_2237.py
3b4a2a57e34f84ec53972489585bbef61566fb8f
[]
no_license
samnoh/airbnb-clone
7d84e3a09f7ca40cf2673bf8abe7d6820f18a4df
f5d9fdc096b4c40c27b391381686d4067f263894
refs/heads/master
2023-01-24T05:50:16.922036
2019-11-25T09:18:22
2019-11-25T09:18:22
211,789,729
0
0
null
2023-01-05T00:08:24
2019-09-30T06:22:55
Python
UTF-8
Python
false
false
530
py
# Generated by Django 2.2.5 on 2019-11-20 09:37 from django.conf import settings from django.db import migrations, models import django.db.models.deletion class Migration(migrations.Migration): dependencies = [ ('lists', '0003_auto_20191120_2216'), ] operations = [ migrations.AlterField( model_name='list', name='user', field=models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, related_name='list', to=settings.AUTH_USER_MODEL), ), ]
[ "samnoh21@outlook.com" ]
samnoh21@outlook.com
c3e9ab55ca6831d12241535c587607e835858b78
e46a08579915bf098395a0629c237b8de8c21d7a
/migrations/versions/5992dfad25e9_.py
0aef95005ee1a17648dbbb627d8037987a69a774
[]
no_license
arashbzz/asqari
3080b4ee8bf0038ba7298e36ad8fcc125ff5e596
c7b2a3bf10519c3490eaeaf013781449d59e1c71
refs/heads/master
2023-07-10T10:26:28.255035
2021-08-17T12:46:19
2021-08-17T12:46:19
390,408,168
0
0
null
null
null
null
UTF-8
Python
false
false
1,196
py
"""empty message Revision ID: 5992dfad25e9 Revises: 3a373e361543 Create Date: 2021-07-28 15:59:16.782493 """ from alembic import op import sqlalchemy as sa # revision identifiers, used by Alembic. revision = '5992dfad25e9' down_revision = '3a373e361543' branch_labels = None depends_on = None def upgrade(): # ### commands auto generated by Alembic - please adjust! ### op.create_table('photo', sa.Column('id', sa.Integer(), nullable=False), sa.Column('description', sa.String(length=128), nullable=False), sa.Column('filename', sa.String(length=128), nullable=False), sa.PrimaryKeyConstraint('id') ) op.create_table('temp', sa.Column('id', sa.Integer(), nullable=False), sa.Column('name', sa.String(length=32), nullable=False), sa.Column('slug', sa.String(length=32), nullable=False), sa.Column('maxtemp', sa.Integer(), nullable=False), sa.Column('mintemp', sa.Integer(), nullable=False), sa.PrimaryKeyConstraint('id') ) # ### end Alembic commands ### def downgrade(): # ### commands auto generated by Alembic - please adjust! ### op.drop_table('temp') op.drop_table('photo') # ### end Alembic commands ###
[ "Bazaz.Arash@zistab.net" ]
Bazaz.Arash@zistab.net
0ea09ec878674f42ce2fb633727af303b0ff9662
830398bc5ae951b153ff695a40be7239742bc73e
/exercises/parse_dhcp_snooping.py
27114f9e94d30bcea5c6296a1383f9c2e461987f
[]
no_license
dmikos/pyneng
ff67f1d617a97d73103a7785a7bf86140e7baa82
543fb0d9fc63a2afee45d2465af3a4c3966e4a86
refs/heads/master
2021-01-25T14:56:44.181140
2018-04-23T04:31:00
2018-04-23T04:31:00
123,739,447
0
1
null
null
null
null
UTF-8
Python
false
false
669
py
# -*- coding: utf-8 -*-
import re

# Matches one dhcp-snooping binding line, e.g.:
# '00:09:BB:3D:D6:58 10.1.10.2 86250 dhcp-snooping 10 FastEthernet0/1'
regex = re.compile('(?P<mac>\S+) +(?P<ip>\S+) +\d+ +\S+ +(?P<vlan>\d+) +(?P<port>\S+)')

result = []
with open('dhcp_snooping.txt') as data:
    for line in data:
        binding = regex.search(line)
        if binding:
            result.append(binding.groupdict())

print('К коммутатору подключено {} устройства'.format(len(result)))

# One block of mac/ip/vlan/port parameters per discovered device.
for num, comp in enumerate(result, 1):
    print('Параметры устройства {}:'.format(num))
    for key, value in comp.items():
        print('{:10}: {:10}'.format(key, value))
[ "dkostinov@gmail.com" ]
dkostinov@gmail.com
626c1f1516f5ea6e5a6e6d929b05d0d694b71eac
82f2d0c99b8f8d0b3a404b82ce69acf44d88aace
/application/factory.py
b7d317d188af5f39e1ac43d05cf13f205b972970
[]
no_license
digital-land/register-editing-tool
aedf4af00f64000194d1b1d9cf82c2d87a31c38e
a0b2b1ca2fca9f6840993e98891026063dbb672b
refs/heads/master
2022-12-23T01:12:44.303887
2019-07-01T07:56:14
2019-07-01T07:56:14
192,899,362
0
1
null
2022-12-08T05:16:47
2019-06-20T10:23:39
CSS
UTF-8
Python
false
false
568
py
from flask import Flask


def create_app(config_filename):
    """Application factory: build, configure and wire up the Flask app."""
    app = Flask(__name__)
    app.config.from_object(config_filename)

    register_blueprints(app)
    register_extensions(app)

    # Hooks kept for later use:
    # register_errorhandlers(app)
    # register_commands(app)
    # register_filters(app)
    # register_context_processors(app)

    return app


def register_blueprints(app):
    """Attach the frontend blueprint (imported lazily to avoid cycles)."""
    from application.frontend.views import frontend
    app.register_blueprint(frontend)


def register_extensions(app):
    """Bind the SQLAlchemy extension to this app instance."""
    from application.extensions import db
    db.init_app(app)
[ "splogy2001@hotmail.com" ]
splogy2001@hotmail.com
53d4ead111e0079f93a23edb345237fecbed5bb6
ffe599794f7b90997b9721fca9a4ca860ea6e55e
/jionlp/algorithm/ner/ner_data_converter.py
3a62a90f9bed458f6a143984c7ab8c3b4ed5bfcf
[ "Apache-2.0" ]
permissive
XrosLiang/JioNLP
828434fa41440509b0eba84278a432d00f74fc74
63ec32d6c409849d38f275a513b1c02493d85f86
refs/heads/master
2022-11-10T21:37:43.271403
2020-06-24T04:04:26
2020-06-24T04:04:26
null
0
0
null
null
null
null
UTF-8
Python
false
false
2,477
py
# -*- coding=utf-8 -*-
#
# NER datasets come in two storage formats (entity spans vs. per-token tags).
# The default tagging scheme is BIOES.

import json
from typing import Dict, Any, Tuple, Optional, List

from jionlp import logging


__all__ = ['entity2tag', 'tag2entity']


def entity2tag(token_list: List[str], entities: List[Dict[str, Any]],
               formater='BIOES'):
    '''Convert entity spans to a per-token BIOES tag sequence.

    Overlapping annotations are resolved by keeping the earlier entity and
    dropping (with a warning) any later entity that overlaps it.

    Args:
        token_list: the tokenized text, one string per token.
        entities: entity dicts with 'type' and 'offsets' == [start, end)
            token indices, assumed sorted by start offset.
        formater: tagging scheme; only 'BIOES' is implemented.

    Returns:
        List[str]: one tag per token ('O', or 'B-/I-/E-/S-' + entity type).

    Examples:
        >>> token_list = list('胡静静在水利局工作。')
        >>> entities = [
                {'text': '胡静静', 'offsets': [0, 3], 'type': 'Person'},
                {'text': '水利局', 'offsets': [4, 7], 'type': 'Orgnization'}]
        >>> print(entity2tag(token_list, entities))
        ['B-Person', 'I-Person', 'E-Person', 'O', 'B-Orgnization',
         'I-Orgnization', 'E-Orgnization', 'O', 'O', 'O']
    '''
    tags = ['O' for i in range(len(token_list))]

    flag = 0  # end offset of the last entity written into `tags`
    for idx, entity in enumerate(entities):
        # Bug fix: an entity overlaps the previous one whenever it *starts*
        # before the previous end; the old test (`offsets[1] < flag`) only
        # caught fully nested entities, letting partial overlaps corrupt
        # the tag sequence.
        if entity['offsets'][0] < flag:
            if idx > 0:
                logging.warning(
                    'The entity {} is overlapped with {}.'.format(
                        json.dumps(entity, ensure_ascii=False),
                        json.dumps(entities[idx - 1], ensure_ascii=False)))
        else:
            if entity['offsets'][1] - entity['offsets'][0] == 1:
                # Single-token entity.
                tags[entity['offsets'][0]] = 'S-' + entity['type']
            else:
                tags[entity['offsets'][0]] = 'B-' + entity['type']
                if entity['offsets'][1] - entity['offsets'][0] > 2:
                    for j in range(entity['offsets'][0] + 1,
                                   entity['offsets'][1] - 1):
                        tags[j] = 'I-' + entity['type']
                tags[entity['offsets'][1] - 1] = 'E-' + entity['type']
            flag = entity['offsets'][1]

    return tags


def tag2entity():
    ''' Convert a per-token tag sequence back to entity spans (TODO). '''
[ "dongrixinyu.89@163.com" ]
dongrixinyu.89@163.com
d0e7fe53fe08e74620f08cc5d9b706d7ef3555b0
055a4195d5761784742014a1b365eab2708aa9c0
/v3.py
e5b530774c9d980c71e6139c5bc38f6f68e0a596
[]
no_license
gandharv42/versions
90bf1a1aab95d3f9bdb739edfb8ca40755923404
beb5fb581b387af4bded48269c5c4767fdb8baa2
refs/heads/master
2020-03-23T12:17:59.917246
2018-07-19T11:19:38
2018-07-19T11:19:38
141,550,244
0
0
null
null
null
null
UTF-8
Python
false
false
20
py
print ("version 3")
[ "noreply@github.com" ]
noreply@github.com
9c85786dbab95966ceed294a1c09e09cff8ab036
dda2a2318238440ec424fd566a01e7b6434a55e1
/hello.py
cbdb0de971176a3f22b9f9760dd419f632cc14a2
[]
no_license
sun-ck/learn-pythongit
0004012c955d5c66365ce2ba3710d33e80d3566f
3b92705481625aef030b8e6ec2601622e70a0166
refs/heads/master
2020-04-06T19:33:52.369131
2018-11-18T15:01:13
2018-11-18T15:01:13
157,741,270
0
0
null
null
null
null
UTF-8
Python
false
false
346
py
#! python3
# Greet the user, record their name/nickname, then echo next year's age.

print('尊敬的用户,您好!')

print('请输入您的姓名或昵称:')
nickname = input()
print('欢迎您加入本社区'+nickname)

print('请输入您的年龄:')
age_text = input()
# input() returns text, so convert before adding one year.
print('您快要'+str(int(age_text)+1)+'呢了')
[ "sun123scc@163.com" ]
sun123scc@163.com
737e1ba8f65b27a875696795e43eca53b3814c61
566d1c7e6fcf1ec0bf9a196feb3537ed1fec2408
/index/urls.py
c382e05be35d2701260074d334efcc5f42df2118
[]
no_license
fduecnkbc/damir-django
99398d2de81a741bd78b42a5a7b6dfbbdfc6afc5
13ec3592a486e17640952e4ac207bc3aaa9913a7
refs/heads/master
2021-01-17T21:01:36.931939
2017-03-21T04:57:59
2017-03-21T04:57:59
84,156,805
0
0
null
null
null
null
UTF-8
Python
false
false
337
py
from django.conf.urls import include, url
from django.contrib import admin

# Root URLconf: admin first, then the news app mounted at the site root,
# then the static "about" pages.  Pattern order is significant.
urlpatterns = [
    # Examples:
    # url(r'^$', 'index.views.home', name='home'),
    # url(r'^blog/', include('blog.urls')),
    url(r'^admin/', include(admin.site.urls)),
    url(r'', include('news.urls')),
    url(r'^about/', include('about.urls')),
]
[ "fduecnkbc@gmail.com" ]
fduecnkbc@gmail.com
0939786d769d6a46962a9bc61dc23f0e581d5056
b0a049c1170ea9cab31af77f4007085bfd27b2bc
/ros_basics/src/actions_quiz/src/my_script.py
cc36cf4540da212ec33200f5e922567c374b7c51
[]
no_license
nikh1508/ros_course_ws
edddfcbe0b7f1fc28cc019d1304ec2b65139481e
a1169ae7cc5ea6d15d35d19a670207f38148a1c9
refs/heads/master
2022-11-16T04:04:18.145933
2020-07-12T18:24:52
2020-07-12T18:24:52
275,440,694
0
1
null
null
null
null
UTF-8
Python
false
false
1,543
py
#! /usr/bin/env python
import rospy
import actionlib
from actions_quiz.msg import CustomActionMsgAction, CustomActionMsgFeedback, CustomActionMsgResult
from std_msgs.msg import Empty


class CustomActionClass(object):
    """Action server that takes off or lands the drone on request."""

    _feedback = CustomActionMsgFeedback()
    _result = CustomActionMsgResult()
    _empty_msg = Empty()

    def __init__(self):
        # Create the server first, then the publishers, and only start
        # accepting goals once everything exists.
        self._as = actionlib.SimpleActionServer('action_custom_msg_as',
                                                CustomActionMsgAction,
                                                self.goal_callback, False)
        self._pub_takeoff = rospy.Publisher('/drone/takeoff', Empty, queue_size=1)
        self._pub_land = rospy.Publisher('/drone/land', Empty, queue_size=1)
        self._as.start()

    def goal_callback(self, goal):
        """Publish takeoff/land messages for 3 s at 1 Hz with feedback."""
        requested = goal.goal
        rate = rospy.Rate(1)

        if requested == 'TAKEOFF':
            publisher = self._pub_takeoff
            self._feedback.feedback = 'TAKING OFF'
        elif requested == 'LAND':
            publisher = self._pub_land
            self._feedback.feedback = 'LANDING'
        else:
            # Unknown goal string: leave the goal unfinished (not succeeded),
            # matching the original success-flag behaviour.
            return

        for _ in range(3):
            publisher.publish(self._empty_msg)
            self._as.publish_feedback(self._feedback)
            rate.sleep()

        self._as.set_succeeded()


if __name__ == '__main__':
    rospy.init_node('action_custom_node')
    CustomActionClass()
    rospy.loginfo('Action Server started...')
    rospy.spin()
[ "nikh150897@gmail.com" ]
nikh150897@gmail.com
dd59dfa1017f3a432517f488d3749ce29cdab955
be116b3c2d9f1ecf8e1abf7e9916505a6c2b9359
/secondDay/TwoColorsBolls.py
18dcf20315bce1388053222ddc70b5702ec666a2
[]
no_license
littleyellowbicycle/MyPython100Days
eee13a2ccbcc2aa76c4e327beed3bae3550273bb
106d96feb020a9e40dc47a2d9d59f67eb9a6af11
refs/heads/master
2020-06-02T03:28:30.883872
2019-07-16T15:32:28
2019-07-16T15:32:28
191,020,746
1
0
null
null
null
null
UTF-8
Python
false
false
720
py
from random import randrange, randint, sample


def display(balls):
    """Print one bet: a '|' separator line is emitted just before the
    final (blue) ball, and every number is right-aligned in 2 columns."""
    last = len(balls) - 1
    for index, ball in enumerate(balls):
        if index == last:
            print("|")
        print('%2d' % ball, end=' ')


def random_select():
    """Pick one lottery bet: 6 distinct red balls from 1-32, plus one
    blue ball from 1-16 appended as the last element."""
    selection = sample(range(1, 33), 6)
    selection.append(randint(1, 16))
    return selection


if __name__ == "__main__":
    n = int(input("请输入投注数: "))
    for _ in range(n):
        display(random_select())
[ "33934765+littleyellowbicycle@users.noreply.github.com" ]
33934765+littleyellowbicycle@users.noreply.github.com
5bbdcda6e08ba418163a20d725764c3c934c7ae2
ba9d6e33133709eb8ef9c643e50646596f8ab98b
/utils/tools.py
5d777f1c3521baca1b75c38a1b54257f5527b79d
[]
no_license
otniel/computer-vision
2eb5588d7662ada0999001083e5562e3c3e69fd1
82430fd60c21d3f6c6609b429b051b25526b0102
refs/heads/master
2021-01-25T07:07:51.592712
2015-05-18T17:29:10
2015-05-18T17:29:10
29,542,857
1
0
null
null
null
null
UTF-8
Python
false
false
7,169
py
# Image-processing helpers (Python 2 / PIL): inversion, grayscale,
# binarization with an iterative threshold, normalization and small
# coordinate/color utilities.
__author__ = 'otniel'

import Image
import numpy as np
import random
import matplotlib.pyplot as plt

from math import sqrt, floor

MAX_PIXEL_INTENSITY = 255
HALF_PIXEL_INTENSITY = 127
MIN_PIXEL_INTENSITY = 0

def invert_rgb_image(image):
    # Return a new image with every RGB channel inverted (photo negative).
    pixels = image.load()
    inverted_image = Image.new(image.mode, image.size)
    inverted_pixels = inverted_image.load()
    for y in xrange(image.size[1]): # height
        for x in xrange(image.size[0]): # width
            inverted_pixels[x, y] = _invert_rgb_pixel(pixels[x, y])
    return inverted_image

def _invert_rgb_pixel(pixel):
    # Invert one (r, g, b) tuple against the maximum intensity.
    r, g, b = pixel
    return (MAX_PIXEL_INTENSITY - r), (MAX_PIXEL_INTENSITY - g), (MAX_PIXEL_INTENSITY - b)

def grayscale_rgb_image(image):
    # Return a new RGB image where each pixel holds its luminance in all
    # three channels.
    print "Grayscaling image..."
    pixels = image.load()
    grayscale_image = Image.new("RGB", image.size)
    grayscale_pixels = grayscale_image.load()
    for y in xrange(image.size[1]): # height
        for x in xrange(image.size[0]): # width
            gpx = _get_grayscale_pixel(pixels[x, y])
            grayscale_pixels[x, y] = (gpx, gpx, gpx)
    return grayscale_image

def _get_grayscale_pixel(rgb_pixel):
    red, green, blue = rgb_pixel
    # Weighted sum
    # Intensity coefficients [http://u.to/qEtWCg]
    return int(sum([x * y for (x, y) in zip([red, green, blue], [0.2126, 0.7152, 0.0722])]))

def binarize_rgb_image(image):
    # Grayscale first, then threshold to pure black/white.
    grayscale_image = grayscale_rgb_image(image)
    return binarize_image(grayscale_image)

def binarize_image(image):
    # NOTE(review): mutates and returns the image passed in (no copy).
    binary_image = image
    binary_pixels = binary_image.load()
    threshold = calculate_image_threshold(image)
    for y in xrange(image.size[1]): # height
        for x in xrange(image.size[0]): # width
            bp = _binarize_pixel(binary_pixels[x, y][0], threshold)
            binary_pixels[x, y] = (bp, bp, bp)
    return binary_image

def _binarize_pixel(pixel, threshold=HALF_PIXEL_INTENSITY):
    # Map one intensity to pure white (>= threshold) or pure black.
    if pixel >= threshold:
        return MAX_PIXEL_INTENSITY
    return MIN_PIXEL_INTENSITY

def calculate_threshold(data):
    # Iterative isodata-style thresholding: repeatedly split the data at
    # the current threshold and move it to the midpoint of the two group
    # means until the means stop changing.
    # Reference: http://goo.gl/7nP48T page 12
    threshold = data.mean()
    previous_mean_one, previous_mean_two = 0, 0
    group_one, group_two = _get_lower_group(data, threshold), \
                           _get_upper_group(data, threshold)
    mean_one, mean_two = group_one.mean(), group_two.mean()
    threshold = 0.5 * (mean_one + mean_two)
    while previous_mean_one != mean_one or previous_mean_two != mean_two:
        previous_mean_one, previous_mean_two = mean_one, mean_two
        group_one, group_two = _get_lower_group(data, threshold), _get_upper_group(data, threshold)
        mean_one, mean_two = int(group_one.mean()), int(group_two.mean())
        threshold = 0.5 * (mean_one + mean_two)
    return threshold

def calculate_image_threshold(image):
    # NOTE(review): get_pixels_list already extracts channel 0, so the
    # second `pixels[0]` indexing below looks redundant/suspect — confirm
    # against a real PIL image before relying on this path.
    data = get_pixels_list(image)
    data = np.array([pixels[0] for pixels in data])
    return calculate_threshold(data)

def _get_lower_group(data, threshold):
    # Values strictly below the threshold (values == threshold are dropped).
    return np.array([value for value in data if value < threshold])

def _get_upper_group(data, threshold):
    # Values strictly above the threshold.
    return np.array([value for value in data if value > threshold])

def plot_data(data):
    # Show a 10-bin histogram of the data (blocks on plt.show()).
    hist, bins = np.histogram(data)
    center = (bins[:-1] + bins[1:]) / 2
    width = 0.7 * (bins[1] - bins[0])
    plt.bar(center, hist, align='center', width=width)
    plt.show()

def calculate_global_gradient(horizontal_gradient, vertical_gradient):
    """Combine per-pixel horizontal/vertical gradients into gradient
    magnitudes: sqrt(gx^2 + gy^2) for each pixel pair."""
    return [sqrt(x ** 2 + y ** 2) for (x, y) in zip(horizontal_gradient, vertical_gradient)]

def get_pixels_list(image):
    # Flatten the image row by row into a 1-D array of first-channel values.
    pixels = image.load()
    return np.array([pixels[x, y][0] for y in xrange(image.size[1]) for x in xrange(image.size[0])])

def generate_random_color():
    # Random RGB color kept away from near-black and near-white extremes.
    black_limit, white_limit = MIN_PIXEL_INTENSITY + 50, MAX_PIXEL_INTENSITY - 50
    r = _get_random_intensity(black_limit, white_limit)
    g = _get_random_intensity(black_limit, white_limit)
    b = _get_random_intensity(black_limit, white_limit)
    return r, g, b

def _get_random_intensity(black_limit=MIN_PIXEL_INTENSITY, white_limit=MAX_PIXEL_INTENSITY):
    return random.randint(black_limit, white_limit)

def generate_color_palette(number_of_colors=5):
    # List of `number_of_colors` random colors (duplicates are possible).
    return [generate_random_color() for number in xrange(number_of_colors)]

def get_image_diagonal_distance(image):
    # Length of the image diagonal in pixels, truncated to an int.
    return int(sqrt(image.size[0] ** 2 + image.size[1] ** 2))

def is_border_pixel(pixel):
    # In a binarized image, white pixels mark detected borders/edges.
    return pixel == MAX_PIXEL_INTENSITY

def image_to_cartesian_coordinates(x, y, width, height):
    # Shift the origin from the top-left corner to the image center
    # (y grows upward in the result).
    cartesian_x_axis = int(x - (width / 2.0))
    cartesian_y_axis = int((height / 2.0) - y)
    return cartesian_x_axis, cartesian_y_axis

def cartesian_to_image_coordinates(x, y, width, height):
    # Inverse of image_to_cartesian_coordinates (returns floats).
    cartesian_x_axis = x + (width / 2.0)
    cartesian_y_axis = (height / 2.0) - y
    return cartesian_x_axis, cartesian_y_axis

def convert_rgb_to_hex(rgb):
    # '#rrggbb' string for an (r, g, b) tuple; returns an error *string*
    # (not an exception) when a channel is out of range.
    if any(channel > 255 or channel < 0 for channel in rgb):
        return "All color channels must be in [0 - 255]"
    r, g, b = rgb
    r = hex(r).split('x')[1].zfill(2)
    g = hex(g).split('x')[1].zfill(2)
    b = hex(b).split('x')[1].zfill(2)
    return '#' + r + g + b

def normalize_grayscale_image(image):
    # Contrast-stretch a grayscale image: find the darkest/brightest
    # intensities that occur more than `threshold` times (to ignore
    # outliers) and linearly rescale that range to [0, 255].
    frecuencies = dict()
    pixels = image.load()
    width, height = image.size
    for y in xrange(height):
        for x in xrange(width):
            color = pixels[x, y][0]
            if color in frecuencies:
                frecuencies[color] += 1
            else:
                frecuencies[color] = 1
    threshold = 10
    # NOTE(review): `max`, `min` and `range` shadow builtins here.
    max, min = -1, 999999
    for color in frecuencies:
        if frecuencies[color] > threshold:
            if color < min:
                min = color
            if color > max:
                max = color
    range = float(max - min)
    new_image = Image.new('RGB', image.size)
    new_pixels = new_image.load()
    for y in xrange(height):
        for x in xrange(width):
            color = pixels[x, y][0]
            if color <= min:
                new_pixels[x, y] = (0, 0, 0)
            elif color >= max:
                new_pixels[x, y] = (255, 255, 255)
            else:
                i = int(255 * (color - min) / range)
                new_pixels[x, y] = (i, i, i)
    return new_image

def normalize_rgb_image(image):
    # Grayscale first, then contrast-stretch.
    gray_image = grayscale_rgb_image(image)
    return normalize_grayscale_image(gray_image)

if __name__ == '__main__':
    rgb_image = Image.open('../test-images/mason.jpg')
    # print "Inverting image..."
    # inverted_image = invert_rgb_image(rgb_image)
    # inverted_image.save('../test-images/inverted_mason.png')
    # print "Grayscaling image..."
    gray_image = grayscale_rgb_image(rgb_image)
    gray_image.save('../test-images/grayscale_mason.png')
    # print "Binarizing image..."
    # binary_image = binarize_rgb_image(rgb_image)
    # binary_image.save('../test-images/automatic_binary_mason.png')
    # print convert_rgb_to_hex((12, 2, 1112))
    norm_im = normalize_grayscale_image(gray_image)
    norm_im.save('../test-images/new_normalized.png')
[ "otnieel.aguilar@gmail.com" ]
otnieel.aguilar@gmail.com
86c3d10160790fc3aac4e804d5ec45920a02f446
380181172d9ccfaf373de5d5622ddf828b9a4d65
/make-sense-of-census-with-appending,slicing,mean,indexing/code.py
357385407051d0fedc514d23bfc08619328ebd63
[ "MIT" ]
permissive
ELAYARANI-MURUGESAN/greyatom-python-for-data-science
859df4ae4c4afefeb8a9cca9d420f6c9bbe6481d
67b0ad011eee86bb0dff0ee704575293809301f4
refs/heads/master
2022-10-05T12:36:56.413231
2020-06-06T12:45:14
2020-06-06T12:45:14
262,404,847
0
0
null
null
null
null
UTF-8
Python
false
false
1,480
py
# -------------- # Importing header files import numpy as np import warnings warnings.filterwarnings('ignore') #New record new_record=[[50, 9, 4, 1, 0, 0, 40, 0]] #Reading file data = np.genfromtxt(path, delimiter=",", skip_header=1) #Code starts here new_record=np.array(new_record) #print(new_record) census=np.concatenate((data,new_record),axis=0) print(census) print(data.shape) print(census.shape) age=census[:,0] max_age=np.max(age) print(max_age) min_age=np.min(age) print(min_age) age_mean=age.mean() print(age_mean) age_std=np.std(age) print(age_std) race_0=census[census[:,2]==0] race_1=census[census[:,2]==1] race_2=census[census[:,2]==2] race_3=census[census[:,2]==3] race_4=census[census[:,2]==4] len_0=len(race_0) len_1=len(race_1) len_2=len(race_2) len_3=len(race_3) len_4=len(race_4) print('race_0:',len_0) print('race_1:',len_1) print('race_2:',len_2) print('race_3:',len_3) print('race_4:',len_4) a=[len_0,len_1,len_2,len_3,len_4] minority_race=a.index(min(a)) print(minority_race) senior_citizens=census[census[:,0]>60] working_hours_sum=senior_citizens.sum(axis=0)[6] senior_citizens_len=len(senior_citizens) avg_working_hours=working_hours_sum/senior_citizens_len print(avg_working_hours) high=census[census[:,1]>10] print(high) low=census[census[:,1]<=10] print(low) avg_pay_high=high[:,7].mean() print(avg_pay_high) avg_pay_low=low[:,7].mean() print(avg_pay_low)
[ "ELAYARANI-MURUGESAN@users.noreply.github.com" ]
ELAYARANI-MURUGESAN@users.noreply.github.com
401fca7adffd5ba9509dd1df0b1cbe1c58dd4a61
ed4b355a76406afd7ae6e428f4a13b5989bc1f1a
/flow_control.py
980f2e3220f58ed341fa1d05f4403b54403621c6
[]
no_license
aymanesarrar/baby-python-steps
79ba86e11f1101440ddb6b0d208d303221416c6e
b317ac0d30338ed02669df3955d90f234f859b2a
refs/heads/main
2023-03-15T09:37:56.450743
2021-03-18T00:23:53
2021-03-18T00:23:53
343,595,237
2
0
null
null
null
null
UTF-8
Python
false
false
312
py
# Ask for the user's age and print a label for its age bracket.
print('enter your age')
# Bug fix: input() returns a string; comparing it against ints raises
# TypeError on Python 3, so convert to int before the range checks.
myAge = int(input())

# Bucket the age into three coarse groups and print the matching label.
if 18 <= myAge <= 50:
    print('kbir')
elif 50 < myAge <= 90:
    print('kbir chuia hh')
else:
    print('ta sir t9awd')
[ "aymensarrar1@gmail.com" ]
aymensarrar1@gmail.com
a4c1cf9e7c876efc67b76ab08038a1496f959d60
5b21ba23e00c7c9c0596be99c45fb6c02339fe5b
/ex15.py
acfc10aa9a25b6bf9f679fa6b48911fae30217e4
[]
no_license
yuanzizi/py104
b1869da749e82a2931fe6dad861ee53ee254465a
013edb63a2f4d2c177dd41cffac6f7d343818e71
refs/heads/master
2021-01-09T23:40:27.866373
2017-08-10T07:04:02
2017-08-10T07:04:02
98,857,079
0
0
null
null
null
null
UTF-8
Python
false
false
586
py
# ---- ex15 文件操作---------------- from sys import argv script,filename = argv txt = open(filename) # txt 文件指针的值 # <_io.TextIOWrapper name='ex15_sample.txt' mode='r' encoding='cp936'> # print(txt.read()) # 中文的文件要改编码,要不运行结果会报错 # UnicodeDecodeError: 'gbk' codec can't decode # byte 0xaf in position 113: illegal multibyte sequence # filename_cn = input("请输入中文文本文件名:") # 用encode() 序列化 txt_cn = open(filename_cn,encoding='utf-8') print(txt_cn.read()) # ----- ex16 文件读写 ----------------
[ "eric_chi@139.com" ]
eric_chi@139.com
d86e1749616a76d2d38d3047025c8bd2f53d42fd
53438732c6bc70b0d15eea99d961d6036f8839df
/Auth1/venv/bin/pip3.7
7e7d4a7ec99e985fd6466a5c34625337413e6453
[]
no_license
Amarjeet2629/MyPycharmProjects
6e07c972dce8ef12453ae0246bcbfcfd03cba1fb
179a87f327d7c036a6192d0c6e372f2f1e3588ff
refs/heads/master
2023-05-07T20:32:22.091132
2021-04-20T17:06:15
2021-04-20T17:06:15
224,671,445
0
0
null
2023-04-21T20:51:29
2019-11-28T14:32:13
Python
UTF-8
Python
false
false
410
7
#!/home/amarjeet-pc/PycharmProjects/Auth1/venv/bin/python # EASY-INSTALL-ENTRY-SCRIPT: 'pip==19.0.3','console_scripts','pip3.7' __requires__ = 'pip==19.0.3' import re import sys from pkg_resources import load_entry_point if __name__ == '__main__': sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0]) sys.exit( load_entry_point('pip==19.0.3', 'console_scripts', 'pip3.7')() )
[ "amarjeet.sinha.mec17@itbhu.ac.in" ]
amarjeet.sinha.mec17@itbhu.ac.in
ce7050ab38a7683c7b476a80901ac6beac9d0799
4fbd844113ec9d8c526d5f186274b40ad5502aa3
/algorithms/python3/maximize_distance_to_closest_person.py
37e744aa546a7f515c70e1f156bc63f0f499ee8d
[]
no_license
capric8416/leetcode
51f9bdc3fa26b010e8a1e8203a7e1bcd70ace9e1
503b2e303b10a455be9596c31975ee7973819a3c
refs/heads/master
2022-07-16T21:41:07.492706
2020-04-22T06:18:16
2020-04-22T06:18:16
null
0
0
null
null
null
null
UTF-8
Python
false
false
1,203
py
# !/usr/bin/env python # -*- coding: utf-8 -*- """ In a row of seats, 1 represents a person sitting in that seat, and 0 represents that the seat is empty.  There is at least one empty seat, and at least one person sitting. Alex wants to sit in the seat such that the distance between him and the closest person to him is maximized.  Return that maximum distance to closest person. Example 1: Input: [1,0,0,0,1,0,1] Output: 2 Explanation: If Alex sits in the second open seat (seats[2]), then the closest person has distance 2. If Alex sits in any other open seat, the closest person has distance 1. Thus, the maximum distance to the closest person is 2. Example 2: Input: [1,0,0,0] Output: 3 Explanation: If Alex sits in the last seat, the closest person is 3 seats away. This is the maximum distance possible, so the answer is 3. Note: 1 <= seats.length <= 20000 seats contains only 0s or 1s, at least one 0, and at least one 1. """ """ ==================== body ==================== """ class Solution: def maxDistToClosest(self, seats): """ :type seats: List[int] :rtype: int """ """ ==================== body ==================== """
[ "capric8416@gmail.com" ]
capric8416@gmail.com
5582f91bf1c3cfa75269b84d834ed3f9e08e0ee2
5ef96b278a419451ea1715b37c0cca6ee7ff6ca2
/ms-labor/simpleled.py
eb45cc4c047a39eece077f5cebf81cafc92abb4a
[]
no_license
tedeler/SS17_ITSMOSY
a144715f8e7edf323286237275ccce75be0ff41e
d568ffb24bd429a7c5d3187137ab09213ced2a7f
refs/heads/master
2021-01-22T22:57:55.508159
2017-04-18T14:02:08
2017-04-18T14:02:08
85,598,353
1
0
null
null
null
null
UTF-8
Python
false
false
396
py
import RPi.GPIO as GPIO # GPIO Bibliothek importieren import time # Modul time importieren GPIO.setmode(GPIO.BOARD) # Verwende Board-Pinnummern GPIO.setup(26, GPIO.OUT) # Setze Pin 26 (GPIO7) als Ausg GPIO.output(26, True) # Lege 3.3V auf Pin 26 time.sleep(0.5) # Warte 500ms GPIO.output(26, False) # Lege 0V auf Pin 26 GPIO.cleanup() # Aufräumen
[ "torsten@localhost.localdomain" ]
torsten@localhost.localdomain
1fbcc72ce3a5b32bedfe77ded1d772570f94ba40
67047189c30d2330690b0fbc7d5e8e00fbe76ff2
/board/packages/forms/form.py
217d96b5f5308d0eb96891d2e418d00df277bf75
[]
no_license
TerisseNicolas/SoundBoard
03fa9a1232a2beb63d0acdc3c558ca8a1c2f845a
407763e8b9f0c7cc0a61795f38b263e19609673f
refs/heads/master
2020-03-28T08:13:16.383219
2018-09-09T10:12:28
2018-09-09T10:12:28
147,952,300
0
0
null
null
null
null
UTF-8
Python
false
false
98
py
from django import forms class UploadFileForm(forms.Form): file = forms.FileField(label='')
[ "nicolas.terisse@gmail.com" ]
nicolas.terisse@gmail.com
8435baa0b8beaab331ff8904a8889f896a8d23c0
9ae6ce54bf9a2a86201961fdbd5e7b0ec913ff56
/google/ads/googleads/v9/services/services/third_party_app_analytics_link_service/transports/__init__.py
502d5cf2169f355fb53779b340f3900e0e913770
[ "Apache-2.0" ]
permissive
GerhardusM/google-ads-python
73b275a06e5401e6b951a6cd99af98c247e34aa3
676ac5fcb5bec0d9b5897f4c950049dac5647555
refs/heads/master
2022-07-06T19:05:50.932553
2022-06-17T20:41:17
2022-06-17T20:41:17
207,535,443
0
0
Apache-2.0
2019-09-10T10:58:55
2019-09-10T10:58:55
null
UTF-8
Python
false
false
1,141
py
# -*- coding: utf-8 -*- # Copyright 2020 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # from collections import OrderedDict from typing import Dict, Type from .base import ThirdPartyAppAnalyticsLinkServiceTransport from .grpc import ThirdPartyAppAnalyticsLinkServiceGrpcTransport # Compile a registry of transports. _transport_registry = ( OrderedDict() ) # type: Dict[str, Type[ThirdPartyAppAnalyticsLinkServiceTransport]] _transport_registry["grpc"] = ThirdPartyAppAnalyticsLinkServiceGrpcTransport __all__ = ( "ThirdPartyAppAnalyticsLinkServiceTransport", "ThirdPartyAppAnalyticsLinkServiceGrpcTransport", )
[ "noreply@github.com" ]
noreply@github.com
77cfa477b7c2922f9a2391fe201ca9abb75d5870
42d62352448b65d7890dbcef4f1a278b78ff12c8
/tox.ini
20bd31455aeff16031c69fa852822995ed6d1cd9
[]
no_license
bboalimoe/avos
76d4396ccdc8ebf655e18c193cb696bac74ede85
9722d4757ef004f368de825f45e7490905a0b6ed
refs/heads/master
2020-12-11T03:52:31.081813
2014-08-24T09:55:47
2014-08-24T09:55:47
null
0
0
null
null
null
null
UTF-8
Python
false
false
326
ini
#!/usr/bin/python # -*- coding: utf-8 -*- # This file is part of avos. # https://github.com/wumaogit/avos # Licensed under the MIT license: # http://www.opensource.org/licenses/MIT-license # Copyright (c) 2014 wumaogit actor2019@gmail.com [tox] envlist = py27, py34, pypy [testenv] commands = make setup make test
[ "actor2019@gmail.com" ]
actor2019@gmail.com
b1f6c26aa06da29d31a9474eab24f85605527cdd
1c6fa623a4d84a1d40a6c9a511932fcb429dc573
/game_agent.py
3e2d18f838b7545b37e5eda1ae53c8fa40aab036
[]
no_license
paumartinez/AIND-Isolation
c43a238f96ff521629d949115ac3513a618a61d8
3dc6e4845aa7963c26b4ac656b6d4acef9452205
refs/heads/master
2021-01-13T13:05:02.918337
2017-06-22T01:44:18
2017-06-22T01:44:18
95,062,605
0
0
null
null
null
null
UTF-8
Python
false
false
16,632
py
"""This file contains all the classes you must complete for this project. You can use the test cases in agent_test.py to help during development, and augment the test suite with your own test cases to further test your code. You must test your agent's strength against a set of agents with known relative strength using tournament.py and include the results in your report. """ import random class Timeout(Exception): """Subclass base exception for code clarity.""" pass def custom_score(game, player): """Calculate the heuristic value of a game state from the point of view of the given player. Note: this function should be called from within a Player instance as `self.score()` -- you should not need to call this function directly. Parameters ---------- game : `isolation.Board` An instance of `isolation.Board` encoding the current state of the game (e.g., player locations and blocked cells). player : object A player instance in the current game (i.e., an object corresponding to one of the player objects `game.__player_1__` or `game.__player_2__`.) Returns ------- float The heuristic value of the current game state to the specified player. """ return heuristica4(game, player) def heuristica1(game, player): # First heuristic. # This function evaluates the difference between the amount of legal moves available for the player and its oppnent. # If the player already won the game the function return +inf and if the player already losses it returns -inf. # If game already won if game.is_winner(player): return float("inf") # If gameover if game.is_loser(player): return float("-inf") # Amount of legal moves available player_moves = len(game.get_legal_moves(player)) opponent_moves = len(game.get_legal_moves(game.get_opponent(player))) valor_h1 = float(player_moves - opponent_moves) return valor_h1 def heuristica2(game, player): # Second heuristic. 
# This function evaluates the difference between the amount of legal moves available and for the player and its opponent, # plus the difference between the moves available for each one after (the sum of moves available on each ramification of legal moves available). # If the player already won the game the function return +inf and if the player already losses it returns -inf. # If game already won if game.is_winner(player): return float("inf") # If gameover if game.is_loser(player): return float("-inf") # Legal moves available aux_player_moves = game.get_legal_moves(player) aux_opponent_moves = game.get_legal_moves(game.get_opponent(player)) # Amount of legal moves available player_moves = len(aux_player_moves) opponent_moves = len(aux_opponent_moves) # Moves available of the player and its opponent player_availablemoves = float(sum([len(game.__get_moves__(move)) for move in aux_player_moves ])) opponent_availablemoves = float(sum([len(game.__get_moves__(move)) for move in aux_opponent_moves ])) # Value oh the second heuristic valor_h2 = float(player_moves + player_availablemoves - opponent_moves - opponent_availablemoves) return valor_h2 def heuristica3(game, player): # Third heuristic. # This function evaluates the difference between the amount of legal moves available for the player and its oppnent, # plus a penalty if the position available is in a edge of the board (because in general is more dangerous to be # in an edge than in the middle of the board) # If the player already won the game the function return +inf and if the player already losses it returns -inf. 
# If game already won if game.is_winner(player): return float("inf") # If gameover if game.is_loser(player): return float("-inf") # Legal moves available player_moves = game.get_legal_moves(player) opponent_moves = game.get_legal_moves(game.get_opponent(player)) # Edges of the board edges = [(0,0)] baux = range((game.width-1)) aaux = range((game.height-1)) for a in aaux: edges.append((a,0)) edges.append((a,(game.width-1))) for b in baux: edges.append((0,b)) edges.append(((game.height-1),b)) # advanced_game ponderates higher if the game is advanced, because it is more dangerous to be at an edge if there are fewer blank spaces advanced_game = 0.5 if len(game.get_blank_spaces()) < game.width * game.height / 4: advanced_game = 1 player_edges = [move for move in player_moves if move in edges] opponent_edges = [move for move in opponent_moves if move in edges] # Value oh the third heuristic valor_h3 = float(len(player_moves) - len(opponent_moves) + advanced_game * (len(opponent_edges) - len(player_edges))) return valor_h3 def heuristica4(game, player): # Third heuristic. # This function evaluates the difference between the amount of legal moves available for the player and its oppnent, # plus a penalty if the position available is in a corner of the board (because in general is more dangerous to be # in a corner than in the middle of the board) # If the player already won the game the function return +inf and if the player already losses it returns -inf. 
# If game already won if game.is_winner(player): return float("inf") # If gameover if game.is_loser(player): return float("-inf") # Legal moves available player_moves = game.get_legal_moves(player) opponent_moves = game.get_legal_moves(game.get_opponent(player)) # Corners corners = [(0, 0), (0, (game.width - 1)), ((game.height - 1), 0), ((game.height - 1), (game.width - 1))] # advanced_game ponderates higher if the game is advanced, because it is more dangerous to be at corner if there are fewer blank spaces advanced_game = 0.5 if len(game.get_blank_spaces()) < game.width * game.height / 4: advanced_game = 1 player_corner = [move for move in player_moves if move in corners] opponent_corner = [move for move in opponent_moves if move in corners] # Value oh the fourth heuristic valor_h4 = float(len(player_moves) - len(opponent_moves) + advanced_game * (len(opponent_corner) - len(player_corner))) return valor_h4 class CustomPlayer: """Game-playing agent that chooses a move using your evaluation function and a depth-limited minimax algorithm with alpha-beta pruning. You must finish and test this player to make sure it properly uses minimax and alpha-beta to return a good move before the search time limit expires. Parameters ---------- search_depth : int (optional) A strictly positive integer (i.e., 1, 2, 3,...) for the number of layers in the game tree to explore for fixed-depth search. (i.e., a depth of one (1) would only explore the immediate sucessors of the current state.) score_fn : callable (optional) A function to use for heuristic evaluation of game states. iterative : boolean (optional) Flag indicating whether to perform fixed-depth search (False) or iterative deepening search (True). method : {'minimax', 'alphabeta'} (optional) The name of the search method to use in get_move(). timeout : float (optional) Time remaining (in milliseconds) when search is aborted. Should be a positive value large enough to allow the function to return before the timer expires. 
""" def __init__(self, search_depth=3, score_fn=custom_score, iterative=True, method='minimax', timeout=10.): self.search_depth = search_depth self.iterative = iterative self.score = score_fn self.method = method self.time_left = None self.TIMER_THRESHOLD = timeout def get_move(self, game, legal_moves, time_left): """Search for the best move from the available legal moves and return a result before the time limit expires. This function must perform iterative deepening if self.iterative=True, and it must use the search method (minimax or alphabeta) corresponding to the self.method value. ********************************************************************** NOTE: If time_left < 0 when this function returns, the agent will forfeit the game due to timeout. You must return _before_ the timer reaches 0. ********************************************************************** Parameters ---------- game : `isolation.Board` An instance of `isolation.Board` encoding the current state of the game (e.g., player locations and blocked cells). legal_moves : list<(int, int)> A list containing legal moves. Moves are encoded as tuples of pairs of ints defining the next (row, col) for the agent to occupy. time_left : callable A function that returns the number of milliseconds left in the current turn. Returning with any less than 0 ms remaining forfeits the game. Returns ------- (int, int) Board coordinates corresponding to a legal move; may return (-1, -1) if there are no available legal moves. """ self.time_left = time_left # TODO: finish this function! # Perform any required initializations, including selecting an initial # move from the game board (i.e., an opening book), or returning # immediately if there are no legal moves if len(legal_moves) == 0: return (-1,-1) # If first move, pick center position. 
if game.move_count == 0: return(int(game.height/2), int(game.width/2)) last_move = (-1,-1) try: # The search method call (alpha beta or minimax) should happen in # here in order to avoid timeout. The try/except block will # automatically catch the exception raised by the search method # when the timer gets close to expiring if self.iterative: aux_depth = 1 if self.method == "minimax": while True: last_value, last_move = self.minimax(game, aux_depth) if last_value == float("inf") or last_value == float("-inf"): break aux_depth += 1 if self.method == "alphabeta": while True: last_value, last_move = self.alphabeta(game, aux_depth) if last_value == float("inf") or last_value == float("-inf"): break aux_depth += 1 else: if self.method == "minimax": valor, last_move = self.minimax(game, self.search_depth) if self.method == "alphabeta": valor, last_move = self.alphabeta(game, self.search_depth) except Timeout: # Handle any actions required at timeout, if necessary return last_move pass # Return the best move from the last completed search iteration return last_move def minimax(self, game, depth, maximizing_player=True): """Implement the minimax search algorithm as described in the lectures. Parameters ---------- game : isolation.Board An instance of the Isolation game `Board` class representing the current game state depth : int Depth is an integer representing the maximum number of plies to search in the game tree before aborting maximizing_player : bool Flag indicating whether the current search depth corresponds to a maximizing layer (True) or a minimizing layer (False) Returns ------- float The score for the current search branch tuple(int, int) The best move for the current branch; (-1, -1) for no legal moves Notes ----- (1) You MUST use the `self.score()` method for board evaluation to pass the project unit tests; you cannot call any other evaluation function directly. 
""" if self.time_left() < self.TIMER_THRESHOLD: raise Timeout() # Possible legal moves for player legal_moves = game.get_legal_moves(game.active_player) # Stop conditions if depth == 0: return self.score(game,self), (-1,-1) if len(legal_moves) == 0: return self.score(game,self), (-1,-1) # Set Move improved move_imp = (-1,-1) # Set old_value if maximizing_player: old_value = float("-inf") else: old_value = float("inf") # Recursive minimax for move in legal_moves: new_value, move1 = self.minimax(game.forecast_move(move), depth-1, not maximizing_player) # Update variables if maximizing_player: if new_value > old_value: old_value = new_value move_imp = move else: if new_value < old_value: old_value = new_value move_imp = move # Return of the function Minimax return old_value, move_imp def alphabeta(self, game, depth, alpha=float("-inf"), beta=float("inf"), maximizing_player=True): """Implement minimax search with alpha-beta pruning as described in the lectures. Parameters ---------- game : isolation.Board An instance of the Isolation game `Board` class representing the current game state depth : int Depth is an integer representing the maximum number of plies to search in the game tree before aborting alpha : float Alpha limits the lower bound of search on minimizing layers beta : float Beta limits the upper bound of search on maximizing layers maximizing_player : bool Flag indicating whether the current search depth corresponds to a maximizing layer (True) or a minimizing layer (False) Returns ------- float The score for the current search branch tuple(int, int) The best move for the current branch; (-1, -1) for no legal moves Notes ----- (1) You MUST use the `self.score()` method for board evaluation to pass the project unit tests; you cannot call any other evaluation function directly. 
""" if self.time_left() < self.TIMER_THRESHOLD: raise Timeout() # Possible legal moves for player legal_moves = game.get_legal_moves(game.active_player) # Stop conditions if depth == 0: return self.score(game,self), (-1,-1) if len(legal_moves) == 0: return self.score(game,self), (-1,-1) # Set Move improved move_imp = (-1,-1) # Set old_value if maximizing_player: old_value = float("-inf") else: old_value = float("inf") # Recursive Alphbeta for move in legal_moves: new_value, move1 = self.alphabeta(game.forecast_move(move), depth-1, alpha, beta, not maximizing_player) # Update variables if maximizing_player: if new_value > old_value: old_value = new_value move_imp = move # Prune next node? if old_value >= beta: return old_value, move_imp alpha = max(alpha,old_value) else: if new_value < old_value: old_value = new_value move_imp = move if old_value <= alpha: return old_value, move_imp beta = min(beta,old_value) # Return of the function Alphabeta return old_value, move_imp
[ "paulamartinez86@gmail.com" ]
paulamartinez86@gmail.com
7d8d38cff56abaccc46b4bc63af8baaa72e792f3
36ccb119c5a6bb1eb0f40ac34441bc78ae929fc4
/i3/blocks/imap
374a33535dfe4a4b0fb2682f1cae044d2a8017cb
[]
no_license
saraiva132/mydotfiles
dc6b3535af4a84ecf3d53a69a0a1db5820baa8e5
a09d2becfd47a21bb67e5575c2e85c27aa495378
refs/heads/master
2021-01-22T03:57:53.563105
2015-08-24T10:43:04
2015-08-24T10:43:04
35,304,697
0
0
null
null
null
null
UTF-8
Python
false
false
615
#!/usr/bin/env python import imaplib cache_timeout = 60 criterion = 'UNSEEN' imap_server = 'imap' mailbox = 'INBOX' name = 'unread' password = 'password' port = '993' user = 'user' def _get_mail_count(): try: connection = imaplib.IMAP4_SSL(imap_server, port) connection.login(user, password) connection.select(mailbox) unseen_response = connection.search(None, criterion) mails = unseen_response[1][0].split() mail_count = len(mails) return mail_count except: return 'N/A' if __name__ == "__main__": x = _get_mail_count() print(x)
[ "rafael.saraiva@ua.pt" ]
rafael.saraiva@ua.pt
a4a27e3eb0c39273105293f96a89dc9b05e6f10a
b6a84594f8c29d968014faaddd49abeb7537a5fc
/python/1040.moving-stones-until-consecutive-ii.py
799deed3b361b4636ffa827b1e859308649b708d
[]
no_license
nickyfoto/lc
8a6af3df114e693e265d0ede03f4d4e1283e010e
3633b4df3e24968057c7d684689b931c5a8032d3
refs/heads/master
2020-09-16T19:23:07.765917
2020-06-07T17:18:06
2020-06-07T17:18:06
223,866,098
0
0
null
null
null
null
UTF-8
Python
false
false
1,913
py
# # @lc app=leetcode id=1040 lang=python3 # # [1040] Moving Stones Until Consecutive II # # https://leetcode.com/problems/moving-stones-until-consecutive-ii/description/ # # algorithms # Medium (52.07%) # Likes: 152 # Dislikes: 231 # Total Accepted: 4.5K # Total Submissions: 8.7K # Testcase Example: '[7,4,9]' # # On an infinite number line, the position of the i-th stone is given by # stones[i].  Call a stone an endpoint stone if it has the smallest or largest # position. # # Each turn, you pick up an endpoint stone and move it to an unoccupied # position so that it is no longer an endpoint stone. # # In particular, if the stones are at say, stones = [1,2,5], you cannot move # the endpoint stone at position 5, since moving it to any position (such as 0, # or 3) will still keep that stone as an endpoint stone. # # The game ends when you cannot make any more moves, ie. the stones are in # consecutive positions. # # When the game ends, what is the minimum and maximum number of moves that you # could have made?  Return the answer as an length 2 array: answer = # [minimum_moves, maximum_moves] # # # # Example 1: # # # Input: [7,4,9] # Output: [1,2] # Explanation: # We can move 4 -> 8 for one move to finish the game. # Or, we can move 9 -> 5, 4 -> 6 for two moves to finish the game. # # # # Example 2: # # # Input: [6,5,4,3,10] # Output: [2,3] # We can move 3 -> 8 then 10 -> 7 to finish the game. # Or, we can move 3 -> 7, 4 -> 8, 5 -> 9 to finish the game. # Notice we cannot move 10 -> 2 to finish the game, because that would be an # illegal move. # # # # Example 3: # # # Input: [100,101,104,102,103] # Output: [0,0] # # # # # # Note: # # # 3 <= stones.length <= 10^4 # 1 <= stones[i] <= 10^9 # stones[i] have distinct values. # # # # # # # # # # @lc code=start class Solution: def numMovesStonesII(self, stones): pass # @lc code=end
[ "nickyfoto@gmail.com" ]
nickyfoto@gmail.com
6facf6cbf0378817c10bb92998c839f580db38b2
6fb294c9433cd8e829f325c70ac49cde0e3f8b62
/day3/ex14.py
39bbf46dd4cb42d07e35ed76e09357c8828c8e6d
[ "MIT" ]
permissive
dsky1990/python_30days
52fd479027b9f7ec9f9cc868a9b170b025aa3a7a
3a9d8a29bd32979be1f4ef01be44999073dab5c4
refs/heads/master
2020-04-01T23:50:12.806760
2018-11-07T15:16:27
2018-11-07T15:16:27
153,775,207
1
0
null
null
null
null
UTF-8
Python
false
false
509
py
from sys import argv script, user_name = argv prompt = '>' print("Hi %s, I'm the %s script" %(user_name, script)) print("I'd like to ask you a few questions") print("Do you like me, %s?" %user_name) likes = input(prompt) print("Where do you live %s" %user_name) lives = input(prompt) print("What kind of computer do you have?") computer = input(prompt) print(""" Alright, so you said %r about liking me You live in %r, Not sure where that is And you have a %r computer Nice """ %(likes, lives, computer))
[ "dutian1990@gmail.com" ]
dutian1990@gmail.com
ff4c06f31536caffeed006b0bbb64555379489a6
96c3bd445497314c6aa72941e2bbee7a452f0ac4
/fabfile.py
25784f79c98c4b765d99c882a5be23187b3b4845
[]
no_license
L00J/LJblog
c729640845f180afa5bdb3c0c183f19148b82226
44361cbd801a03ae4196bf3e25f2374ee23e1ee3
refs/heads/master
2020-05-22T13:51:29.235362
2019-05-13T08:07:56
2019-05-13T08:07:56
186,367,261
0
0
null
null
null
null
UTF-8
Python
true
false
2,506
py
from fabric.api import env,run,cd,local,task,abort from fabric.colors import green,yellow,red from fabric.context_managers import settings,hide from fabric.contrib.console import confirm import datetime nowTime=datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S') env.user = 'root' env.port = '55555' env.hosts = 'ops.attacker.club' #主机地址 env.key_filename = ['~/.ssh/id_rsa'] #本地秘钥 def get_git_status(): git_status_result = local("git status", capture=True) #if "无文件要提交,干净的工作区" not in git_status_result: if "modified" in git_status_result: print (red("当前分支还有文件没有提交")) print (git_status_result) abort("已经终止") def local_unit_test(): with settings(warn_only=True): test_result = local("python3 manage.py test") if test_result.failed: print (test_result) if not confirm(red("单元测试失败,是否继续?")): abort("已经终止") def download_code(): run("git checkout .") run("git pull") print(green("\n[%s] 完成代码下载" % env.hosts)) def app (): run("pip3 install -i http://mirrors.aliyun.com/pypi/simple --trusted-host mirrors.aliyun.com -r requirements.txt") run("python3 manage.py collectstatic --noinput") run("python3 manage.py makemigrations && python3 manage.py migrate") run("python3 manage.py rebuild_index") run('''sed -i "/ALLOWED_HOSTS/c ALLOWED_HOSTS= \['127.0.0.1','.attacker.club'\]" mysite/settings.py ''' ) run("sed -i 's#/data/LJblog#/www/django/blog#' mysite/settings.py") run("sed -i 's/DEBUG = True/DEBUG = False/' mysite/settings.py") run("/usr/bin/supervisord -c /etc/supervisor/supervisord.conf") print(green("\n[%s] app完成部署" % env.hosts)) @task def upload(): with settings(hide('running','stderr'), warn_only=True): local("git add .") local("git commit -m '%s 提交' " % (nowTime)) local("git push") print(green("\n\n 完成代码上传")) @task def deploy(): with settings(hide('running','stderr'), warn_only=True): with cd("/www/django/blog"): get_git_status() local_unit_test() run ("pkill sup") download_code() app() print(yellow("\n\n[%s] 部署完毕 !!!" 
% env.hosts)) @task def ping (): with settings(hide('running','stderr'),warn_only=True): run("ping -c 1 114.114.114.114")
[ "admin@attacker.club" ]
admin@attacker.club
bda4e649a366f06ac199f0d86b0577ac277b22a9
7a5d867c9f80661900ee711389e272dc2bfb47ab
/PubSub/gcp_pubsub_client.py
53fa5eedffe5e45010c2b1d1ac4b809dad81ea3a
[]
no_license
AliAbdelaal/pubsub-lib
20478b75fa436402f966415db0a5c3da09e4a40c
a3a079abfe7164ecee9044e162897bf1301d3e13
refs/heads/master
2023-03-11T00:52:31.351791
2021-02-21T21:49:07
2021-02-21T21:49:07
340,931,014
1
0
null
2021-02-21T20:23:29
2021-02-21T15:13:26
Python
UTF-8
Python
false
false
1,832
py
import os import json from typing import Callable, Tuple from google.auth import jwt from google.cloud import pubsub_v1 from PubSub.broker import BrokerClient PUBLISHER_AUDIENCE = "https://pubsub.googleapis.com/google.pubsub.v1.Publisher" AUDIENCE = "https://pubsub.googleapis.com/google.pubsub.v1.Subscriber" def ack_msg(func): def wrapper(msg): func(msg.attributes.get('key', None), msg.data) msg.ack() return wrapper class GooglePubSubClient(BrokerClient): def __init__(self, project_id: str, topic: str, subscription_id: str, gcp_configs: dict, callback:Callable=None) -> None: # auth credentials = jwt.Credentials.from_service_account_info( gcp_configs, audience=AUDIENCE ) credentials_pub = credentials.with_claims(audience=PUBLISHER_AUDIENCE) self.topic_name = f'projects/{project_id}/topics/{topic}' self.subscription_path = f'projects/{project_id}/subscriptions/{subscription_id}' self.consumer = pubsub_v1.SubscriberClient(credentials=credentials) self.producer = pubsub_v1.PublisherClient(credentials=credentials_pub) if callback: self.consumer.subscribe(self.subscription_path, ack_msg(callback)) def push_msg(self, value: bytes, key: str = None): """Push a message on the topic with the given key and value. Parameters ---------- value : bytes The value bytes key : bytes The optional key, by default None """ self.producer.publish(self.topic_name, value, key=key) def pull_msg(self) -> Tuple[str, bytes]: """A blocking function to get the message. Returns ------- Tuple[str, bytes] key and value in bytes """ raise NotImplementedError()
[ "aliabdelaal369@gmail.com" ]
aliabdelaal369@gmail.com
d7c149230b5773e89538397e73a63aabde5f11dc
274f13f5e1a2a440f262fb289edd035589cc1b89
/main.py
ceff8c6964352822efb5c35c4fe2ca367d12daeb
[]
no_license
brendanjhays/CavernRL
ba97cf010e5f43cd12efc4b6f905b5b47b6f2276
d4be7bd390b6c7333412baa64dc04f44b9b395ad
refs/heads/master
2023-06-30T14:34:44.526360
2021-07-31T00:28:26
2021-07-31T00:28:26
null
0
0
null
null
null
null
UTF-8
Python
false
false
1,782
py
from typing import Set
import tcod
from const import Settings, Colors
from engine import Engine
from dungeon_procgen import generate_dungeon
import copy
import entity_factories
import traceback


def main() -> None:
    """Entry point: build the player, engine and dungeon, then run the
    render/input loop until the process is terminated."""
    # Tileset: 32x8 grid of glyphs using the standard TCOD character map.
    tileset = tcod.tileset.load_tilesheet(
        "sampleRoguelikeGraphic.png", 32, 8, tcod.tileset.CHARMAP_TCOD
    )

    # Deep-copy the prototype so the template in entity_factories stays pristine.
    player = copy.deepcopy(entity_factories.player)
    engine = Engine(player=player)

    # Procedurally generate the starting map from the project settings.
    engine.game_map = generate_dungeon(
        max_rooms=Settings.MAX_ROOMS,
        room_min_size=Settings.ROOM_MIN_SIZE,
        room_max_size=Settings.ROOM_MAX_SIZE,
        map_width=Settings.MAP_WIDTH,
        map_height=Settings.MAP_HEIGHT,
        max_monsters_per_room=Settings.MAX_MONSTERS_PER_ROOM,
        engine=engine
    )
    # Compute the initial field of view before the first frame is drawn.
    engine.update_fov()

    engine.message_log.add_message(
        "You wake up in a musty cave, with all of your belongings gone",
        Colors.welcome_text
    )

    with tcod.context.new_terminal(
        Settings.SCREEN_WIDTH,
        Settings.SCREEN_HEIGHT,
        tileset=tileset,
        title=Settings.TITLE,
        vsync=Settings.VSYNC
    ) as context:
        # order="F" gives console[x, y] indexing instead of [y, x].
        root_console = tcod.Console(
            Settings.SCREEN_WIDTH, Settings.SCREEN_HEIGHT, order="F")
        # Main game loop: render, then handle queued input events.
        while True:
            root_console.clear()
            engine.event_handler.on_render(console=root_console)
            context.present(root_console)

            try:
                for event in tcod.event.wait():
                    context.convert_event(event)
                    engine.event_handler.handle_events(event)
            except Exception:
                # Keep the game running on unexpected errors: dump the
                # traceback to stderr and mirror it into the in-game log.
                traceback.print_exc()
                engine.message_log.add_message(
                    traceback.format_exc(), Colors.error)


if __name__ == "__main__":
    main()
[ "brendan@luggle.com" ]
brendan@luggle.com
3e39303e74df429285427f7db29619d28902ae04
9165eeb69b43fbc200f6dfb895bd50137424de5a
/爬虫脚本/分布式招聘/demos/settings.py
2ae85ebfb9d92dab649563298a569d7b7dd95cbe
[]
no_license
a605226760/jybian.github.com
65ff9831d2bad01d98a72cfff4f9f39b3c5086d1
7e72ecb3bdbd9ce7dde64b6db74ec9b108d52312
refs/heads/master
2020-04-26T11:33:24.563686
2019-03-05T11:32:11
2019-03-05T11:32:11
173,520,838
1
0
null
null
null
null
UTF-8
Python
false
false
3,998
py
# -*- coding: utf-8 -*-

# Scrapy settings for demos project
#
# For simplicity, this file contains only settings considered important or
# commonly used. You can find more settings consulting the documentation:
#
#     https://doc.scrapy.org/en/latest/topics/settings.html
#     https://doc.scrapy.org/en/latest/topics/downloader-middleware.html
#     https://doc.scrapy.org/en/latest/topics/spider-middleware.html

BOT_NAME = 'demos'

SPIDER_MODULES = ['demos.spiders']
NEWSPIDER_MODULE = 'demos.spiders'


# Crawl responsibly by identifying yourself (and your website) on the user-agent
#USER_AGENT = 'demos (+http://www.yourdomain.com)'

# Obey robots.txt rules
ROBOTSTXT_OBEY = False

# Configure maximum concurrent requests performed by Scrapy (default: 16)
#CONCURRENT_REQUESTS = 32

# Configure a delay for requests for the same website (default: 0)
# See https://doc.scrapy.org/en/latest/topics/settings.html#download-delay
# See also autothrottle settings and docs
#DOWNLOAD_DELAY = 3
# The download delay setting will honor only one of:
#CONCURRENT_REQUESTS_PER_DOMAIN = 16
#CONCURRENT_REQUESTS_PER_IP = 16

# Disable cookies (enabled by default)
#COOKIES_ENABLED = False

# Disable Telnet Console (enabled by default)
#TELNETCONSOLE_ENABLED = False

# Override the default request headers:
#DEFAULT_REQUEST_HEADERS = {
#   'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
#   'Accept-Language': 'en',
#}

# Enable or disable spider middlewares
# See https://doc.scrapy.org/en/latest/topics/spider-middleware.html
#SPIDER_MIDDLEWARES = {
#    'demos.middlewares.DemosSpiderMiddleware': 543,
#}

# Enable or disable downloader middlewares
# See https://doc.scrapy.org/en/latest/topics/downloader-middleware.html
# DOWNLOADER_MIDDLEWARES = {
#    'demos.middlewares.DemosDownloaderMiddleware': 543,
# }

# Enable or disable extensions
# See https://doc.scrapy.org/en/latest/topics/extensions.html
#EXTENSIONS = {
#    'scrapy.extensions.telnet.TelnetConsole': None,
#}

# Configure item pipelines
# See https://doc.scrapy.org/en/latest/topics/item-pipeline.html
ITEM_PIPELINES = {
    # 'demos.pipelines.DemosPipeline': 300,
    # scrapy-redis pipeline: automatically stores scraped items in Redis.
    'scrapy_redis.pipelines.RedisPipeline': 400,
}

# Enable and configure the AutoThrottle extension (disabled by default)
# See https://doc.scrapy.org/en/latest/topics/autothrottle.html
#AUTOTHROTTLE_ENABLED = True
# The initial download delay
#AUTOTHROTTLE_START_DELAY = 5
# The maximum download delay to be set in case of high latencies
#AUTOTHROTTLE_MAX_DELAY = 60
# The average number of requests Scrapy should be sending in parallel to
# each remote server
#AUTOTHROTTLE_TARGET_CONCURRENCY = 1.0
# Enable showing throttling stats for every response received:
#AUTOTHROTTLE_DEBUG = False

# Enable and configure HTTP caching (disabled by default)
# See https://doc.scrapy.org/en/latest/topics/downloader-middleware.html#httpcache-middleware-settings
#HTTPCACHE_ENABLED = True
#HTTPCACHE_EXPIRATION_SECS = 0
#HTTPCACHE_DIR = 'httpcache'
#HTTPCACHE_IGNORE_HTTP_CODES = []
#HTTPCACHE_STORAGE = 'scrapy.extensions.httpcache.FilesystemCacheStorage'

# URL fingerprint de-duplication filter (shared through Redis).
DUPEFILTER_CLASS = "scrapy_redis.dupefilter.RFPDupeFilter"
# Scheduler: stores the request queue in Redis so multiple crawler
# processes can share it (distributed crawling).
SCHEDULER = "scrapy_redis.scheduler.Scheduler"
# Allow the crawl to be paused/resumed: keep queued requests in Redis
# instead of discarding them when the spider stops.
SCHEDULER_PERSIST = True
# Request queue type:
# SCHEDULER_QUEUE_CLASS ="scrapy_redis.queue.SpiderPriorityQueue"  # priority queue
SCHEDULER_QUEUE_CLASS = "scrapy_redis.queue.SpiderQueue"  # FIFO queue
# SCHEDULER_QUEUE_CLASS = "scrapy_redis.queue.SpiderStack"  # LIFO (stack) scheduling

# DOWNLOADER_MIDDLEWARES = {
#     'scrapy.downloadermiddlewares.useragent.UserAgentMiddleware':
#     None,
#     'example.middlewares.RandomUserAgent': 1,
#     # 'py03_spider_day14.middlewares.FreeRandomProxy': 2,
# }

DOWNLOAD_DELAY = 1

# Redis server shared by all crawler nodes.
REDIS_HOST = '192.168.21.128'
REDIS_PORT = 6379

CONCURRENT_ITEMS = 100  # concurrent item-pipeline processing
# NOTE(review): Scrapy's actual setting name is CONCURRENT_REQUESTS
# (plural); this singular name is silently ignored by Scrapy — confirm
# whether 16 concurrent requests was intended.
CONCURRENT_REQUEST = 16  # concurrent requests
[ "noreply@github.com" ]
noreply@github.com
e75ee1e9861da0acd45b3bc85f7e44d490151f8b
a7d248260c2209b2acae5f182a850badef01882a
/helloworld/tddtests/migrations/0003_sentiments_goodbad.py
e5699e977c7e375a90b95365618fe5ce1fead555
[]
no_license
pepea23/GIT-TEST-101
95c9bdc8c61d426a9afbdc0554f3b76b678bfb2c
43949032248d6dfabc24944451c56b7cb6ba230d
refs/heads/master
2020-06-05T04:10:03.005334
2019-06-17T09:08:35
2019-06-17T09:08:35
192,308,762
0
1
null
2019-10-18T08:59:09
2019-06-17T08:43:43
Python
UTF-8
Python
false
false
398
py
# Generated by Django 2.2.1 on 2019-06-14 01:34 from django.db import migrations, models class Migration(migrations.Migration): dependencies = [ ('tddtests', '0002_sentiments_word'), ] operations = [ migrations.AddField( model_name='sentiments', name='goodbad', field=models.IntegerField(blank=True, null=True), ), ]
[ "pepea.23@live.com" ]
pepea.23@live.com
f3057fbc1b612e77ddaea8db1695d3c1244a8059
1d925c3989e80d4cc9d635bf928318646621f722
/PythonClassData/P4-P5/School_Assignment/school_ease/manage.py
3de13f640b7a5add6311137596620bafd4c7ce6d
[]
no_license
mukeshbhoria/PythonClassData
0af81db1c9635d4a08db92bd36d1bbf1db251212
e2378b7ddb0acc5123e0c7227ce617fb63c701ff
refs/heads/master
2022-04-22T05:14:15.285934
2020-04-15T07:07:57
2020-04-15T07:07:57
255,844,437
0
0
null
null
null
null
UTF-8
Python
false
false
631
py
#!/usr/bin/env python """Django's command-line utility for administrative tasks.""" import os import sys def main(): os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'school_ease.settings') try: from django.core.management import execute_from_command_line except ImportError as exc: raise ImportError( "Couldn't import Django. Are you sure it's installed and " "available on your PYTHONPATH environment variable? Did you " "forget to activate a virtual environment?" ) from exc execute_from_command_line(sys.argv) if __name__ == '__main__': main()
[ "mukesh.bhoria555@gmail.com" ]
mukesh.bhoria555@gmail.com
9adf50d27141869fb0693ddeb11ca31431191545
bd93fa910151c278be8249055bc084e5a5c35a6a
/Python/DjangoTest2/booktest/models.py
3735ebb85498b072da8a92f26cec7a80e612790c
[]
no_license
ahojcn/practice-code
bd81595b80239cd2550183093566bd536a83ed3f
b65f4e76271479269463e92fd3fd41585c2ac792
refs/heads/master
2021-07-10T14:15:08.036592
2020-07-09T11:32:16
2020-07-09T11:32:16
153,059,349
2
2
null
null
null
null
UTF-8
Python
false
false
704
py
from django.db import models

# Create your models here.


class BookInfo(models.Model):
    """Book model."""
    # Book title
    btitle = models.CharField(max_length=20)
    # Publication date
    bpub_date = models.DateField()

    def __str__(self):
        return self.btitle


class HeroInfo(models.Model):
    """Hero character appearing in a book."""
    # Hero name
    hname = models.CharField(max_length=20)
    # Gender flag; default False means male
    hgender = models.BooleanField(default=False)
    # Free-form remarks
    hcomment = models.CharField(max_length=128)
    # Relation to the owning book.
    # BUG FIX: on_delete must be a callable such as models.CASCADE;
    # the original on_delete=None crashed with "'NoneType' object is
    # not callable" as soon as a related BookInfo was deleted.
    hbook = models.ForeignKey(BookInfo, on_delete=models.CASCADE)

    def __str__(self):
        return self.hname
[ "hanoi_ahoj@icloud.com" ]
hanoi_ahoj@icloud.com
bdc5ced79424f14b50fb5c169992cc1ff2600102
d066177b3ab0a6b95542012a6d7ec5ad0ea5c8a7
/routes.py
0f232b861daa1eaacbe886e6ff9fbee0fdc33ce4
[]
no_license
kat-temp/wildfire-app
9e3cb099c06e9bb2f2d3cdb3b86816f45a1a5802
750cd2cac6d7650cfe70aab04628c399ae1195a5
refs/heads/master
2020-07-16T06:09:16.768637
2019-11-18T17:54:37
2019-11-18T17:54:37
205,735,798
0
0
null
null
null
null
UTF-8
Python
false
false
10,761
py
from app import app, db
from flask import render_template, flash, redirect, url_for, request
from app.forms import LoginForm, RegistrationForm, DeviceForm
from app.models import User, Device, Temperature_Sensor, Humidity_Sensor, Rainfall_Sensor, Soil_Moisture_Sensor, Smoke_Sensor, Fire_Sensor, GPS_Module, Image
from flask_login import current_user, login_user, logout_user, login_required
from werkzeug.urls import url_parse
from sqlalchemy import desc


# Home page: lets a logged-in user claim a device by its ID, or jump to the
# dashboard of a device they already own. Flask passes each view's return
# value to the browser.
@app.route('/')
@app.route('/index', methods=['GET', 'POST'])
@login_required  # protects the main page from unauthorized viewers
def index():
    form = DeviceForm()
    if form.validate_on_submit():
        # Look up the device whose ID was typed into the form.
        device = Device.query.filter_by(device_id=form.device_id.data).first()
        if device is None:  # no such device registered
            flash('Invalid Device ID.')
            return redirect(url_for('index'))
        else:
            if device.user_id is None:
                # Unclaimed device: assign it to the current user.
                device.user_id = current_user.id
                db.session.commit()
                return redirect(url_for('device', device_id=str(device.device_id)))
            else:
                if device.user_id == current_user.id:
                    # Already owned by this user: go straight to its dashboard.
                    return redirect(url_for('device', device_id=str(device.device_id)))
                elif device.user_id != current_user.id:
                    # Owned by someone else: treat as invalid.
                    flash('Invalid Device ID.')
                    return redirect(url_for('index'))
    return render_template('index.html', title='Home', form=form)


# Device dashboard: latest reading from every sensor, live weather for the
# device's GPS position, and simple fire-danger estimates.
@app.route('/device/<device_id>')
@login_required
def device(device_id):
    device_title = device_id
    # NOTE(review): no ownership or None check here — an unknown device_id
    # would raise AttributeError on device.id below; confirm callers always
    # arrive via index().
    device = Device.query.filter_by(device_id=device_id).first()
    single_device_id = device.id
    # Most recent row per sensor type (ordered by timestamp, newest first).
    temp = Temperature_Sensor.query.filter_by(device_id=single_device_id).order_by(desc(Temperature_Sensor.timestamp)).first()
    humd = Humidity_Sensor.query.filter_by(device_id=single_device_id).order_by(desc(Humidity_Sensor.timestamp)).first()
    rain = Rainfall_Sensor.query.filter_by(device_id=single_device_id).order_by(desc(Rainfall_Sensor.timestamp)).first()
    smoke = Smoke_Sensor.query.filter_by(device_id=single_device_id).order_by(desc(Smoke_Sensor.timestamp)).first()
    fire = Fire_Sensor.query.filter_by(device_id=single_device_id).order_by(desc(Fire_Sensor.timestamp)).first()
    sm = Soil_Moisture_Sensor.query.filter_by(device_id=single_device_id).order_by(desc(Soil_Moisture_Sensor.timestamp)).first()
    gps = GPS_Module.query.filter_by(device_id=single_device_id).order_by(desc(GPS_Module.timestamp)).first()
    images = Image.query.filter_by(device_id=single_device_id).order_by(desc(Image.timestamp)).first()

    # Turn the binary sensor readings (1 = detected) into display strings.
    if fire.reading == 1:
        fire_reading = " Fire Detected"
    else:
        fire_reading = " No Fire Detected"
    if smoke.reading == 1:
        smoke_reading = " Smoke Detected"
    else:
        smoke_reading = " No Smoke Detected"
    if rain.reading == 1:
        rain_reading = " Current Rainfall Detected"
    else:
        rain_reading = " No Rain Detected"

    import requests  # local import, kept as in the original
    gps_lat = gps.latitude
    gps_long = gps.longitude
    # Drop the last character of each coordinate — presumably a hemisphere
    # suffix such as 'N'/'W'; TODO confirm against the GPS_Module writer.
    lat = gps_lat[:-1]
    lon = gps_long[:-1]
    # NOTE(review): API key is hard-coded in source; move it to config or an
    # environment variable.
    current_weather_url = 'http://api.weatherstack.com/current?access_key=90a599db6293c173bdbabf8d63354946&query='
    final_current_weather_url = current_weather_url + lat + ',' + lon
    response = requests.get(final_current_weather_url)
    data = response.json()
    print("accessing weather data from apixu...")
    print("for latitude: "+lat+" and longitude: "+lon)
    print("the wind speed in mph is: " + str(data['current']['wind_speed']))
    print("the wind degree is: " + str(data['current']['wind_degree']))
    print("the wind direction is: " + str(data['current']['wind_dir']))

    # Wind readings for the template, all rendered as strings.
    wind = [
        {
            'data': {'wind_data': 'Wind Speed'},
            'reading': str(data['current']['wind_speed'])
        },
        {
            'data': {'wind_data': 'Wind Degree'},
            'reading': str(data['current']['wind_degree'])
        },
        {
            'data': {'wind_data': 'Wind Direction'},
            'reading': str(data['current']['wind_dir'])
        }
    ]
    #precipitation=data['current']['precip_mm']

    # One entry per sensor for the dashboard table.
    sensors = [
        {
            'type': 'Fire',
            'data': {'sensor': fire.sensor_id},
            'reading': fire_reading,
            'unit': ''
        },
        {
            'type': 'Smoke',
            'data': {'sensor': smoke.sensor_id},
            'reading': smoke_reading,
            'unit': ''
        },
        {
            'type': 'Temperature',
            'data': {'sensor': temp.sensor_id},
            'reading': temp.reading,
            'unit': '° C'
        },
        {
            'type': 'Humidity',
            'data': {'sensor': humd.sensor_id},
            'reading': humd.reading,
            'unit': '%'
        },
        {
            'type': 'Rainfall',
            'data': {'sensor': rain.sensor_id},
            'reading': rain_reading,
            'unit': ''
        },
        # {
        #     'type' : 'Precipitation',
        #     'data' : {'sensor' : rain.sensor_id},
        #     'reading' : precipitation,
        #     'unit' :'mm'
        # },
        {
            'type': 'Soil Moisture',
            'data': {'sensor': sm.sensor_id},
            'reading': sm.reading,
            'unit': '%'
        },
    ]
    timestamp = temp.timestamp

    # NOTE: this rebinds `gps` (previously the GPS_Module row) to the list
    # for the template; the row attributes are read while building the list.
    gps = [
        {
            'type': 'Latitude',
            'reading': gps.latitude,
            'unit': ' '
        },
        {
            'type': 'Longitude',
            'reading': gps.longitude,
            'unit': ' '
        }
    ]

    # Same shadowing pattern for the animal-detection counts.
    images = [
        {
            'animal': 'Bears',
            'reading': images.bears
        },
        {
            'animal': 'Deer',
            'reading': images.deer
        },
        {
            'animal': 'Lynx',
            'reading': images.lynx
        },
        {
            'animal': 'Wolves',
            'reading': images.wolves
        }
    ]

    # Fire-danger index from humidity and temperature — appears to be the
    # Chandler Burning Index formula; TODO confirm source of constants.
    CBI = (((110-1.373*float(humd.reading))-0.54*(10.20-float(temp.reading)))*124*10**(-0.0142*float(humd.reading)))/60
    # Map the index to a qualitative danger level.
    if CBI <= 50:
        fire_danger_level = "Low"
    elif 50 < CBI <= 75:
        fire_danger_level = "Moderate"
    elif 75 < CBI <= 90:
        fire_danger_level = "High"
    elif 90 < CBI <= 97.5:
        fire_danger_level = "Very High"
    elif CBI > 97.5:
        fire_danger_level = "Extreme"
    else:
        fire_danger_level = "Error"

    # Rough fire-spread rate (mph) bucketed by current wind speed.
    if 0 <= float(data['current']['wind_speed']) < 4:
        rate_of_spread = 1
    elif 4 <= float(data['current']['wind_speed']) < 8:
        rate_of_spread = 2
    elif 8 <= float(data['current']['wind_speed']) < 13:
        rate_of_spread = 3
    elif 13 <= float(data['current']['wind_speed']) < 19:
        rate_of_spread = 5
    elif 19 <= float(data['current']['wind_speed']) < 25:
        rate_of_spread = 7
    elif 25 <= float(data['current']['wind_speed']) < 32:
        rate_of_spread = 8
    elif 32 <= float(data['current']['wind_speed']) < 38:
        rate_of_spread = 11
    elif 38 <= float(data['current']['wind_speed']):
        rate_of_spread = 12
    else:
        rate_of_spread = 0

    # Shown when no fire is currently detected.
    no_fire_predictions = [
        {
            'type': 'Fire Danger Level',
            'reading': fire_danger_level
        },
        {
            'type': 'CBI',
            'reading': CBI
        }
    ]
    # Shown while a fire is detected: spread follows the wind.
    fire_predictions = [
        {
            'type': 'Rate of Fire Spread',
            'reading': rate_of_spread,
            'unit': ' mph'
        },
        {
            'type': 'Direction of Fire Spread',
            'reading': str(data['current']['wind_dir']),
            'unit': ''
        }
    ]
    return render_template('device.html', title='Device Information', device_title=device_title, sensors=sensors, gps=gps, images=images, wind=wind, fire_alarm=fire.reading, no_fire_predictions=no_fire_predictions, fire_predictions=fire_predictions, timestamp=timestamp)


@app.route('/login', methods=['GET', 'POST'])  # view accepts GET and POST, not just the default GET
def login():
    if current_user.is_authenticated:  # already logged in:
        return redirect('/index')      # skip the login page entirely
    form = LoginForm()
    if form.validate_on_submit():  # validating user information
        # Find the account with the submitted username.
        user = User.query.filter_by(username=form.username.data).first()
        if user is None or not user.check_password(form.password.data):
            # Unknown username or wrong password: same message for both,
            # so attackers can't tell which part failed.
            flash('Invalid username or password')
            return redirect(url_for('login'))
        login_user(user, remember=form.remember_me.data)  # user is now current_user
        # URL the anonymous user originally tried to visit.
        # NOTE(review): Flask-Login's convention passes this as ?next=... ;
        # reading the 'index' query argument here likely never finds it —
        # confirm against whatever sets the redirect query string.
        next_page = request.args.get('index')
        # Only allow relative targets (empty netloc) to prevent
        # open-redirect attacks via an absolute URL.
        if not next_page or url_parse(next_page).netloc != '':
            next_page = url_for('index')
        return redirect(next_page)  # resume where the user was headed
    return render_template('login.html', title='Sign In', form=form)


@app.route('/logout')
def logout():
    logout_user()  # Flask-Login clears the session
    return redirect(url_for('index'))


@app.route('/register', methods=['GET', 'POST'])
def register():
    if current_user.is_authenticated:
        return redirect(url_for('index'))
    form = RegistrationForm()
    if form.validate_on_submit():
        # Create the account; the password is hashed by set_password.
        user = User(username=form.username.data, email=form.email.data, phone=form.phone.data)
        user.set_password(form.password.data)
        db.session.add(user)
        db.session.commit()
        flash('Congratulations, you are now a registered user!')
        return redirect(url_for('login'))
    return render_template('register.html', title='Register', form=form)


@app.route('/user_help')
@login_required
def user_help():
    return render_template('user_help.html', title='User Help')
[ "noreply@github.com" ]
noreply@github.com
ff07c2b3a2221434ea44dabaa054aad91240ca18
5b50f00bd39791aa264128c352c78004b6252d43
/fav_books/urls.py
2a55820ffa1dd7d6dc273e8b30efa60a28ad8f43
[]
no_license
DaThresh/Favorite-Books
49612b71cbdf8aceec75ab28fca3729d82636f58
4f4682c7240e671469aaad4f820ed511261f9db9
refs/heads/master
2020-08-17T11:32:17.623293
2019-10-16T23:01:57
2019-10-16T23:01:57
215,659,563
0
0
null
null
null
null
UTF-8
Python
false
false
747
py
"""fav_books URL Configuration The `urlpatterns` list routes URLs to views. For more information please see: https://docs.djangoproject.com/en/1.10/topics/http/urls/ Examples: Function views 1. Add an import: from my_app import views 2. Add a URL to urlpatterns: url(r'^$', views.home, name='home') Class-based views 1. Add an import: from other_app.views import Home 2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home') Including another URLconf 1. Import the include() function: from django.conf.urls import url, include 2. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls')) """ from django.conf.urls import url, include urlpatterns = [ url(r'^', include('apps.books.urls')), ]
[ "thresh3@me.com" ]
thresh3@me.com
d22b6020a2b3d2bfacf12fcb9cb93b0bc3d641d9
a30362e51cb3291daf26d0c62e56c42caeec837f
/python/codeup/solved/_1068.py
87813e822e0529ad4c300ab4f9c21997748b240f
[]
no_license
TERADA-DANTE/algorithm
03bf52764c6fcdb93d7c8a0ed7a672834f488412
20bdfa1a5a6b9c378e588b17073e77a0126f7339
refs/heads/master
2023-04-14T21:40:11.250022
2023-04-12T13:00:37
2023-04-12T13:00:37
288,335,057
0
0
null
null
null
null
UTF-8
Python
false
false
131
py
# Read an integer score and print its letter grade (codeup 1068):
# A for 90+, B for 70-89, C for 40-69, D for 0-39.
# A negative score prints nothing, exactly like the original if/elif chain.
score = int(input())
for cutoff, grade in ((90, 'A'), (70, 'B'), (40, 'C'), (0, 'D')):
    if score >= cutoff:
        print(grade)
        break
[ "55175301+TERADA-DANTE@users.noreply.github.com" ]
55175301+TERADA-DANTE@users.noreply.github.com
b3faa68ddf38c6d15ad43fc82a48744cdae5c15b
56f5b2ea36a2258b8ca21e2a3af9a5c7a9df3c6e
/CMGTools/H2TauTau/prod/25aug_corrMC/up/mc/DY2JetsToLL_M-50_TuneZ2Star_8TeV-madgraph/Summer12_DR53X-PU_S10_START53_V7C-v1/AODSIM/V5_B/PAT_CMG_V5_16_0_1377544840/HTT_24Jul_newTES_manzoni_Up_Jobs/Job_225/run_cfg.py
c1d2c055a93f6f5950d43132a49f5e864889fafd
[]
no_license
rmanzoni/HTT
18e6b583f04c0a6ca10142d9da3dd4c850cddabc
a03b227073b2d4d8a2abe95367c014694588bf98
refs/heads/master
2016-09-06T05:55:52.602604
2014-02-20T16:35:34
2014-02-20T16:35:34
null
0
0
null
null
null
null
UTF-8
Python
false
false
1,499
py
# CMSSW batch-job configuration fragment (auto-generated).
# Loads the shared base configuration for this production and then
# overrides the input source with this job's slice of CMG tuple files.
import FWCore.ParameterSet.Config as cms
import os,sys
# Make the job directory importable so base_cfg below can be found.
sys.path.append('/afs/cern.ch/user/m/manzoni/summer13/CMGTools/CMSSW_5_3_9/src/CMGTools/H2TauTau/prod/25aug_corrMC/up/mc/DY2JetsToLL_M-50_TuneZ2Star_8TeV-madgraph/Summer12_DR53X-PU_S10_START53_V7C-v1/AODSIM/V5_B/PAT_CMG_V5_16_0_1377544840/HTT_24Jul_newTES_manzoni_Up_Jobs')
from base_cfg import *

# Replace the source: skip event sorting and duplicate checking (faster
# for batch processing) and drop the structured PF jets collection on input.
process.source = cms.Source("PoolSource",
    noEventSort = cms.untracked.bool(True),
    inputCommands = cms.untracked.vstring('keep *',
        'drop cmgStructuredPFJets_cmgStructuredPFJetSel__PAT'),
    duplicateCheckMode = cms.untracked.string('noDuplicateCheck'),
    # The five input files assigned to this job (Job_225).
    fileNames = cms.untracked.vstring('/store/cmst3/user/cmgtools/CMG/DY2JetsToLL_M-50_TuneZ2Star_8TeV-madgraph/Summer12_DR53X-PU_S10_START53_V7C-v1/AODSIM/V5_B/PAT_CMG_V5_16_0/cmgTuple_2006.root',
        '/store/cmst3/user/cmgtools/CMG/DY2JetsToLL_M-50_TuneZ2Star_8TeV-madgraph/Summer12_DR53X-PU_S10_START53_V7C-v1/AODSIM/V5_B/PAT_CMG_V5_16_0/cmgTuple_2007.root',
        '/store/cmst3/user/cmgtools/CMG/DY2JetsToLL_M-50_TuneZ2Star_8TeV-madgraph/Summer12_DR53X-PU_S10_START53_V7C-v1/AODSIM/V5_B/PAT_CMG_V5_16_0/cmgTuple_2008.root',
        '/store/cmst3/user/cmgtools/CMG/DY2JetsToLL_M-50_TuneZ2Star_8TeV-madgraph/Summer12_DR53X-PU_S10_START53_V7C-v1/AODSIM/V5_B/PAT_CMG_V5_16_0/cmgTuple_2009.root',
        '/store/cmst3/user/cmgtools/CMG/DY2JetsToLL_M-50_TuneZ2Star_8TeV-madgraph/Summer12_DR53X-PU_S10_START53_V7C-v1/AODSIM/V5_B/PAT_CMG_V5_16_0/cmgTuple_201.root')
    )
[ "riccardo.manzoni@cern.ch" ]
riccardo.manzoni@cern.ch
89b9f9f0e3926f6078121dbcf6dd64da9e22f77a
4543983bdc58ab1edfaddb7092433cc50f993399
/abstract_factory.py
2e234debdb9d5e8576dd7f38e51d5b21de50b797
[]
no_license
lchojnacki/python_design_patterns
68c78d430163021cab148c45d60105f33216cab2
06b2e155f22f1fd27d380885c8c7d44202cca548
refs/heads/master
2020-08-01T16:47:46.659319
2019-09-26T09:32:19
2019-09-26T09:32:19
211,051,769
0
0
null
null
null
null
UTF-8
Python
false
false
3,079
py
from abc import ABC, abstractmethod  # Abstract Base Class module

# Abstract Factory exercise (identifiers and strings are Polish):
# a Konfiguracja (configuration) hands out driver factories for low and
# high resolution; each factory produces matching screen and printer drivers.


class Konfiguracja:
    """Configuration: owns one concrete driver factory per resolution."""

    def __init__(self):
        self.niska_rozdzielczosc = FabrykaNisRozdz()   # low-resolution factory
        self.wysoka_rozdzielczosc = FabrykaWysRozdz()  # high-resolution factory

    def poierz_sterownik_niskiej_rozdzielczosci(self):
        # (sic: 'poierz' is a typo for 'pobierz'/'get', but it is the
        # public API name, so it is kept unchanged)
        return self.niska_rozdzielczosc

    def poierz_sterownik_wysokiej_rozdzielczosci(self):
        return self.wysoka_rozdzielczosc


class FabrykaSter(ABC):
    """Abstract driver factory: produces a screen and a printer driver."""

    @abstractmethod
    def pobierz_sterownik_ekranu(self):
        # "get screen driver"
        pass

    @abstractmethod
    def pobierz_sterownik_drukarki(self):
        # "get printer driver"
        pass


class FabrykaNisRozdz(FabrykaSter):
    """Concrete factory for low-resolution drivers."""

    def __init__(self):
        pass

    def pobierz_sterownik_ekranu(self):
        return SENR()

    def pobierz_sterownik_drukarki(self):
        return SDNR()


class FabrykaWysRozdz(FabrykaSter):
    """Concrete factory for high-resolution drivers."""

    def __init__(self):
        pass

    def pobierz_sterownik_ekranu(self):
        return SEWR()

    def pobierz_sterownik_drukarki(self):
        return SDWR()


class ApNadzorujaca:
    """Supervising application: uses whatever factory it is given, without
    knowing which resolution family the drivers belong to."""

    def __init__(self, fabryka):
        self.sterownik_drukarki = fabryka.pobierz_sterownik_drukarki()
        self.sterownik_ekranu = fabryka.pobierz_sterownik_ekranu()

    def rysuj(self):
        # "draw" via the screen driver
        self.sterownik_ekranu.rysuj()

    def drukuj(self):
        # "print" via the printer driver
        self.sterownik_drukarki.drukuj()


class SterownikEkranu(ABC):
    """Abstract screen driver."""

    @abstractmethod
    def rysuj(self):
        pass


class SterEkrnNisRozdz(SterownikEkranu):
    """Low-resolution screen driver (wraps SENR)."""

    def __init__(self):
        self.sterownik = SENR()

    def rysuj(self):
        self.sterownik.rysuj()


class SENR:
    """Low-resolution screen driver implementation."""

    def __init__(self):
        # "Drawing a figure with the low-resolution screen driver."
        self.tekst = "Rysuję figurę za pomocą sterownika ekranu niskiej rozdzielczości."

    def rysuj(self):
        print(self.tekst)


class SterEkrnWysRozdz(SterownikEkranu):
    """High-resolution screen driver (wraps SEWR)."""

    def __init__(self):
        self.sterownik = SEWR()

    def rysuj(self):
        self.sterownik.rysuj()


class SEWR:
    """High-resolution screen driver implementation."""

    def __init__(self):
        # "Drawing a figure with the high-resolution screen driver."
        self.tekst = "Rysuję figurę za pomocą sterownika ekranu wysokiej rozdzielczości."

    def rysuj(self):
        print(self.tekst)


class SterownikDrukarki(ABC):
    """Abstract printer driver."""

    @abstractmethod
    def drukuj(self):
        pass


class SterDrukNisRozdz(SterownikDrukarki):
    """Low-resolution printer driver (wraps SDNR)."""

    def __init__(self):
        self.sterownik = SDNR()

    def drukuj(self):
        self.sterownik.drukuj()


class SDNR:
    """Low-resolution printer driver implementation."""

    def __init__(self):
        # "Printing a figure with the low-resolution printer driver."
        self.tekst = "Drukuję figurę za pomocą sterownika drukarki niskiej rozdzielczości."

    def drukuj(self):
        print(self.tekst)


class SterDrukWysRozdz(SterownikDrukarki):
    """High-resolution printer driver (wraps SDWR)."""

    def __init__(self):
        self.sterownik = SDWR()

    def drukuj(self):
        self.sterownik.drukuj()


class SDWR:
    """High-resolution printer driver implementation."""

    def __init__(self):
        # "Printing a figure with the high-resolution printer driver."
        self.tekst = "Drukuję figurę za pomocą sterownika drukarki wysokiej rozdzielczości."

    def drukuj(self):
        print(self.tekst)


if __name__ == '__main__':
    # Demo: run the application once with each factory.
    konf = Konfiguracja()
    ap = ApNadzorujaca(konf.poierz_sterownik_niskiej_rozdzielczosci())
    ap.drukuj()
    ap.rysuj()
    print()
    ap = ApNadzorujaca(konf.poierz_sterownik_wysokiej_rozdzielczosci())
    ap.rysuj()
    ap.drukuj()
[ "lukaszchojnacki2@gmail.com" ]
lukaszchojnacki2@gmail.com
2dd164c21f51f254b46bd69ad2fcaba6affe1449
4564271e589df65d62a0d1c9d26a3d66342991cf
/Missions_to_Mars/scrape_mars.py
617e3331c6314bcdb5da970ac8d3b7e9f59eaf70
[]
no_license
katshamai/web-scraping-challenge
bc5aefe2b33882b41ddc49ec090200f402295c8b
9b4be5027891705f7650d3d97c7be2da32c93813
refs/heads/master
2022-12-03T06:18:41.108178
2020-08-22T15:57:25
2020-08-22T15:57:25
null
0
0
null
null
null
null
UTF-8
Python
false
false
3,416
py
# Dependencies
from splinter import Browser
from bs4 import BeautifulSoup as bs
import time
from webdriver_manager.chrome import ChromeDriverManager
import pandas as pd
import requests
from IPython.display import HTML


def init_browser():
    """Return a visible (non-headless) Chrome browser driven by splinter,
    downloading a matching chromedriver if needed."""
    executable_path = {'executable_path': ChromeDriverManager().install()}
    browser = Browser('chrome', **executable_path, headless=False)
    return browser


# Module-level accumulator filled in by scrape(); note it is shared across
# calls, so repeated scrapes overwrite (not reset) its keys.
mars_collection = {}


def scrape():
    """Scrape NASA/JPL/USGS pages for Mars news, the featured image, a facts
    table and hemisphere images; return them all in one dict."""
    # URL of page to be scraped
    nasa_url = 'https://mars.nasa.gov/news/'
    # Retrieve page with the requests module
    response = requests.get(nasa_url)
    # Create BeautifulSoup object; parse with 'lxml'
    nasa_soup = bs(response.text, 'lxml')
    # Extract the latest headline text
    news_title = nasa_soup.find('div', class_='content_title').find('a').text
    mars_collection['news_title'] = news_title
    # Extract the accompanying teaser paragraph
    news_paragraph = nasa_soup.find('div', class_="rollover_description_inner").text
    mars_collection['news_paragraph'] = news_paragraph

    # --- JPL Mars Space Images - Featured Image ---
    browser = init_browser()
    # Visit Nasa's JPL Mars Space url using splinter module
    jplNasa_url = 'https://www.jpl.nasa.gov/spaceimages/?search=&category=Mars'
    browser.visit(jplNasa_url)
    # create HTML object
    html = browser.html
    jpl_soup = bs(html, 'html.parser')
    # get base Nasa link
    main_url = 'https://www.jpl.nasa.gov'
    # The image URL lives in the article's inline CSS background-image;
    # strip the css wrapper and surrounding quotes.
    featured_image_url = jpl_soup.find('article')['style'].replace('background-image: url(', '').replace(');', '')[1:-1]
    mars_collection['featured_image_url'] = f"{main_url}{featured_image_url}"

    # --- Mars Facts ---
    # URL for Mars Facts
    facts_url = 'https://space-facts.com/mars/'
    # Use Pandas to parse all html tables on the page
    results = pd.read_html(facts_url)
    # The first table holds the planet profile
    mars_df = results[0]
    mars_df.head()
    # Add column names
    mars_df.columns = ['Information', 'Dimensions']
    mars_df.head()
    # Create HTML table from dataframe
    mars_collection['mars_html_table'] = mars_df.to_html(header=True, index=False)

    # --- Mars Hemispheres ---
    # Set up connection to url
    hemi_url = 'https://astrogeology.usgs.gov/search/results?q=hemisphere+enhanced&k1=target&v1=Mars'
    core_url = 'https://astrogeology.usgs.gov/search/map/Mars/Viking/'
    browser.visit(hemi_url)
    html = browser.html
    hemi_soup = bs(html, 'html.parser')
    # Identify all hemispheres list on page
    hemispheres = hemi_soup.find('div', class_='collapsible results')
    results = hemispheres.find_all('a')
    # Create a list to hold image url string and hemisphere title
    hemispheres_image_urls = []
    # Visit each hemisphere's detail page and grab its download link.
    for result in results:
        if result.h3:
            title = result.h3.text
            link = 'https://astrogeology.usgs.gov' + result['href']
            print(title, link)
            browser.visit(link)
            image_html = browser.html
            soup = bs(image_html, 'html.parser')
            soup_image = soup.find('div', class_='downloads').find('li').a['href']
            print(soup_image)
            mars_images = {'title': title, 'img_url': soup_image}
            hemispheres_image_urls.append(mars_images)
    # Store the collected hemisphere entries
    mars_collection['hemispheres_image_urls'] = hemispheres_image_urls
    return mars_collection
[ "64203831+katshamai@users.noreply.github.com" ]
64203831+katshamai@users.noreply.github.com
bc204ab56e855fa8c56fd746ff56d047dc26ef49
4d115936b966158d28ebcbc0c8a6eaf1134b7b09
/problems/0020-valid-parentheses.py
f44c7b857c772989c61f4af6fb8a3f64c55801d5
[ "MIT" ]
permissive
tzxyz/leetcode
14f96db680e0b5f45dcbbc8e7f29e93edecc02b6
d599e223bed1b957a1283face7606cb651b28ec4
refs/heads/master
2020-05-27T06:22:48.406946
2019-11-14T14:04:21
2019-11-14T14:04:21
188,520,416
0
0
null
null
null
null
UTF-8
Python
false
false
1,601
py
class Solution:
    """LeetCode 20 — Valid Parentheses.

    Given a string containing only '(', ')', '{', '}', '[' and ']',
    decide whether it is valid:
      * open brackets must be closed by the same type of bracket,
      * open brackets must be closed in the correct order,
      * the empty string counts as valid.

    Examples: "()" -> True, "()[]{}" -> True, "(]" -> False,
    "([)]" -> False, "{[]}" -> True.
    """

    def isValid(self, s: str) -> bool:
        """Stack-based check; O(n) time, O(n) space."""
        mappings = {
            ')': '(',
            ']': '[',
            '}': '{'
        }
        stack = []
        # Iterate the string directly: the original copied it into a list
        # and used list.pop(0), which is O(n) per character.
        for c in s:
            if c in mappings:
                # Closing bracket: must match the most recent opener.
                # BUG FIX: compare with == (value equality); the original
                # used `is`, which only worked by accident thanks to
                # CPython's interning of one-character strings.
                if not stack or stack[-1] != mappings[c]:
                    return False
                stack.pop()
            else:
                # Opening bracket: remember it.
                stack.append(c)
        # Valid only if every opener was closed.
        return len(stack) == 0

    def isValid2(self, s: str) -> bool:
        """Alternative: repeatedly delete adjacent matched pairs; the string
        is valid iff nothing remains. Shorter but O(n^2) worst case."""
        while '()' in s or '[]' in s or '{}' in s:
            s = s.replace('()', '')
            s = s.replace('[]', '')
            s = s.replace('{}', '')
        return s == ''


if __name__ == '__main__':
    tests = [
        '',
        '()',
        '()[]{}',
        '(]',
        '([)]',
        '{[]}'
    ]
    for test in tests:
        print(Solution().isValid(test))
        print(Solution().isValid2(test))
[ "tz.zhuo@protonmail.com" ]
tz.zhuo@protonmail.com
fc8f7fd662fe988e7f5f65c94869efdafc5af3eb
7f0548b7191b7589712af19baebafddae1d0505f
/dojoassignments/python/django/full_stack_django/login_and_registration/apps/login_registration_app/migrations/0001_initial.py
2e5994f927a8fa2ce9b4a5d96fd6c594f3453aa5
[]
no_license
mtjhartley/codingdojo
dd8eab1bd61fb847e44766e89fe3db2340468102
65dc558d19adbe62f85ad61c32cb1c392b56567c
refs/heads/master
2022-12-14T23:06:11.927445
2017-08-16T21:08:35
2017-08-16T21:08:35
92,218,728
1
5
null
2022-12-07T23:59:48
2017-05-23T20:46:03
Python
UTF-8
Python
false
false
884
py
# -*- coding: utf-8 -*- # Generated by Django 1.10 on 2017-06-20 19:02 from __future__ import unicode_literals from django.db import migrations, models class Migration(migrations.Migration): initial = True dependencies = [ ] operations = [ migrations.CreateModel( name='User', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('first_name', models.CharField(max_length=255)), ('last_name', models.CharField(max_length=255)), ('email', models.CharField(max_length=255)), ('password', models.CharField(max_length=45)), ('created_at', models.DateTimeField(auto_now_add=True)), ('updated_at', models.DateTimeField(auto_now=True)), ], ), ]
[ "mtjhartley@gmail.com" ]
mtjhartley@gmail.com
75fa745ec0847cfc72aa911151a27c4d373a3c39
0e76739a359df29cd8e89096e07054430edb36d7
/fileScanner.py
ff890a3e5f409b628d5ba1ac4adc53cbfd668f80
[]
no_license
Fjolnirr/FileScanner
c52ed19201c92f20fdc5d270bcb85f43683ccbfb
28379baf0578c8962ae7b203ced1418d6de42646
refs/heads/master
2023-06-18T16:04:27.834923
2021-07-17T10:35:50
2021-07-17T10:35:50
386,906,624
1
0
null
null
null
null
UTF-8
Python
false
false
2,574
py
import cv2 import numpy as np #Constants imHeight = 640 imWidth = 480 kernel = np.ones((5,5)) def preProcessing(src): imGray = cv2.cvtColor(src, cv2.COLOR_BGR2GRAY) imBlur = cv2.GaussianBlur(imGray, (5,5),1) imCanny = cv2.Canny(imBlur, 120, 200) imDilation = cv2.dilate(imCanny, kernel, iterations=3) imErosion = cv2.erode(imDilation, kernel, iterations=1) return imErosion def getContours(src): maxArea = 0 biggest = np.array([]) contours, hierarchy = cv2.findContours(src, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE) for cnt in contours: area = cv2.contourArea(cnt) if area > 4000: peri = cv2.arcLength(cnt, True) approx = cv2.approxPolyDP(cnt, 0.02 * peri, True) cornerCount = len(approx) if area > maxArea and cornerCount == 4: biggest = approx maxArea = area cv2.drawContours(img, biggest, -1, (255, 0, 0), 20) return biggest def reorder(myPoints): myPoints = myPoints.reshape((4,2)) myPointsNew = np.zeros((4,1,2),np.int32) add = myPoints.sum(1) myPointsNew[0] = myPoints[np.argmin(add)] myPointsNew[3] = myPoints[np.argmax(add)] diff = np.diff(myPoints,axis=1) myPointsNew[1] = myPoints[np.argmin(diff)] myPointsNew[2] = myPoints[np.argmax(diff)] return myPointsNew def getWarp(src, biggest): biggest = reorder(biggest) pts1 = np.float32(biggest) pts2 = np.float32([[0, 0], [imWidth, 0], [0, imHeight], [imWidth, imHeight]]) matrix = cv2.getPerspectiveTransform(pts1, pts2) imRes = cv2.warpPerspective(src, matrix, (imWidth, imHeight)) imRes = imRes[10:imRes.shape[0]-10, 10:imRes.shape[1]-10] imRes = cv2.resize(imRes,(imWidth,imHeight)) return imRes # Read and Show video or webcam feed cap = cv2.VideoCapture(1) cap.set(3,640) cap.set(4,480) cap.set(10,150) while True: success, frame = cap.read() if success: frame = cv2.resize(frame, (640, 480)) img = frame.copy() imThresh = preProcessing(img) biggest = getContours(imThresh) if len(biggest) == 4: imRes = getWarp(frame, biggest) cv2.imshow("Result Stream", imRes) cv2.imshow("Main Stream", img) cv2.imshow("Thresh", imThresh) k = 
cv2.waitKey(1) if k & 0xFF == ord('q'): break elif k == ord('c'): pass else: print("Video capture is " + success) break
[ "yusufcan2617@gmail.com" ]
yusufcan2617@gmail.com
da748d34cb6a27059cecf0ee84bd84376e2809bf
d5ad13232e3f1ced55f6956bc4cbda87925c8085
/cc_mcc_seq/SNVINDEL/tmp/3.1_tumor_minus_normal_exome_somatic_number/1_tumor_minus_normal_somatic.py
fb9fd44707bd9d72ef21d0edd8631473db5d86f3
[]
no_license
arvin580/SIBS
c0ba9a8a41f59cb333517c286f7d80300b9501a2
0cc2378bf62359ec068336ea4de16d081d0f58a4
refs/heads/master
2021-01-23T21:57:35.658443
2015-04-09T23:11:34
2015-04-09T23:11:34
null
0
0
null
null
null
null
UTF-8
Python
false
false
1,722
py
def tumor_minus_normal_to_somatic(tumorFile,normalFile,oFile) : dict_Normal=dict() ouFile=open(oFile,'w') inFile=open(normalFile) for line in inFile : line=line.strip() fields=line.split('\t') k='\t'.join(fields[1:-1]) dict_Normal[k]=1 inFile.close() inFile=open(tumorFile) for line in inFile : line=line.strip() fields=line.split('\t') k='\t'.join(fields[1:-1]) if k not in dict_Normal : ouFile.write(line+'\n') ouFile.close() tumor_minus_normal_to_somatic('sum_snp.exome_summary.pass012.ICC10A','sum_snp34.exome_summary.pass012.ICC10B','sum_snp.exome_summary.pass012.ICC10') tumor_minus_normal_to_somatic('sum_snp.exome_summary.pass012.ICC4A','sum_snp34.exome_summary.pass012.ICC4B','sum_snp.exome_summary.pass012.ICC4') tumor_minus_normal_to_somatic('sum_snp.exome_summary.pass012.ICC5A','sum_snp34.exome_summary.pass012.ICC5B','sum_snp.exome_summary.pass012.ICC5') tumor_minus_normal_to_somatic('sum_snp.exome_summary.pass012.ICC9A','sum_snp34.exome_summary.pass012.ICC9B','sum_snp.exome_summary.pass012.ICC9') tumor_minus_normal_to_somatic('sum_snp2.exome_summary.pass012.CHC10A','sum_snp34.exome_summary.pass012.CHC10B','sum_snp.exome_summary.pass012.CHC10') tumor_minus_normal_to_somatic('sum_snp2.exome_summary.pass012.CHC5A','sum_snp34.exome_summary.pass012.CHC5B','sum_snp.exome_summary.pass012.CHC5') tumor_minus_normal_to_somatic('sum_snp2.exome_summary.pass012.CHC6A','sum_snp34.exome_summary.pass012.CHC6B','sum_snp.exome_summary.pass012.CHC6') tumor_minus_normal_to_somatic('sum_snp2.exome_summary.pass012.CHC7A','sum_snp34.exome_summary.pass012.CHC7B','sum_snp.exome_summary.pass012.CHC7')
[ "sunhanice@gmail.com" ]
sunhanice@gmail.com
564d81d0051cf261ea8cf3a8060afb2cc81c2406
718f4a6f53da14dbd79031928900a26c4de65ccb
/optimize_NMDA_KIN2.py
bf70c8a044f4ff48aa4d86895cd5e68a6e41e55f
[]
no_license
neurosutras/CA1Sim
ff37e5ae96cc00d923bbcf333d75842c34156b5b
9a5796e5de9b9be477d61837c164fcbccbe3c8ce
refs/heads/master
2023-04-08T01:39:09.559475
2022-01-13T20:20:45
2022-01-13T20:20:45
29,497,263
4
3
null
null
null
null
UTF-8
Python
false
false
7,046
py
__author__ = 'Aaron D. Milstein' from specify_cells import * from plot_results import * import scipy.optimize as optimize import random """ This simulation uses scipy.optimize to iterate through NMDA_KIN mechanism parameters to fit target EPSP kinetics. """ #morph_filename = 'EB1-early-bifurcation.swc' morph_filename = 'EB2-late-bifurcation.swc' #mech_filename = '043015 pas_exp_scale kdr ka_scale ih_sig_scale - EB2' #mech_filename = '072515 optimized basal ka_scale dend_sh_ar_nas - EB2' mech_filename = '102915 interim dendritic excitability' def synaptic_kinetics_error(x, plot=0): """ :param x: list of parameters :param plot: int or bool: method can be called manually to compare actual to target and fit waveforms :return: float: Error """ spike_times = h.Vector([equilibrate]) for i, syn in enumerate(stim_syn_list): syn.target(syn_type).kon = x[0] syn.target(syn_type).koff = x[1] syn.target(syn_type).CC = x[2] syn.target(syn_type).CO = x[3] syn.target(syn_type).Beta = x[4] syn.target(syn_type).Alpha = x[5] syn.source.play(spike_times) sim.run(v_init) t = np.array(sim.tvec) g = np.array(sim.rec_list[0]['vec']) interp_t = np.arange(0, duration, 0.001) interp_g = np.interp(interp_t, t, g) """ Rc = np.interp(interp_t, t, np.array(sim.rec_list[1]['vec'])) Ro = np.interp(interp_t, t, np.array(sim.rec_list[2]['vec'])) Rb = np.interp(interp_t, t, np.array(sim.rec_list[3]['vec'])) Ro_peak = np.max(Ro) Ro_peak_loc = np.where(Ro == Ro_peak)[0][0] Rc_max = Ro_peak + Rc[Ro_peak_loc] + Rb[Ro_peak_loc] """ start, end = time2index(interp_t, equilibrate, duration) y = interp_g[start:end] interp_t = interp_t[start:end] interp_t -= interp_t[0] amp = np.max(y) t_peak = np.where(y == amp)[0][0] y /= amp rise_10 = np.where(y[0:t_peak] >= 0.1)[0][0] rise_90 = np.where(y[0:t_peak] >= 0.9)[0][0] rise_tau = interp_t[rise_90] - interp_t[rise_10] decay_90 = np.where(y[t_peak:] <= 0.9)[0][0] decay_10 = np.where(y[t_peak:] <= 0.1)[0] if decay_10.any(): decay_tau = interp_t[decay_10[0]] - 
interp_t[decay_90] else: decay_tau = 1000. # large error if trace has not decayed to 10% in 1 second result = {'rise_tau': rise_tau, 'decay_tau': decay_tau} # , 'Rc_max': Rc_max} spike_times = h.Vector([equilibrate + i * 10. for i in range(5)]) for i, syn in enumerate(stim_syn_list): syn.source.play(spike_times) sim.run(v_init) for i, syn in enumerate(stim_syn_list): syn.source.play(h.Vector()) t = np.array(sim.tvec) g = np.array(sim.rec_list[0]['vec']) interp_t = np.arange(0, duration, 0.001) interp_g = np.interp(interp_t, t, g) start, end = time2index(interp_t, equilibrate, duration) yf = interp_g[start:end] interp_t = interp_t[start:end] interp_t -= interp_t[0] facil_amp = np.max(yf) result['facilitation'] = facil_amp / amp yf /= amp Err = 0. for target in result: Err += ((target_val[target] - result[target])/target_range[target])**2. print('[kon, koff, CC, CO, Beta, Alpha]: [%.3f, %.3f, %.3f, %.3f, %.3f, %.3f], Error: %.3E, Rise: %.3f, Decay: ' '%.3f, facilitation: %.2f' % (x[0], x[1], x[2], x[3], x[4], x[5], Err, rise_tau, decay_tau, result['facilitation'])) if plot: plt.plot(interp_t, y) plt.plot(interp_t, yf) plt.show() plt.close() return Err equilibrate = 250. # time to steady-state duration = 1250. v_init = -67. 
num_syns = 1 cell = CA1_Pyr(morph_filename, mech_filename, full_spines=True) cell.zero_na() syn_type = 'NMDA_KIN2' sim = QuickSim(duration) # look for a trunk bifurcation trunk_bifurcation = [trunk for trunk in cell.trunk if len(trunk.children) > 1 and trunk.children[0].type == 'trunk' and trunk.children[1].type == 'trunk'] # get where the thickest trunk branch gives rise to the tuft if trunk_bifurcation: # follow the thicker trunk trunk = max(trunk_bifurcation[0].children[:2], key=lambda node: node.sec(0.).diam) trunk = (node for node in cell.trunk if cell.node_in_subtree(trunk, node) and 'tuft' in (child.type for child in node.children)).next() else: trunk = (node for node in cell.trunk if 'tuft' in (child.type for child in node.children)).next() tuft = (child for child in trunk.children if child.type == 'tuft').next() trunk = trunk_bifurcation[0] #sim.append_rec(cell, trunk, loc=1., description='trunk vm') spine_list = [] spine_list.extend(trunk.spines) for spine in spine_list: syn = Synapse(cell, spine, [syn_type], stochastic=0) local_random = random.Random() local_random.seed(0) stim_syn_list = [spine_list[i].synapses[0] for i in local_random.sample(range(len(spine_list)), num_syns)] for i, syn in enumerate(stim_syn_list): syn.target(syn_type).mg = 0.1 #syn.target(syn_type).gmax = 0.005 sim.append_rec(cell, syn.node, object=syn.target(syn_type), param='_ref_g') sim.append_rec(cell, syn.node, object=syn.target(syn_type), param='_ref_Rc') sim.append_rec(cell, syn.node, object=syn.target(syn_type), param='_ref_Ro') sim.append_rec(cell, syn.node, object=syn.target(syn_type), param='_ref_Rb') #the target values and acceptable ranges target_val = {'rise_tau': 3., 'decay_tau': 75., 'Rc_max': 0.6, 'facilitation': 1.3} # extrapolating from Chen...Murphy and Harnett...Magee, Popescu et al. 
target_range = {'rise_tau': 0.1, 'decay_tau': .5, 'Rc_max': 0.01, 'facilitation': 0.01} #the initial guess and bounds #x = [kon, koff, CC, CO, Beta, Alpha) #x0 = [10., .02, 1., 0.1, 0.04, 0.09] #x0 = [26.414, 1.903, 3.185, 5.119, 0.274, 0.0299] #x0 = [44.35, 2.46, 10.34, 1.06, 0.40, 0.045] x0 = [85.47, 0.68, 9.48, 2.56, 0.72, 0.078] xmin = [10., .01, .1, .1, .01, .01] xmax = [100., 10., 20., 20., 1., 1.] #x1 = [1099.70, 0.07, 1.70, 14.12, 4.64, 0.19] # old NMDA_KIN2, unrealistic kon x1 = [68.74, 1.43, 5.86, 3.32, 0.270, 0.034] mytakestep = Normalized_Step(x0, xmin, xmax) minimizer_kwargs = dict(method=null_minimizer) """ result = optimize.basinhopping(synaptic_kinetics_error, x0, niter=720, niter_success=200, disp=True, interval=20, minimizer_kwargs=minimizer_kwargs, take_step=mytakestep) synaptic_kinetics_error(result.x, plot=1) polished_result = optimize.minimize(synaptic_kinetics_error, result.x, method='Nelder-Mead', options={'ftol': 1e-3, 'xtol': 1e-3, 'disp': True}) """ polished_result = optimize.minimize(synaptic_kinetics_error, x0, method='Nelder-Mead', options={'ftol': 1e-3, 'xtol': 1e-3, 'disp': True}) synaptic_kinetics_error(polished_result.x, plot=1) #synaptic_kinetics_error(x1, plot=1)
[ "neurosutras@gmail.com" ]
neurosutras@gmail.com
e4bc7b6b121c3119273fd3ba6f036e1f1642683d
85be14600f251c09e364d712e629a4c2bb6082de
/CIS_211_CS_II/BlackJack/Deck.py
d37ed70b599adbe2570d07b7fcaac1ddf33bb6b1
[]
no_license
MrCQuinn/Homework-2014-15
04afc94d1ffc63919a48fbe66910457a9f8d05c7
cbaae190a3f086b7786cf391998f57b74217f72a
refs/heads/master
2020-07-03T06:22:51.718106
2016-11-19T07:35:30
2016-11-19T07:35:30
74,193,022
0
0
null
null
null
null
UTF-8
Python
false
false
1,912
py
#Charlie Quinn #Deck class from sys import argv from Card import * import random class Deck(list): """ deck class, inherits from list class and constructor builds a list of 52 card objects. shuffle randomizes list, deal returns first n items, and restore adds card items back into the list """ def __init__(self): for i in range(52): self.append(Card(i)) def shuffle(self): """ randomizes order of Card objects in Deck object """ random.shuffle(self) def deal(self,n): """ removes first n items from Deck object and then returns those items in a list """ h = self[:n] self[:n] = [] return h def restore(self,h): """ adds items in h back to the end of Deck object """ self = self.extend(h) class PinochleDeck(Deck): """ type of deck used in Pinochle, inherits from deck class and constructor builds deck with cards 9 and up with two of each card """ def __init__(self): for i in range(52): if Card(i).rank() > 6: self.append(Card(i)) self.append(Card(i)) def main(): print("Create a deck:") d = Deck() print(d) print("length of deck:") print(len(d)) d.shuffle() print("shuffled deck:") print(d) h = d.deal(7) print("Hand dealt:") print(h) print("updated deck") print(d) print("restored deck:") d.restore(h) print(d) print("Create a Pinochle deck:") d = PinochleDeck() print(d) print("length of deck:") print(len(d)) d.shuffle() print("shuffled deck:") print(d) h = d.deal(7) print("Hand dealt:") print(h) print("updated deck:") print(d) print("restored deck:") d.restore(h) print(d) if __name__ == '__main__': main()
[ "cquinn@uoregon.edu" ]
cquinn@uoregon.edu
f966fbe0f466c6922f12b7a2b81e15aaa0d47821
75257a35a3359c10e3633c469215fd1666293098
/nexmoverify/nexmo_verify.py
5d3cd8c4427e4290def9a0f60015755c8267f3b7
[]
no_license
kpnn/mezzanine-nexmoverify
83e77f19094dbb7b419529efac9135b6c0cb2ac5
c89fec04a03de4b80b86046687b8578d76ffb244
refs/heads/master
2021-01-23T13:29:14.780456
2015-04-27T12:26:57
2015-04-27T12:26:57
34,574,771
1
0
null
null
null
null
UTF-8
Python
false
false
2,814
py
import requests NEXMO_ERRORS = { '1': 'Throttled', '2': 'A parameter is missing', '3': 'Invalid value for parameter', '4': 'Invalid credentials were provided', '5': 'Internal Error', '6': 'Un-routable request', '7': 'The number is blacklisted for verification', '8': 'The api_key you supplied is for an account that has been barred from submitting messages', '9': 'Partner quota exceeded', '15': 'The destination number is not in a supported network', '16': 'The code inserted does not match the expected value', '17': 'A wrong code was provided too many times', '18': 'Too many request_ids provided', '101': 'No response found', } NEXMO_NUMBER_VERIFY_URL = 'https://api.nexmo.com/verify/json' NEXMO_NUMBER_VERIFY_CHECK_URL = 'https://api.nexmo.com/verify/check/json' #check_respone = { #"event_id":"eventId", #"status":"status", #"price":"price", #"currency":"currency", #"error_text":"error" #} def get_error_msg(status): return NEXMO_ERRORS.get(status) or 'Unknown error' class NexmoException(Exception): """Base exception for all Nexmo errors.""" pass class NexmoVerify(object): def __init__(self, api_key, api_secret, brand): self.api_key = api_key self.api_secret = api_secret self.brand = brand def verify(self, number, **kwargs): """ https://docs.nexmo.com/index.php/verify/verify """ data = { 'api_key': self.api_key, 'api_secret': self.api_secret, 'number': number, 'brand': self.brand } data.update(kwargs) return self._send_request(NEXMO_NUMBER_VERIFY_URL, data) def check(self, request_id, code, ip_address=None): """ https://docs.nexmo.com/index.php/verify/check """ data = { 'api_key': self.api_key, 'api_secret': self.api_secret, 'request_id': request_id, 'code': code, 'ip_address':ip_address } return self._send_request(NEXMO_NUMBER_VERIFY_CHECK_URL, data=data) def _send_request(self, url, data): r = requests.post(url, data=data) print r if r.status_code != 200: raise Exception("Respone status: " + r.status_code) res = r.json() print res try: status = res.get('status') 
except:pass if '0' != status : raise NexmoException(get_error_msg(status), res) return res # if __name__=='__main__': # verify = NexmoVerify(api_key='', # api_secret='', # brand='MyBrand') # print verify.verify('phone', country='') # print verify.check('request_id', 'code')
[ "ppp1@w-station.site" ]
ppp1@w-station.site
f3d0ad0a8d1c4d551aee77afba5afdf9bf6873a3
222760b72cf00f618655a57cf3fec411dd3e4e66
/project/migrations/0001_initial.py
bc2777a171bcb73f857314db4afd6f28e052f305
[]
no_license
Nieaunder7/cms
cd8affbc8cb2172335090d71555f554fa6018fe3
86572985ab30f3750c7603ecc8bbe752a14dfa60
refs/heads/master
2021-07-22T06:58:53.203359
2017-10-31T01:03:31
2017-10-31T01:03:31
107,638,245
0
0
null
2017-10-30T07:33:45
2017-10-20T06:05:10
JavaScript
UTF-8
Python
false
false
1,681
py
# -*- coding: utf-8 -*- # Generated by Django 1.11.6 on 2017-10-23 07:23 from __future__ import unicode_literals from django.db import migrations, models import django.db.models.deletion class Migration(migrations.Migration): initial = True dependencies = [ ('member', '0001_initial'), ('device', '0001_initial'), ] operations = [ migrations.CreateModel( name='Project', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('name', models.CharField(max_length=200)), ('start_date', models.DateTimeField()), ('end_date', models.DateTimeField()), ], ), migrations.CreateModel( name='ProjectDevice', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('device', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='device.BaseDevice')), ('project', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='project.Project')), ], ), migrations.CreateModel( name='ProjectMember', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('member', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='member.Member')), ('project', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='project.Project')), ], ), ]
[ "sieging@gmail.com" ]
sieging@gmail.com
31f2b8745f0c05efbfdc00f2a5baeca03f082f32
5115944414e0baa824008d33b715f66ccf7c3926
/mysite/settings.py
59b21c70ed35e19d2236040ee185bf03d518d1a1
[]
no_license
Dezhikov/my-first-blog
17de57d5fcd51f106ff6b4da399b7a7756013797
1835c9798443ef5b0fc79e525714f7119379693b
refs/heads/master
2020-04-18T02:22:29.421580
2019-01-23T15:20:44
2019-01-23T15:20:44
167,054,962
0
0
null
null
null
null
UTF-8
Python
false
false
3,207
py
""" Django settings for mysite project. Generated by 'django-admin startproject' using Django 1.11.18. For more information on this file, see https://docs.djangoproject.com/en/1.11/topics/settings/ For the full list of settings and their values, see https://docs.djangoproject.com/en/1.11/ref/settings/ """ import os # Build paths inside the project like this: os.path.join(BASE_DIR, ...) BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__))) # Quick-start development settings - unsuitable for production # See https://docs.djangoproject.com/en/1.11/howto/deployment/checklist/ # SECURITY WARNING: keep the secret key used in production secret! SECRET_KEY = 'vz=9ech0a16)4g3y)$uz6j7388o%$!+4$id5fe7gkc6ihi%r_&' # SECURITY WARNING: don't run with debug turned on in production! DEBUG = True ALLOWED_HOSTS = ['127.0.0.1', 'Dezhikov.pythonanywhere.com'] # Application definition INSTALLED_APPS = [ 'django.contrib.admin', 'django.contrib.auth', 'django.contrib.contenttypes', 'django.contrib.sessions', 'django.contrib.messages', 'django.contrib.staticfiles', 'blog', ] MIDDLEWARE = [ 'django.middleware.security.SecurityMiddleware', 'django.contrib.sessions.middleware.SessionMiddleware', 'django.middleware.common.CommonMiddleware', 'django.middleware.csrf.CsrfViewMiddleware', 'django.contrib.auth.middleware.AuthenticationMiddleware', 'django.contrib.messages.middleware.MessageMiddleware', 'django.middleware.clickjacking.XFrameOptionsMiddleware', ] ROOT_URLCONF = 'mysite.urls' TEMPLATES = [ { 'BACKEND': 'django.template.backends.django.DjangoTemplates', 'DIRS': [], 'APP_DIRS': True, 'OPTIONS': { 'context_processors': [ 'django.template.context_processors.debug', 'django.template.context_processors.request', 'django.contrib.auth.context_processors.auth', 'django.contrib.messages.context_processors.messages', ], }, }, ] WSGI_APPLICATION = 'mysite.wsgi.application' # Database # https://docs.djangoproject.com/en/1.11/ref/settings/#databases DATABASES = { 'default': { 
'ENGINE': 'django.db.backends.sqlite3', 'NAME': os.path.join(BASE_DIR, 'db.sqlite3'), } } # Password validation # https://docs.djangoproject.com/en/1.11/ref/settings/#auth-password-validators AUTH_PASSWORD_VALIDATORS = [ { 'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator', }, { 'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator', }, { 'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator', }, { 'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator', }, ] # Internationalization # https://docs.djangoproject.com/en/1.11/topics/i18n/ LANGUAGE_CODE = 'ru-ru' TIME_ZONE = 'Europe/Moscow' USE_I18N = True USE_L10N = True USE_TZ = True # Static files (CSS, JavaScript, Images) # https://docs.djangoproject.com/en/1.11/howto/static-files/ STATIC_URL = '/static/' STATIC_ROOT = os.path.join(BASE_DIR, 'static')
[ "vladislav.dezhikov@mail.ru" ]
vladislav.dezhikov@mail.ru
0e9eb1dc9ccde3353f68563e2b328daf405bed95
8ee9ce243beb31c876c79edbfd04155ae1466f97
/products/views.py
8abb60cfdcdf2657fe51afcb5d03959c67d821c8
[]
no_license
Code-Institute-Submissions/ms-4-kuk-marketing
1d23a519300c5b5b1883d2b830df80f5775bc896
a159587e1877cd5adc00cff5d42ce27beece0488
refs/heads/master
2022-12-20T09:23:41.165437
2020-10-01T18:38:25
2020-10-01T18:38:25
null
0
0
null
null
null
null
UTF-8
Python
false
false
4,629
py
from django.shortcuts import render, redirect, reverse, get_object_or_404 from django.contrib import messages from django.contrib.auth.decorators import login_required from django.db.models import Q from django.db.models.functions import Lower from .models import Product, Category from .forms import ProductForm # Create views here. Copied and modified from Boutique-Ado # def all_products(request): """ A view to show all products, including sorting and search queries """ products = Product.objects.all() query = None categories = None sort = None direction = None if request.GET: if 'sort' in request.GET: sortkey = request.GET['sort'] sort = sortkey if sortkey == 'name': sortkey = 'lower_name' products = products.annotate(lower_name=Lower('name')) if sortkey == 'category': sortkey = 'category__name' if 'direction' in request.GET: direction = request.GET['direction'] if direction == 'desc': sortkey = f'-{sortkey}' products = products.order_by(sortkey) if 'category' in request.GET: categories = request.GET['category'].split(',') products = products.filter(category__name__in=categories) categories = Category.objects.filter(name__in=categories) if 'q' in request.GET: query = request.GET['q'] if not query: messages.error(request, "Please enter search criteria!") return redirect(reverse('products')) queries = Q(name__icontains=query) | Q(description__icontains=query) products = products.filter(queries) current_sorting = f'{sort}_{direction}' context = { 'products': products, 'search_term': query, 'current_categories': categories, 'current_sorting': current_sorting, } return render(request, 'products/products.html', context) def product_detail(request, product_id): """ A view to show individual product details """ product = get_object_or_404(Product, pk=product_id) context = { 'product': product, } return render(request, 'products/product_detail.html', context) @login_required def add_product(request): """ Add a product to the store """ if not request.user.is_superuser: 
messages.error(request, 'Sorry not allowed - authorised personel only.') return redirect(reverse('home')) if request.method == 'POST': form = ProductForm(request.POST, request.FILES) if form.is_valid(): product = form.save() messages.success(request, 'Successfully added product!') return redirect(reverse('product_detail', args=[product.id])) else: messages.error(request, 'Add product failed. Please make sure form is valid.') else: form = ProductForm() template = 'products/add_product.html' context = { 'form': form, } return render(request, template, context) @login_required def edit_product(request, product_id): """ Edit a product in the store """ if not request.user.is_superuser: messages.error(request, 'Sorry - authorised personel only ') return redirect(reverse('home')) product = get_object_or_404(Product, pk=product_id) if request.method == 'POST': form = ProductForm(request.POST, request.FILES, instance=product) if form.is_valid(): form.save() messages.success(request, 'Product successfully updated!') return redirect(reverse('product_detail', args=[product.id])) else: messages.error(request, 'Update product failed, Please ensure form is valid') else: form = ProductForm(instance=product) messages.info(request, f'You are editing {product.name}') template = 'products/edit_product.html' context = { 'form': form, 'product': product, } return render(request, template, context) @login_required def delete_product(request, product_id): """ Delete a product from the store """ if not request.user.is_superuser: messages.error(request, 'Sorry - authorised personel only.') return redirect(reverse('home')) product = get_object_or_404(Product, pk=product_id) product.delete() messages.success(request, 'Product deleted!') return redirect(reverse('products'))
[ "s.oliver1@icloud.com" ]
s.oliver1@icloud.com
40decd2f4d8e3de4229e22f1e7dbb250b25cccf8
a281e3b74e0b29a317dcac75b65f25c74faeba98
/code/models/hedwig/models/kim_cnn/args.py
d14e45b6e13bb2a7f9e8302ae6f0a37d35d2470f
[ "Apache-2.0" ]
permissive
elisaF/subjective_discourse
44b507a8e7fd3e66a0edbb00869d14a67d5191ea
a0fd62e089b9adf843a8e1a094c48fd9d1beb400
refs/heads/master
2023-06-22T18:30:29.803378
2021-04-12T15:25:30
2021-04-12T15:25:30
279,589,934
3
1
null
2023-06-12T21:29:07
2020-07-14T13:16:57
Jupyter Notebook
UTF-8
Python
false
false
1,267
py
import os import models.args def get_args(): parser = models.args.get_args() parser.add_argument('--dataset', type=str, default='CongressionalHearing', choices=['CongressionalHearing']) parser.add_argument('--mode', type=str, default='multichannel', choices=['rand', 'static', 'non-static', 'multichannel']) parser.add_argument('--output-channel', type=int, default=100) parser.add_argument('--words-dim', type=int, default=300) parser.add_argument('--embed-dim', type=int, default=300) parser.add_argument('--dropout', type=float, default=0.5) parser.add_argument('--epoch-decay', type=int, default=15) parser.add_argument('--weight-decay', type=float, default=0) parser.add_argument('--word-vectors-dir', default=os.path.join(os.pardir, os.pardir, os.pardir, 'hedwig-data', 'embeddings', 'word2vec')) parser.add_argument('--word-vectors-file', default='GoogleNews-vectors-negative300.txt') parser.add_argument('--save-path', type=str, default=os.path.join('model_checkpoints', 'kim_cnn')) parser.add_argument('--resume-snapshot', type=str) parser.add_argument('--trained-model', type=str) args = parser.parse_args() return args
[ "elisa@ferracane.com" ]
elisa@ferracane.com
8d1a3522d4cfd4b873a8f1089516307ab89e8605
d0281cecabd070c399d18612bbb3ba11913c0ab1
/venv/lib/python3.6/site-packages/tensorflow/python/ops/gen_array_ops.py
2f6941ef65e3378a7104657698538b9f1db55d8d
[ "MIT" ]
permissive
yuxuan1995liu/darkflowyolo_detection
f0b7aa0a667591da9736fb2860d6080b2fc41577
a7807e9b85833e3f877d46bb60e8fa7d0596a10b
refs/heads/master
2022-11-03T04:00:42.996414
2019-05-10T01:58:59
2019-05-10T01:58:59
185,880,108
0
1
MIT
2022-10-30T16:38:49
2019-05-09T22:28:01
Python
UTF-8
Python
false
false
514,131
py
"""Python wrappers around TensorFlow ops. This file is MACHINE GENERATED! Do not edit. Original C++ source file: array_ops.cc """ import collections as _collections import six as _six from tensorflow.python import pywrap_tensorflow as _pywrap_tensorflow from tensorflow.python.eager import context as _context from tensorflow.python.eager import core as _core from tensorflow.python.eager import execute as _execute from tensorflow.python.framework import dtypes as _dtypes from tensorflow.python.framework import errors as _errors from tensorflow.python.framework import tensor_shape as _tensor_shape from tensorflow.core.framework import op_def_pb2 as _op_def_pb2 # Needed to trigger the call to _set_call_cpp_shape_fn. from tensorflow.python.framework import common_shapes as _common_shapes from tensorflow.python.framework import op_def_registry as _op_def_registry from tensorflow.python.framework import ops as _ops from tensorflow.python.framework import op_def_library as _op_def_library from tensorflow.python.util.deprecation import deprecated_endpoints from tensorflow.python.util import dispatch as _dispatch from tensorflow.python.util.tf_export import tf_export def batch_matrix_band_part(input, num_lower, num_upper, name=None): r"""TODO: add doc. Args: input: A `Tensor`. num_lower: A `Tensor` of type `int64`. num_upper: A `Tensor` of type `int64`. name: A name for the operation (optional). Returns: A `Tensor`. Has the same type as `input`. """ _ctx = _context._context if _ctx is not None and _ctx._eager_context.is_eager: try: _result = _pywrap_tensorflow.TFE_Py_FastPathExecute( _ctx._context_handle, _ctx._eager_context.device_name, "BatchMatrixBandPart", name, _ctx._post_execution_callbacks, input, num_lower, num_upper) return _result except _core._FallbackException: try: return batch_matrix_band_part_eager_fallback( input, num_lower, num_upper, name=name, ctx=_ctx) except _core._SymbolicException: pass # Add nodes to the TensorFlow graph. 
except _core._NotOkStatusException as e: if name is not None: message = e.message + " name: " + name else: message = e.message _six.raise_from(_core._status_to_exception(e.code, message), None) # Add nodes to the TensorFlow graph. _, _, _op = _op_def_lib._apply_op_helper( "BatchMatrixBandPart", input=input, num_lower=num_lower, num_upper=num_upper, name=name) _result = _op.outputs[:] _inputs_flat = _op.inputs _attrs = ("T", _op.get_attr("T")) _execute.record_gradient( "BatchMatrixBandPart", _inputs_flat, _attrs, _result, name) _result, = _result return _result def batch_matrix_band_part_eager_fallback(input, num_lower, num_upper, name=None, ctx=None): r"""This is the slowpath function for Eager mode. This is for function batch_matrix_band_part """ _ctx = ctx if ctx else _context.context() _attr_T, (input,) = _execute.args_to_matching_eager([input], _ctx) num_lower = _ops.convert_to_tensor(num_lower, _dtypes.int64) num_upper = _ops.convert_to_tensor(num_upper, _dtypes.int64) _inputs_flat = [input, num_lower, num_upper] _attrs = ("T", _attr_T) _result = _execute.execute(b"BatchMatrixBandPart", 1, inputs=_inputs_flat, attrs=_attrs, ctx=_ctx, name=name) _execute.record_gradient( "BatchMatrixBandPart", _inputs_flat, _attrs, _result, name) _result, = _result return _result def batch_matrix_diag(diagonal, name=None): r"""TODO: add doc. Args: diagonal: A `Tensor`. name: A name for the operation (optional). Returns: A `Tensor`. Has the same type as `diagonal`. """ _ctx = _context._context if _ctx is not None and _ctx._eager_context.is_eager: try: _result = _pywrap_tensorflow.TFE_Py_FastPathExecute( _ctx._context_handle, _ctx._eager_context.device_name, "BatchMatrixDiag", name, _ctx._post_execution_callbacks, diagonal) return _result except _core._FallbackException: try: return batch_matrix_diag_eager_fallback( diagonal, name=name, ctx=_ctx) except _core._SymbolicException: pass # Add nodes to the TensorFlow graph. 
except _core._NotOkStatusException as e: if name is not None: message = e.message + " name: " + name else: message = e.message _six.raise_from(_core._status_to_exception(e.code, message), None) # Add nodes to the TensorFlow graph. _, _, _op = _op_def_lib._apply_op_helper( "BatchMatrixDiag", diagonal=diagonal, name=name) _result = _op.outputs[:] _inputs_flat = _op.inputs _attrs = ("T", _op.get_attr("T")) _execute.record_gradient( "BatchMatrixDiag", _inputs_flat, _attrs, _result, name) _result, = _result return _result def batch_matrix_diag_eager_fallback(diagonal, name=None, ctx=None): r"""This is the slowpath function for Eager mode. This is for function batch_matrix_diag """ _ctx = ctx if ctx else _context.context() _attr_T, (diagonal,) = _execute.args_to_matching_eager([diagonal], _ctx) _inputs_flat = [diagonal] _attrs = ("T", _attr_T) _result = _execute.execute(b"BatchMatrixDiag", 1, inputs=_inputs_flat, attrs=_attrs, ctx=_ctx, name=name) _execute.record_gradient( "BatchMatrixDiag", _inputs_flat, _attrs, _result, name) _result, = _result return _result def batch_matrix_diag_part(input, name=None): r"""TODO: add doc. Args: input: A `Tensor`. name: A name for the operation (optional). Returns: A `Tensor`. Has the same type as `input`. """ _ctx = _context._context if _ctx is not None and _ctx._eager_context.is_eager: try: _result = _pywrap_tensorflow.TFE_Py_FastPathExecute( _ctx._context_handle, _ctx._eager_context.device_name, "BatchMatrixDiagPart", name, _ctx._post_execution_callbacks, input) return _result except _core._FallbackException: try: return batch_matrix_diag_part_eager_fallback( input, name=name, ctx=_ctx) except _core._SymbolicException: pass # Add nodes to the TensorFlow graph. except _core._NotOkStatusException as e: if name is not None: message = e.message + " name: " + name else: message = e.message _six.raise_from(_core._status_to_exception(e.code, message), None) # Add nodes to the TensorFlow graph. 
  # Graph mode: build a BatchMatrixDiagPart node in the current graph.
  _, _, _op = _op_def_lib._apply_op_helper(
        "BatchMatrixDiagPart", input=input, name=name)
  _result = _op.outputs[:]
  _inputs_flat = _op.inputs
  _attrs = ("T", _op.get_attr("T"))
  # Register inputs/attrs so the gradient machinery can differentiate this op.
  _execute.record_gradient(
      "BatchMatrixDiagPart", _inputs_flat, _attrs, _result, name)
  _result, = _result
  return _result


def batch_matrix_diag_part_eager_fallback(input, name=None, ctx=None):
  r"""This is the slowpath function for Eager mode.
  This is for function batch_matrix_diag_part
  """
  _ctx = ctx if ctx else _context.context()
  _attr_T, (input,) = _execute.args_to_matching_eager([input], _ctx)
  _inputs_flat = [input]
  _attrs = ("T", _attr_T)
  _result = _execute.execute(b"BatchMatrixDiagPart", 1, inputs=_inputs_flat,
                             attrs=_attrs, ctx=_ctx, name=name)
  _execute.record_gradient(
      "BatchMatrixDiagPart", _inputs_flat, _attrs, _result, name)
  _result, = _result
  return _result


def batch_matrix_set_diag(input, diagonal, name=None):
  r"""TODO: add doc.

  Args:
    input: A `Tensor`.
    diagonal: A `Tensor`. Must have the same type as `input`.
    name: A name for the operation (optional).

  Returns:
    A `Tensor`. Has the same type as `input`.
  """
  _ctx = _context._context
  if _ctx is not None and _ctx._eager_context.is_eager:
    # Eager fast path: execute the kernel directly via the C API.
    try:
      _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
        _ctx._context_handle, _ctx._eager_context.device_name,
        "BatchMatrixSetDiag", name, _ctx._post_execution_callbacks, input,
        diagonal)
      return _result
    except _core._FallbackException:
      try:
        return batch_matrix_set_diag_eager_fallback(
            input, diagonal, name=name, ctx=_ctx)
      except _core._SymbolicException:
        pass  # Add nodes to the TensorFlow graph.
    except _core._NotOkStatusException as e:
      if name is not None:
        message = e.message + " name: " + name
      else:
        message = e.message
      # Convert the C-level status into the matching Python exception.
      _six.raise_from(_core._status_to_exception(e.code, message), None)
  # Add nodes to the TensorFlow graph.
  # Graph mode: build a BatchMatrixSetDiag node in the current graph.
  _, _, _op = _op_def_lib._apply_op_helper(
        "BatchMatrixSetDiag", input=input, diagonal=diagonal, name=name)
  _result = _op.outputs[:]
  _inputs_flat = _op.inputs
  _attrs = ("T", _op.get_attr("T"))
  # Register inputs/attrs so the gradient machinery can differentiate this op.
  _execute.record_gradient(
      "BatchMatrixSetDiag", _inputs_flat, _attrs, _result, name)
  _result, = _result
  return _result


def batch_matrix_set_diag_eager_fallback(input, diagonal, name=None, ctx=None):
  r"""This is the slowpath function for Eager mode.
  This is for function batch_matrix_set_diag
  """
  _ctx = ctx if ctx else _context.context()
  # Coerce both tensors to one common dtype T, as the op requires.
  _attr_T, _inputs_T = _execute.args_to_matching_eager([input, diagonal], _ctx)
  (input, diagonal) = _inputs_T
  _inputs_flat = [input, diagonal]
  _attrs = ("T", _attr_T)
  _result = _execute.execute(b"BatchMatrixSetDiag", 1, inputs=_inputs_flat,
                             attrs=_attrs, ctx=_ctx, name=name)
  _execute.record_gradient(
      "BatchMatrixSetDiag", _inputs_flat, _attrs, _result, name)
  _result, = _result
  return _result


def batch_to_space(input, crops, block_size, name=None):
  r"""BatchToSpace for 4-D tensors of type T.

  This is a legacy version of the more general BatchToSpaceND.

  Rearranges (permutes) data from batch into blocks of spatial data, followed by
  cropping. This is the reverse transformation of SpaceToBatch. More specifically,
  this op outputs a copy of the input tensor where values from the `batch`
  dimension are moved in spatial blocks to the `height` and `width` dimensions,
  followed by cropping along the `height` and `width` dimensions.

  Args:
    input: A `Tensor`. 4-D tensor with shape
      `[batch*block_size*block_size, height_pad/block_size, width_pad/block_size,
        depth]`. Note that the batch size of the input tensor must be divisible by
      `block_size * block_size`.
    crops: A `Tensor`. Must be one of the following types: `int32`, `int64`.
      2-D tensor of non-negative integers with shape `[2, 2]`.
      It specifies
      how many elements to crop from the intermediate result across the spatial
      dimensions as follows:

          crops = [[crop_top, crop_bottom], [crop_left, crop_right]]
    block_size: An `int` that is `>= 2`.
    name: A name for the operation (optional).

  Returns:
    A `Tensor`. Has the same type as `input`.
  """
  _ctx = _context._context
  if _ctx is not None and _ctx._eager_context.is_eager:
    # Eager fast path: execute the kernel directly via the C API.
    try:
      _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
        _ctx._context_handle, _ctx._eager_context.device_name, "BatchToSpace",
        name, _ctx._post_execution_callbacks, input, crops, "block_size",
        block_size)
      return _result
    except _core._FallbackException:
      try:
        return batch_to_space_eager_fallback(
            input, crops, block_size=block_size, name=name, ctx=_ctx)
      except _core._SymbolicException:
        pass  # Add nodes to the TensorFlow graph.
    except _core._NotOkStatusException as e:
      if name is not None:
        message = e.message + " name: " + name
      else:
        message = e.message
      # Convert the C-level status into the matching Python exception.
      _six.raise_from(_core._status_to_exception(e.code, message), None)
  # Add nodes to the TensorFlow graph.
  block_size = _execute.make_int(block_size, "block_size")
  _, _, _op = _op_def_lib._apply_op_helper(
        "BatchToSpace", input=input, crops=crops, block_size=block_size,
                        name=name)
  _result = _op.outputs[:]
  _inputs_flat = _op.inputs
  _attrs = ("T", _op.get_attr("T"), "block_size",
            _op.get_attr("block_size"), "Tidx", _op.get_attr("Tidx"))
  # Register inputs/attrs so the gradient machinery can differentiate this op.
  _execute.record_gradient(
      "BatchToSpace", _inputs_flat, _attrs, _result, name)
  _result, = _result
  return _result


def batch_to_space_eager_fallback(input, crops, block_size, name=None, ctx=None):
  r"""This is the slowpath function for Eager mode.
  This is for function batch_to_space
  """
  _ctx = ctx if ctx else _context.context()
  _attr_T, (input,) = _execute.args_to_matching_eager([input], _ctx)
  # Tidx defaults to int32 when crops carries no dtype of its own.
  _attr_Tidx, (crops,) = _execute.args_to_matching_eager([crops], _ctx, _dtypes.int32)
  _inputs_flat = [input, crops]
  _attrs = ("T", _attr_T, "block_size", block_size, "Tidx", _attr_Tidx)
  _result = _execute.execute(b"BatchToSpace", 1, inputs=_inputs_flat,
                             attrs=_attrs, ctx=_ctx, name=name)
  _execute.record_gradient(
      "BatchToSpace", _inputs_flat, _attrs, _result, name)
  _result, = _result
  return _result


@_dispatch.add_dispatch_list
@tf_export(v1=['batch_to_space_nd', 'manip.batch_to_space_nd'])
@deprecated_endpoints('batch_to_space_nd', 'manip.batch_to_space_nd')
def batch_to_space_nd(input, block_shape, crops, name=None):
  r"""BatchToSpace for N-D tensors of type T.

  This operation reshapes the "batch" dimension 0 into `M + 1` dimensions of shape
  `block_shape + [batch]`, interleaves these blocks back into the grid defined by
  the spatial dimensions `[1, ..., M]`, to obtain a result with the same rank as
  the input.  The spatial dimensions of this intermediate result are then
  optionally cropped according to `crops` to produce the output.  This is the
  reverse of SpaceToBatch.  See below for a precise description.

  Args:
    input: A `Tensor`.
      N-D with shape `input_shape = [batch] + spatial_shape + remaining_shape`,
      where spatial_shape has M dimensions.
    block_shape: A `Tensor`. Must be one of the following types: `int32`, `int64`.
      1-D with shape `[M]`, all values must be >= 1.
    crops: A `Tensor`. Must be one of the following types: `int32`, `int64`.
      2-D with shape `[M, 2]`, all values must be >= 0.
        `crops[i] = [crop_start, crop_end]` specifies the amount to crop from input
        dimension `i + 1`, which corresponds to spatial dimension `i`.  It is
        required that
        `crop_start[i] + crop_end[i] <= block_shape[i] * input_shape[i + 1]`.

      This operation is equivalent to the following steps:

      1.
      Reshape `input` to `reshaped` of shape:
           [block_shape[0], ..., block_shape[M-1],
            batch / prod(block_shape),
            input_shape[1], ..., input_shape[N-1]]

      2. Permute dimensions of `reshaped` to produce `permuted` of shape
           [batch / prod(block_shape),

            input_shape[1], block_shape[0],
            ...,
            input_shape[M], block_shape[M-1],

            input_shape[M+1], ..., input_shape[N-1]]

      3. Reshape `permuted` to produce `reshaped_permuted` of shape
           [batch / prod(block_shape),

            input_shape[1] * block_shape[0],
            ...,
            input_shape[M] * block_shape[M-1],

            input_shape[M+1],
            ...,
            input_shape[N-1]]

      4. Crop the start and end of dimensions `[1, ..., M]` of
         `reshaped_permuted` according to `crops` to produce the output of shape:
           [batch / prod(block_shape),

            input_shape[1] * block_shape[0] - crops[0,0] - crops[0,1],
            ...,
            input_shape[M] * block_shape[M-1] - crops[M-1,0] - crops[M-1,1],

            input_shape[M+1], ..., input_shape[N-1]]

      Some examples:

      (1) For the following input of shape `[4, 1, 1, 1]`, `block_shape = [2, 2]`, and
          `crops = [[0, 0], [0, 0]]`:

      ```
      [[[[1]]], [[[2]]], [[[3]]], [[[4]]]]
      ```

      The output tensor has shape `[1, 2, 2, 1]` and value:

      ```
      x = [[[[1], [2]], [[3], [4]]]]
      ```

      (2) For the following input of shape `[4, 1, 1, 3]`, `block_shape = [2, 2]`, and
          `crops = [[0, 0], [0, 0]]`:

      ```
      [[[1, 2, 3]], [[4, 5, 6]], [[7, 8, 9]], [[10, 11, 12]]]
      ```

      The output tensor has shape `[1, 2, 2, 3]` and value:

      ```
      x = [[[[1, 2, 3], [4, 5, 6]],
            [[7, 8, 9], [10, 11, 12]]]]
      ```

      (3) For the following input of shape `[4, 2, 2, 1]`, `block_shape = [2, 2]`, and
          `crops = [[0, 0], [0, 0]]`:

      ```
      x = [[[[1], [3]], [[9], [11]]],
           [[[2], [4]], [[10], [12]]],
           [[[5], [7]], [[13], [15]]],
           [[[6], [8]], [[14], [16]]]]
      ```

      The output tensor has shape `[1, 4, 4, 1]` and value:

      ```
      x = [[[1],  [2],  [3],  [4]],
           [[5],  [6],  [7],  [8]],
           [[9],  [10], [11], [12]],
           [[13], [14], [15], [16]]]
      ```

      (4) For the following input of shape `[8, 1, 3, 1]`, `block_shape = [2, 2]`, and
          `crops = [[0, 0], [2, 0]]`:

      ```
      x = [[[[0], [1], [3]]], [[[0],
                               [2], [4]]],
           [[[0], [10], [12]]],
           [[[0], [5], [7]]], [[[0], [13], [15]]],
           [[[0], [6], [8]]], [[[0], [14], [16]]]]
      ```

      The output tensor has shape `[2, 2, 4, 1]` and value:

      ```
      x = [[[[1],  [2],  [3],  [4]],
            [[5],  [6],  [7],  [8]]],
           [[[9],  [10], [11], [12]],
            [[13], [14], [15], [16]]]]
      ```
    name: A name for the operation (optional).

  Returns:
    A `Tensor`. Has the same type as `input`.
  """
  _ctx = _context._context
  if _ctx is not None and _ctx._eager_context.is_eager:
    # Eager fast path: execute the kernel directly via the C API.
    try:
      _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
        _ctx._context_handle, _ctx._eager_context.device_name,
        "BatchToSpaceND", name, _ctx._post_execution_callbacks, input,
        block_shape, crops)
      return _result
    except _core._FallbackException:
      try:
        return batch_to_space_nd_eager_fallback(
            input, block_shape, crops, name=name, ctx=_ctx)
      except _core._SymbolicException:
        pass  # Add nodes to the TensorFlow graph.
      except (TypeError, ValueError):
        # Give registered dispatchers (e.g. for non-Tensor types) a chance
        # to handle the call before propagating the error.
        result = _dispatch.dispatch(
              batch_to_space_nd, input=input, block_shape=block_shape,
                                 crops=crops, name=name)
        if result is not _dispatch.OpDispatcher.NOT_SUPPORTED:
          return result
        raise
    except _core._NotOkStatusException as e:
      if name is not None:
        message = e.message + " name: " + name
      else:
        message = e.message
      # Convert the C-level status into the matching Python exception.
      _six.raise_from(_core._status_to_exception(e.code, message), None)
  # Add nodes to the TensorFlow graph.
  try:
    _, _, _op = _op_def_lib._apply_op_helper(
        "BatchToSpaceND", input=input, block_shape=block_shape, crops=crops,
                          name=name)
  except (TypeError, ValueError):
    # Give registered dispatchers a chance before propagating the error.
    result = _dispatch.dispatch(
          batch_to_space_nd, input=input, block_shape=block_shape,
                             crops=crops, name=name)
    if result is not _dispatch.OpDispatcher.NOT_SUPPORTED:
      return result
    raise
  _result = _op.outputs[:]
  _inputs_flat = _op.inputs
  _attrs = ("T", _op.get_attr("T"), "Tblock_shape",
            _op.get_attr("Tblock_shape"), "Tcrops", _op.get_attr("Tcrops"))
  # Register inputs/attrs so the gradient machinery can differentiate this op.
  _execute.record_gradient(
      "BatchToSpaceND", _inputs_flat, _attrs, _result, name)
  _result, = _result
  return _result


def batch_to_space_nd_eager_fallback(input, block_shape, crops, name=None, ctx=None):
  r"""This is the slowpath function for Eager mode.
  This is for function batch_to_space_nd
  """
  _ctx = ctx if ctx else _context.context()
  _attr_T, (input,) = _execute.args_to_matching_eager([input], _ctx)
  # Index-typed inputs default to int32 when no dtype is supplied.
  _attr_Tblock_shape, (block_shape,) = _execute.args_to_matching_eager([block_shape], _ctx, _dtypes.int32)
  _attr_Tcrops, (crops,) = _execute.args_to_matching_eager([crops], _ctx, _dtypes.int32)
  _inputs_flat = [input, block_shape, crops]
  _attrs = ("T", _attr_T, "Tblock_shape", _attr_Tblock_shape, "Tcrops",
            _attr_Tcrops)
  _result = _execute.execute(b"BatchToSpaceND", 1, inputs=_inputs_flat,
                             attrs=_attrs, ctx=_ctx, name=name)
  _execute.record_gradient(
      "BatchToSpaceND", _inputs_flat, _attrs, _result, name)
  _result, = _result
  return _result


@_dispatch.add_dispatch_list
@tf_export('bitcast')
def bitcast(input, type, name=None):
  r"""Bitcasts a tensor from one type to another without copying data.

  Given a tensor `input`, this operation returns a tensor that has the same buffer
  data as `input` with datatype `type`.

  If the input datatype `T` is larger than the output datatype `type` then the
  shape changes from [...] to [..., sizeof(`T`)/sizeof(`type`)].

  If `T` is smaller than `type`, the operator requires that the rightmost
  dimension be equal to sizeof(`type`)/sizeof(`T`).
  The shape then goes from
  [..., sizeof(`type`)/sizeof(`T`)] to [...].

  *NOTE*: Bitcast is implemented as a low-level cast, so machines with different
  endian orderings will give different results.

  Args:
    input: A `Tensor`. Must be one of the following types: `bfloat16`, `half`, `float32`, `float64`, `int64`, `int32`, `uint8`, `uint16`, `uint32`, `uint64`, `int8`, `int16`, `complex64`, `complex128`, `qint8`, `quint8`, `qint16`, `quint16`, `qint32`.
    type: A `tf.DType` from: `tf.bfloat16, tf.half, tf.float32, tf.float64, tf.int64, tf.int32, tf.uint8, tf.uint16, tf.uint32, tf.uint64, tf.int8, tf.int16, tf.complex64, tf.complex128, tf.qint8, tf.quint8, tf.qint16, tf.quint16, tf.qint32`.
    name: A name for the operation (optional).

  Returns:
    A `Tensor` of type `type`.
  """
  _ctx = _context._context
  if _ctx is not None and _ctx._eager_context.is_eager:
    # Eager fast path: execute the kernel directly via the C API.
    try:
      _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
        _ctx._context_handle, _ctx._eager_context.device_name, "Bitcast",
        name, _ctx._post_execution_callbacks, input, "type", type)
      return _result
    except _core._FallbackException:
      try:
        return bitcast_eager_fallback(
            input, type=type, name=name, ctx=_ctx)
      except _core._SymbolicException:
        pass  # Add nodes to the TensorFlow graph.
      except (TypeError, ValueError):
        # Give registered dispatchers a chance before propagating the error.
        result = _dispatch.dispatch(
              bitcast, input=input, type=type, name=name)
        if result is not _dispatch.OpDispatcher.NOT_SUPPORTED:
          return result
        raise
    except _core._NotOkStatusException as e:
      if name is not None:
        message = e.message + " name: " + name
      else:
        message = e.message
      # Convert the C-level status into the matching Python exception.
      _six.raise_from(_core._status_to_exception(e.code, message), None)
  # Add nodes to the TensorFlow graph.
type = _execute.make_type(type, "type") try: _, _, _op = _op_def_lib._apply_op_helper( "Bitcast", input=input, type=type, name=name) except (TypeError, ValueError): result = _dispatch.dispatch( bitcast, input=input, type=type, name=name) if result is not _dispatch.OpDispatcher.NOT_SUPPORTED: return result raise _result = _op.outputs[:] _inputs_flat = _op.inputs _attrs = ("T", _op.get_attr("T"), "type", _op.get_attr("type")) _execute.record_gradient( "Bitcast", _inputs_flat, _attrs, _result, name) _result, = _result return _result def bitcast_eager_fallback(input, type, name=None, ctx=None): r"""This is the slowpath function for Eager mode. This is for function bitcast """ _ctx = ctx if ctx else _context.context() type = _execute.make_type(type, "type") _attr_T, (input,) = _execute.args_to_matching_eager([input], _ctx) _inputs_flat = [input] _attrs = ("T", _attr_T, "type", type) _result = _execute.execute(b"Bitcast", 1, inputs=_inputs_flat, attrs=_attrs, ctx=_ctx, name=name) _execute.record_gradient( "Bitcast", _inputs_flat, _attrs, _result, name) _result, = _result return _result def broadcast_args(s0, s1, name=None): r"""Return the shape of s0 op s1 with broadcast. Given `s0` and `s1`, tensors that represent shapes, compute `r0`, the broadcasted shape. `s0`, `s1` and `r0` are all integer vectors. Args: s0: A `Tensor`. Must be one of the following types: `int32`, `int64`. s1: A `Tensor`. Must have the same type as `s0`. name: A name for the operation (optional). Returns: A `Tensor`. Has the same type as `s0`. """ _ctx = _context._context if _ctx is not None and _ctx._eager_context.is_eager: try: _result = _pywrap_tensorflow.TFE_Py_FastPathExecute( _ctx._context_handle, _ctx._eager_context.device_name, "BroadcastArgs", name, _ctx._post_execution_callbacks, s0, s1) return _result except _core._FallbackException: try: return broadcast_args_eager_fallback( s0, s1, name=name, ctx=_ctx) except _core._SymbolicException: pass # Add nodes to the TensorFlow graph. 
    except _core._NotOkStatusException as e:
      # Surface a C-layer error as a Python exception, tagged with the op
      # name when available.
      if name is not None:
        message = e.message + " name: " + name
      else:
        message = e.message
      _six.raise_from(_core._status_to_exception(e.code, message), None)
  # Add nodes to the TensorFlow graph.
  _, _, _op = _op_def_lib._apply_op_helper(
        "BroadcastArgs", s0=s0, s1=s1, name=name)
  _result = _op.outputs[:]
  _inputs_flat = _op.inputs
  _attrs = ("T", _op.get_attr("T"))
  _execute.record_gradient(
      "BroadcastArgs", _inputs_flat, _attrs, _result, name)
  _result, = _result
  return _result


def broadcast_args_eager_fallback(s0, s1, name=None, ctx=None):
  r"""This is the slowpath function for Eager mode.

  Python slow path for BroadcastArgs, used when the C fast path raises
  _FallbackException.
  This is for function broadcast_args
  """
  _ctx = ctx if ctx else _context.context()
  _attr_T, _inputs_T = _execute.args_to_matching_eager([s0, s1], _ctx, _dtypes.int32)
  (s0, s1) = _inputs_T
  _inputs_flat = [s0, s1]
  _attrs = ("T", _attr_T)
  _result = _execute.execute(b"BroadcastArgs", 1, inputs=_inputs_flat,
                             attrs=_attrs, ctx=_ctx, name=name)
  _execute.record_gradient(
      "BroadcastArgs", _inputs_flat, _attrs, _result, name)
  _result, = _result
  return _result


# Structured (named-field) return type for broadcast_gradient_args: (r0, r1).
_broadcast_gradient_args_outputs = ["r0", "r1"]
_BroadcastGradientArgsOutput = _collections.namedtuple(
    "BroadcastGradientArgs", _broadcast_gradient_args_outputs)


def broadcast_gradient_args(s0, s1, name=None):
  r"""Return the reduction indices for computing gradients of s0 op s1 with broadcast.

  This is typically used by gradient computations for a broadcasting operation.

  Args:
    s0: A `Tensor`. Must be one of the following types: `int32`, `int64`.
    s1: A `Tensor`. Must have the same type as `s0`.
    name: A name for the operation (optional).

  Returns:
    A tuple of `Tensor` objects (r0, r1).

    r0: A `Tensor`. Has the same type as `s0`.
    r1: A `Tensor`. Has the same type as `s0`.
  """
  _ctx = _context._context
  if _ctx is not None and _ctx._eager_context.is_eager:
    try:
      _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
        _ctx._context_handle, _ctx._eager_context.device_name,
        "BroadcastGradientArgs", name, _ctx._post_execution_callbacks, s0,
        s1)
      _result = _BroadcastGradientArgsOutput._make(_result)
      return _result
    except _core._FallbackException:
      try:
        return broadcast_gradient_args_eager_fallback(
            s0, s1, name=name, ctx=_ctx)
      except _core._SymbolicException:
        pass  # Add nodes to the TensorFlow graph.
    except _core._NotOkStatusException as e:
      if name is not None:
        message = e.message + " name: " + name
      else:
        message = e.message
      _six.raise_from(_core._status_to_exception(e.code, message), None)
  # Add nodes to the TensorFlow graph.
  _, _, _op = _op_def_lib._apply_op_helper(
        "BroadcastGradientArgs", s0=s0, s1=s1, name=name)
  _result = _op.outputs[:]
  _inputs_flat = _op.inputs
  _attrs = ("T", _op.get_attr("T"))
  _execute.record_gradient(
      "BroadcastGradientArgs", _inputs_flat, _attrs, _result, name)
  # Wrap the two raw outputs into the named tuple (r0, r1).
  _result = _BroadcastGradientArgsOutput._make(_result)
  return _result


def broadcast_gradient_args_eager_fallback(s0, s1, name=None, ctx=None):
  r"""This is the slowpath function for Eager mode.

  Python slow path for BroadcastGradientArgs, used when the C fast path
  raises _FallbackException.
  This is for function broadcast_gradient_args
  """
  _ctx = ctx if ctx else _context.context()
  _attr_T, _inputs_T = _execute.args_to_matching_eager([s0, s1], _ctx, _dtypes.int32)
  (s0, s1) = _inputs_T
  _inputs_flat = [s0, s1]
  _attrs = ("T", _attr_T)
  _result = _execute.execute(b"BroadcastGradientArgs", 2,
                             inputs=_inputs_flat, attrs=_attrs, ctx=_ctx,
                             name=name)
  _execute.record_gradient(
      "BroadcastGradientArgs", _inputs_flat, _attrs, _result, name)
  _result = _BroadcastGradientArgsOutput._make(_result)
  return _result


@_dispatch.add_dispatch_list
@tf_export('broadcast_to')
def broadcast_to(input, shape, name=None):
  r"""Broadcast an array for a compatible shape.

  Broadcasting is the process of making arrays to have compatible shapes
  for arithmetic operations.
  Two shapes are compatible if for each dimension pair they are either equal
  or one of them is one. When trying to broadcast a Tensor to a shape, it
  starts with the trailing dimensions, and works its way forward.

  For example,
  ```
  >>> x = tf.constant([1, 2, 3])
  >>> y = tf.broadcast_to(x, [3, 3])
  >>> sess.run(y)
  array([[1, 2, 3],
         [1, 2, 3],
         [1, 2, 3]], dtype=int32)
  ```
  In the above example, the input Tensor with the shape of `[1, 3]`
  is broadcasted to output Tensor with shape of `[3, 3]`.

  Args:
    input: A `Tensor`. A Tensor to broadcast.
    shape: A `Tensor`. Must be one of the following types: `int32`, `int64`.
      An 1-D `int` Tensor. The shape of the desired output.
    name: A name for the operation (optional).

  Returns:
    A `Tensor`. Has the same type as `input`.
  """
  _ctx = _context._context
  if _ctx is not None and _ctx._eager_context.is_eager:
    # Eager: C fast path first, then Python slow path, then graph fallback.
    try:
      _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
        _ctx._context_handle, _ctx._eager_context.device_name, "BroadcastTo",
        name, _ctx._post_execution_callbacks, input, shape)
      return _result
    except _core._FallbackException:
      try:
        return broadcast_to_eager_fallback(
            input, shape, name=name, ctx=_ctx)
      except _core._SymbolicException:
        pass  # Add nodes to the TensorFlow graph.
    except (TypeError, ValueError):
      result = _dispatch.dispatch(
            broadcast_to, input=input, shape=shape, name=name)
      if result is not _dispatch.OpDispatcher.NOT_SUPPORTED:
        return result
      raise
    except _core._NotOkStatusException as e:
      if name is not None:
        message = e.message + " name: " + name
      else:
        message = e.message
      _six.raise_from(_core._status_to_exception(e.code, message), None)
  # Add nodes to the TensorFlow graph.
  try:
    _, _, _op = _op_def_lib._apply_op_helper(
        "BroadcastTo", input=input, shape=shape, name=name)
  except (TypeError, ValueError):
    result = _dispatch.dispatch(
          broadcast_to, input=input, shape=shape, name=name)
    if result is not _dispatch.OpDispatcher.NOT_SUPPORTED:
      return result
    raise
  _result = _op.outputs[:]
  _inputs_flat = _op.inputs
  _attrs = ("T", _op.get_attr("T"), "Tidx", _op.get_attr("Tidx"))
  _execute.record_gradient(
      "BroadcastTo", _inputs_flat, _attrs, _result, name)
  _result, = _result
  return _result


def broadcast_to_eager_fallback(input, shape, name=None, ctx=None):
  r"""This is the slowpath function for Eager mode.

  Python slow path for BroadcastTo, used when the C fast path raises
  _FallbackException.
  This is for function broadcast_to
  """
  _ctx = ctx if ctx else _context.context()
  _attr_T, (input,) = _execute.args_to_matching_eager([input], _ctx)
  _attr_Tidx, (shape,) = _execute.args_to_matching_eager([shape], _ctx, _dtypes.int32)
  _inputs_flat = [input, shape]
  _attrs = ("T", _attr_T, "Tidx", _attr_Tidx)
  _result = _execute.execute(b"BroadcastTo", 1, inputs=_inputs_flat,
                             attrs=_attrs, ctx=_ctx, name=name)
  _execute.record_gradient(
      "BroadcastTo", _inputs_flat, _attrs, _result, name)
  _result, = _result
  return _result


@_dispatch.add_dispatch_list
@tf_export('debugging.check_numerics', v1=['debugging.check_numerics', 'check_numerics'])
@deprecated_endpoints('check_numerics')
def check_numerics(tensor, message, name=None):
  r"""Checks a tensor for NaN and Inf values.

  When run, reports an `InvalidArgument` error if `tensor` has any values
  that are not a number (NaN) or infinity (Inf). Otherwise, passes `tensor` as-is.

  Args:
    tensor: A `Tensor`. Must be one of the following types: `bfloat16`, `half`, `float32`, `float64`.
    message: A `string`. Prefix of the error message.
    name: A name for the operation (optional).

  Returns:
    A `Tensor`. Has the same type as `tensor`.
""" _ctx = _context._context if _ctx is not None and _ctx._eager_context.is_eager: try: _result = _pywrap_tensorflow.TFE_Py_FastPathExecute( _ctx._context_handle, _ctx._eager_context.device_name, "CheckNumerics", name, _ctx._post_execution_callbacks, tensor, "message", message) return _result except _core._FallbackException: try: return check_numerics_eager_fallback( tensor, message=message, name=name, ctx=_ctx) except _core._SymbolicException: pass # Add nodes to the TensorFlow graph. except (TypeError, ValueError): result = _dispatch.dispatch( check_numerics, tensor=tensor, message=message, name=name) if result is not _dispatch.OpDispatcher.NOT_SUPPORTED: return result raise except _core._NotOkStatusException as e: if name is not None: message = e.message + " name: " + name else: message = e.message _six.raise_from(_core._status_to_exception(e.code, message), None) # Add nodes to the TensorFlow graph. message = _execute.make_str(message, "message") try: _, _, _op = _op_def_lib._apply_op_helper( "CheckNumerics", tensor=tensor, message=message, name=name) except (TypeError, ValueError): result = _dispatch.dispatch( check_numerics, tensor=tensor, message=message, name=name) if result is not _dispatch.OpDispatcher.NOT_SUPPORTED: return result raise _result = _op.outputs[:] _inputs_flat = _op.inputs _attrs = ("T", _op.get_attr("T"), "message", _op.get_attr("message")) _execute.record_gradient( "CheckNumerics", _inputs_flat, _attrs, _result, name) _result, = _result return _result def check_numerics_eager_fallback(tensor, message, name=None, ctx=None): r"""This is the slowpath function for Eager mode. 
This is for function check_numerics """ _ctx = ctx if ctx else _context.context() message = _execute.make_str(message, "message") _attr_T, (tensor,) = _execute.args_to_matching_eager([tensor], _ctx) _inputs_flat = [tensor] _attrs = ("T", _attr_T, "message", message) _result = _execute.execute(b"CheckNumerics", 1, inputs=_inputs_flat, attrs=_attrs, ctx=_ctx, name=name) _execute.record_gradient( "CheckNumerics", _inputs_flat, _attrs, _result, name) _result, = _result return _result def concat(concat_dim, values, name=None): r"""Concatenates tensors along one dimension. Args: concat_dim: A `Tensor` of type `int32`. 0-D. The dimension along which to concatenate. Must be in the range [0, rank(values)). values: A list of at least 2 `Tensor` objects with the same type. The `N` Tensors to concatenate. Their ranks and types must match, and their sizes must match in all dimensions except `concat_dim`. name: A name for the operation (optional). Returns: A `Tensor`. Has the same type as `values`. """ _ctx = _context._context if _ctx is not None and _ctx._eager_context.is_eager: try: _result = _pywrap_tensorflow.TFE_Py_FastPathExecute( _ctx._context_handle, _ctx._eager_context.device_name, "Concat", name, _ctx._post_execution_callbacks, concat_dim, values) return _result except _core._FallbackException: try: return concat_eager_fallback( concat_dim, values, name=name, ctx=_ctx) except _core._SymbolicException: pass # Add nodes to the TensorFlow graph. except _core._NotOkStatusException as e: if name is not None: message = e.message + " name: " + name else: message = e.message _six.raise_from(_core._status_to_exception(e.code, message), None) # Add nodes to the TensorFlow graph. if not isinstance(values, (list, tuple)): raise TypeError( "Expected list for 'values' argument to " "'concat' Op, not %r." 
        % values)
  _attr_N = len(values)
  _, _, _op = _op_def_lib._apply_op_helper(
        "Concat", concat_dim=concat_dim, values=values, name=name)
  _result = _op.outputs[:]
  _inputs_flat = _op.inputs
  _attrs = ("N", _op.get_attr("N"), "T", _op.get_attr("T"))
  _execute.record_gradient(
      "Concat", _inputs_flat, _attrs, _result, name)
  _result, = _result
  return _result


def concat_eager_fallback(concat_dim, values, name=None, ctx=None):
  r"""This is the slowpath function for Eager mode.

  Python slow path for Concat, used when the C fast path raises
  _FallbackException.
  This is for function concat
  """
  _ctx = ctx if ctx else _context.context()
  if not isinstance(values, (list, tuple)):
    raise TypeError(
        "Expected list for 'values' argument to "
        "'concat' Op, not %r." % values)
  _attr_N = len(values)
  _attr_T, values = _execute.args_to_matching_eager(list(values), _ctx)
  concat_dim = _ops.convert_to_tensor(concat_dim, _dtypes.int32)
  _inputs_flat = [concat_dim] + list(values)
  _attrs = ("N", _attr_N, "T", _attr_T)
  _result = _execute.execute(b"Concat", 1, inputs=_inputs_flat,
                             attrs=_attrs, ctx=_ctx, name=name)
  _execute.record_gradient(
      "Concat", _inputs_flat, _attrs, _result, name)
  _result, = _result
  return _result


def concat_offset(concat_dim, shape, name=None):
  r"""Computes offsets of concat inputs within its output.

  For example:

  ```
  # 'x' is [2, 2, 7]
  # 'y' is [2, 3, 7]
  # 'z' is [2, 5, 7]
  concat_offset(2, [x, y, z]) => [0, 0, 0], [0, 2, 0], [0, 5, 0]
  ```

  This is typically used by gradient computations for a concat operation.

  Args:
    concat_dim: A `Tensor` of type `int32`.
      The dimension along which to concatenate.
    shape: A list of at least 2 `Tensor` objects with type `int32`.
      The `N` int32 vectors representing shape of tensors being concatenated.
    name: A name for the operation (optional).

  Returns:
    A list with the same length as `shape` of `Tensor` objects with type `int32`.
  """
  _ctx = _context._context
  if _ctx is not None and _ctx._eager_context.is_eager:
    try:
      _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
        _ctx._context_handle, _ctx._eager_context.device_name,
        "ConcatOffset", name, _ctx._post_execution_callbacks, concat_dim,
        shape)
      return _result
    except _core._FallbackException:
      try:
        return concat_offset_eager_fallback(
            concat_dim, shape, name=name, ctx=_ctx)
      except _core._SymbolicException:
        pass  # Add nodes to the TensorFlow graph.
    except _core._NotOkStatusException as e:
      if name is not None:
        message = e.message + " name: " + name
      else:
        message = e.message
      _six.raise_from(_core._status_to_exception(e.code, message), None)
  # Add nodes to the TensorFlow graph.
  if not isinstance(shape, (list, tuple)):
    raise TypeError(
        "Expected list for 'shape' argument to "
        "'concat_offset' Op, not %r." % shape)
  _attr_N = len(shape)
  _, _, _op = _op_def_lib._apply_op_helper(
        "ConcatOffset", concat_dim=concat_dim, shape=shape, name=name)
  _result = _op.outputs[:]
  _inputs_flat = _op.inputs
  _attrs = ("N", _op.get_attr("N"))
  _execute.record_gradient(
      "ConcatOffset", _inputs_flat, _attrs, _result, name)
  # NOTE: multi-output op — the whole list of N offset vectors is returned
  # (no single-result unpacking here).
  return _result


def concat_offset_eager_fallback(concat_dim, shape, name=None, ctx=None):
  r"""This is the slowpath function for Eager mode.

  Python slow path for ConcatOffset, used when the C fast path raises
  _FallbackException.
  This is for function concat_offset
  """
  _ctx = ctx if ctx else _context.context()
  if not isinstance(shape, (list, tuple)):
    raise TypeError(
        "Expected list for 'shape' argument to "
        "'concat_offset' Op, not %r." % shape)
  _attr_N = len(shape)
  concat_dim = _ops.convert_to_tensor(concat_dim, _dtypes.int32)
  shape = _ops.convert_n_to_tensor(shape, _dtypes.int32)
  _inputs_flat = [concat_dim] + list(shape)
  _attrs = ("N", _attr_N)
  _result = _execute.execute(b"ConcatOffset", _attr_N, inputs=_inputs_flat,
                             attrs=_attrs, ctx=_ctx, name=name)
  _execute.record_gradient(
      "ConcatOffset", _inputs_flat, _attrs, _result, name)
  return _result


def concat_v2(values, axis, name=None):
  r"""Concatenates tensors along one dimension.
  Args:
    values: A list of at least 2 `Tensor` objects with the same type.
      List of `N` Tensors to concatenate. Their ranks and types must match,
      and their sizes must match in all dimensions except `concat_dim`.
    axis: A `Tensor`. Must be one of the following types: `int32`, `int64`.
      0-D. The dimension along which to concatenate. Must be in the
      range [-rank(values), rank(values)).
    name: A name for the operation (optional).

  Returns:
    A `Tensor`. Has the same type as `values`.
  """
  _ctx = _context._context
  if _ctx is not None and _ctx._eager_context.is_eager:
    # Eager: C fast path first, then Python slow path, then graph fallback.
    try:
      _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
        _ctx._context_handle, _ctx._eager_context.device_name, "ConcatV2",
        name, _ctx._post_execution_callbacks, values, axis)
      return _result
    except _core._FallbackException:
      try:
        return concat_v2_eager_fallback(
            values, axis, name=name, ctx=_ctx)
      except _core._SymbolicException:
        pass  # Add nodes to the TensorFlow graph.
    except _core._NotOkStatusException as e:
      if name is not None:
        message = e.message + " name: " + name
      else:
        message = e.message
      _six.raise_from(_core._status_to_exception(e.code, message), None)
  # Add nodes to the TensorFlow graph.
  if not isinstance(values, (list, tuple)):
    raise TypeError(
        "Expected list for 'values' argument to "
        "'concat_v2' Op, not %r."
        % values)
  _attr_N = len(values)
  _, _, _op = _op_def_lib._apply_op_helper(
        "ConcatV2", values=values, axis=axis, name=name)
  _result = _op.outputs[:]
  _inputs_flat = _op.inputs
  _attrs = ("N", _op.get_attr("N"), "T", _op.get_attr("T"), "Tidx",
            _op.get_attr("Tidx"))
  _execute.record_gradient(
      "ConcatV2", _inputs_flat, _attrs, _result, name)
  _result, = _result
  return _result


def concat_v2_eager_fallback(values, axis, name=None, ctx=None):
  r"""This is the slowpath function for Eager mode.

  Python slow path for ConcatV2, used when the C fast path raises
  _FallbackException.
  This is for function concat_v2
  """
  _ctx = ctx if ctx else _context.context()
  if not isinstance(values, (list, tuple)):
    raise TypeError(
        "Expected list for 'values' argument to "
        "'concat_v2' Op, not %r."
        % values)
  _attr_N = len(values)
  _attr_T, values = _execute.args_to_matching_eager(list(values), _ctx)
  _attr_Tidx, (axis,) = _execute.args_to_matching_eager([axis], _ctx, _dtypes.int32)
  _inputs_flat = list(values) + [axis]
  _attrs = ("N", _attr_N, "T", _attr_T, "Tidx", _attr_Tidx)
  _result = _execute.execute(b"ConcatV2", 1, inputs=_inputs_flat,
                             attrs=_attrs, ctx=_ctx, name=name)
  _execute.record_gradient(
      "ConcatV2", _inputs_flat, _attrs, _result, name)
  _result, = _result
  return _result


def conjugate_transpose(x, perm, name=None):
  r"""Shuffle dimensions of x according to a permutation and conjugate the result.

  The output `y` has the same rank as `x`. The shapes of `x` and `y` satisfy:
    `y.shape[i] == x.shape[perm[i]] for i in [0, 1, ..., rank(x) - 1]`
    `y[i,j,k,...,s,t,u] == conj(x[perm[i], perm[j], perm[k],...,perm[s], perm[t], perm[u]])`

  Args:
    x: A `Tensor`.
    perm: A `Tensor`. Must be one of the following types: `int32`, `int64`.
    name: A name for the operation (optional).

  Returns:
    A `Tensor`. Has the same type as `x`.
  """
  _ctx = _context._context
  if _ctx is not None and _ctx._eager_context.is_eager:
    try:
      _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
        _ctx._context_handle, _ctx._eager_context.device_name,
        "ConjugateTranspose", name, _ctx._post_execution_callbacks, x, perm)
      return _result
    except _core._FallbackException:
      try:
        return conjugate_transpose_eager_fallback(
            x, perm, name=name, ctx=_ctx)
      except _core._SymbolicException:
        pass  # Add nodes to the TensorFlow graph.
    except _core._NotOkStatusException as e:
      if name is not None:
        message = e.message + " name: " + name
      else:
        message = e.message
      _six.raise_from(_core._status_to_exception(e.code, message), None)
  # Add nodes to the TensorFlow graph.
  _, _, _op = _op_def_lib._apply_op_helper(
        "ConjugateTranspose", x=x, perm=perm, name=name)
  _result = _op.outputs[:]
  _inputs_flat = _op.inputs
  _attrs = ("T", _op.get_attr("T"), "Tperm", _op.get_attr("Tperm"))
  _execute.record_gradient(
      "ConjugateTranspose", _inputs_flat, _attrs, _result, name)
  _result, = _result
  return _result


def conjugate_transpose_eager_fallback(x, perm, name=None, ctx=None):
  r"""This is the slowpath function for Eager mode.

  Python slow path for ConjugateTranspose, used when the C fast path raises
  _FallbackException.
  This is for function conjugate_transpose
  """
  _ctx = ctx if ctx else _context.context()
  _attr_T, (x,) = _execute.args_to_matching_eager([x], _ctx)
  _attr_Tperm, (perm,) = _execute.args_to_matching_eager([perm], _ctx, _dtypes.int32)
  _inputs_flat = [x, perm]
  _attrs = ("T", _attr_T, "Tperm", _attr_Tperm)
  _result = _execute.execute(b"ConjugateTranspose", 1, inputs=_inputs_flat,
                             attrs=_attrs, ctx=_ctx, name=name)
  _execute.record_gradient(
      "ConjugateTranspose", _inputs_flat, _attrs, _result, name)
  _result, = _result
  return _result


def const(value, dtype, name=None):
  r"""Returns a constant tensor.

  Args:
    value: A `tf.TensorProto`. Attr `value` is the tensor to return.
    dtype: A `tf.DType`.
    name: A name for the operation (optional).

  Returns:
    A `Tensor` of type `dtype`.
  """
  _ctx = _context._context
  if _ctx is not None and _ctx._eager_context.is_eager:
    try:
      _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
        _ctx._context_handle, _ctx._eager_context.device_name, "Const",
        name, _ctx._post_execution_callbacks, "value", value, "dtype", dtype)
      return _result
    except _core._FallbackException:
      try:
        return const_eager_fallback(
            value=value, dtype=dtype, name=name, ctx=_ctx)
      except _core._SymbolicException:
        pass  # Add nodes to the TensorFlow graph.
    except _core._NotOkStatusException as e:
      if name is not None:
        message = e.message + " name: " + name
      else:
        message = e.message
      _six.raise_from(_core._status_to_exception(e.code, message), None)
  # Add nodes to the TensorFlow graph.
  value = _execute.make_tensor(value, "value")
  dtype = _execute.make_type(dtype, "dtype")
  _, _, _op = _op_def_lib._apply_op_helper(
        "Const", value=value, dtype=dtype, name=name)
  _result = _op.outputs[:]
  _inputs_flat = _op.inputs
  _attrs = ("value", _op.get_attr("value"), "dtype", _op.get_attr("dtype"))
  _execute.record_gradient(
      "Const", _inputs_flat, _attrs, _result, name)
  _result, = _result
  return _result


def const_eager_fallback(value, dtype, name=None, ctx=None):
  r"""This is the slowpath function for Eager mode.

  Python slow path for Const, used when the C fast path raises
  _FallbackException.
  This is for function const
  """
  _ctx = ctx if ctx else _context.context()
  value = _execute.make_tensor(value, "value")
  dtype = _execute.make_type(dtype, "dtype")
  # Const takes no tensor inputs — everything is carried in the attrs.
  _inputs_flat = []
  _attrs = ("value", value, "dtype", dtype)
  _result = _execute.execute(b"Const", 1, inputs=_inputs_flat, attrs=_attrs,
                             ctx=_ctx, name=name)
  _execute.record_gradient(
      "Const", _inputs_flat, _attrs, _result, name)
  _result, = _result
  return _result


def debug_gradient_identity(input, name=None):
  r"""Identity op for gradient debugging.

  This op is hidden from public in Python. It is used by TensorFlow Debugger to
  register gradient tensors for gradient debugging.
  This op operates on non-reference-type tensors.

  Args:
    input: A `Tensor`.
    name: A name for the operation (optional).

  Returns:
    A `Tensor`. Has the same type as `input`.
  """
  _ctx = _context._context
  if _ctx is not None and _ctx._eager_context.is_eager:
    try:
      _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
        _ctx._context_handle, _ctx._eager_context.device_name,
        "DebugGradientIdentity", name, _ctx._post_execution_callbacks, input)
      return _result
    except _core._FallbackException:
      try:
        return debug_gradient_identity_eager_fallback(
            input, name=name, ctx=_ctx)
      except _core._SymbolicException:
        pass  # Add nodes to the TensorFlow graph.
    except _core._NotOkStatusException as e:
      # Convert a C-layer status error into a Python exception, tagging the
      # op name for context when one was given.
      if name is not None:
        message = e.message + " name: " + name
      else:
        message = e.message
      _six.raise_from(_core._status_to_exception(e.code, message), None)
  # Add nodes to the TensorFlow graph.
  _, _, _op = _op_def_lib._apply_op_helper(
        "DebugGradientIdentity", input=input, name=name)
  _result = _op.outputs[:]
  _inputs_flat = _op.inputs
  _attrs = ("T", _op.get_attr("T"))
  _execute.record_gradient(
      "DebugGradientIdentity", _inputs_flat, _attrs, _result, name)
  _result, = _result
  return _result


def debug_gradient_identity_eager_fallback(input, name=None, ctx=None):
  r"""This is the slowpath function for Eager mode.

  Python slow path for DebugGradientIdentity, used when the C fast path
  raises _FallbackException.
  This is for function debug_gradient_identity
  """
  _ctx = ctx if ctx else _context.context()
  _attr_T, (input,) = _execute.args_to_matching_eager([input], _ctx)
  _inputs_flat = [input]
  _attrs = ("T", _attr_T)
  _result = _execute.execute(b"DebugGradientIdentity", 1,
                             inputs=_inputs_flat, attrs=_attrs, ctx=_ctx,
                             name=name)
  _execute.record_gradient(
      "DebugGradientIdentity", _inputs_flat, _attrs, _result, name)
  _result, = _result
  return _result


def debug_gradient_ref_identity(input, name=None):
  r"""Identity op for gradient debugging.

  This op is hidden from public in Python. It is used by TensorFlow Debugger to
  register gradient tensors for gradient debugging.
  This op operates on reference-type tensors.

  Args:
    input: A mutable `Tensor`.
    name: A name for the operation (optional).

  Returns:
    A mutable `Tensor`. Has the same type as `input`.
  """
  _ctx = _context._context
  if _ctx is not None and _ctx._eager_context.is_eager:
    # Ref-typed outputs have no eager representation, so this op is
    # graph-mode only.
    raise RuntimeError("debug_gradient_ref_identity op does not support eager execution. Arg 'output' is a ref.")
  # Add nodes to the TensorFlow graph.
  _, _, _op = _op_def_lib._apply_op_helper(
        "DebugGradientRefIdentity", input=input, name=name)
  _result = _op.outputs[:]
  _inputs_flat = _op.inputs
  _attrs = ("T", _op.get_attr("T"))
  _execute.record_gradient(
      "DebugGradientRefIdentity", _inputs_flat, _attrs, _result, name)
  _result, = _result
  return _result


def debug_gradient_ref_identity_eager_fallback(input, name=None, ctx=None):
  # Graph-mode-only op: the eager slow path always fails.
  raise RuntimeError("debug_gradient_ref_identity op does not support eager execution. Arg 'output' is a ref.")


def deep_copy(x, name=None):
  r"""Makes a copy of `x`.

  Args:
    x: A `Tensor`. The source tensor of type `T`.
    name: A name for the operation (optional).

  Returns:
    A `Tensor`. Has the same type as `x`.
  """
  _ctx = _context._context
  if _ctx is not None and _ctx._eager_context.is_eager:
    try:
      _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
        _ctx._context_handle, _ctx._eager_context.device_name, "DeepCopy",
        name, _ctx._post_execution_callbacks, x)
      return _result
    except _core._FallbackException:
      try:
        return deep_copy_eager_fallback(
            x, name=name, ctx=_ctx)
      except _core._SymbolicException:
        pass  # Add nodes to the TensorFlow graph.
    except _core._NotOkStatusException as e:
      if name is not None:
        message = e.message + " name: " + name
      else:
        message = e.message
      _six.raise_from(_core._status_to_exception(e.code, message), None)
  # Add nodes to the TensorFlow graph.
  _, _, _op = _op_def_lib._apply_op_helper(
        "DeepCopy", x=x, name=name)
  _result = _op.outputs[:]
  _inputs_flat = _op.inputs
  _attrs = ("T", _op.get_attr("T"))
  _execute.record_gradient(
      "DeepCopy", _inputs_flat, _attrs, _result, name)
  _result, = _result
  return _result


def deep_copy_eager_fallback(x, name=None, ctx=None):
  r"""This is the slowpath function for Eager mode.
  This is for function deep_copy
  """
  _ctx = ctx if ctx else _context.context()
  _attr_T, (x,) = _execute.args_to_matching_eager([x], _ctx)
  _inputs_flat = [x]
  _attrs = ("T", _attr_T)
  _result = _execute.execute(b"DeepCopy", 1, inputs=_inputs_flat,
                             attrs=_attrs, ctx=_ctx, name=name)
  _execute.record_gradient(
      "DeepCopy", _inputs_flat, _attrs, _result, name)
  _result, = _result
  return _result


def depth_to_space(input, block_size, data_format="NHWC", name=None):
  r"""DepthToSpace for tensors of type T.

  Rearranges data from depth into blocks of spatial data.
  This is the reverse transformation of SpaceToDepth.
  More specifically, this op outputs a copy of the input tensor where values from
  the `depth` dimension are moved in spatial blocks to the `height` and `width`
  dimensions.
  The attr `block_size` indicates the input block size and how the data is moved.

    * Chunks of data of size `block_size * block_size` from depth are rearranged
      into non-overlapping blocks of size `block_size x block_size`
    * The width the output tensor is `input_depth * block_size`, whereas the
      height is `input_height * block_size`.
    * The Y, X coordinates within each block of the output image are determined
      by the high order component of the input channel index.
    * The depth of the input tensor must be divisible by
      `block_size * block_size`.

  The `data_format` attr specifies the layout of the input and output tensors
  with the following options:
    "NHWC": `[ batch, height, width, channels ]`
    "NCHW": `[ batch, channels, height, width ]`
    "NCHW_VECT_C":
        `qint8 [ batch, channels / 4, height, width, 4 ]`

  It is useful to consider the operation as transforming a 6-D Tensor.
  e.g. for data_format = NHWC,
       Each element in the input tensor can be specified via 6 coordinates,
       ordered by decreasing memory layout significance as:
       n,iY,iX,bY,bX,oC  (where n=batch index, iX, iY means X or Y coordinates
                          within the input image, bX, bY means coordinates
                          within the output block, oC means output channels).
  The output would be the input transposed to the following layout:
  n,iY,bY,iX,bX,oC

  This operation is useful for resizing the activations between convolutions
  (but keeping all data), e.g. instead of pooling. It is also useful for training
  purely convolutional models.

  For example, given an input of shape `[1, 1, 1, 4]`, data_format = "NHWC" and
  block_size = 2:

  ```
  x = [[[[1, 2, 3, 4]]]]

  ```

  This operation will output a tensor of shape `[1, 2, 2, 1]`:

  ```
     [[[[1], [2]],
       [[3], [4]]]]
  ```

  Here, the input has a batch of 1 and each batch element has shape `[1, 1, 4]`,
  the corresponding output will have 2x2 elements and will have a depth of
  1 channel (1 = `4 / (block_size * block_size)`).
  The output element shape is `[2, 2, 1]`.

  For an input tensor with larger depth, here of shape `[1, 1, 1, 12]`, e.g.

  ```
  x = [[[[1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12]]]]
  ```

  This operation, for block size of 2, will return the following tensor of shape
  `[1, 2, 2, 3]`

  ```
     [[[[1, 2, 3], [4, 5, 6]],
       [[7, 8, 9], [10, 11, 12]]]]

  ```

  Similarly, for the following input of shape `[1 2 2 4]`, and a block size of 2:

  ```
  x =  [[[[1, 2, 3, 4],
         [5, 6, 7, 8]],
        [[9, 10, 11, 12],
         [13, 14, 15, 16]]]]
  ```

  the operator will return the following tensor of shape `[1 4 4 1]`:

  ```
  x = [[[ [1],   [2],  [5],  [6]],
        [ [3],   [4],  [7],  [8]],
        [ [9],  [10], [13],  [14]],
        [ [11], [12], [15],  [16]]]]

  ```

  Args:
    input: A `Tensor`.
    block_size: An `int` that is `>= 2`.
      The size of the spatial block, same as in Space2Depth.
    data_format: An optional `string` from: `"NHWC", "NCHW", "NCHW_VECT_C"`. Defaults to `"NHWC"`.
    name: A name for the operation (optional).

  Returns:
    A `Tensor`. Has the same type as `input`.
""" _ctx = _context._context if _ctx is not None and _ctx._eager_context.is_eager: try: _result = _pywrap_tensorflow.TFE_Py_FastPathExecute( _ctx._context_handle, _ctx._eager_context.device_name, "DepthToSpace", name, _ctx._post_execution_callbacks, input, "block_size", block_size, "data_format", data_format) return _result except _core._FallbackException: try: return depth_to_space_eager_fallback( input, block_size=block_size, data_format=data_format, name=name, ctx=_ctx) except _core._SymbolicException: pass # Add nodes to the TensorFlow graph. except _core._NotOkStatusException as e: if name is not None: message = e.message + " name: " + name else: message = e.message _six.raise_from(_core._status_to_exception(e.code, message), None) # Add nodes to the TensorFlow graph. block_size = _execute.make_int(block_size, "block_size") if data_format is None: data_format = "NHWC" data_format = _execute.make_str(data_format, "data_format") _, _, _op = _op_def_lib._apply_op_helper( "DepthToSpace", input=input, block_size=block_size, data_format=data_format, name=name) _result = _op.outputs[:] _inputs_flat = _op.inputs _attrs = ("T", _op.get_attr("T"), "block_size", _op.get_attr("block_size"), "data_format", _op.get_attr("data_format")) _execute.record_gradient( "DepthToSpace", _inputs_flat, _attrs, _result, name) _result, = _result return _result def depth_to_space_eager_fallback(input, block_size, data_format="NHWC", name=None, ctx=None): r"""This is the slowpath function for Eager mode. 
This is for function depth_to_space """ _ctx = ctx if ctx else _context.context() block_size = _execute.make_int(block_size, "block_size") if data_format is None: data_format = "NHWC" data_format = _execute.make_str(data_format, "data_format") _attr_T, (input,) = _execute.args_to_matching_eager([input], _ctx) _inputs_flat = [input] _attrs = ("T", _attr_T, "block_size", block_size, "data_format", data_format) _result = _execute.execute(b"DepthToSpace", 1, inputs=_inputs_flat, attrs=_attrs, ctx=_ctx, name=name) _execute.record_gradient( "DepthToSpace", _inputs_flat, _attrs, _result, name) _result, = _result return _result @_dispatch.add_dispatch_list @tf_export('quantization.dequantize', v1=['quantization.dequantize', 'dequantize']) @deprecated_endpoints('dequantize') def dequantize(input, min_range, max_range, mode="MIN_COMBINED", name=None): r"""Dequantize the 'input' tensor into a float Tensor. [min_range, max_range] are scalar floats that specify the range for the 'input' data. The 'mode' attribute controls exactly which calculations are used to convert the float values to their quantized equivalents. In 'MIN_COMBINED' mode, each value of the tensor will undergo the following: ``` if T == qint8: in[i] += (range(T) + 1)/ 2.0 out[i] = min_range + (in[i]* (max_range - min_range) / range(T)) ``` here `range(T) = numeric_limits<T>::max() - numeric_limits<T>::min()` *MIN_COMBINED Mode Example* If the input comes from a QuantizedRelu6, the output type is quint8 (range of 0-255) but the possible range of QuantizedRelu6 is 0-6. The min_range and max_range values are therefore 0.0 and 6.0. Dequantize on quint8 will take each value, cast to float, and multiply by 6 / 255. Note that if quantizedtype is qint8, the operation will additionally add each value by 128 prior to casting. 
If the mode is 'MIN_FIRST', then this approach is used: ```c++ num_discrete_values = 1 << (# of bits in T) range_adjust = num_discrete_values / (num_discrete_values - 1) range = (range_max - range_min) * range_adjust range_scale = range / num_discrete_values const double offset_input = static_cast<double>(input) - lowest_quantized; result = range_min + ((input - numeric_limits<T>::min()) * range_scale) ``` *SCALED mode Example* `SCALED` mode matches the quantization approach used in `QuantizeAndDequantize{V2|V3}`. If the mode is `SCALED`, we do not use the full range of the output type, choosing to elide the lowest possible value for symmetry (e.g., output range is -127 to 127, not -128 to 127 for signed 8 bit quantization), so that 0.0 maps to 0. We first find the range of values in our tensor. The range we use is always centered on 0, so we find m such that ```c++ m = max(abs(input_min), abs(input_max)) ``` Our input tensor range is then `[-m, m]`. Next, we choose our fixed-point quantization buckets, `[min_fixed, max_fixed]`. If T is signed, this is ``` num_bits = sizeof(T) * 8 [min_fixed, max_fixed] = [-(1 << (num_bits - 1) - 1), (1 << (num_bits - 1)) - 1] ``` Otherwise, if T is unsigned, the fixed-point range is ``` [min_fixed, max_fixed] = [0, (1 << num_bits) - 1] ``` From this we compute our scaling factor, s: ```c++ s = (2 * m) / (max_fixed - min_fixed) ``` Now we can dequantize the elements of our tensor: ```c++ result = input * s ``` Args: input: A `Tensor`. Must be one of the following types: `qint8`, `quint8`, `qint32`, `qint16`, `quint16`. min_range: A `Tensor` of type `float32`. The minimum scalar value possibly produced for the input. max_range: A `Tensor` of type `float32`. The maximum scalar value possibly produced for the input. mode: An optional `string` from: `"MIN_COMBINED", "MIN_FIRST", "SCALED"`. Defaults to `"MIN_COMBINED"`. name: A name for the operation (optional). Returns: A `Tensor` of type `float32`. 
""" _ctx = _context._context if _ctx is not None and _ctx._eager_context.is_eager: try: _result = _pywrap_tensorflow.TFE_Py_FastPathExecute( _ctx._context_handle, _ctx._eager_context.device_name, "Dequantize", name, _ctx._post_execution_callbacks, input, min_range, max_range, "mode", mode) return _result except _core._FallbackException: try: return dequantize_eager_fallback( input, min_range, max_range, mode=mode, name=name, ctx=_ctx) except _core._SymbolicException: pass # Add nodes to the TensorFlow graph. except (TypeError, ValueError): result = _dispatch.dispatch( dequantize, input=input, min_range=min_range, max_range=max_range, mode=mode, name=name) if result is not _dispatch.OpDispatcher.NOT_SUPPORTED: return result raise except _core._NotOkStatusException as e: if name is not None: message = e.message + " name: " + name else: message = e.message _six.raise_from(_core._status_to_exception(e.code, message), None) # Add nodes to the TensorFlow graph. if mode is None: mode = "MIN_COMBINED" mode = _execute.make_str(mode, "mode") try: _, _, _op = _op_def_lib._apply_op_helper( "Dequantize", input=input, min_range=min_range, max_range=max_range, mode=mode, name=name) except (TypeError, ValueError): result = _dispatch.dispatch( dequantize, input=input, min_range=min_range, max_range=max_range, mode=mode, name=name) if result is not _dispatch.OpDispatcher.NOT_SUPPORTED: return result raise _result = _op.outputs[:] _inputs_flat = _op.inputs _attrs = ("T", _op.get_attr("T"), "mode", _op.get_attr("mode")) _execute.record_gradient( "Dequantize", _inputs_flat, _attrs, _result, name) _result, = _result return _result def dequantize_eager_fallback(input, min_range, max_range, mode="MIN_COMBINED", name=None, ctx=None): r"""This is the slowpath function for Eager mode. 
This is for function dequantize """ _ctx = ctx if ctx else _context.context() if mode is None: mode = "MIN_COMBINED" mode = _execute.make_str(mode, "mode") _attr_T, (input,) = _execute.args_to_matching_eager([input], _ctx) min_range = _ops.convert_to_tensor(min_range, _dtypes.float32) max_range = _ops.convert_to_tensor(max_range, _dtypes.float32) _inputs_flat = [input, min_range, max_range] _attrs = ("T", _attr_T, "mode", mode) _result = _execute.execute(b"Dequantize", 1, inputs=_inputs_flat, attrs=_attrs, ctx=_ctx, name=name) _execute.record_gradient( "Dequantize", _inputs_flat, _attrs, _result, name) _result, = _result return _result @_dispatch.add_dispatch_list @tf_export('linalg.tensor_diag', v1=['linalg.tensor_diag', 'diag']) @deprecated_endpoints('diag') def diag(diagonal, name=None): r"""Returns a diagonal tensor with a given diagonal values. Given a `diagonal`, this operation returns a tensor with the `diagonal` and everything else padded with zeros. The diagonal is computed as follows: Assume `diagonal` has dimensions [D1,..., Dk], then the output is a tensor of rank 2k with dimensions [D1,..., Dk, D1,..., Dk] where: `output[i1,..., ik, i1,..., ik] = diagonal[i1, ..., ik]` and 0 everywhere else. For example: ``` # 'diagonal' is [1, 2, 3, 4] tf.diag(diagonal) ==> [[1, 0, 0, 0] [0, 2, 0, 0] [0, 0, 3, 0] [0, 0, 0, 4]] ``` Args: diagonal: A `Tensor`. Must be one of the following types: `bfloat16`, `half`, `float32`, `float64`, `int32`, `int64`, `complex64`, `complex128`. Rank k tensor where k is at most 1. name: A name for the operation (optional). Returns: A `Tensor`. Has the same type as `diagonal`. 
""" _ctx = _context._context if _ctx is not None and _ctx._eager_context.is_eager: try: _result = _pywrap_tensorflow.TFE_Py_FastPathExecute( _ctx._context_handle, _ctx._eager_context.device_name, "Diag", name, _ctx._post_execution_callbacks, diagonal) return _result except _core._FallbackException: try: return diag_eager_fallback( diagonal, name=name, ctx=_ctx) except _core._SymbolicException: pass # Add nodes to the TensorFlow graph. except (TypeError, ValueError): result = _dispatch.dispatch( diag, diagonal=diagonal, name=name) if result is not _dispatch.OpDispatcher.NOT_SUPPORTED: return result raise except _core._NotOkStatusException as e: if name is not None: message = e.message + " name: " + name else: message = e.message _six.raise_from(_core._status_to_exception(e.code, message), None) # Add nodes to the TensorFlow graph. try: _, _, _op = _op_def_lib._apply_op_helper( "Diag", diagonal=diagonal, name=name) except (TypeError, ValueError): result = _dispatch.dispatch( diag, diagonal=diagonal, name=name) if result is not _dispatch.OpDispatcher.NOT_SUPPORTED: return result raise _result = _op.outputs[:] _inputs_flat = _op.inputs _attrs = ("T", _op.get_attr("T")) _execute.record_gradient( "Diag", _inputs_flat, _attrs, _result, name) _result, = _result return _result def diag_eager_fallback(diagonal, name=None, ctx=None): r"""This is the slowpath function for Eager mode. 
  This is for function diag
  """
  # Use the caller-supplied eager context when given; otherwise the global one.
  _ctx = ctx if ctx else _context.context()
  _attr_T, (diagonal,) = _execute.args_to_matching_eager([diagonal], _ctx)
  _inputs_flat = [diagonal]
  _attrs = ("T", _attr_T)
  _result = _execute.execute(b"Diag", 1, inputs=_inputs_flat, attrs=_attrs,
                             ctx=_ctx, name=name)
  # Record the op so gradients can flow through it later.
  _execute.record_gradient(
      "Diag", _inputs_flat, _attrs, _result, name)
  _result, = _result
  return _result


@_dispatch.add_dispatch_list
@tf_export('linalg.tensor_diag_part', v1=['linalg.tensor_diag_part', 'diag_part'])
@deprecated_endpoints('diag_part')
def diag_part(input, name=None):
  r"""Returns the diagonal part of the tensor.

  This operation returns a tensor with the `diagonal` part
  of the `input`. The `diagonal` part is computed as follows:

  Assume `input` has dimensions `[D1,..., Dk, D1,..., Dk]`, then the output is a
  tensor of rank `k` with dimensions `[D1,..., Dk]` where:

  `diagonal[i1,..., ik] = input[i1, ..., ik, i1,..., ik]`.

  For example:

  ```
  # 'input' is [[1, 0, 0, 0]
                [0, 2, 0, 0]
                [0, 0, 3, 0]
                [0, 0, 0, 4]]

  tf.diag_part(input) ==> [1, 2, 3, 4]
  ```

  Args:
    input: A `Tensor`. Must be one of the following types: `bfloat16`, `half`, `float32`, `float64`, `int32`, `int64`, `complex64`, `complex128`.
      Rank k tensor where k is even and not zero.
    name: A name for the operation (optional).

  Returns:
    A `Tensor`. Has the same type as `input`.
  """
  _ctx = _context._context
  if _ctx is not None and _ctx._eager_context.is_eager:
    # Eager fast path: dispatch straight into the C extension.
    try:
      _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
        _ctx._context_handle, _ctx._eager_context.device_name, "DiagPart",
        name, _ctx._post_execution_callbacks, input)
      return _result
    except _core._FallbackException:
      # Fast path could not handle these inputs; retry via the slower
      # pure-Python eager implementation.
      try:
        return diag_part_eager_fallback(
            input, name=name, ctx=_ctx)
      except _core._SymbolicException:
        pass  # Add nodes to the TensorFlow graph.
except (TypeError, ValueError): result = _dispatch.dispatch( diag_part, input=input, name=name) if result is not _dispatch.OpDispatcher.NOT_SUPPORTED: return result raise except _core._NotOkStatusException as e: if name is not None: message = e.message + " name: " + name else: message = e.message _six.raise_from(_core._status_to_exception(e.code, message), None) # Add nodes to the TensorFlow graph. try: _, _, _op = _op_def_lib._apply_op_helper( "DiagPart", input=input, name=name) except (TypeError, ValueError): result = _dispatch.dispatch( diag_part, input=input, name=name) if result is not _dispatch.OpDispatcher.NOT_SUPPORTED: return result raise _result = _op.outputs[:] _inputs_flat = _op.inputs _attrs = ("T", _op.get_attr("T")) _execute.record_gradient( "DiagPart", _inputs_flat, _attrs, _result, name) _result, = _result return _result def diag_part_eager_fallback(input, name=None, ctx=None): r"""This is the slowpath function for Eager mode. This is for function diag_part """ _ctx = ctx if ctx else _context.context() _attr_T, (input,) = _execute.args_to_matching_eager([input], _ctx) _inputs_flat = [input] _attrs = ("T", _attr_T) _result = _execute.execute(b"DiagPart", 1, inputs=_inputs_flat, attrs=_attrs, ctx=_ctx, name=name) _execute.record_gradient( "DiagPart", _inputs_flat, _attrs, _result, name) _result, = _result return _result def edit_distance(hypothesis_indices, hypothesis_values, hypothesis_shape, truth_indices, truth_values, truth_shape, normalize=True, name=None): r"""Computes the (possibly normalized) Levenshtein Edit Distance. The inputs are variable-length sequences provided by SparseTensors (hypothesis_indices, hypothesis_values, hypothesis_shape) and (truth_indices, truth_values, truth_shape). The inputs are: Args: hypothesis_indices: A `Tensor` of type `int64`. The indices of the hypothesis list SparseTensor. This is an N x R int64 matrix. hypothesis_values: A `Tensor`. The values of the hypothesis list SparseTensor. This is an N-length vector. 
hypothesis_shape: A `Tensor` of type `int64`. The shape of the hypothesis list SparseTensor. This is an R-length vector. truth_indices: A `Tensor` of type `int64`. The indices of the truth list SparseTensor. This is an M x R int64 matrix. truth_values: A `Tensor`. Must have the same type as `hypothesis_values`. The values of the truth list SparseTensor. This is an M-length vector. truth_shape: A `Tensor` of type `int64`. truth indices, vector. normalize: An optional `bool`. Defaults to `True`. boolean (if true, edit distances are normalized by length of truth). The output is: name: A name for the operation (optional). Returns: A `Tensor` of type `float32`. """ _ctx = _context._context if _ctx is not None and _ctx._eager_context.is_eager: try: _result = _pywrap_tensorflow.TFE_Py_FastPathExecute( _ctx._context_handle, _ctx._eager_context.device_name, "EditDistance", name, _ctx._post_execution_callbacks, hypothesis_indices, hypothesis_values, hypothesis_shape, truth_indices, truth_values, truth_shape, "normalize", normalize) return _result except _core._FallbackException: try: return edit_distance_eager_fallback( hypothesis_indices, hypothesis_values, hypothesis_shape, truth_indices, truth_values, truth_shape, normalize=normalize, name=name, ctx=_ctx) except _core._SymbolicException: pass # Add nodes to the TensorFlow graph. except _core._NotOkStatusException as e: if name is not None: message = e.message + " name: " + name else: message = e.message _six.raise_from(_core._status_to_exception(e.code, message), None) # Add nodes to the TensorFlow graph. 
  # Graph-mode path: validate attrs, then build an EditDistance node.
  if normalize is None:
    normalize = True
  normalize = _execute.make_bool(normalize, "normalize")
  _, _, _op = _op_def_lib._apply_op_helper(
        "EditDistance", hypothesis_indices=hypothesis_indices,
                        hypothesis_values=hypothesis_values,
                        hypothesis_shape=hypothesis_shape,
                        truth_indices=truth_indices,
                        truth_values=truth_values, truth_shape=truth_shape,
                        normalize=normalize, name=name)
  _result = _op.outputs[:]
  _inputs_flat = _op.inputs
  _attrs = ("normalize", _op.get_attr("normalize"), "T", _op.get_attr("T"))
  _execute.record_gradient(
      "EditDistance", _inputs_flat, _attrs, _result, name)
  _result, = _result
  return _result


def edit_distance_eager_fallback(hypothesis_indices, hypothesis_values,
                                 hypothesis_shape, truth_indices,
                                 truth_values, truth_shape, normalize=True,
                                 name=None, ctx=None):
  r"""This is the slowpath function for Eager mode.
  This is for function edit_distance
  """
  _ctx = ctx if ctx else _context.context()
  if normalize is None:
    normalize = True
  normalize = _execute.make_bool(normalize, "normalize")
  # The two value tensors must agree on dtype T; the index/shape tensors are
  # always int64 per the op definition.
  _attr_T, _inputs_T = _execute.args_to_matching_eager([hypothesis_values, truth_values], _ctx)
  (hypothesis_values, truth_values) = _inputs_T
  hypothesis_indices = _ops.convert_to_tensor(hypothesis_indices, _dtypes.int64)
  hypothesis_shape = _ops.convert_to_tensor(hypothesis_shape, _dtypes.int64)
  truth_indices = _ops.convert_to_tensor(truth_indices, _dtypes.int64)
  truth_shape = _ops.convert_to_tensor(truth_shape, _dtypes.int64)
  _inputs_flat = [hypothesis_indices, hypothesis_values, hypothesis_shape, truth_indices, truth_values, truth_shape]
  _attrs = ("normalize", normalize, "T", _attr_T)
  _result = _execute.execute(b"EditDistance", 1, inputs=_inputs_flat,
                             attrs=_attrs, ctx=_ctx, name=name)
  _execute.record_gradient(
      "EditDistance", _inputs_flat, _attrs, _result, name)
  _result, = _result
  return _result


def empty(shape, dtype, init=False, name=None):
  r"""Creates a tensor with the given shape.

  This operation creates a tensor of `shape` and `dtype`.
Args: shape: A `Tensor` of type `int32`. 1-D. Represents the shape of the output tensor. dtype: A `tf.DType`. init: An optional `bool`. Defaults to `False`. If True, initialize the returned tensor with the default value of dtype. Otherwise, the implementation is free not to initializethe tensor's content. name: A name for the operation (optional). Returns: A `Tensor` of type `dtype`. """ _ctx = _context._context if _ctx is not None and _ctx._eager_context.is_eager: try: _result = _pywrap_tensorflow.TFE_Py_FastPathExecute( _ctx._context_handle, _ctx._eager_context.device_name, "Empty", name, _ctx._post_execution_callbacks, shape, "dtype", dtype, "init", init) return _result except _core._FallbackException: try: return empty_eager_fallback( shape, dtype=dtype, init=init, name=name, ctx=_ctx) except _core._SymbolicException: pass # Add nodes to the TensorFlow graph. except _core._NotOkStatusException as e: if name is not None: message = e.message + " name: " + name else: message = e.message _six.raise_from(_core._status_to_exception(e.code, message), None) # Add nodes to the TensorFlow graph. dtype = _execute.make_type(dtype, "dtype") if init is None: init = False init = _execute.make_bool(init, "init") _, _, _op = _op_def_lib._apply_op_helper( "Empty", shape=shape, dtype=dtype, init=init, name=name) _result = _op.outputs[:] _inputs_flat = _op.inputs _attrs = ("dtype", _op.get_attr("dtype"), "init", _op.get_attr("init")) _execute.record_gradient( "Empty", _inputs_flat, _attrs, _result, name) _result, = _result return _result def empty_eager_fallback(shape, dtype, init=False, name=None, ctx=None): r"""This is the slowpath function for Eager mode. 
  This is for function empty
  """
  _ctx = ctx if ctx else _context.context()
  dtype = _execute.make_type(dtype, "dtype")
  if init is None:
    init = False
  init = _execute.make_bool(init, "init")
  # The shape input is always an int32 1-D tensor per the op definition.
  shape = _ops.convert_to_tensor(shape, _dtypes.int32)
  _inputs_flat = [shape]
  _attrs = ("dtype", dtype, "init", init)
  _result = _execute.execute(b"Empty", 1, inputs=_inputs_flat, attrs=_attrs,
                             ctx=_ctx, name=name)
  _execute.record_gradient(
      "Empty", _inputs_flat, _attrs, _result, name)
  _result, = _result
  return _result


def ensure_shape(input, shape, name=None):
  r"""Ensures that the tensor's shape matches the expected shape.

  Raises an error if the input tensor's shape does not match the specified shape.

  Returns the input tensor otherwise.

  Args:
    input: A `Tensor`. A tensor, whose shape is to be validated.
    shape: A `tf.TensorShape` or list of `ints`.
      The expected (possibly partially specified) shape of the input tensor.
    name: A name for the operation (optional).

  Returns:
    A `Tensor`. Has the same type as `input`.
  """
  _ctx = _context._context
  if _ctx is not None and _ctx._eager_context.is_eager:
    # Eager fast path: dispatch straight into the C extension.
    try:
      _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
        _ctx._context_handle, _ctx._eager_context.device_name, "EnsureShape",
        name, _ctx._post_execution_callbacks, input, "shape", shape)
      return _result
    except _core._FallbackException:
      # Fast path could not handle these inputs; retry via the slower
      # pure-Python eager implementation.
      try:
        return ensure_shape_eager_fallback(
            input, shape=shape, name=name, ctx=_ctx)
      except _core._SymbolicException:
        pass  # Add nodes to the TensorFlow graph.
    except _core._NotOkStatusException as e:
      # Surface the C-level error, appending the op name for context.
      if name is not None:
        message = e.message + " name: " + name
      else:
        message = e.message
      _six.raise_from(_core._status_to_exception(e.code, message), None)
  # Add nodes to the TensorFlow graph.
shape = _execute.make_shape(shape, "shape") _, _, _op = _op_def_lib._apply_op_helper( "EnsureShape", input=input, shape=shape, name=name) _result = _op.outputs[:] _inputs_flat = _op.inputs _attrs = ("shape", _op.get_attr("shape"), "T", _op.get_attr("T")) _execute.record_gradient( "EnsureShape", _inputs_flat, _attrs, _result, name) _result, = _result return _result def ensure_shape_eager_fallback(input, shape, name=None, ctx=None): r"""This is the slowpath function for Eager mode. This is for function ensure_shape """ _ctx = ctx if ctx else _context.context() shape = _execute.make_shape(shape, "shape") _attr_T, (input,) = _execute.args_to_matching_eager([input], _ctx) _inputs_flat = [input] _attrs = ("shape", shape, "T", _attr_T) _result = _execute.execute(b"EnsureShape", 1, inputs=_inputs_flat, attrs=_attrs, ctx=_ctx, name=name) _execute.record_gradient( "EnsureShape", _inputs_flat, _attrs, _result, name) _result, = _result return _result def expand_dims(input, axis, name=None): r"""Inserts a dimension of 1 into a tensor's shape. Given a tensor `input`, this operation inserts a dimension of 1 at the dimension index `axis` of `input`'s shape. The dimension index `axis` starts at zero; if you specify a negative number for `axis` it is counted backward from the end. This operation is useful if you want to add a batch dimension to a single element. For example, if you have a single image of shape `[height, width, channels]`, you can make it a batch of 1 image with `expand_dims(image, 0)`, which will make the shape `[1, height, width, channels]`. 
Other examples: ``` # 't' is a tensor of shape [2] shape(expand_dims(t, 0)) ==> [1, 2] shape(expand_dims(t, 1)) ==> [2, 1] shape(expand_dims(t, -1)) ==> [2, 1] # 't2' is a tensor of shape [2, 3, 5] shape(expand_dims(t2, 0)) ==> [1, 2, 3, 5] shape(expand_dims(t2, 2)) ==> [2, 3, 1, 5] shape(expand_dims(t2, 3)) ==> [2, 3, 5, 1] ``` This operation requires that: `-1-input.dims() <= dim <= input.dims()` This operation is related to `squeeze()`, which removes dimensions of size 1. Args: input: A `Tensor`. axis: A `Tensor`. Must be one of the following types: `int32`, `int64`. 0-D (scalar). Specifies the dimension index at which to expand the shape of `input`. Must be in the range `[-rank(input) - 1, rank(input)]`. name: A name for the operation (optional). Returns: A `Tensor`. Has the same type as `input`. """ _ctx = _context._context if _ctx is not None and _ctx._eager_context.is_eager: try: _result = _pywrap_tensorflow.TFE_Py_FastPathExecute( _ctx._context_handle, _ctx._eager_context.device_name, "ExpandDims", name, _ctx._post_execution_callbacks, input, axis) return _result except _core._FallbackException: try: return expand_dims_eager_fallback( input, axis, name=name, ctx=_ctx) except _core._SymbolicException: pass # Add nodes to the TensorFlow graph. except _core._NotOkStatusException as e: if name is not None: message = e.message + " name: " + name else: message = e.message _six.raise_from(_core._status_to_exception(e.code, message), None) # Add nodes to the TensorFlow graph. _, _, _op = _op_def_lib._apply_op_helper( "ExpandDims", input=input, dim=axis, name=name) _result = _op.outputs[:] _inputs_flat = _op.inputs _attrs = ("T", _op.get_attr("T"), "Tdim", _op.get_attr("Tdim")) _execute.record_gradient( "ExpandDims", _inputs_flat, _attrs, _result, name) _result, = _result return _result def expand_dims_eager_fallback(input, axis, name=None, ctx=None): r"""This is the slowpath function for Eager mode. 
  This is for function expand_dims
  """
  _ctx = ctx if ctx else _context.context()
  _attr_T, (input,) = _execute.args_to_matching_eager([input], _ctx)
  # The axis argument defaults to int32 when its dtype cannot be inferred.
  _attr_Tdim, (axis,) = _execute.args_to_matching_eager([axis], _ctx, _dtypes.int32)
  _inputs_flat = [input, axis]
  _attrs = ("T", _attr_T, "Tdim", _attr_Tdim)
  _result = _execute.execute(b"ExpandDims", 1, inputs=_inputs_flat,
                             attrs=_attrs, ctx=_ctx, name=name)
  _execute.record_gradient(
      "ExpandDims", _inputs_flat, _attrs, _result, name)
  _result, = _result
  return _result


def extract_image_patches(images, ksizes, strides, rates, padding, name=None):
  r"""Extract `patches` from `images` and put them in the "depth" output dimension.

  Args:
    images: A `Tensor`. Must be one of the following types: `float32`, `float64`, `int32`, `uint8`, `int16`, `int8`, `int64`, `bfloat16`, `uint16`, `half`, `uint32`, `uint64`.
      4-D Tensor with shape `[batch, in_rows, in_cols, depth]`.
    ksizes: A list of `ints` that has length `>= 4`.
      The size of the sliding window for each dimension of `images`.
    strides: A list of `ints` that has length `>= 4`.
      1-D of length 4. How far the centers of two consecutive patches are in
      the images. Must be: `[1, stride_rows, stride_cols, 1]`.
    rates: A list of `ints` that has length `>= 4`.
      1-D of length 4. Must be: `[1, rate_rows, rate_cols, 1]`. This is the
      input stride, specifying how far two consecutive patch samples are in the
      input. Equivalent to extracting patches with
      `patch_sizes_eff = patch_sizes + (patch_sizes - 1) * (rates - 1)`,
      followed by subsampling them spatially by a factor of `rates`. This is
      equivalent to `rate` in dilated (a.k.a. Atrous) convolutions.
    padding: A `string` from: `"SAME", "VALID"`.
      The type of padding algorithm to use.

      We specify the size-related attributes as:

      ```python
            ksizes = [1, ksize_rows, ksize_cols, 1]
            strides = [1, strides_rows, strides_cols, 1]
            rates = [1, rates_rows, rates_cols, 1]
      ```
    name: A name for the operation (optional).

  Returns:
    A `Tensor`. Has the same type as `images`.
""" _ctx = _context._context if _ctx is not None and _ctx._eager_context.is_eager: try: _result = _pywrap_tensorflow.TFE_Py_FastPathExecute( _ctx._context_handle, _ctx._eager_context.device_name, "ExtractImagePatches", name, _ctx._post_execution_callbacks, images, "ksizes", ksizes, "strides", strides, "rates", rates, "padding", padding) return _result except _core._FallbackException: try: return extract_image_patches_eager_fallback( images, ksizes=ksizes, strides=strides, rates=rates, padding=padding, name=name, ctx=_ctx) except _core._SymbolicException: pass # Add nodes to the TensorFlow graph. except _core._NotOkStatusException as e: if name is not None: message = e.message + " name: " + name else: message = e.message _six.raise_from(_core._status_to_exception(e.code, message), None) # Add nodes to the TensorFlow graph. if not isinstance(ksizes, (list, tuple)): raise TypeError( "Expected list for 'ksizes' argument to " "'extract_image_patches' Op, not %r." % ksizes) ksizes = [_execute.make_int(_i, "ksizes") for _i in ksizes] if not isinstance(strides, (list, tuple)): raise TypeError( "Expected list for 'strides' argument to " "'extract_image_patches' Op, not %r." % strides) strides = [_execute.make_int(_i, "strides") for _i in strides] if not isinstance(rates, (list, tuple)): raise TypeError( "Expected list for 'rates' argument to " "'extract_image_patches' Op, not %r." 
% rates) rates = [_execute.make_int(_i, "rates") for _i in rates] padding = _execute.make_str(padding, "padding") _, _, _op = _op_def_lib._apply_op_helper( "ExtractImagePatches", images=images, ksizes=ksizes, strides=strides, rates=rates, padding=padding, name=name) _result = _op.outputs[:] _inputs_flat = _op.inputs _attrs = ("ksizes", _op.get_attr("ksizes"), "strides", _op.get_attr("strides"), "rates", _op.get_attr("rates"), "T", _op.get_attr("T"), "padding", _op.get_attr("padding")) _execute.record_gradient( "ExtractImagePatches", _inputs_flat, _attrs, _result, name) _result, = _result return _result def extract_image_patches_eager_fallback(images, ksizes, strides, rates, padding, name=None, ctx=None): r"""This is the slowpath function for Eager mode. This is for function extract_image_patches """ _ctx = ctx if ctx else _context.context() if not isinstance(ksizes, (list, tuple)): raise TypeError( "Expected list for 'ksizes' argument to " "'extract_image_patches' Op, not %r." % ksizes) ksizes = [_execute.make_int(_i, "ksizes") for _i in ksizes] if not isinstance(strides, (list, tuple)): raise TypeError( "Expected list for 'strides' argument to " "'extract_image_patches' Op, not %r." % strides) strides = [_execute.make_int(_i, "strides") for _i in strides] if not isinstance(rates, (list, tuple)): raise TypeError( "Expected list for 'rates' argument to " "'extract_image_patches' Op, not %r." 
% rates) rates = [_execute.make_int(_i, "rates") for _i in rates] padding = _execute.make_str(padding, "padding") _attr_T, (images,) = _execute.args_to_matching_eager([images], _ctx) _inputs_flat = [images] _attrs = ("ksizes", ksizes, "strides", strides, "rates", rates, "T", _attr_T, "padding", padding) _result = _execute.execute(b"ExtractImagePatches", 1, inputs=_inputs_flat, attrs=_attrs, ctx=_ctx, name=name) _execute.record_gradient( "ExtractImagePatches", _inputs_flat, _attrs, _result, name) _result, = _result return _result @_dispatch.add_dispatch_list @tf_export('extract_volume_patches') def extract_volume_patches(input, ksizes, strides, padding, name=None): r"""Extract `patches` from `input` and put them in the "depth" output dimension. 3D extension of `extract_image_patches`. Args: input: A `Tensor`. Must be one of the following types: `float32`, `float64`, `int32`, `uint8`, `int16`, `int8`, `int64`, `bfloat16`, `uint16`, `half`, `uint32`, `uint64`. 5-D Tensor with shape `[batch, in_planes, in_rows, in_cols, depth]`. ksizes: A list of `ints` that has length `>= 5`. The size of the sliding window for each dimension of `input`. strides: A list of `ints` that has length `>= 5`. 1-D of length 5. How far the centers of two consecutive patches are in `input`. Must be: `[1, stride_planes, stride_rows, stride_cols, 1]`. padding: A `string` from: `"SAME", "VALID"`. The type of padding algorithm to use. We specify the size-related attributes as: ```python ksizes = [1, ksize_planes, ksize_rows, ksize_cols, 1] strides = [1, stride_planes, strides_rows, strides_cols, 1] ``` name: A name for the operation (optional). Returns: A `Tensor`. Has the same type as `input`. 
""" _ctx = _context._context if _ctx is not None and _ctx._eager_context.is_eager: try: _result = _pywrap_tensorflow.TFE_Py_FastPathExecute( _ctx._context_handle, _ctx._eager_context.device_name, "ExtractVolumePatches", name, _ctx._post_execution_callbacks, input, "ksizes", ksizes, "strides", strides, "padding", padding) return _result except _core._FallbackException: try: return extract_volume_patches_eager_fallback( input, ksizes=ksizes, strides=strides, padding=padding, name=name, ctx=_ctx) except _core._SymbolicException: pass # Add nodes to the TensorFlow graph. except (TypeError, ValueError): result = _dispatch.dispatch( extract_volume_patches, input=input, ksizes=ksizes, strides=strides, padding=padding, name=name) if result is not _dispatch.OpDispatcher.NOT_SUPPORTED: return result raise except _core._NotOkStatusException as e: if name is not None: message = e.message + " name: " + name else: message = e.message _six.raise_from(_core._status_to_exception(e.code, message), None) # Add nodes to the TensorFlow graph. if not isinstance(ksizes, (list, tuple)): raise TypeError( "Expected list for 'ksizes' argument to " "'extract_volume_patches' Op, not %r." % ksizes) ksizes = [_execute.make_int(_i, "ksizes") for _i in ksizes] if not isinstance(strides, (list, tuple)): raise TypeError( "Expected list for 'strides' argument to " "'extract_volume_patches' Op, not %r." 
        % strides)  # continuation of extract_volume_patches (def above this chunk)
  strides = [_execute.make_int(_i, "strides") for _i in strides]
  padding = _execute.make_str(padding, "padding")
  try:
    _, _, _op = _op_def_lib._apply_op_helper(
        "ExtractVolumePatches", input=input, ksizes=ksizes, strides=strides,
                                padding=padding, name=name)
  except (TypeError, ValueError):
    # Graph construction failed; let registered dispatchers handle the call.
    result = _dispatch.dispatch(
          extract_volume_patches, input=input, ksizes=ksizes,
                                  strides=strides, padding=padding, name=name)
    if result is not _dispatch.OpDispatcher.NOT_SUPPORTED:
      return result
    raise
  _result = _op.outputs[:]
  _inputs_flat = _op.inputs
  _attrs = ("ksizes", _op.get_attr("ksizes"), "strides",
            _op.get_attr("strides"), "T", _op.get_attr("T"), "padding",
            _op.get_attr("padding"))
  _execute.record_gradient(
      "ExtractVolumePatches", _inputs_flat, _attrs, _result, name)
  _result, = _result
  return _result


def extract_volume_patches_eager_fallback(input, ksizes, strides, padding, name=None, ctx=None):
  r"""This is the slowpath function for Eager mode.
  This is for function extract_volume_patches
  """
  _ctx = ctx if ctx else _context.context()
  # Validate/canonicalize attrs exactly as the graph path does.
  if not isinstance(ksizes, (list, tuple)):
    raise TypeError(
        "Expected list for 'ksizes' argument to "
        "'extract_volume_patches' Op, not %r." % ksizes)
  ksizes = [_execute.make_int(_i, "ksizes") for _i in ksizes]
  if not isinstance(strides, (list, tuple)):
    raise TypeError(
        "Expected list for 'strides' argument to "
        "'extract_volume_patches' Op, not %r."
        % strides)
  strides = [_execute.make_int(_i, "strides") for _i in strides]
  padding = _execute.make_str(padding, "padding")
  _attr_T, (input,) = _execute.args_to_matching_eager([input], _ctx)
  _inputs_flat = [input]
  _attrs = ("ksizes", ksizes, "strides", strides, "T", _attr_T, "padding",
  padding)
  # Single output ("1"); executed eagerly through the generic execute path.
  _result = _execute.execute(b"ExtractVolumePatches", 1,
                             inputs=_inputs_flat, attrs=_attrs, ctx=_ctx,
                             name=name)
  _execute.record_gradient(
      "ExtractVolumePatches", _inputs_flat, _attrs, _result, name)
  _result, = _result
  return _result


@_dispatch.add_dispatch_list
@tf_export('quantization.fake_quant_with_min_max_args', v1=['quantization.fake_quant_with_min_max_args', 'fake_quant_with_min_max_args'])
@deprecated_endpoints('fake_quant_with_min_max_args')
def fake_quant_with_min_max_args(inputs, min=-6, max=6, num_bits=8, narrow_range=False, name=None):
  r"""Fake-quantize the 'inputs' tensor, type float to 'outputs' tensor of same type.

  Attributes `[min; max]` define the clamping range for the `inputs` data.
  `inputs` values are quantized into the quantization range
  (`[0; 2^num_bits - 1]` when `narrow_range` is false and
  `[1; 2^num_bits - 1]` when it is true) and then de-quantized and output as
  floats in `[min; max]` interval.
  `num_bits` is the bitwidth of the quantization; between 2 and 16, inclusive.

  Quantization is called fake since the output is still in floating point.

  Args:
    inputs: A `Tensor` of type `float32`.
    min: An optional `float`. Defaults to `-6`.
    max: An optional `float`. Defaults to `6`.
    num_bits: An optional `int`. Defaults to `8`.
    narrow_range: An optional `bool`. Defaults to `False`.
    name: A name for the operation (optional).

  Returns:
    A `Tensor` of type `float32`.
""" _ctx = _context._context if _ctx is not None and _ctx._eager_context.is_eager: try: _result = _pywrap_tensorflow.TFE_Py_FastPathExecute( _ctx._context_handle, _ctx._eager_context.device_name, "FakeQuantWithMinMaxArgs", name, _ctx._post_execution_callbacks, inputs, "min", min, "max", max, "num_bits", num_bits, "narrow_range", narrow_range) return _result except _core._FallbackException: try: return fake_quant_with_min_max_args_eager_fallback( inputs, min=min, max=max, num_bits=num_bits, narrow_range=narrow_range, name=name, ctx=_ctx) except _core._SymbolicException: pass # Add nodes to the TensorFlow graph. except (TypeError, ValueError): result = _dispatch.dispatch( fake_quant_with_min_max_args, inputs=inputs, min=min, max=max, num_bits=num_bits, narrow_range=narrow_range, name=name) if result is not _dispatch.OpDispatcher.NOT_SUPPORTED: return result raise except _core._NotOkStatusException as e: if name is not None: message = e.message + " name: " + name else: message = e.message _six.raise_from(_core._status_to_exception(e.code, message), None) # Add nodes to the TensorFlow graph. 
if min is None: min = -6 min = _execute.make_float(min, "min") if max is None: max = 6 max = _execute.make_float(max, "max") if num_bits is None: num_bits = 8 num_bits = _execute.make_int(num_bits, "num_bits") if narrow_range is None: narrow_range = False narrow_range = _execute.make_bool(narrow_range, "narrow_range") try: _, _, _op = _op_def_lib._apply_op_helper( "FakeQuantWithMinMaxArgs", inputs=inputs, min=min, max=max, num_bits=num_bits, narrow_range=narrow_range, name=name) except (TypeError, ValueError): result = _dispatch.dispatch( fake_quant_with_min_max_args, inputs=inputs, min=min, max=max, num_bits=num_bits, narrow_range=narrow_range, name=name) if result is not _dispatch.OpDispatcher.NOT_SUPPORTED: return result raise _result = _op.outputs[:] _inputs_flat = _op.inputs _attrs = ("min", _op.get_attr("min"), "max", _op.get_attr("max"), "num_bits", _op.get_attr("num_bits"), "narrow_range", _op.get_attr("narrow_range")) _execute.record_gradient( "FakeQuantWithMinMaxArgs", _inputs_flat, _attrs, _result, name) _result, = _result return _result def fake_quant_with_min_max_args_eager_fallback(inputs, min=-6, max=6, num_bits=8, narrow_range=False, name=None, ctx=None): r"""This is the slowpath function for Eager mode. 
This is for function fake_quant_with_min_max_args """ _ctx = ctx if ctx else _context.context() if min is None: min = -6 min = _execute.make_float(min, "min") if max is None: max = 6 max = _execute.make_float(max, "max") if num_bits is None: num_bits = 8 num_bits = _execute.make_int(num_bits, "num_bits") if narrow_range is None: narrow_range = False narrow_range = _execute.make_bool(narrow_range, "narrow_range") inputs = _ops.convert_to_tensor(inputs, _dtypes.float32) _inputs_flat = [inputs] _attrs = ("min", min, "max", max, "num_bits", num_bits, "narrow_range", narrow_range) _result = _execute.execute(b"FakeQuantWithMinMaxArgs", 1, inputs=_inputs_flat, attrs=_attrs, ctx=_ctx, name=name) _execute.record_gradient( "FakeQuantWithMinMaxArgs", _inputs_flat, _attrs, _result, name) _result, = _result return _result @_dispatch.add_dispatch_list @tf_export('quantization.fake_quant_with_min_max_args_gradient', v1=['quantization.fake_quant_with_min_max_args_gradient', 'fake_quant_with_min_max_args_gradient']) @deprecated_endpoints('fake_quant_with_min_max_args_gradient') def fake_quant_with_min_max_args_gradient(gradients, inputs, min=-6, max=6, num_bits=8, narrow_range=False, name=None): r"""Compute gradients for a FakeQuantWithMinMaxArgs operation. Args: gradients: A `Tensor` of type `float32`. Backpropagated gradients above the FakeQuantWithMinMaxArgs operation. inputs: A `Tensor` of type `float32`. Values passed as inputs to the FakeQuantWithMinMaxArgs operation. min: An optional `float`. Defaults to `-6`. max: An optional `float`. Defaults to `6`. num_bits: An optional `int`. Defaults to `8`. narrow_range: An optional `bool`. Defaults to `False`. name: A name for the operation (optional). Returns: A `Tensor` of type `float32`. 
""" _ctx = _context._context if _ctx is not None and _ctx._eager_context.is_eager: try: _result = _pywrap_tensorflow.TFE_Py_FastPathExecute( _ctx._context_handle, _ctx._eager_context.device_name, "FakeQuantWithMinMaxArgsGradient", name, _ctx._post_execution_callbacks, gradients, inputs, "min", min, "max", max, "num_bits", num_bits, "narrow_range", narrow_range) return _result except _core._FallbackException: try: return fake_quant_with_min_max_args_gradient_eager_fallback( gradients, inputs, min=min, max=max, num_bits=num_bits, narrow_range=narrow_range, name=name, ctx=_ctx) except _core._SymbolicException: pass # Add nodes to the TensorFlow graph. except (TypeError, ValueError): result = _dispatch.dispatch( fake_quant_with_min_max_args_gradient, gradients=gradients, inputs=inputs, min=min, max=max, num_bits=num_bits, narrow_range=narrow_range, name=name) if result is not _dispatch.OpDispatcher.NOT_SUPPORTED: return result raise except _core._NotOkStatusException as e: if name is not None: message = e.message + " name: " + name else: message = e.message _six.raise_from(_core._status_to_exception(e.code, message), None) # Add nodes to the TensorFlow graph. 
if min is None: min = -6 min = _execute.make_float(min, "min") if max is None: max = 6 max = _execute.make_float(max, "max") if num_bits is None: num_bits = 8 num_bits = _execute.make_int(num_bits, "num_bits") if narrow_range is None: narrow_range = False narrow_range = _execute.make_bool(narrow_range, "narrow_range") try: _, _, _op = _op_def_lib._apply_op_helper( "FakeQuantWithMinMaxArgsGradient", gradients=gradients, inputs=inputs, min=min, max=max, num_bits=num_bits, narrow_range=narrow_range, name=name) except (TypeError, ValueError): result = _dispatch.dispatch( fake_quant_with_min_max_args_gradient, gradients=gradients, inputs=inputs, min=min, max=max, num_bits=num_bits, narrow_range=narrow_range, name=name) if result is not _dispatch.OpDispatcher.NOT_SUPPORTED: return result raise _result = _op.outputs[:] _inputs_flat = _op.inputs _attrs = ("min", _op.get_attr("min"), "max", _op.get_attr("max"), "num_bits", _op.get_attr("num_bits"), "narrow_range", _op.get_attr("narrow_range")) _execute.record_gradient( "FakeQuantWithMinMaxArgsGradient", _inputs_flat, _attrs, _result, name) _result, = _result return _result def fake_quant_with_min_max_args_gradient_eager_fallback(gradients, inputs, min=-6, max=6, num_bits=8, narrow_range=False, name=None, ctx=None): r"""This is the slowpath function for Eager mode. 
This is for function fake_quant_with_min_max_args_gradient """ _ctx = ctx if ctx else _context.context() if min is None: min = -6 min = _execute.make_float(min, "min") if max is None: max = 6 max = _execute.make_float(max, "max") if num_bits is None: num_bits = 8 num_bits = _execute.make_int(num_bits, "num_bits") if narrow_range is None: narrow_range = False narrow_range = _execute.make_bool(narrow_range, "narrow_range") gradients = _ops.convert_to_tensor(gradients, _dtypes.float32) inputs = _ops.convert_to_tensor(inputs, _dtypes.float32) _inputs_flat = [gradients, inputs] _attrs = ("min", min, "max", max, "num_bits", num_bits, "narrow_range", narrow_range) _result = _execute.execute(b"FakeQuantWithMinMaxArgsGradient", 1, inputs=_inputs_flat, attrs=_attrs, ctx=_ctx, name=name) _execute.record_gradient( "FakeQuantWithMinMaxArgsGradient", _inputs_flat, _attrs, _result, name) _result, = _result return _result @_dispatch.add_dispatch_list @tf_export('quantization.fake_quant_with_min_max_vars', v1=['quantization.fake_quant_with_min_max_vars', 'fake_quant_with_min_max_vars']) @deprecated_endpoints('fake_quant_with_min_max_vars') def fake_quant_with_min_max_vars(inputs, min, max, num_bits=8, narrow_range=False, name=None): r"""Fake-quantize the 'inputs' tensor of type float via global float scalars `min` and `max` to 'outputs' tensor of same shape as `inputs`. `[min; max]` define the clamping range for the `inputs` data. `inputs` values are quantized into the quantization range (`[0; 2^num_bits - 1]` when `narrow_range` is false and `[1; 2^num_bits - 1]` when it is true) and then de-quantized and output as floats in `[min; max]` interval. `num_bits` is the bitwidth of the quantization; between 2 and 16, inclusive. This operation has a gradient and thus allows for training `min` and `max` values. Args: inputs: A `Tensor` of type `float32`. min: A `Tensor` of type `float32`. max: A `Tensor` of type `float32`. num_bits: An optional `int`. Defaults to `8`. 
    narrow_range: An optional `bool`. Defaults to `False`.
    name: A name for the operation (optional).

  Returns:
    A `Tensor` of type `float32`.
  """
  # Eager fast path for FakeQuantWithMinMaxVars (min/max are runtime tensors).
  _ctx = _context._context
  if _ctx is not None and _ctx._eager_context.is_eager:
    try:
      _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
        _ctx._context_handle, _ctx._eager_context.device_name,
        "FakeQuantWithMinMaxVars", name, _ctx._post_execution_callbacks,
        inputs, min, max, "num_bits", num_bits, "narrow_range", narrow_range)
      return _result
    except _core._FallbackException:
      try:
        return fake_quant_with_min_max_vars_eager_fallback(
            inputs, min, max, num_bits=num_bits, narrow_range=narrow_range,
            name=name, ctx=_ctx)
      except _core._SymbolicException:
        pass  # Add nodes to the TensorFlow graph.
      except (TypeError, ValueError):
        result = _dispatch.dispatch(
              fake_quant_with_min_max_vars, inputs=inputs, min=min, max=max,
                                            num_bits=num_bits,
                                            narrow_range=narrow_range,
                                            name=name)
        if result is not _dispatch.OpDispatcher.NOT_SUPPORTED:
          return result
        raise
    except _core._NotOkStatusException as e:
      if name is not None:
        message = e.message + " name: " + name
      else:
        message = e.message
      _six.raise_from(_core._status_to_exception(e.code, message), None)
  # Add nodes to the TensorFlow graph.
  # Graph mode: only num_bits/narrow_range are attrs; min/max are inputs.
  if num_bits is None:
    num_bits = 8
  num_bits = _execute.make_int(num_bits, "num_bits")
  if narrow_range is None:
    narrow_range = False
  narrow_range = _execute.make_bool(narrow_range, "narrow_range")
  try:
    _, _, _op = _op_def_lib._apply_op_helper(
        "FakeQuantWithMinMaxVars", inputs=inputs, min=min, max=max,
                                   num_bits=num_bits,
                                   narrow_range=narrow_range, name=name)
  except (TypeError, ValueError):
    result = _dispatch.dispatch(
          fake_quant_with_min_max_vars, inputs=inputs, min=min, max=max,
                                        num_bits=num_bits,
                                        narrow_range=narrow_range, name=name)
    if result is not _dispatch.OpDispatcher.NOT_SUPPORTED:
      return result
    raise
  _result = _op.outputs[:]
  _inputs_flat = _op.inputs
  _attrs = ("num_bits", _op.get_attr("num_bits"), "narrow_range",
            _op.get_attr("narrow_range"))
  _execute.record_gradient(
      "FakeQuantWithMinMaxVars", _inputs_flat, _attrs, _result, name)
  _result, = _result
  return _result


def fake_quant_with_min_max_vars_eager_fallback(inputs, min, max, num_bits=8, narrow_range=False, name=None, ctx=None):
  r"""This is the slowpath function for Eager mode.
  This is for function fake_quant_with_min_max_vars
  """
  _ctx = ctx if ctx else _context.context()
  if num_bits is None:
    num_bits = 8
  num_bits = _execute.make_int(num_bits, "num_bits")
  if narrow_range is None:
    narrow_range = False
  narrow_range = _execute.make_bool(narrow_range, "narrow_range")
  inputs = _ops.convert_to_tensor(inputs, _dtypes.float32)
  min = _ops.convert_to_tensor(min, _dtypes.float32)
  max = _ops.convert_to_tensor(max, _dtypes.float32)
  _inputs_flat = [inputs, min, max]
  _attrs = ("num_bits", num_bits, "narrow_range", narrow_range)
  _result = _execute.execute(b"FakeQuantWithMinMaxVars", 1,
                             inputs=_inputs_flat, attrs=_attrs, ctx=_ctx,
                             name=name)
  _execute.record_gradient(
      "FakeQuantWithMinMaxVars", _inputs_flat, _attrs, _result, name)
  _result, = _result
  return _result


# Named-tuple wrapper for the three outputs of FakeQuantWithMinMaxVarsGradient.
_fake_quant_with_min_max_vars_gradient_outputs = ["backprops_wrt_input",
                                                 "backprop_wrt_min",
                                                 "backprop_wrt_max"]
_FakeQuantWithMinMaxVarsGradientOutput = _collections.namedtuple(
    "FakeQuantWithMinMaxVarsGradient",
    _fake_quant_with_min_max_vars_gradient_outputs)


@_dispatch.add_dispatch_list
@tf_export('quantization.fake_quant_with_min_max_vars_gradient', v1=['quantization.fake_quant_with_min_max_vars_gradient', 'fake_quant_with_min_max_vars_gradient'])
@deprecated_endpoints('fake_quant_with_min_max_vars_gradient')
def fake_quant_with_min_max_vars_gradient(gradients, inputs, min, max, num_bits=8, narrow_range=False, name=None):
  r"""Compute gradients for a FakeQuantWithMinMaxVars operation.

  Args:
    gradients: A `Tensor` of type `float32`.
      Backpropagated gradients above the FakeQuantWithMinMaxVars operation.
    inputs: A `Tensor` of type `float32`.
      Values passed as inputs to the FakeQuantWithMinMaxVars operation.
      min, max: Quantization interval, scalar floats.
    min: A `Tensor` of type `float32`.
    max: A `Tensor` of type `float32`.
    num_bits: An optional `int`. Defaults to `8`.
      The bitwidth of the quantization; between 2 and 8, inclusive.
    narrow_range: An optional `bool`. Defaults to `False`.
      Whether to quantize into 2^num_bits - 1 distinct values.
    name: A name for the operation (optional).

  Returns:
    A tuple of `Tensor` objects (backprops_wrt_input, backprop_wrt_min, backprop_wrt_max).

    backprops_wrt_input: A `Tensor` of type `float32`.
    backprop_wrt_min: A `Tensor` of type `float32`.
    backprop_wrt_max: A `Tensor` of type `float32`.
  """
  # Eager fast path; result is wrapped in the output namedtuple.
  _ctx = _context._context
  if _ctx is not None and _ctx._eager_context.is_eager:
    try:
      _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
        _ctx._context_handle, _ctx._eager_context.device_name,
        "FakeQuantWithMinMaxVarsGradient", name,
        _ctx._post_execution_callbacks, gradients, inputs, min, max,
        "num_bits", num_bits, "narrow_range", narrow_range)
      _result = _FakeQuantWithMinMaxVarsGradientOutput._make(_result)
      return _result
    except _core._FallbackException:
      try:
        return fake_quant_with_min_max_vars_gradient_eager_fallback(
            gradients, inputs, min, max, num_bits=num_bits,
            narrow_range=narrow_range, name=name, ctx=_ctx)
      except _core._SymbolicException:
        pass  # Add nodes to the TensorFlow graph.
      except (TypeError, ValueError):
        result = _dispatch.dispatch(
              fake_quant_with_min_max_vars_gradient, gradients=gradients,
                                                     inputs=inputs, min=min,
                                                     max=max,
                                                     num_bits=num_bits,
                                                     narrow_range=narrow_range,
                                                     name=name)
        if result is not _dispatch.OpDispatcher.NOT_SUPPORTED:
          return result
        raise
    except _core._NotOkStatusException as e:
      if name is not None:
        message = e.message + " name: " + name
      else:
        message = e.message
      _six.raise_from(_core._status_to_exception(e.code, message), None)
  # Add nodes to the TensorFlow graph.
  # Graph mode: fill attr defaults, then build the op node.
  if num_bits is None:
    num_bits = 8
  num_bits = _execute.make_int(num_bits, "num_bits")
  if narrow_range is None:
    narrow_range = False
  narrow_range = _execute.make_bool(narrow_range, "narrow_range")
  try:
    _, _, _op = _op_def_lib._apply_op_helper(
        "FakeQuantWithMinMaxVarsGradient", gradients=gradients,
                                           inputs=inputs, min=min, max=max,
                                           num_bits=num_bits,
                                           narrow_range=narrow_range,
                                           name=name)
  except (TypeError, ValueError):
    result = _dispatch.dispatch(
          fake_quant_with_min_max_vars_gradient, gradients=gradients,
                                                 inputs=inputs, min=min,
                                                 max=max, num_bits=num_bits,
                                                 narrow_range=narrow_range,
                                                 name=name)
    if result is not _dispatch.OpDispatcher.NOT_SUPPORTED:
      return result
    raise
  _result = _op.outputs[:]
  _inputs_flat = _op.inputs
  _attrs = ("num_bits", _op.get_attr("num_bits"), "narrow_range",
            _op.get_attr("narrow_range"))
  _execute.record_gradient(
      "FakeQuantWithMinMaxVarsGradient", _inputs_flat, _attrs, _result, name)
  _result = _FakeQuantWithMinMaxVarsGradientOutput._make(_result)
  return _result


def fake_quant_with_min_max_vars_gradient_eager_fallback(gradients, inputs, min, max, num_bits=8, narrow_range=False, name=None, ctx=None):
  r"""This is the slowpath function for Eager mode.
  This is for function fake_quant_with_min_max_vars_gradient
  """
  _ctx = ctx if ctx else _context.context()
  if num_bits is None:
    num_bits = 8
  num_bits = _execute.make_int(num_bits, "num_bits")
  if narrow_range is None:
    narrow_range = False
  narrow_range = _execute.make_bool(narrow_range, "narrow_range")
  gradients = _ops.convert_to_tensor(gradients, _dtypes.float32)
  inputs = _ops.convert_to_tensor(inputs, _dtypes.float32)
  min = _ops.convert_to_tensor(min, _dtypes.float32)
  max = _ops.convert_to_tensor(max, _dtypes.float32)
  _inputs_flat = [gradients, inputs, min, max]
  _attrs = ("num_bits", num_bits, "narrow_range", narrow_range)
  # Three outputs ("3"): backprops w.r.t. input, min, and max.
  _result = _execute.execute(b"FakeQuantWithMinMaxVarsGradient", 3,
                             inputs=_inputs_flat, attrs=_attrs, ctx=_ctx,
                             name=name)
  _execute.record_gradient(
      "FakeQuantWithMinMaxVarsGradient", _inputs_flat, _attrs, _result, name)
  _result = _FakeQuantWithMinMaxVarsGradientOutput._make(_result)
  return _result


@_dispatch.add_dispatch_list
@tf_export('quantization.fake_quant_with_min_max_vars_per_channel', v1=['quantization.fake_quant_with_min_max_vars_per_channel', 'fake_quant_with_min_max_vars_per_channel'])
@deprecated_endpoints('fake_quant_with_min_max_vars_per_channel')
def fake_quant_with_min_max_vars_per_channel(inputs, min, max, num_bits=8, narrow_range=False, name=None):
  r"""Fake-quantize the 'inputs' tensor of type float and one of the shapes: `[d]`,

  `[b, d]` `[b, h, w, d]` via per-channel floats `min` and `max` of shape `[d]`
  to 'outputs' tensor of same shape as `inputs`.

  `[min; max]` define the clamping range for the `inputs` data.
  `inputs` values are quantized into the quantization range
  (`[0; 2^num_bits - 1]` when `narrow_range` is false and
  `[1; 2^num_bits - 1]` when it is true) and then de-quantized and output as
  floats in `[min; max]` interval.
  `num_bits` is the bitwidth of the quantization; between 2 and 16, inclusive.

  This operation has a gradient and thus allows for training `min` and `max`
  values.

  Args:
    inputs: A `Tensor` of type `float32`.
    min: A `Tensor` of type `float32`.
    max: A `Tensor` of type `float32`.
    num_bits: An optional `int`. Defaults to `8`.
    narrow_range: An optional `bool`. Defaults to `False`.
    name: A name for the operation (optional).

  Returns:
    A `Tensor` of type `float32`.
  """
  # Eager fast path for FakeQuantWithMinMaxVarsPerChannel.
  _ctx = _context._context
  if _ctx is not None and _ctx._eager_context.is_eager:
    try:
      _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
        _ctx._context_handle, _ctx._eager_context.device_name,
        "FakeQuantWithMinMaxVarsPerChannel", name,
        _ctx._post_execution_callbacks, inputs, min, max, "num_bits",
        num_bits, "narrow_range", narrow_range)
      return _result
    except _core._FallbackException:
      try:
        return fake_quant_with_min_max_vars_per_channel_eager_fallback(
            inputs, min, max, num_bits=num_bits, narrow_range=narrow_range,
            name=name, ctx=_ctx)
      except _core._SymbolicException:
        pass  # Add nodes to the TensorFlow graph.
      except (TypeError, ValueError):
        result = _dispatch.dispatch(
              fake_quant_with_min_max_vars_per_channel, inputs=inputs,
                                                        min=min, max=max,
                                                        num_bits=num_bits,
                                                        narrow_range=narrow_range,
                                                        name=name)
        if result is not _dispatch.OpDispatcher.NOT_SUPPORTED:
          return result
        raise
    except _core._NotOkStatusException as e:
      if name is not None:
        message = e.message + " name: " + name
      else:
        message = e.message
      _six.raise_from(_core._status_to_exception(e.code, message), None)
  # Add nodes to the TensorFlow graph.
  # Graph mode: fill attr defaults, then build the op node.
  if num_bits is None:
    num_bits = 8
  num_bits = _execute.make_int(num_bits, "num_bits")
  if narrow_range is None:
    narrow_range = False
  narrow_range = _execute.make_bool(narrow_range, "narrow_range")
  try:
    _, _, _op = _op_def_lib._apply_op_helper(
        "FakeQuantWithMinMaxVarsPerChannel", inputs=inputs, min=min, max=max,
                                             num_bits=num_bits,
                                             narrow_range=narrow_range,
                                             name=name)
  except (TypeError, ValueError):
    result = _dispatch.dispatch(
          fake_quant_with_min_max_vars_per_channel, inputs=inputs, min=min,
                                                    max=max,
                                                    num_bits=num_bits,
                                                    narrow_range=narrow_range,
                                                    name=name)
    if result is not _dispatch.OpDispatcher.NOT_SUPPORTED:
      return result
    raise
  _result = _op.outputs[:]
  _inputs_flat = _op.inputs
  _attrs = ("num_bits", _op.get_attr("num_bits"), "narrow_range",
            _op.get_attr("narrow_range"))
  _execute.record_gradient(
      "FakeQuantWithMinMaxVarsPerChannel", _inputs_flat, _attrs, _result,
      name)
  _result, = _result
  return _result


def fake_quant_with_min_max_vars_per_channel_eager_fallback(inputs, min, max, num_bits=8, narrow_range=False, name=None, ctx=None):
  r"""This is the slowpath function for Eager mode.
  This is for function fake_quant_with_min_max_vars_per_channel
  """
  _ctx = ctx if ctx else _context.context()
  if num_bits is None:
    num_bits = 8
  num_bits = _execute.make_int(num_bits, "num_bits")
  if narrow_range is None:
    narrow_range = False
  narrow_range = _execute.make_bool(narrow_range, "narrow_range")
  inputs = _ops.convert_to_tensor(inputs, _dtypes.float32)
  min = _ops.convert_to_tensor(min, _dtypes.float32)
  max = _ops.convert_to_tensor(max, _dtypes.float32)
  _inputs_flat = [inputs, min, max]
  _attrs = ("num_bits", num_bits, "narrow_range", narrow_range)
  _result = _execute.execute(b"FakeQuantWithMinMaxVarsPerChannel", 1,
                             inputs=_inputs_flat, attrs=_attrs, ctx=_ctx,
                             name=name)
  _execute.record_gradient(
      "FakeQuantWithMinMaxVarsPerChannel", _inputs_flat, _attrs, _result,
      name)
  _result, = _result
  return _result


# Named-tuple wrapper for the three outputs of
# FakeQuantWithMinMaxVarsPerChannelGradient.
_fake_quant_with_min_max_vars_per_channel_gradient_outputs = ["backprops_wrt_input",
                                                             "backprop_wrt_min",
                                                             "backprop_wrt_max"]
_FakeQuantWithMinMaxVarsPerChannelGradientOutput = _collections.namedtuple(
    "FakeQuantWithMinMaxVarsPerChannelGradient",
    _fake_quant_with_min_max_vars_per_channel_gradient_outputs)


@_dispatch.add_dispatch_list
@tf_export('quantization.fake_quant_with_min_max_vars_per_channel_gradient', v1=['quantization.fake_quant_with_min_max_vars_per_channel_gradient', 'fake_quant_with_min_max_vars_per_channel_gradient'])
@deprecated_endpoints('fake_quant_with_min_max_vars_per_channel_gradient')
def fake_quant_with_min_max_vars_per_channel_gradient(gradients, inputs, min, max, num_bits=8, narrow_range=False, name=None):
  r"""Compute gradients for a FakeQuantWithMinMaxVarsPerChannel operation.

  Args:
    gradients: A `Tensor` of type `float32`.
      Backpropagated gradients above the FakeQuantWithMinMaxVars operation,
      shape one of: `[d]`, `[b, d]`,  `[b, h, w, d]`.
    inputs: A `Tensor` of type `float32`.
      Values passed as inputs to the FakeQuantWithMinMaxVars operation, shape
        same as `gradients`.
      min, max: Quantization interval, floats of shape `[d]`.
    min: A `Tensor` of type `float32`.
    max: A `Tensor` of type `float32`.
    num_bits: An optional `int`. Defaults to `8`.
      The bitwidth of the quantization; between 2 and 16, inclusive.
    narrow_range: An optional `bool`. Defaults to `False`.
      Whether to quantize into 2^num_bits - 1 distinct values.
    name: A name for the operation (optional).

  Returns:
    A tuple of `Tensor` objects (backprops_wrt_input, backprop_wrt_min, backprop_wrt_max).

    backprops_wrt_input: A `Tensor` of type `float32`.
    backprop_wrt_min: A `Tensor` of type `float32`.
    backprop_wrt_max: A `Tensor` of type `float32`.
  """
  # Eager fast path; result is wrapped in the output namedtuple.
  _ctx = _context._context
  if _ctx is not None and _ctx._eager_context.is_eager:
    try:
      _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
        _ctx._context_handle, _ctx._eager_context.device_name,
        "FakeQuantWithMinMaxVarsPerChannelGradient", name,
        _ctx._post_execution_callbacks, gradients, inputs, min, max,
        "num_bits", num_bits, "narrow_range", narrow_range)
      _result = _FakeQuantWithMinMaxVarsPerChannelGradientOutput._make(_result)
      return _result
    except _core._FallbackException:
      try:
        return fake_quant_with_min_max_vars_per_channel_gradient_eager_fallback(
            gradients, inputs, min, max, num_bits=num_bits,
            narrow_range=narrow_range, name=name, ctx=_ctx)
      except _core._SymbolicException:
        pass  # Add nodes to the TensorFlow graph.
      except (TypeError, ValueError):
        result = _dispatch.dispatch(
              fake_quant_with_min_max_vars_per_channel_gradient,
              gradients=gradients, inputs=inputs, min=min, max=max,
              num_bits=num_bits, narrow_range=narrow_range, name=name)
        if result is not _dispatch.OpDispatcher.NOT_SUPPORTED:
          return result
        raise
    except _core._NotOkStatusException as e:
      if name is not None:
        message = e.message + " name: " + name
      else:
        message = e.message
      _six.raise_from(_core._status_to_exception(e.code, message), None)
  # Add nodes to the TensorFlow graph.
  # Graph mode: fill attr defaults, then build the op node.
  if num_bits is None:
    num_bits = 8
  num_bits = _execute.make_int(num_bits, "num_bits")
  if narrow_range is None:
    narrow_range = False
  narrow_range = _execute.make_bool(narrow_range, "narrow_range")
  try:
    _, _, _op = _op_def_lib._apply_op_helper(
        "FakeQuantWithMinMaxVarsPerChannelGradient", gradients=gradients,
                                                     inputs=inputs, min=min,
                                                     max=max,
                                                     num_bits=num_bits,
                                                     narrow_range=narrow_range,
                                                     name=name)
  except (TypeError, ValueError):
    result = _dispatch.dispatch(
          fake_quant_with_min_max_vars_per_channel_gradient,
          gradients=gradients, inputs=inputs, min=min, max=max,
          num_bits=num_bits, narrow_range=narrow_range, name=name)
    if result is not _dispatch.OpDispatcher.NOT_SUPPORTED:
      return result
    raise
  _result = _op.outputs[:]
  _inputs_flat = _op.inputs
  _attrs = ("num_bits", _op.get_attr("num_bits"), "narrow_range",
            _op.get_attr("narrow_range"))
  _execute.record_gradient(
      "FakeQuantWithMinMaxVarsPerChannelGradient", _inputs_flat, _attrs,
      _result, name)
  _result = _FakeQuantWithMinMaxVarsPerChannelGradientOutput._make(_result)
  return _result


def fake_quant_with_min_max_vars_per_channel_gradient_eager_fallback(gradients, inputs, min, max, num_bits=8, narrow_range=False, name=None, ctx=None):
  r"""This is the slowpath function for Eager mode.
  This is for function fake_quant_with_min_max_vars_per_channel_gradient
  """
  _ctx = ctx if ctx else _context.context()
  if num_bits is None:
    num_bits = 8
  num_bits = _execute.make_int(num_bits, "num_bits")
  if narrow_range is None:
    narrow_range = False
  narrow_range = _execute.make_bool(narrow_range, "narrow_range")
  gradients = _ops.convert_to_tensor(gradients, _dtypes.float32)
  inputs = _ops.convert_to_tensor(inputs, _dtypes.float32)
  min = _ops.convert_to_tensor(min, _dtypes.float32)
  max = _ops.convert_to_tensor(max, _dtypes.float32)
  _inputs_flat = [gradients, inputs, min, max]
  _attrs = ("num_bits", num_bits, "narrow_range", narrow_range)
  # Three outputs ("3"): backprops w.r.t. input, min, and max.
  _result = _execute.execute(b"FakeQuantWithMinMaxVarsPerChannelGradient", 3,
                             inputs=_inputs_flat, attrs=_attrs, ctx=_ctx,
                             name=name)
  _execute.record_gradient(
      "FakeQuantWithMinMaxVarsPerChannelGradient", _inputs_flat, _attrs,
      _result, name)
  _result = _FakeQuantWithMinMaxVarsPerChannelGradientOutput._make(_result)
  return _result


@_dispatch.add_dispatch_list
@tf_export('fill')
def fill(dims, value, name=None):
  r"""Creates a tensor filled with a scalar value.

  This operation creates a tensor of shape `dims` and fills it with `value`.

  For example:

  ```
  # Output tensor has shape [2, 3].
  fill([2, 3], 9) ==> [[9, 9, 9]
                       [9, 9, 9]]
  ```

  `tf.fill` differs from `tf.constant` in a few ways:

  * `tf.fill` only supports scalar contents, whereas `tf.constant` supports
    Tensor values.
  * `tf.fill` creates an Op in the computation graph that constructs the
    actual Tensor value at runtime. This is in contrast to `tf.constant`
    which embeds the entire Tensor into the graph with a `Const` node.
  * Because `tf.fill` evaluates at graph runtime, it supports dynamic shapes
    based on other runtime Tensors, unlike `tf.constant`.

  Args:
    dims: A `Tensor`. Must be one of the following types: `int32`, `int64`.
      1-D. Represents the shape of the output tensor.
    value: A `Tensor`. 0-D (scalar). Value to fill the returned tensor.
@compatibility(numpy) Equivalent to np.full @end_compatibility name: A name for the operation (optional). Returns: A `Tensor`. Has the same type as `value`. """ _ctx = _context._context if _ctx is not None and _ctx._eager_context.is_eager: try: _result = _pywrap_tensorflow.TFE_Py_FastPathExecute( _ctx._context_handle, _ctx._eager_context.device_name, "Fill", name, _ctx._post_execution_callbacks, dims, value) return _result except _core._FallbackException: try: return fill_eager_fallback( dims, value, name=name, ctx=_ctx) except _core._SymbolicException: pass # Add nodes to the TensorFlow graph. except (TypeError, ValueError): result = _dispatch.dispatch( fill, dims=dims, value=value, name=name) if result is not _dispatch.OpDispatcher.NOT_SUPPORTED: return result raise except _core._NotOkStatusException as e: if name is not None: message = e.message + " name: " + name else: message = e.message _six.raise_from(_core._status_to_exception(e.code, message), None) # Add nodes to the TensorFlow graph. try: _, _, _op = _op_def_lib._apply_op_helper( "Fill", dims=dims, value=value, name=name) except (TypeError, ValueError): result = _dispatch.dispatch( fill, dims=dims, value=value, name=name) if result is not _dispatch.OpDispatcher.NOT_SUPPORTED: return result raise _result = _op.outputs[:] _inputs_flat = _op.inputs _attrs = ("T", _op.get_attr("T"), "index_type", _op.get_attr("index_type")) _execute.record_gradient( "Fill", _inputs_flat, _attrs, _result, name) _result, = _result return _result def fill_eager_fallback(dims, value, name=None, ctx=None): r"""This is the slowpath function for Eager mode. 
This is for function fill """ _ctx = ctx if ctx else _context.context() _attr_T, (value,) = _execute.args_to_matching_eager([value], _ctx) _attr_index_type, (dims,) = _execute.args_to_matching_eager([dims], _ctx, _dtypes.int32) _inputs_flat = [dims, value] _attrs = ("T", _attr_T, "index_type", _attr_index_type) _result = _execute.execute(b"Fill", 1, inputs=_inputs_flat, attrs=_attrs, ctx=_ctx, name=name) _execute.record_gradient( "Fill", _inputs_flat, _attrs, _result, name) _result, = _result return _result def gather(params, indices, validate_indices=True, name=None): r"""Gather slices from `params` according to `indices`. `indices` must be an integer tensor of any dimension (usually 0-D or 1-D). Produces an output tensor with shape `indices.shape + params.shape[1:]` where: ```python # Scalar indices output[:, ..., :] = params[indices, :, ... :] # Vector indices output[i, :, ..., :] = params[indices[i], :, ... :] # Higher rank indices output[i, ..., j, :, ... :] = params[indices[i, ..., j], :, ..., :] ``` If `indices` is a permutation and `len(indices) == params.shape[0]` then this operation will permute `params` accordingly. `validate_indices`: DEPRECATED. If this operation is assigned to CPU, values in `indices` are always validated to be within range. If assigned to GPU, out-of-bound indices result in safe but unspecified behavior, which may include raising an error. <div style="width:70%; margin:auto; margin-bottom:10px; margin-top:20px;"> <img style="width:100%" src="https://www.tensorflow.org/images/Gather.png" alt> </div> Args: params: A `Tensor`. indices: A `Tensor`. Must be one of the following types: `int32`, `int64`. validate_indices: An optional `bool`. Defaults to `True`. name: A name for the operation (optional). Returns: A `Tensor`. Has the same type as `params`. 
""" _ctx = _context._context if _ctx is not None and _ctx._eager_context.is_eager: try: _result = _pywrap_tensorflow.TFE_Py_FastPathExecute( _ctx._context_handle, _ctx._eager_context.device_name, "Gather", name, _ctx._post_execution_callbacks, params, indices, "validate_indices", validate_indices) return _result except _core._FallbackException: try: return gather_eager_fallback( params, indices, validate_indices=validate_indices, name=name, ctx=_ctx) except _core._SymbolicException: pass # Add nodes to the TensorFlow graph. except _core._NotOkStatusException as e: if name is not None: message = e.message + " name: " + name else: message = e.message _six.raise_from(_core._status_to_exception(e.code, message), None) # Add nodes to the TensorFlow graph. if validate_indices is None: validate_indices = True validate_indices = _execute.make_bool(validate_indices, "validate_indices") _, _, _op = _op_def_lib._apply_op_helper( "Gather", params=params, indices=indices, validate_indices=validate_indices, name=name) _result = _op.outputs[:] _inputs_flat = _op.inputs _attrs = ("validate_indices", _op.get_attr("validate_indices"), "Tparams", _op.get_attr("Tparams"), "Tindices", _op.get_attr("Tindices")) _execute.record_gradient( "Gather", _inputs_flat, _attrs, _result, name) _result, = _result return _result def gather_eager_fallback(params, indices, validate_indices=True, name=None, ctx=None): r"""This is the slowpath function for Eager mode. 
This is for function gather """ _ctx = ctx if ctx else _context.context() if validate_indices is None: validate_indices = True validate_indices = _execute.make_bool(validate_indices, "validate_indices") _attr_Tparams, (params,) = _execute.args_to_matching_eager([params], _ctx) _attr_Tindices, (indices,) = _execute.args_to_matching_eager([indices], _ctx) _inputs_flat = [params, indices] _attrs = ("validate_indices", validate_indices, "Tparams", _attr_Tparams, "Tindices", _attr_Tindices) _result = _execute.execute(b"Gather", 1, inputs=_inputs_flat, attrs=_attrs, ctx=_ctx, name=name) _execute.record_gradient( "Gather", _inputs_flat, _attrs, _result, name) _result, = _result return _result @_dispatch.add_dispatch_list @tf_export('gather_nd', v1=['gather_nd', 'manip.gather_nd']) @deprecated_endpoints('manip.gather_nd') def gather_nd(params, indices, name=None): r"""Gather slices from `params` into a Tensor with shape specified by `indices`. `indices` is an K-dimensional integer tensor, best thought of as a (K-1)-dimensional tensor of indices into `params`, where each element defines a slice of `params`: output[\\(i_0, ..., i_{K-2}\\)] = params[indices[\\(i_0, ..., i_{K-2}\\)]] Whereas in `tf.gather` `indices` defines slices into the first dimension of `params`, in `tf.gather_nd`, `indices` defines slices into the first `N` dimensions of `params`, where `N = indices.shape[-1]`. The last dimension of `indices` can be at most the rank of `params`: indices.shape[-1] <= params.rank The last dimension of `indices` corresponds to elements (if `indices.shape[-1] == params.rank`) or slices (if `indices.shape[-1] < params.rank`) along dimension `indices.shape[-1]` of `params`. The output tensor has shape indices.shape[:-1] + params.shape[indices.shape[-1]:] Note that on CPU, if an out of bound index is found, an error is returned. On GPU, if an out of bound index is found, a 0 is stored in the corresponding output value. Some examples below. 
Simple indexing into a matrix: ```python indices = [[0, 0], [1, 1]] params = [['a', 'b'], ['c', 'd']] output = ['a', 'd'] ``` Slice indexing into a matrix: ```python indices = [[1], [0]] params = [['a', 'b'], ['c', 'd']] output = [['c', 'd'], ['a', 'b']] ``` Indexing into a 3-tensor: ```python indices = [[1]] params = [[['a0', 'b0'], ['c0', 'd0']], [['a1', 'b1'], ['c1', 'd1']]] output = [[['a1', 'b1'], ['c1', 'd1']]] indices = [[0, 1], [1, 0]] params = [[['a0', 'b0'], ['c0', 'd0']], [['a1', 'b1'], ['c1', 'd1']]] output = [['c0', 'd0'], ['a1', 'b1']] indices = [[0, 0, 1], [1, 0, 1]] params = [[['a0', 'b0'], ['c0', 'd0']], [['a1', 'b1'], ['c1', 'd1']]] output = ['b0', 'b1'] ``` Batched indexing into a matrix: ```python indices = [[[0, 0]], [[0, 1]]] params = [['a', 'b'], ['c', 'd']] output = [['a'], ['b']] ``` Batched slice indexing into a matrix: ```python indices = [[[1]], [[0]]] params = [['a', 'b'], ['c', 'd']] output = [[['c', 'd']], [['a', 'b']]] ``` Batched indexing into a 3-tensor: ```python indices = [[[1]], [[0]]] params = [[['a0', 'b0'], ['c0', 'd0']], [['a1', 'b1'], ['c1', 'd1']]] output = [[[['a1', 'b1'], ['c1', 'd1']]], [[['a0', 'b0'], ['c0', 'd0']]]] indices = [[[0, 1], [1, 0]], [[0, 0], [1, 1]]] params = [[['a0', 'b0'], ['c0', 'd0']], [['a1', 'b1'], ['c1', 'd1']]] output = [[['c0', 'd0'], ['a1', 'b1']], [['a0', 'b0'], ['c1', 'd1']]] indices = [[[0, 0, 1], [1, 0, 1]], [[0, 1, 1], [1, 1, 0]]] params = [[['a0', 'b0'], ['c0', 'd0']], [['a1', 'b1'], ['c1', 'd1']]] output = [['b0', 'b1'], ['d0', 'c1']] ``` See also `tf.gather` and `tf.batch_gather`. Args: params: A `Tensor`. The tensor from which to gather values. indices: A `Tensor`. Must be one of the following types: `int32`, `int64`. Index tensor. name: A name for the operation (optional). Returns: A `Tensor`. Has the same type as `params`. 
""" _ctx = _context._context if _ctx is not None and _ctx._eager_context.is_eager: try: _result = _pywrap_tensorflow.TFE_Py_FastPathExecute( _ctx._context_handle, _ctx._eager_context.device_name, "GatherNd", name, _ctx._post_execution_callbacks, params, indices) return _result except _core._FallbackException: try: return gather_nd_eager_fallback( params, indices, name=name, ctx=_ctx) except _core._SymbolicException: pass # Add nodes to the TensorFlow graph. except (TypeError, ValueError): result = _dispatch.dispatch( gather_nd, params=params, indices=indices, name=name) if result is not _dispatch.OpDispatcher.NOT_SUPPORTED: return result raise except _core._NotOkStatusException as e: if name is not None: message = e.message + " name: " + name else: message = e.message _six.raise_from(_core._status_to_exception(e.code, message), None) # Add nodes to the TensorFlow graph. try: _, _, _op = _op_def_lib._apply_op_helper( "GatherNd", params=params, indices=indices, name=name) except (TypeError, ValueError): result = _dispatch.dispatch( gather_nd, params=params, indices=indices, name=name) if result is not _dispatch.OpDispatcher.NOT_SUPPORTED: return result raise _result = _op.outputs[:] _inputs_flat = _op.inputs _attrs = ("Tparams", _op.get_attr("Tparams"), "Tindices", _op.get_attr("Tindices")) _execute.record_gradient( "GatherNd", _inputs_flat, _attrs, _result, name) _result, = _result return _result def gather_nd_eager_fallback(params, indices, name=None, ctx=None): r"""This is the slowpath function for Eager mode. 
This is for function gather_nd """ _ctx = ctx if ctx else _context.context() _attr_Tparams, (params,) = _execute.args_to_matching_eager([params], _ctx) _attr_Tindices, (indices,) = _execute.args_to_matching_eager([indices], _ctx) _inputs_flat = [params, indices] _attrs = ("Tparams", _attr_Tparams, "Tindices", _attr_Tindices) _result = _execute.execute(b"GatherNd", 1, inputs=_inputs_flat, attrs=_attrs, ctx=_ctx, name=name) _execute.record_gradient( "GatherNd", _inputs_flat, _attrs, _result, name) _result, = _result return _result def gather_v2(params, indices, axis, name=None): r"""Gather slices from `params` axis `axis` according to `indices`. `indices` must be an integer tensor of any dimension (usually 0-D or 1-D). Produces an output tensor with shape `params.shape[:axis] + indices.shape + params.shape[axis + 1:]` where: ```python # Scalar indices (output is rank(params) - 1). output[a_0, ..., a_n, b_0, ..., b_n] = params[a_0, ..., a_n, indices, b_0, ..., b_n] # Vector indices (output is rank(params)). output[a_0, ..., a_n, i, b_0, ..., b_n] = params[a_0, ..., a_n, indices[i], b_0, ..., b_n] # Higher rank indices (output is rank(params) + rank(indices) - 1). output[a_0, ..., a_n, i, ..., j, b_0, ... b_n] = params[a_0, ..., a_n, indices[i, ..., j], b_0, ..., b_n] ``` <div style="width:70%; margin:auto; margin-bottom:10px; margin-top:20px;"> <img style="width:100%" src="https://www.tensorflow.org/images/Gather.png" alt> </div> Note that on CPU, if an out of bound index is found, an error is returned. On GPU, if an out of bound index is found, a 0 is stored in the corresponding output value. See also `tf.batch_gather` and `tf.gather_nd`. Args: params: A `Tensor`. The tensor from which to gather values. Must be at least rank `axis + 1`. indices: A `Tensor`. Must be one of the following types: `int32`, `int64`. Index tensor. Must be in range `[0, params.shape[axis])`. axis: A `Tensor`. Must be one of the following types: `int32`, `int64`. 
The axis in `params` to gather `indices` from. Defaults to the first dimension. Supports negative indexes. name: A name for the operation (optional). Returns: A `Tensor`. Has the same type as `params`. """ _ctx = _context._context if _ctx is not None and _ctx._eager_context.is_eager: try: _result = _pywrap_tensorflow.TFE_Py_FastPathExecute( _ctx._context_handle, _ctx._eager_context.device_name, "GatherV2", name, _ctx._post_execution_callbacks, params, indices, axis) return _result except _core._FallbackException: try: return gather_v2_eager_fallback( params, indices, axis, name=name, ctx=_ctx) except _core._SymbolicException: pass # Add nodes to the TensorFlow graph. except _core._NotOkStatusException as e: if name is not None: message = e.message + " name: " + name else: message = e.message _six.raise_from(_core._status_to_exception(e.code, message), None) # Add nodes to the TensorFlow graph. _, _, _op = _op_def_lib._apply_op_helper( "GatherV2", params=params, indices=indices, axis=axis, name=name) _result = _op.outputs[:] _inputs_flat = _op.inputs _attrs = ("Tparams", _op.get_attr("Tparams"), "Tindices", _op.get_attr("Tindices"), "Taxis", _op.get_attr("Taxis")) _execute.record_gradient( "GatherV2", _inputs_flat, _attrs, _result, name) _result, = _result return _result def gather_v2_eager_fallback(params, indices, axis, name=None, ctx=None): r"""This is the slowpath function for Eager mode. 
This is for function gather_v2 """ _ctx = ctx if ctx else _context.context() _attr_Tparams, (params,) = _execute.args_to_matching_eager([params], _ctx) _attr_Tindices, (indices,) = _execute.args_to_matching_eager([indices], _ctx) _attr_Taxis, (axis,) = _execute.args_to_matching_eager([axis], _ctx) _inputs_flat = [params, indices, axis] _attrs = ("Tparams", _attr_Tparams, "Tindices", _attr_Tindices, "Taxis", _attr_Taxis) _result = _execute.execute(b"GatherV2", 1, inputs=_inputs_flat, attrs=_attrs, ctx=_ctx, name=name) _execute.record_gradient( "GatherV2", _inputs_flat, _attrs, _result, name) _result, = _result return _result @_dispatch.add_dispatch_list @tf_export('guarantee_const') def guarantee_const(input, name=None): r"""Gives a guarantee to the TF runtime that the input tensor is a constant. The runtime is then free to make optimizations based on this. Only accepts value typed tensors as inputs and rejects resource variable handles as input. Returns the input tensor without modification. Args: input: A `Tensor`. name: A name for the operation (optional). Returns: A `Tensor`. Has the same type as `input`. """ _ctx = _context._context if _ctx is not None and _ctx._eager_context.is_eager: try: _result = _pywrap_tensorflow.TFE_Py_FastPathExecute( _ctx._context_handle, _ctx._eager_context.device_name, "GuaranteeConst", name, _ctx._post_execution_callbacks, input) return _result except _core._FallbackException: try: return guarantee_const_eager_fallback( input, name=name, ctx=_ctx) except _core._SymbolicException: pass # Add nodes to the TensorFlow graph. except (TypeError, ValueError): result = _dispatch.dispatch( guarantee_const, input=input, name=name) if result is not _dispatch.OpDispatcher.NOT_SUPPORTED: return result raise except _core._NotOkStatusException as e: if name is not None: message = e.message + " name: " + name else: message = e.message _six.raise_from(_core._status_to_exception(e.code, message), None) # Add nodes to the TensorFlow graph. 
  # Graph-mode path (continued): build a GuaranteeConst node, falling back to
  # the dispatch mechanism if argument conversion fails.
  try:
    _, _, _op = _op_def_lib._apply_op_helper(
        "GuaranteeConst", input=input, name=name)
  except (TypeError, ValueError):
    result = _dispatch.dispatch(
          guarantee_const, input=input, name=name)
    if result is not _dispatch.OpDispatcher.NOT_SUPPORTED:
      return result
    raise
  _result = _op.outputs[:]
  _inputs_flat = _op.inputs
  _attrs = ("T", _op.get_attr("T"))
  # Record inputs/attrs for gradient bookkeeping, then unpack the single output.
  _execute.record_gradient(
      "GuaranteeConst", _inputs_flat, _attrs, _result, name)
  _result, = _result
  return _result


# NOTE(review): this region is machine-generated TensorFlow op-wrapper code
# (op-def compiler output). Do not hand-edit the logic; regenerate instead.
def guarantee_const_eager_fallback(input, name=None, ctx=None):
  r"""This is the slowpath function for Eager mode.
  This is for function guarantee_const
  """
  _ctx = ctx if ctx else _context.context()
  # Infer the T attr from the (single) input tensor.
  _attr_T, (input,) = _execute.args_to_matching_eager([input], _ctx)
  _inputs_flat = [input]
  _attrs = ("T", _attr_T)
  # Execute the op eagerly; 1 == number of outputs.
  _result = _execute.execute(b"GuaranteeConst", 1, inputs=_inputs_flat,
                             attrs=_attrs, ctx=_ctx, name=name)
  _execute.record_gradient(
      "GuaranteeConst", _inputs_flat, _attrs, _result, name)
  _result, = _result
  return _result


def identity(input, name=None):
  r"""Return a tensor with the same shape and contents as the input tensor or value.

  Args:
    input: A `Tensor`.
    name: A name for the operation (optional).

  Returns:
    A `Tensor`. Has the same type as `input`.
  """
  _ctx = _context._context
  if _ctx is not None and _ctx._eager_context.is_eager:
    # Eager fast path: invoke the C layer directly. On _FallbackException
    # retry via the Python slow path below; _SymbolicException means a
    # symbolic tensor leaked into eager mode, so fall through to graph mode.
    try:
      _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
        _ctx._context_handle, _ctx._eager_context.device_name, "Identity",
        name, _ctx._post_execution_callbacks, input)
      return _result
    except _core._FallbackException:
      try:
        return identity_eager_fallback(
            input, name=name, ctx=_ctx)
      except _core._SymbolicException:
        pass  # Add nodes to the TensorFlow graph.
    except _core._NotOkStatusException as e:
      # Surface the C-level status as a Python exception, tagging the op name.
      if name is not None:
        message = e.message + " name: " + name
      else:
        message = e.message
      _six.raise_from(_core._status_to_exception(e.code, message), None)
  # Add nodes to the TensorFlow graph.
  _, _, _op = _op_def_lib._apply_op_helper(
        "Identity", input=input, name=name)
  _result = _op.outputs[:]
  _inputs_flat = _op.inputs
  _attrs = ("T", _op.get_attr("T"))
  _execute.record_gradient(
      "Identity", _inputs_flat, _attrs, _result, name)
  _result, = _result
  return _result


def identity_eager_fallback(input, name=None, ctx=None):
  r"""This is the slowpath function for Eager mode.
  This is for function identity
  """
  _ctx = ctx if ctx else _context.context()
  _attr_T, (input,) = _execute.args_to_matching_eager([input], _ctx)
  _inputs_flat = [input]
  _attrs = ("T", _attr_T)
  _result = _execute.execute(b"Identity", 1, inputs=_inputs_flat,
                             attrs=_attrs, ctx=_ctx, name=name)
  _execute.record_gradient(
      "Identity", _inputs_flat, _attrs, _result, name)
  _result, = _result
  return _result


@_dispatch.add_dispatch_list
@tf_export('identity_n')
def identity_n(input, name=None):
  r"""Returns a list of tensors with the same shapes and contents as the input

  tensors.

  This op can be used to override the gradient for complicated functions. For
  example, suppose y = f(x) and we wish to apply a custom function g for backprop
  such that dx = g(dy). In Python,

  ```python
  with tf.get_default_graph().gradient_override_map(
      {'IdentityN': 'OverrideGradientWithG'}):
    y, _ = identity_n([f(x), x])

  @tf.RegisterGradient('OverrideGradientWithG')
  def ApplyG(op, dy, _):
    return [None, g(dy)]  # Do not backprop to f(x).
  ```

  Args:
    input: A list of `Tensor` objects.
    name: A name for the operation (optional).

  Returns:
    A list of `Tensor` objects. Has the same type as `input`.
  """
  _ctx = _context._context
  if _ctx is not None and _ctx._eager_context.is_eager:
    # Same generated fast-path/fallback template as `identity` above, but the
    # op takes and returns a *list* of tensors.
    try:
      _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
        _ctx._context_handle, _ctx._eager_context.device_name, "IdentityN",
        name, _ctx._post_execution_callbacks, input)
      return _result
    except _core._FallbackException:
      try:
        return identity_n_eager_fallback(
            input, name=name, ctx=_ctx)
      except _core._SymbolicException:
        pass  # Add nodes to the TensorFlow graph.
except (TypeError, ValueError): result = _dispatch.dispatch( identity_n, input=input, name=name) if result is not _dispatch.OpDispatcher.NOT_SUPPORTED: return result raise except _core._NotOkStatusException as e: if name is not None: message = e.message + " name: " + name else: message = e.message _six.raise_from(_core._status_to_exception(e.code, message), None) # Add nodes to the TensorFlow graph. try: _, _, _op = _op_def_lib._apply_op_helper( "IdentityN", input=input, name=name) except (TypeError, ValueError): result = _dispatch.dispatch( identity_n, input=input, name=name) if result is not _dispatch.OpDispatcher.NOT_SUPPORTED: return result raise _result = _op.outputs[:] _inputs_flat = _op.inputs _attrs = ("T", _op.get_attr("T")) _execute.record_gradient( "IdentityN", _inputs_flat, _attrs, _result, name) return _result def identity_n_eager_fallback(input, name=None, ctx=None): r"""This is the slowpath function for Eager mode. This is for function identity_n """ _ctx = ctx if ctx else _context.context() _attr_T, input = _execute.convert_to_mixed_eager_tensors(input, _ctx) _inputs_flat = list(input) _attrs = ("T", _attr_T) _result = _execute.execute(b"IdentityN", len(input), inputs=_inputs_flat, attrs=_attrs, ctx=_ctx, name=name) _execute.record_gradient( "IdentityN", _inputs_flat, _attrs, _result, name) return _result def immutable_const(dtype, shape, memory_region_name, name=None): r"""Returns immutable tensor from memory region. The current implementation memmaps the tensor from a file. Args: dtype: A `tf.DType`. Type of the returned tensor. shape: A `tf.TensorShape` or list of `ints`. Shape of the returned tensor. memory_region_name: A `string`. Name of readonly memory region used by the tensor, see NewReadOnlyMemoryRegionFromFile in tensorflow::Env. name: A name for the operation (optional). Returns: A `Tensor` of type `dtype`. 
""" _ctx = _context._context if _ctx is not None and _ctx._eager_context.is_eager: try: _result = _pywrap_tensorflow.TFE_Py_FastPathExecute( _ctx._context_handle, _ctx._eager_context.device_name, "ImmutableConst", name, _ctx._post_execution_callbacks, "dtype", dtype, "shape", shape, "memory_region_name", memory_region_name) return _result except _core._FallbackException: try: return immutable_const_eager_fallback( dtype=dtype, shape=shape, memory_region_name=memory_region_name, name=name, ctx=_ctx) except _core._SymbolicException: pass # Add nodes to the TensorFlow graph. except _core._NotOkStatusException as e: if name is not None: message = e.message + " name: " + name else: message = e.message _six.raise_from(_core._status_to_exception(e.code, message), None) # Add nodes to the TensorFlow graph. dtype = _execute.make_type(dtype, "dtype") shape = _execute.make_shape(shape, "shape") memory_region_name = _execute.make_str(memory_region_name, "memory_region_name") _, _, _op = _op_def_lib._apply_op_helper( "ImmutableConst", dtype=dtype, shape=shape, memory_region_name=memory_region_name, name=name) _result = _op.outputs[:] _inputs_flat = _op.inputs _attrs = ("dtype", _op.get_attr("dtype"), "shape", _op.get_attr("shape"), "memory_region_name", _op.get_attr("memory_region_name")) _execute.record_gradient( "ImmutableConst", _inputs_flat, _attrs, _result, name) _result, = _result return _result def immutable_const_eager_fallback(dtype, shape, memory_region_name, name=None, ctx=None): r"""This is the slowpath function for Eager mode. 
  This is for function immutable_const
  """
  _ctx = ctx if ctx else _context.context()
  # Normalize attrs: dtype/shape/string validated and canonicalized before
  # being handed to the runtime. The op has no tensor inputs.
  dtype = _execute.make_type(dtype, "dtype")
  shape = _execute.make_shape(shape, "shape")
  memory_region_name = _execute.make_str(memory_region_name, "memory_region_name")
  _inputs_flat = []
  _attrs = ("dtype", dtype, "shape", shape, "memory_region_name",
  memory_region_name)
  _result = _execute.execute(b"ImmutableConst", 1, inputs=_inputs_flat,
                             attrs=_attrs, ctx=_ctx, name=name)
  _execute.record_gradient(
      "ImmutableConst", _inputs_flat, _attrs, _result, name)
  _result, = _result
  return _result


# NOTE(review): machine-generated TensorFlow op-wrapper code (op-def compiler
# output). Do not hand-edit the logic; regenerate instead.
def inplace_add(x, i, v, name=None):
  r"""    Adds v into specified rows of x.

    Computes y = x; y[i, :] += v; return y.

  Args:
    x: A `Tensor`. A `Tensor` of type T.
    i: A `Tensor` of type `int32`.
      A vector. Indices into the left-most dimension of `x`.
    v: A `Tensor`. Must have the same type as `x`.
      A `Tensor` of type T. Same dimension sizes as x except the first dimension, which must be the same as i's size.
    name: A name for the operation (optional).

  Returns:
    A `Tensor`. Has the same type as `x`.
  """
  _ctx = _context._context
  if _ctx is not None and _ctx._eager_context.is_eager:
    # Eager fast path: call the C layer directly; on _FallbackException retry
    # via the Python slow path; _SymbolicException falls through to graph mode.
    try:
      _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
        _ctx._context_handle, _ctx._eager_context.device_name, "InplaceAdd",
        name, _ctx._post_execution_callbacks, x, i, v)
      return _result
    except _core._FallbackException:
      try:
        return inplace_add_eager_fallback(
            x, i, v, name=name, ctx=_ctx)
      except _core._SymbolicException:
        pass  # Add nodes to the TensorFlow graph.
    except _core._NotOkStatusException as e:
      # Convert the C-level status into a Python exception, tagging the name.
      if name is not None:
        message = e.message + " name: " + name
      else:
        message = e.message
      _six.raise_from(_core._status_to_exception(e.code, message), None)
  # Add nodes to the TensorFlow graph.
  _, _, _op = _op_def_lib._apply_op_helper(
        "InplaceAdd", x=x, i=i, v=v, name=name)
  _result = _op.outputs[:]
  _inputs_flat = _op.inputs
  _attrs = ("T", _op.get_attr("T"))
  _execute.record_gradient(
      "InplaceAdd", _inputs_flat, _attrs, _result, name)
  _result, = _result
  return _result


def inplace_add_eager_fallback(x, i, v, name=None, ctx=None):
  r"""This is the slowpath function for Eager mode.
  This is for function inplace_add
  """
  _ctx = ctx if ctx else _context.context()
  # x and v must share dtype T; i is always int32 per the op definition.
  _attr_T, _inputs_T = _execute.args_to_matching_eager([x, v], _ctx)
  (x, v) = _inputs_T
  i = _ops.convert_to_tensor(i, _dtypes.int32)
  _inputs_flat = [x, i, v]
  _attrs = ("T", _attr_T)
  _result = _execute.execute(b"InplaceAdd", 1, inputs=_inputs_flat,
                             attrs=_attrs, ctx=_ctx, name=name)
  _execute.record_gradient(
      "InplaceAdd", _inputs_flat, _attrs, _result, name)
  _result, = _result
  return _result


def inplace_sub(x, i, v, name=None):
  r"""    Subtracts `v` into specified rows of `x`.

    Computes y = x; y[i, :] -= v; return y.

  Args:
    x: A `Tensor`. A `Tensor` of type T.
    i: A `Tensor` of type `int32`.
      A vector. Indices into the left-most dimension of `x`.
    v: A `Tensor`. Must have the same type as `x`.
      A `Tensor` of type T. Same dimension sizes as x except the first dimension, which must be the same as i's size.
    name: A name for the operation (optional).

  Returns:
    A `Tensor`. Has the same type as `x`.
  """
  _ctx = _context._context
  if _ctx is not None and _ctx._eager_context.is_eager:
    # Same generated fast-path/fallback template as `inplace_add` above.
    try:
      _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
        _ctx._context_handle, _ctx._eager_context.device_name, "InplaceSub",
        name, _ctx._post_execution_callbacks, x, i, v)
      return _result
    except _core._FallbackException:
      try:
        return inplace_sub_eager_fallback(
            x, i, v, name=name, ctx=_ctx)
      except _core._SymbolicException:
        pass  # Add nodes to the TensorFlow graph.
except _core._NotOkStatusException as e: if name is not None: message = e.message + " name: " + name else: message = e.message _six.raise_from(_core._status_to_exception(e.code, message), None) # Add nodes to the TensorFlow graph. _, _, _op = _op_def_lib._apply_op_helper( "InplaceSub", x=x, i=i, v=v, name=name) _result = _op.outputs[:] _inputs_flat = _op.inputs _attrs = ("T", _op.get_attr("T")) _execute.record_gradient( "InplaceSub", _inputs_flat, _attrs, _result, name) _result, = _result return _result def inplace_sub_eager_fallback(x, i, v, name=None, ctx=None): r"""This is the slowpath function for Eager mode. This is for function inplace_sub """ _ctx = ctx if ctx else _context.context() _attr_T, _inputs_T = _execute.args_to_matching_eager([x, v], _ctx) (x, v) = _inputs_T i = _ops.convert_to_tensor(i, _dtypes.int32) _inputs_flat = [x, i, v] _attrs = ("T", _attr_T) _result = _execute.execute(b"InplaceSub", 1, inputs=_inputs_flat, attrs=_attrs, ctx=_ctx, name=name) _execute.record_gradient( "InplaceSub", _inputs_flat, _attrs, _result, name) _result, = _result return _result def inplace_update(x, i, v, name=None): r""" Updates specified rows with values in `v`. Computes `x[i, :] = v; return x`. Args: x: A `Tensor`. A tensor of type `T`. i: A `Tensor` of type `int32`. A vector. Indices into the left-most dimension of `x`. v: A `Tensor`. Must have the same type as `x`. A `Tensor` of type T. Same dimension sizes as x except the first dimension, which must be the same as i's size. name: A name for the operation (optional). Returns: A `Tensor`. Has the same type as `x`. 
""" _ctx = _context._context if _ctx is not None and _ctx._eager_context.is_eager: try: _result = _pywrap_tensorflow.TFE_Py_FastPathExecute( _ctx._context_handle, _ctx._eager_context.device_name, "InplaceUpdate", name, _ctx._post_execution_callbacks, x, i, v) return _result except _core._FallbackException: try: return inplace_update_eager_fallback( x, i, v, name=name, ctx=_ctx) except _core._SymbolicException: pass # Add nodes to the TensorFlow graph. except _core._NotOkStatusException as e: if name is not None: message = e.message + " name: " + name else: message = e.message _six.raise_from(_core._status_to_exception(e.code, message), None) # Add nodes to the TensorFlow graph. _, _, _op = _op_def_lib._apply_op_helper( "InplaceUpdate", x=x, i=i, v=v, name=name) _result = _op.outputs[:] _inputs_flat = _op.inputs _attrs = ("T", _op.get_attr("T")) _execute.record_gradient( "InplaceUpdate", _inputs_flat, _attrs, _result, name) _result, = _result return _result def inplace_update_eager_fallback(x, i, v, name=None, ctx=None): r"""This is the slowpath function for Eager mode. This is for function inplace_update """ _ctx = ctx if ctx else _context.context() _attr_T, _inputs_T = _execute.args_to_matching_eager([x, v], _ctx) (x, v) = _inputs_T i = _ops.convert_to_tensor(i, _dtypes.int32) _inputs_flat = [x, i, v] _attrs = ("T", _attr_T) _result = _execute.execute(b"InplaceUpdate", 1, inputs=_inputs_flat, attrs=_attrs, ctx=_ctx, name=name) _execute.record_gradient( "InplaceUpdate", _inputs_flat, _attrs, _result, name) _result, = _result return _result @_dispatch.add_dispatch_list @tf_export('math.invert_permutation', v1=['math.invert_permutation', 'invert_permutation']) @deprecated_endpoints('invert_permutation') def invert_permutation(x, name=None): r"""Computes the inverse permutation of a tensor. This operation computes the inverse of an index permutation. 
  It takes a 1-D integer tensor `x`, which represents the indices of a
  zero-based array, and swaps each value with its index position. In other
  words, for an output tensor `y` and an input tensor `x`, this operation
  computes the following:

  `y[x[i]] = i for i in [0, 1, ..., len(x) - 1]`

  The values must include 0. There can be no duplicate values or negative
  values.

  For example:

  ```
  # tensor `x` is [3, 4, 0, 2, 1]
  invert_permutation(x) ==> [2, 4, 3, 0, 1]
  ```

  Args:
    x: A `Tensor`. Must be one of the following types: `int32`, `int64`. 1-D.
    name: A name for the operation (optional).

  Returns:
    A `Tensor`. Has the same type as `x`.
  """
  # NOTE(review): this file appears to be a machine-generated op-wrapper
  # module (gen_array_ops-style); changes should normally be made to the op
  # registration and the file regenerated, not hand-edited -- confirm.
  #
  # Eager fast path: try the C++ fast-path executor first; on
  # _FallbackException retry via the Python eager slow path, and on symbolic
  # inputs fall through to graph construction below.
  _ctx = _context._context
  if _ctx is not None and _ctx._eager_context.is_eager:
    try:
      _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
        _ctx._context_handle, _ctx._eager_context.device_name,
        "InvertPermutation", name, _ctx._post_execution_callbacks, x)
      return _result
    except _core._FallbackException:
      try:
        return invert_permutation_eager_fallback(
            x, name=name, ctx=_ctx)
      except _core._SymbolicException:
        pass  # Add nodes to the TensorFlow graph.
      except (TypeError, ValueError):
        # Unconvertible inputs: give registered dispatchers a chance before
        # re-raising the original error.
        result = _dispatch.dispatch(
              invert_permutation, x=x, name=name)
        if result is not _dispatch.OpDispatcher.NOT_SUPPORTED:
          return result
        raise
    except _core._NotOkStatusException as e:
      if name is not None:
        message = e.message + " name: " + name
      else:
        message = e.message
      _six.raise_from(_core._status_to_exception(e.code, message), None)
  # Add nodes to the TensorFlow graph.
  # Graph mode: build an InvertPermutation node, again trying dispatch if the
  # op helper rejects the inputs.
  try:
    _, _, _op = _op_def_lib._apply_op_helper(
        "InvertPermutation", x=x, name=name)
  except (TypeError, ValueError):
    result = _dispatch.dispatch(
          invert_permutation, x=x, name=name)
    if result is not _dispatch.OpDispatcher.NOT_SUPPORTED:
      return result
    raise
  _result = _op.outputs[:]
  _inputs_flat = _op.inputs
  _attrs = ("T", _op.get_attr("T"))
  _execute.record_gradient(
      "InvertPermutation", _inputs_flat, _attrs, _result, name)
  _result, = _result
  return _result


# Eager slow path for invert_permutation: coerce `x` to a matching dtype
# (default int32) and execute the op directly.
def invert_permutation_eager_fallback(x, name=None, ctx=None):
  r"""This is the slowpath function for Eager mode.
  This is for function invert_permutation
  """
  _ctx = ctx if ctx else _context.context()
  _attr_T, (x,) = _execute.args_to_matching_eager([x], _ctx, _dtypes.int32)
  _inputs_flat = [x]
  _attrs = ("T", _attr_T)
  _result = _execute.execute(b"InvertPermutation", 1, inputs=_inputs_flat,
                             attrs=_attrs, ctx=_ctx, name=name)
  _execute.record_gradient(
      "InvertPermutation", _inputs_flat, _attrs, _result, name)
  _result, = _result
  return _result


# ListDiff produces two tensors; expose them as a named tuple (out, idx).
_list_diff_outputs = ["out", "idx"]
_ListDiffOutput = _collections.namedtuple(
    "ListDiff", _list_diff_outputs)


def list_diff(x, y, out_idx=_dtypes.int32, name=None):
  r"""Computes the difference between two lists of numbers or strings.

  Given a list `x` and a list `y`, this operation returns a list `out` that
  represents all values that are in `x` but not in `y`. The returned list `out`
  is sorted in the same order that the numbers appear in `x` (duplicates are
  preserved). This operation also returns a list `idx` that represents the
  position of each `out` element in `x`. In other words:

  `out[i] = x[idx[i]] for i in [0, 1, ..., len(out) - 1]`

  For example, given this input:

  ```
  x = [1, 2, 3, 4, 5, 6]
  y = [1, 3, 5]
  ```

  This operation would return:

  ```
  out ==> [2, 4, 6]
  idx ==> [1, 3, 5]
  ```

  Args:
    x: A `Tensor`. 1-D. Values to keep.
    y: A `Tensor`. Must have the same type as `x`. 1-D. Values to remove.
    out_idx: An optional `tf.DType` from: `tf.int32, tf.int64`. Defaults to `tf.int32`.
    name: A name for the operation (optional).

  Returns:
    A tuple of `Tensor` objects (out, idx).

    out: A `Tensor`. Has the same type as `x`.
    idx: A `Tensor` of type `out_idx`.
  """
  # Eager fast path with eager-slow-path fallback; symbolic inputs fall
  # through to graph construction.  (No dispatch branch here: list_diff has
  # no @_dispatch.add_dispatch_list decorator.)
  _ctx = _context._context
  if _ctx is not None and _ctx._eager_context.is_eager:
    try:
      _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
        _ctx._context_handle, _ctx._eager_context.device_name, "ListDiff",
        name, _ctx._post_execution_callbacks, x, y, "out_idx", out_idx)
      _result = _ListDiffOutput._make(_result)
      return _result
    except _core._FallbackException:
      try:
        return list_diff_eager_fallback(
            x, y, out_idx=out_idx, name=name, ctx=_ctx)
      except _core._SymbolicException:
        pass  # Add nodes to the TensorFlow graph.
    except _core._NotOkStatusException as e:
      if name is not None:
        message = e.message + " name: " + name
      else:
        message = e.message
      _six.raise_from(_core._status_to_exception(e.code, message), None)
  # Add nodes to the TensorFlow graph.
  # Graph mode: normalize the out_idx attr and create the ListDiff node.
  if out_idx is None:
    out_idx = _dtypes.int32
  out_idx = _execute.make_type(out_idx, "out_idx")
  _, _, _op = _op_def_lib._apply_op_helper(
        "ListDiff", x=x, y=y, out_idx=out_idx, name=name)
  _result = _op.outputs[:]
  _inputs_flat = _op.inputs
  _attrs = ("T", _op.get_attr("T"), "out_idx", _op.get_attr("out_idx"))
  _execute.record_gradient(
      "ListDiff", _inputs_flat, _attrs, _result, name)
  _result = _ListDiffOutput._make(_result)
  return _result


# Eager slow path for list_diff: dtype-match x and y, run the op, and wrap
# the two outputs in the (out, idx) namedtuple.
def list_diff_eager_fallback(x, y, out_idx=_dtypes.int32, name=None, ctx=None):
  r"""This is the slowpath function for Eager mode.
  This is for function list_diff
  """
  _ctx = ctx if ctx else _context.context()
  if out_idx is None:
    out_idx = _dtypes.int32
  out_idx = _execute.make_type(out_idx, "out_idx")
  _attr_T, _inputs_T = _execute.args_to_matching_eager([x, y], _ctx)
  (x, y) = _inputs_T
  _inputs_flat = [x, y]
  _attrs = ("T", _attr_T, "out_idx", out_idx)
  _result = _execute.execute(b"ListDiff", 2, inputs=_inputs_flat,
                             attrs=_attrs, ctx=_ctx, name=name)
  _execute.record_gradient(
      "ListDiff", _inputs_flat, _attrs, _result, name)
  _result = _ListDiffOutput._make(_result)
  return _result


def lower_bound(sorted_inputs, values, out_type=_dtypes.int32, name=None):
  r"""Applies lower_bound(sorted_search_values, values) along each row.

  Each set of rows with the same index in (sorted_inputs, values) is treated
  independently.  The resulting row is the equivalent of calling
  `np.searchsorted(sorted_inputs, values, side='left')`.

  The result is not a global index to the entire `Tensor`, but rather just the
  index in the last dimension.

  A 2-D example:
    sorted_sequence = [[0, 3, 9, 9, 10],
                       [1, 2, 3, 4, 5]]
    values = [[2, 4, 9], [0, 2, 6]]

    result = LowerBound(sorted_sequence, values)

    result == [[1, 2, 2],
               [0, 1, 5]]

  Args:
    sorted_inputs: A `Tensor`. 2-D Tensor where each row is ordered.
    values: A `Tensor`. Must have the same type as `sorted_inputs`.
      2-D Tensor with the same numbers of rows as `sorted_search_values`. Contains
      the values that will be searched for in `sorted_search_values`.
    out_type: An optional `tf.DType` from: `tf.int32, tf.int64`. Defaults to `tf.int32`.
    name: A name for the operation (optional).

  Returns:
    A `Tensor` of type `out_type`.
  """
  # Eager fast path; falls back to the Python slow path, then to graph mode.
  _ctx = _context._context
  if _ctx is not None and _ctx._eager_context.is_eager:
    try:
      _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
        _ctx._context_handle, _ctx._eager_context.device_name, "LowerBound",
        name, _ctx._post_execution_callbacks, sorted_inputs, values,
        "out_type", out_type)
      return _result
    except _core._FallbackException:
      try:
        return lower_bound_eager_fallback(
            sorted_inputs, values, out_type=out_type, name=name, ctx=_ctx)
      except _core._SymbolicException:
        pass  # Add nodes to the TensorFlow graph.
    except _core._NotOkStatusException as e:
      if name is not None:
        message = e.message + " name: " + name
      else:
        message = e.message
      _six.raise_from(_core._status_to_exception(e.code, message), None)
  # Add nodes to the TensorFlow graph.
  # Graph mode: normalize out_type and create the LowerBound node.
  if out_type is None:
    out_type = _dtypes.int32
  out_type = _execute.make_type(out_type, "out_type")
  _, _, _op = _op_def_lib._apply_op_helper(
        "LowerBound", sorted_inputs=sorted_inputs, values=values,
                      out_type=out_type, name=name)
  _result = _op.outputs[:]
  _inputs_flat = _op.inputs
  _attrs = ("T", _op.get_attr("T"), "out_type", _op.get_attr("out_type"))
  _execute.record_gradient(
      "LowerBound", _inputs_flat, _attrs, _result, name)
  _result, = _result
  return _result


# Eager slow path for lower_bound: dtype-match the two inputs and execute.
def lower_bound_eager_fallback(sorted_inputs, values, out_type=_dtypes.int32, name=None, ctx=None):
  r"""This is the slowpath function for Eager mode.
  This is for function lower_bound
  """
  _ctx = ctx if ctx else _context.context()
  if out_type is None:
    out_type = _dtypes.int32
  out_type = _execute.make_type(out_type, "out_type")
  # Coerce sorted_inputs and values to a single matching dtype before
  # executing the op eagerly.
  _attr_T, _inputs_T = _execute.args_to_matching_eager([sorted_inputs, values], _ctx)
  (sorted_inputs, values) = _inputs_T
  _inputs_flat = [sorted_inputs, values]
  _attrs = ("T", _attr_T, "out_type", out_type)
  _result = _execute.execute(b"LowerBound", 1, inputs=_inputs_flat,
                             attrs=_attrs, ctx=_ctx, name=name)
  _execute.record_gradient(
      "LowerBound", _inputs_flat, _attrs, _result, name)
  _result, = _result
  return _result


@_dispatch.add_dispatch_list
@tf_export('linalg.band_part', v1=['linalg.band_part', 'matrix_band_part'])
@deprecated_endpoints('matrix_band_part')
def matrix_band_part(input, num_lower, num_upper, name=None):
  r"""Copy a tensor setting everything outside a central band in each innermost matrix to zero.

  The `band` part is computed as follows:
  Assume `input` has `k` dimensions `[I, J, K, ..., M, N]`, then the output is a
  tensor with the same shape where

  `band[i, j, k, ..., m, n] = in_band(m, n) * input[i, j, k, ..., m, n]`.

  The indicator function

  `in_band(m, n) = (num_lower < 0 || (m-n) <= num_lower)) &&
                   (num_upper < 0 || (n-m) <= num_upper)`.

  For example:

  ```
  # if 'input' is [[ 0,  1,  2, 3]
                   [-1,  0,  1, 2]
                   [-2, -1,  0, 1]
                   [-3, -2, -1, 0]],

  tf.matrix_band_part(input, 1, -1) ==> [[ 0,  1,  2, 3]
                                         [-1,  0,  1, 2]
                                         [ 0, -1,  0, 1]
                                         [ 0,  0, -1, 0]],

  tf.matrix_band_part(input, 2, 1) ==> [[ 0,  1,  0, 0]
                                        [-1,  0,  1, 0]
                                        [-2, -1,  0, 1]
                                        [ 0, -2, -1, 0]]
  ```

  Useful special cases:

  ```
   tf.matrix_band_part(input, 0, -1) ==> Upper triangular part.
   tf.matrix_band_part(input, -1, 0) ==> Lower triangular part.
   tf.matrix_band_part(input, 0, 0) ==> Diagonal.
  ```

  Args:
    input: A `Tensor`. Rank `k` tensor.
    num_lower: A `Tensor`. Must be one of the following types: `int32`, `int64`.
      0-D tensor. Number of subdiagonals to keep. If negative, keep entire
      lower triangle.
    num_upper: A `Tensor`. Must have the same type as `num_lower`.
      0-D tensor. Number of superdiagonals to keep. If negative, keep
      entire upper triangle.
    name: A name for the operation (optional).

  Returns:
    A `Tensor`. Has the same type as `input`.
  """
  # Eager fast path with slow-path and dispatcher fallbacks; symbolic inputs
  # fall through to graph construction.
  _ctx = _context._context
  if _ctx is not None and _ctx._eager_context.is_eager:
    try:
      _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
        _ctx._context_handle, _ctx._eager_context.device_name,
        "MatrixBandPart", name, _ctx._post_execution_callbacks, input,
        num_lower, num_upper)
      return _result
    except _core._FallbackException:
      try:
        return matrix_band_part_eager_fallback(
            input, num_lower, num_upper, name=name, ctx=_ctx)
      except _core._SymbolicException:
        pass  # Add nodes to the TensorFlow graph.
      except (TypeError, ValueError):
        result = _dispatch.dispatch(
              matrix_band_part, input=input, num_lower=num_lower,
                                num_upper=num_upper, name=name)
        if result is not _dispatch.OpDispatcher.NOT_SUPPORTED:
          return result
        raise
    except _core._NotOkStatusException as e:
      if name is not None:
        message = e.message + " name: " + name
      else:
        message = e.message
      _six.raise_from(_core._status_to_exception(e.code, message), None)
  # Add nodes to the TensorFlow graph.
  # Graph mode: build a MatrixBandPart node, trying dispatch if the op helper
  # rejects the inputs.
  try:
    _, _, _op = _op_def_lib._apply_op_helper(
        "MatrixBandPart", input=input, num_lower=num_lower,
                          num_upper=num_upper, name=name)
  except (TypeError, ValueError):
    result = _dispatch.dispatch(
          matrix_band_part, input=input, num_lower=num_lower,
                            num_upper=num_upper, name=name)
    if result is not _dispatch.OpDispatcher.NOT_SUPPORTED:
      return result
    raise
  _result = _op.outputs[:]
  _inputs_flat = _op.inputs
  _attrs = ("T", _op.get_attr("T"), "Tindex", _op.get_attr("Tindex"))
  _execute.record_gradient(
      "MatrixBandPart", _inputs_flat, _attrs, _result, name)
  _result, = _result
  return _result


# Eager slow path for matrix_band_part: num_lower/num_upper are dtype-matched
# together (default int64), independently of `input`.
def matrix_band_part_eager_fallback(input, num_lower, num_upper, name=None, ctx=None):
  r"""This is the slowpath function for Eager mode.
  This is for function matrix_band_part
  """
  _ctx = ctx if ctx else _context.context()
  _attr_T, (input,) = _execute.args_to_matching_eager([input], _ctx)
  _attr_Tindex, _inputs_Tindex = _execute.args_to_matching_eager([num_lower, num_upper], _ctx, _dtypes.int64)
  (num_lower, num_upper) = _inputs_Tindex
  _inputs_flat = [input, num_lower, num_upper]
  _attrs = ("T", _attr_T, "Tindex", _attr_Tindex)
  _result = _execute.execute(b"MatrixBandPart", 1, inputs=_inputs_flat,
                             attrs=_attrs, ctx=_ctx, name=name)
  _execute.record_gradient(
      "MatrixBandPart", _inputs_flat, _attrs, _result, name)
  _result, = _result
  return _result


@_dispatch.add_dispatch_list
@tf_export('linalg.diag', v1=['linalg.diag', 'matrix_diag'])
@deprecated_endpoints('matrix_diag')
def matrix_diag(diagonal, name=None):
  r"""Returns a batched diagonal tensor with a given batched diagonal values.

  Given a `diagonal`, this operation returns a tensor with the `diagonal` and
  everything else padded with zeros. The diagonal is computed as follows:

  Assume `diagonal` has `k` dimensions `[I, J, K, ..., N]`, then the output is a
  tensor of rank `k+1` with dimensions [I, J, K, ..., N, N]` where:

  `output[i, j, k, ..., m, n] = 1{m=n} * diagonal[i, j, k, ..., n]`.

  For example:

  ```
  # 'diagonal' is [[1, 2, 3, 4], [5, 6, 7, 8]]

  and diagonal.shape = (2, 4)

  tf.matrix_diag(diagonal) ==> [[[1, 0, 0, 0]
                                 [0, 2, 0, 0]
                                 [0, 0, 3, 0]
                                 [0, 0, 0, 4]],
                                [[5, 0, 0, 0]
                                 [0, 6, 0, 0]
                                 [0, 0, 7, 0]
                                 [0, 0, 0, 8]]]

  which has shape (2, 4, 4)
  ```

  Args:
    diagonal: A `Tensor`. Rank `k`, where `k >= 1`.
    name: A name for the operation (optional).

  Returns:
    A `Tensor`. Has the same type as `diagonal`.
  """
  # Eager fast path with slow-path and dispatcher fallbacks.
  _ctx = _context._context
  if _ctx is not None and _ctx._eager_context.is_eager:
    try:
      _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
        _ctx._context_handle, _ctx._eager_context.device_name, "MatrixDiag",
        name, _ctx._post_execution_callbacks, diagonal)
      return _result
    except _core._FallbackException:
      try:
        return matrix_diag_eager_fallback(
            diagonal, name=name, ctx=_ctx)
      except _core._SymbolicException:
        pass  # Add nodes to the TensorFlow graph.
      except (TypeError, ValueError):
        result = _dispatch.dispatch(
              matrix_diag, diagonal=diagonal, name=name)
        if result is not _dispatch.OpDispatcher.NOT_SUPPORTED:
          return result
        raise
    except _core._NotOkStatusException as e:
      if name is not None:
        message = e.message + " name: " + name
      else:
        message = e.message
      _six.raise_from(_core._status_to_exception(e.code, message), None)
  # Add nodes to the TensorFlow graph.
  # Graph mode: build a MatrixDiag node, with dispatch as a last resort.
  try:
    _, _, _op = _op_def_lib._apply_op_helper(
        "MatrixDiag", diagonal=diagonal, name=name)
  except (TypeError, ValueError):
    result = _dispatch.dispatch(
          matrix_diag, diagonal=diagonal, name=name)
    if result is not _dispatch.OpDispatcher.NOT_SUPPORTED:
      return result
    raise
  _result = _op.outputs[:]
  _inputs_flat = _op.inputs
  _attrs = ("T", _op.get_attr("T"))
  _execute.record_gradient(
      "MatrixDiag", _inputs_flat, _attrs, _result, name)
  _result, = _result
  return _result


# Eager slow path for matrix_diag.
def matrix_diag_eager_fallback(diagonal, name=None, ctx=None):
  r"""This is the slowpath function for Eager mode.
  This is for function matrix_diag
  """
  _ctx = ctx if ctx else _context.context()
  _attr_T, (diagonal,) = _execute.args_to_matching_eager([diagonal], _ctx)
  _inputs_flat = [diagonal]
  _attrs = ("T", _attr_T)
  _result = _execute.execute(b"MatrixDiag", 1, inputs=_inputs_flat,
                             attrs=_attrs, ctx=_ctx, name=name)
  _execute.record_gradient(
      "MatrixDiag", _inputs_flat, _attrs, _result, name)
  _result, = _result
  return _result


@_dispatch.add_dispatch_list
@tf_export('linalg.diag_part', v1=['linalg.diag_part', 'matrix_diag_part'])
@deprecated_endpoints('matrix_diag_part')
def matrix_diag_part(input, name=None):
  r"""Returns the batched diagonal part of a batched tensor.

  This operation returns a tensor with the `diagonal` part
  of the batched `input`. The `diagonal` part is computed as follows:

  Assume `input` has `k` dimensions `[I, J, K, ..., M, N]`, then the output is a
  tensor of rank `k - 1` with dimensions `[I, J, K, ..., min(M, N)]` where:

  `diagonal[i, j, k, ..., n] = input[i, j, k, ..., n, n]`.

  The input must be at least a matrix.

  For example:

  ```
  # 'input' is [[[1, 0, 0, 0]
                 [0, 2, 0, 0]
                 [0, 0, 3, 0]
                 [0, 0, 0, 4]],
                [[5, 0, 0, 0]
                 [0, 6, 0, 0]
                 [0, 0, 7, 0]
                 [0, 0, 0, 8]]]

  and input.shape = (2, 4, 4)

  tf.matrix_diag_part(input) ==> [[1, 2, 3, 4], [5, 6, 7, 8]]

  which has shape (2, 4)
  ```

  Args:
    input: A `Tensor`. Rank `k` tensor where `k >= 2`.
    name: A name for the operation (optional).

  Returns:
    A `Tensor`. Has the same type as `input`.
  """
  # Eager fast path with slow-path and dispatcher fallbacks; symbolic inputs
  # fall through to graph construction.
  _ctx = _context._context
  if _ctx is not None and _ctx._eager_context.is_eager:
    try:
      _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
        _ctx._context_handle, _ctx._eager_context.device_name,
        "MatrixDiagPart", name, _ctx._post_execution_callbacks, input)
      return _result
    except _core._FallbackException:
      try:
        return matrix_diag_part_eager_fallback(
            input, name=name, ctx=_ctx)
      except _core._SymbolicException:
        pass  # Add nodes to the TensorFlow graph.
      except (TypeError, ValueError):
        result = _dispatch.dispatch(
              matrix_diag_part, input=input, name=name)
        if result is not _dispatch.OpDispatcher.NOT_SUPPORTED:
          return result
        raise
    except _core._NotOkStatusException as e:
      if name is not None:
        message = e.message + " name: " + name
      else:
        message = e.message
      _six.raise_from(_core._status_to_exception(e.code, message), None)
  # Add nodes to the TensorFlow graph.
  # Graph mode: build a MatrixDiagPart node, with dispatch as a last resort.
  try:
    _, _, _op = _op_def_lib._apply_op_helper(
        "MatrixDiagPart", input=input, name=name)
  except (TypeError, ValueError):
    result = _dispatch.dispatch(
          matrix_diag_part, input=input, name=name)
    if result is not _dispatch.OpDispatcher.NOT_SUPPORTED:
      return result
    raise
  _result = _op.outputs[:]
  _inputs_flat = _op.inputs
  _attrs = ("T", _op.get_attr("T"))
  _execute.record_gradient(
      "MatrixDiagPart", _inputs_flat, _attrs, _result, name)
  _result, = _result
  return _result


# Eager slow path for matrix_diag_part.
def matrix_diag_part_eager_fallback(input, name=None, ctx=None):
  r"""This is the slowpath function for Eager mode.
  This is for function matrix_diag_part
  """
  _ctx = ctx if ctx else _context.context()
  _attr_T, (input,) = _execute.args_to_matching_eager([input], _ctx)
  _inputs_flat = [input]
  _attrs = ("T", _attr_T)
  _result = _execute.execute(b"MatrixDiagPart", 1, inputs=_inputs_flat,
                             attrs=_attrs, ctx=_ctx, name=name)
  _execute.record_gradient(
      "MatrixDiagPart", _inputs_flat, _attrs, _result, name)
  _result, = _result
  return _result


@_dispatch.add_dispatch_list
@tf_export('linalg.set_diag', v1=['linalg.set_diag', 'matrix_set_diag'])
@deprecated_endpoints('matrix_set_diag')
def matrix_set_diag(input, diagonal, name=None):
  r"""Returns a batched matrix tensor with new batched diagonal values.

  Given `input` and `diagonal`, this operation returns a tensor with the
  same shape and values as `input`, except for the main diagonal of the
  innermost matrices.  These will be overwritten by the values in `diagonal`.

  The output is computed as follows:

  Assume `input` has `k+1` dimensions `[I, J, K, ..., M, N]` and `diagonal` has
  `k` dimensions `[I, J, K, ..., min(M, N)]`.  Then the output is a
  tensor of rank `k+1` with dimensions `[I, J, K, ..., M, N]` where:

    * `output[i, j, k, ..., m, n] = diagonal[i, j, k, ..., n]` for `m == n`.
    * `output[i, j, k, ..., m, n] = input[i, j, k, ..., m, n]` for `m != n`.

  Args:
    input: A `Tensor`. Rank `k+1`, where `k >= 1`.
    diagonal: A `Tensor`. Must have the same type as `input`.
      Rank `k`, where `k >= 1`.
    name: A name for the operation (optional).

  Returns:
    A `Tensor`. Has the same type as `input`.
  """
  # Eager fast path with slow-path and dispatcher fallbacks.
  _ctx = _context._context
  if _ctx is not None and _ctx._eager_context.is_eager:
    try:
      _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
        _ctx._context_handle, _ctx._eager_context.device_name,
        "MatrixSetDiag", name, _ctx._post_execution_callbacks, input,
        diagonal)
      return _result
    except _core._FallbackException:
      try:
        return matrix_set_diag_eager_fallback(
            input, diagonal, name=name, ctx=_ctx)
      except _core._SymbolicException:
        pass  # Add nodes to the TensorFlow graph.
      except (TypeError, ValueError):
        result = _dispatch.dispatch(
              matrix_set_diag, input=input, diagonal=diagonal, name=name)
        if result is not _dispatch.OpDispatcher.NOT_SUPPORTED:
          return result
        raise
    except _core._NotOkStatusException as e:
      if name is not None:
        message = e.message + " name: " + name
      else:
        message = e.message
      _six.raise_from(_core._status_to_exception(e.code, message), None)
  # Add nodes to the TensorFlow graph.
  # Graph mode: build a MatrixSetDiag node, with dispatch as a last resort.
  try:
    _, _, _op = _op_def_lib._apply_op_helper(
        "MatrixSetDiag", input=input, diagonal=diagonal, name=name)
  except (TypeError, ValueError):
    result = _dispatch.dispatch(
          matrix_set_diag, input=input, diagonal=diagonal, name=name)
    if result is not _dispatch.OpDispatcher.NOT_SUPPORTED:
      return result
    raise
  _result = _op.outputs[:]
  _inputs_flat = _op.inputs
  _attrs = ("T", _op.get_attr("T"))
  _execute.record_gradient(
      "MatrixSetDiag", _inputs_flat, _attrs, _result, name)
  _result, = _result
  return _result


# Eager slow path for matrix_set_diag: input and diagonal are coerced to a
# single matching dtype.
def matrix_set_diag_eager_fallback(input, diagonal, name=None, ctx=None):
  r"""This is the slowpath function for Eager mode.
  This is for function matrix_set_diag
  """
  _ctx = ctx if ctx else _context.context()
  _attr_T, _inputs_T = _execute.args_to_matching_eager([input, diagonal], _ctx)
  (input, diagonal) = _inputs_T
  _inputs_flat = [input, diagonal]
  _attrs = ("T", _attr_T)
  _result = _execute.execute(b"MatrixSetDiag", 1, inputs=_inputs_flat,
                             attrs=_attrs, ctx=_ctx, name=name)
  _execute.record_gradient(
      "MatrixSetDiag", _inputs_flat, _attrs, _result, name)
  _result, = _result
  return _result


def mirror_pad(input, paddings, mode, name=None):
  r"""Pads a tensor with mirrored values.

  This operation pads a `input` with mirrored values according to the `paddings`
  you specify. `paddings` is an integer tensor with shape `[n, 2]`, where n is
  the rank of `input`. For each dimension D of `input`, `paddings[D, 0]` indicates
  how many values to add before the contents of `input` in that dimension, and
  `paddings[D, 1]` indicates how many values to add after the contents of `input`
  in that dimension. Both `paddings[D, 0]` and `paddings[D, 1]` must be no greater
  than `input.dim_size(D)` (or `input.dim_size(D) - 1`) if `copy_border` is true
  (if false, respectively).

  The padded size of each dimension D of the output is:

  `paddings(D, 0) + input.dim_size(D) + paddings(D, 1)`

  For example:

  ```
  # 't' is [[1, 2, 3], [4, 5, 6]].
  # 'paddings' is [[1, 1]], [2, 2]].
  # 'mode' is SYMMETRIC.
  # rank of 't' is 2.
  pad(t, paddings) ==> [[2, 1, 1, 2, 3, 3, 2]
                        [2, 1, 1, 2, 3, 3, 2]
                        [5, 4, 4, 5, 6, 6, 5]
                        [5, 4, 4, 5, 6, 6, 5]]
  ```

  Args:
    input: A `Tensor`. The input tensor to be padded.
    paddings: A `Tensor`. Must be one of the following types: `int32`, `int64`.
      A two-column matrix specifying the padding sizes. The number of
      rows must be the same as the rank of `input`.
    mode: A `string` from: `"REFLECT", "SYMMETRIC"`.
      Either `REFLECT` or `SYMMETRIC`. In reflect mode the padded regions
      do not include the borders, while in symmetric mode the padded regions
      do include the borders. For example, if `input` is `[1, 2, 3]` and `paddings`
      is `[0, 2]`, then the output is `[1, 2, 3, 2, 1]` in reflect mode, and
      it is `[1, 2, 3, 3, 2]` in symmetric mode.
    name: A name for the operation (optional).

  Returns:
    A `Tensor`. Has the same type as `input`.
  """
  # Eager fast path; falls back to the Python slow path, then to graph mode.
  # (No dispatch branch: mirror_pad is not decorated for dispatch.)
  _ctx = _context._context
  if _ctx is not None and _ctx._eager_context.is_eager:
    try:
      _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
        _ctx._context_handle, _ctx._eager_context.device_name, "MirrorPad",
        name, _ctx._post_execution_callbacks, input, paddings, "mode", mode)
      return _result
    except _core._FallbackException:
      try:
        return mirror_pad_eager_fallback(
            input, paddings, mode=mode, name=name, ctx=_ctx)
      except _core._SymbolicException:
        pass  # Add nodes to the TensorFlow graph.
    except _core._NotOkStatusException as e:
      if name is not None:
        message = e.message + " name: " + name
      else:
        message = e.message
      _six.raise_from(_core._status_to_exception(e.code, message), None)
  # Add nodes to the TensorFlow graph.
  # Graph mode: validate the mode string attr and create the MirrorPad node.
  mode = _execute.make_str(mode, "mode")
  _, _, _op = _op_def_lib._apply_op_helper(
        "MirrorPad", input=input, paddings=paddings, mode=mode, name=name)
  _result = _op.outputs[:]
  _inputs_flat = _op.inputs
  _attrs = ("T", _op.get_attr("T"), "Tpaddings", _op.get_attr("Tpaddings"),
            "mode", _op.get_attr("mode"))
  _execute.record_gradient(
      "MirrorPad", _inputs_flat, _attrs, _result, name)
  _result, = _result
  return _result


# Eager slow path for mirror_pad: paddings default to int32.
def mirror_pad_eager_fallback(input, paddings, mode, name=None, ctx=None):
  r"""This is the slowpath function for Eager mode.
  This is for function mirror_pad
  """
  _ctx = ctx if ctx else _context.context()
  mode = _execute.make_str(mode, "mode")
  _attr_T, (input,) = _execute.args_to_matching_eager([input], _ctx)
  _attr_Tpaddings, (paddings,) = _execute.args_to_matching_eager([paddings], _ctx, _dtypes.int32)
  _inputs_flat = [input, paddings]
  _attrs = ("T", _attr_T, "Tpaddings", _attr_Tpaddings, "mode", mode)
  _result = _execute.execute(b"MirrorPad", 1, inputs=_inputs_flat,
                             attrs=_attrs, ctx=_ctx, name=name)
  _execute.record_gradient(
      "MirrorPad", _inputs_flat, _attrs, _result, name)
  _result, = _result
  return _result


def mirror_pad_grad(input, paddings, mode, name=None):
  r"""Gradient op for `MirrorPad` op. This op folds a mirror-padded tensor.

  This operation folds the padded areas of `input` by `MirrorPad` according to the
  `paddings` you specify. `paddings` must be the same as `paddings` argument
  given to the corresponding `MirrorPad` op.

  The folded size of each dimension D of the output is:

  `input.dim_size(D) - paddings(D, 0) - paddings(D, 1)`

  For example:

  ```
  # 't' is [[1, 2, 3], [4, 5, 6], [7, 8, 9]].
  # 'paddings' is [[0, 1]], [0, 1]].
  # 'mode' is SYMMETRIC.
  # rank of 't' is 2.
  pad(t, paddings) ==> [[ 1,  5]
                        [11, 28]]
  ```

  Args:
    input: A `Tensor`. The input tensor to be folded.
    paddings: A `Tensor`. Must be one of the following types: `int32`, `int64`.
      A two-column matrix specifying the padding sizes. The number of
      rows must be the same as the rank of `input`.
    mode: A `string` from: `"REFLECT", "SYMMETRIC"`.
      The mode used in the `MirrorPad` op.
    name: A name for the operation (optional).

  Returns:
    A `Tensor`. Has the same type as `input`.
  """
  # Eager fast path; falls back to the Python slow path, then to graph mode.
  _ctx = _context._context
  if _ctx is not None and _ctx._eager_context.is_eager:
    try:
      _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
        _ctx._context_handle, _ctx._eager_context.device_name,
        "MirrorPadGrad", name, _ctx._post_execution_callbacks, input,
        paddings, "mode", mode)
      return _result
    except _core._FallbackException:
      try:
        return mirror_pad_grad_eager_fallback(
            input, paddings, mode=mode, name=name, ctx=_ctx)
      except _core._SymbolicException:
        pass  # Add nodes to the TensorFlow graph.
    except _core._NotOkStatusException as e:
      if name is not None:
        message = e.message + " name: " + name
      else:
        message = e.message
      _six.raise_from(_core._status_to_exception(e.code, message), None)
  # Add nodes to the TensorFlow graph.
  # Graph mode: validate the mode string attr and create the MirrorPadGrad
  # node.
  mode = _execute.make_str(mode, "mode")
  _, _, _op = _op_def_lib._apply_op_helper(
        "MirrorPadGrad", input=input, paddings=paddings, mode=mode, name=name)
  _result = _op.outputs[:]
  _inputs_flat = _op.inputs
  _attrs = ("T", _op.get_attr("T"), "Tpaddings", _op.get_attr("Tpaddings"),
            "mode", _op.get_attr("mode"))
  _execute.record_gradient(
      "MirrorPadGrad", _inputs_flat, _attrs, _result, name)
  _result, = _result
  return _result


# Eager slow path for mirror_pad_grad.
def mirror_pad_grad_eager_fallback(input, paddings, mode, name=None, ctx=None):
  r"""This is the slowpath function for Eager mode.
  This is for function mirror_pad_grad
  """
  _ctx = ctx if ctx else _context.context()
  mode = _execute.make_str(mode, "mode")
  _attr_T, (input,) = _execute.args_to_matching_eager([input], _ctx)
  _attr_Tpaddings, (paddings,) = _execute.args_to_matching_eager([paddings], _ctx, _dtypes.int32)
  _inputs_flat = [input, paddings]
  _attrs = ("T", _attr_T, "Tpaddings", _attr_Tpaddings, "mode", mode)
  _result = _execute.execute(b"MirrorPadGrad", 1, inputs=_inputs_flat,
                             attrs=_attrs, ctx=_ctx, name=name)
  _execute.record_gradient(
      "MirrorPadGrad", _inputs_flat, _attrs, _result, name)
  _result, = _result
  return _result


def one_hot(indices, depth, on_value, off_value, axis=-1, name=None):
  r"""Returns a one-hot tensor.

  The locations represented by indices in `indices` take value `on_value`,
  while all other locations take value `off_value`.

  If the input `indices` is rank `N`, the output will have rank `N+1`,
  The new axis is created at dimension `axis` (default: the new axis is
  appended at the end).

  If `indices` is a scalar the output shape will be a vector of length `depth`.

  If `indices` is a vector of length `features`, the output shape will be:

  ```
    features x depth if axis == -1
    depth x features if axis == 0
  ```

  If `indices` is a matrix (batch) with shape `[batch, features]`,
  the output shape will be:

  ```
    batch x features x depth if axis == -1
    batch x depth x features if axis == 1
    depth x batch x features if axis == 0
  ```


  Examples
  =========

  Suppose that

  ```
    indices = [0, 2, -1, 1]
    depth = 3
    on_value = 5.0
    off_value = 0.0
    axis = -1
  ```

  Then output is `[4 x 3]`:

  ```
  output =
    [5.0 0.0 0.0]  // one_hot(0)
    [0.0 0.0 5.0]  // one_hot(2)
    [0.0 0.0 0.0]  // one_hot(-1)
    [0.0 5.0 0.0]  // one_hot(1)
  ```

  Suppose that

  ```
    indices = [0, 2, -1, 1]
    depth = 3
    on_value = 0.0
    off_value = 3.0
    axis = 0
  ```

  Then output is `[3 x 4]`:

  ```
  output =
    [0.0 3.0 3.0 3.0]
    [3.0 3.0 3.0 0.0]
    [3.0 3.0 3.0 3.0]
    [3.0 0.0 3.0 3.0]
  //  ^                one_hot(0)
  //      ^            one_hot(2)
  //          ^        one_hot(-1)
  //              ^    one_hot(1)
  ```

  Suppose that

  ```
    indices = [[0, 2], [1, -1]]
    depth = 3
    on_value = 1.0
    off_value = 0.0
    axis = -1
  ```

  Then output is `[2 x 2 x 3]`:

  ```
  output =
    [
      [1.0, 0.0, 0.0]  // one_hot(0)
      [0.0, 0.0, 1.0]  // one_hot(2)
    ][
      [0.0, 1.0, 0.0]  // one_hot(1)
      [0.0, 0.0, 0.0]  // one_hot(-1)
    ]
  ```

  Args:
    indices: A `Tensor`. Must be one of the following types: `uint8`, `int32`, `int64`.
      A tensor of indices.
    depth: A `Tensor` of type `int32`.
      A scalar defining the depth of the one hot dimension.
    on_value: A `Tensor`.
      A scalar defining the value to fill in output when `indices[j] = i`.
    off_value: A `Tensor`. Must have the same type as `on_value`.
      A scalar defining the value to fill in output when `indices[j] != i`.
    axis: An optional `int`. Defaults to `-1`.
      The axis to fill (default: -1, a new inner-most axis).
    name: A name for the operation (optional).

  Returns:
    A `Tensor`. Has the same type as `on_value`.
  """
  # Eager fast path; falls back to the Python slow path, then to graph mode.
  _ctx = _context._context
  if _ctx is not None and _ctx._eager_context.is_eager:
    try:
      _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
        _ctx._context_handle, _ctx._eager_context.device_name, "OneHot",
        name, _ctx._post_execution_callbacks, indices, depth, on_value,
        off_value, "axis", axis)
      return _result
    except _core._FallbackException:
      try:
        return one_hot_eager_fallback(
            indices, depth, on_value, off_value, axis=axis, name=name,
            ctx=_ctx)
      except _core._SymbolicException:
        pass  # Add nodes to the TensorFlow graph.
    except _core._NotOkStatusException as e:
      if name is not None:
        message = e.message + " name: " + name
      else:
        message = e.message
      _six.raise_from(_core._status_to_exception(e.code, message), None)
  # Add nodes to the TensorFlow graph.
  # Graph mode: normalize the axis attr and create the OneHot node.
  if axis is None:
    axis = -1
  axis = _execute.make_int(axis, "axis")
  _, _, _op = _op_def_lib._apply_op_helper(
        "OneHot", indices=indices, depth=depth, on_value=on_value,
                  off_value=off_value, axis=axis, name=name)
  _result = _op.outputs[:]
  _inputs_flat = _op.inputs
  _attrs = ("axis", _op.get_attr("axis"), "T", _op.get_attr("T"), "TI",
            _op.get_attr("TI"))
  _execute.record_gradient(
      "OneHot", _inputs_flat, _attrs, _result, name)
  _result, = _result
  return _result


# Eager slow path for one_hot: on_value/off_value share dtype T, indices
# default to int64, and depth is always converted to an int32 tensor.
def one_hot_eager_fallback(indices, depth, on_value, off_value, axis=-1, name=None, ctx=None):
  r"""This is the slowpath function for Eager mode.
  This is for function one_hot
  """
  _ctx = ctx if ctx else _context.context()
  if axis is None:
    axis = -1
  axis = _execute.make_int(axis, "axis")
  _attr_T, _inputs_T = _execute.args_to_matching_eager([on_value, off_value], _ctx)
  (on_value, off_value) = _inputs_T
  _attr_TI, (indices,) = _execute.args_to_matching_eager([indices], _ctx, _dtypes.int64)
  depth = _ops.convert_to_tensor(depth, _dtypes.int32)
  _inputs_flat = [indices, depth, on_value, off_value]
  _attrs = ("axis", axis, "T", _attr_T, "TI", _attr_TI)
  _result = _execute.execute(b"OneHot", 1, inputs=_inputs_flat, attrs=_attrs,
                             ctx=_ctx, name=name)
  _execute.record_gradient(
      "OneHot", _inputs_flat, _attrs, _result, name)
  _result, = _result
  return _result


def ones_like(x, name=None):
  r"""Returns a tensor of ones with the same shape and type as x.

  Args:
    x: A `Tensor`. Must be one of the following types: `bfloat16`, `half`, `float32`, `float64`, `int8`, `uint8`, `int16`, `uint16`, `int32`, `int64`, `complex64`, `complex128`, `bool`.
      a tensor of type T.
    name: A name for the operation (optional).

  Returns:
    A `Tensor`. Has the same type as `x`.
  """
  # Eager fast path; falls back to the Python slow path, then to graph mode.
  _ctx = _context._context
  if _ctx is not None and _ctx._eager_context.is_eager:
    try:
      _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
        _ctx._context_handle, _ctx._eager_context.device_name, "OnesLike",
        name, _ctx._post_execution_callbacks, x)
      return _result
    except _core._FallbackException:
      try:
        return ones_like_eager_fallback(
            x, name=name, ctx=_ctx)
      except _core._SymbolicException:
        pass  # Add nodes to the TensorFlow graph.
    except _core._NotOkStatusException as e:
      if name is not None:
        message = e.message + " name: " + name
      else:
        message = e.message
      _six.raise_from(_core._status_to_exception(e.code, message), None)
  # Add nodes to the TensorFlow graph.
  # NOTE(review): this module appears to be machine-generated TensorFlow op
  # wrappers (eager fast path / slow path / graph mode per op); presumably
  # regenerated from the op registry — confirm before hand-editing.
  # (Tail of ones_like: graph-mode path — build an OnesLike node.)
  _, _, _op = _op_def_lib._apply_op_helper(
        "OnesLike", x=x, name=name)
  _result = _op.outputs[:]
  _inputs_flat = _op.inputs
  _attrs = ("T", _op.get_attr("T"))
  _execute.record_gradient(
      "OnesLike", _inputs_flat, _attrs, _result, name)
  _result, = _result
  return _result


def ones_like_eager_fallback(x, name=None, ctx=None):
  r"""This is the slowpath function for Eager mode.
  This is for function ones_like
  """
  _ctx = ctx if ctx else _context.context()
  # Resolve the "T" attr from the runtime dtype of x.
  _attr_T, (x,) = _execute.args_to_matching_eager([x], _ctx)
  _inputs_flat = [x]
  _attrs = ("T", _attr_T)
  _result = _execute.execute(b"OnesLike", 1, inputs=_inputs_flat,
                             attrs=_attrs, ctx=_ctx, name=name)
  _execute.record_gradient(
      "OnesLike", _inputs_flat, _attrs, _result, name)
  _result, = _result
  return _result


def pack(values, axis=0, name=None):
  r"""Packs a list of `N` rank-`R` tensors into one rank-`(R+1)` tensor.

  Packs the `N` tensors in `values` into a tensor with rank one higher than
  each tensor in `values`, by packing them along the `axis` dimension.
  Given a list of tensors of shape `(A, B, C)`;

  if `axis == 0` then the `output` tensor will have the shape `(N, A, B, C)`.
  if `axis == 1` then the `output` tensor will have the shape `(A, N, B, C)`.
  Etc.

  For example:

  ```
  # 'x' is [1, 4]
  # 'y' is [2, 5]
  # 'z' is [3, 6]
  pack([x, y, z]) => [[1, 4], [2, 5], [3, 6]]  # Pack along first dim.
  pack([x, y, z], axis=1) => [[1, 2, 3], [4, 5, 6]]
  ```

  This is the opposite of `unpack`.

  Args:
    values: A list of at least 1 `Tensor` objects with the same type.
      Must be of same shape and type.
    axis: An optional `int`. Defaults to `0`.
      Dimension along which to pack. Negative values wrap around, so the
      valid range is `[-(R+1), R+1)`.
    name: A name for the operation (optional).

  Returns:
    A `Tensor`. Has the same type as `values`.
  """
  # Eager fast path: dispatch straight to the C layer; _FallbackException
  # routes to the Python slow path, anything else falls through to graph mode.
  _ctx = _context._context
  if _ctx is not None and _ctx._eager_context.is_eager:
    try:
      _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
        _ctx._context_handle, _ctx._eager_context.device_name, "Pack", name,
        _ctx._post_execution_callbacks, values, "axis", axis)
      return _result
    except _core._FallbackException:
      try:
        return pack_eager_fallback(
            values, axis=axis, name=name, ctx=_ctx)
      except _core._SymbolicException:
        pass  # Add nodes to the TensorFlow graph.
    except _core._NotOkStatusException as e:
      if name is not None:
        message = e.message + " name: " + name
      else:
        message = e.message
      _six.raise_from(_core._status_to_exception(e.code, message), None)
  # Add nodes to the TensorFlow graph.
  if not isinstance(values, (list, tuple)):
    raise TypeError(
        "Expected list for 'values' argument to "
        "'pack' Op, not %r." % values)
  _attr_N = len(values)
  if axis is None:
    axis = 0
  axis = _execute.make_int(axis, "axis")
  _, _, _op = _op_def_lib._apply_op_helper(
        "Pack", values=values, axis=axis, name=name)
  _result = _op.outputs[:]
  _inputs_flat = _op.inputs
  _attrs = ("N", _op.get_attr("N"), "T", _op.get_attr("T"), "axis",
            _op.get_attr("axis"))
  _execute.record_gradient(
      "Pack", _inputs_flat, _attrs, _result, name)
  _result, = _result
  return _result


def pack_eager_fallback(values, axis=0, name=None, ctx=None):
  r"""This is the slowpath function for Eager mode.
  This is for function pack
  """
  _ctx = ctx if ctx else _context.context()
  if not isinstance(values, (list, tuple)):
    raise TypeError(
        "Expected list for 'values' argument to "
        "'pack' Op, not %r." % values)
  _attr_N = len(values)
  if axis is None:
    axis = 0
  axis = _execute.make_int(axis, "axis")
  # All packed tensors must share one dtype; match them jointly.
  _attr_T, values = _execute.args_to_matching_eager(list(values), _ctx)
  _inputs_flat = list(values)
  _attrs = ("N", _attr_N, "T", _attr_T, "axis", axis)
  _result = _execute.execute(b"Pack", 1, inputs=_inputs_flat, attrs=_attrs,
                             ctx=_ctx, name=name)
  _execute.record_gradient(
      "Pack", _inputs_flat, _attrs, _result, name)
  _result, = _result
  return _result


def pad(input, paddings, name=None):
  r"""Pads a tensor with zeros.

  This operation pads a `input` with zeros according to the `paddings` you
  specify. `paddings` is an integer tensor with shape `[Dn, 2]`, where n is
  the rank of `input`. For each dimension D of `input`, `paddings[D, 0]`
  indicates how many zeros to add before the contents of `input` in that
  dimension, and `paddings[D, 1]` indicates how many zeros to add after the
  contents of `input` in that dimension.

  The padded size of each dimension D of the output is:

  `paddings(D, 0) + input.dim_size(D) + paddings(D, 1)`

  For example:

  ```
  # 't' is [[1, 1], [2, 2]]
  # 'paddings' is [[1, 1], [2, 2]]
  # rank of 't' is 2
  pad(t, paddings) ==> [[0, 0, 0, 0, 0, 0]
                        [0, 0, 1, 1, 0, 0]
                        [0, 0, 2, 2, 0, 0]
                        [0, 0, 0, 0, 0, 0]]
  ```

  Args:
    input: A `Tensor`.
    paddings: A `Tensor`. Must be one of the following types: `int32`, `int64`.
    name: A name for the operation (optional).

  Returns:
    A `Tensor`. Has the same type as `input`.
  """
  # Eager fast path first; _FallbackException routes to the Python slow path.
  _ctx = _context._context
  if _ctx is not None and _ctx._eager_context.is_eager:
    try:
      _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
        _ctx._context_handle, _ctx._eager_context.device_name, "Pad", name,
        _ctx._post_execution_callbacks, input, paddings)
      return _result
    except _core._FallbackException:
      try:
        return pad_eager_fallback(
            input, paddings, name=name, ctx=_ctx)
      except _core._SymbolicException:
        pass  # Add nodes to the TensorFlow graph.
    except _core._NotOkStatusException as e:
      # Surface the C-layer status as a Python exception, appending the op
      # name for context.
      if name is not None:
        message = e.message + " name: " + name
      else:
        message = e.message
      _six.raise_from(_core._status_to_exception(e.code, message), None)
  # Add nodes to the TensorFlow graph.
  _, _, _op = _op_def_lib._apply_op_helper(
        "Pad", input=input, paddings=paddings, name=name)
  _result = _op.outputs[:]
  _inputs_flat = _op.inputs
  _attrs = ("T", _op.get_attr("T"), "Tpaddings", _op.get_attr("Tpaddings"))
  _execute.record_gradient(
      "Pad", _inputs_flat, _attrs, _result, name)
  _result, = _result
  return _result


def pad_eager_fallback(input, paddings, name=None, ctx=None):
  r"""This is the slowpath function for Eager mode.
  This is for function pad
  """
  _ctx = ctx if ctx else _context.context()
  _attr_T, (input,) = _execute.args_to_matching_eager([input], _ctx)
  # Tpaddings defaults to int32 when paddings has no dtype of its own.
  _attr_Tpaddings, (paddings,) = _execute.args_to_matching_eager([paddings], _ctx, _dtypes.int32)
  _inputs_flat = [input, paddings]
  _attrs = ("T", _attr_T, "Tpaddings", _attr_Tpaddings)
  _result = _execute.execute(b"Pad", 1, inputs=_inputs_flat, attrs=_attrs,
                             ctx=_ctx, name=name)
  _execute.record_gradient(
      "Pad", _inputs_flat, _attrs, _result, name)
  _result, = _result
  return _result


def pad_v2(input, paddings, constant_values, name=None):
  r"""Pads a tensor.

  This operation pads `input` according to the `paddings` and
  `constant_values` you specify. `paddings` is an integer tensor with shape
  `[Dn, 2]`, where n is the rank of `input`. For each dimension D of `input`,
  `paddings[D, 0]` indicates how many padding values to add before the
  contents of `input` in that dimension, and `paddings[D, 1]` indicates how
  many padding values to add after the contents of `input` in that dimension.
  `constant_values` is a scalar tensor of the same type as `input` that
  indicates the value to use for padding `input`.

  The padded size of each dimension D of the output is:

  `paddings(D, 0) + input.dim_size(D) + paddings(D, 1)`

  For example:

  ```
  # 't' is [[1, 1], [2, 2]]
  # 'paddings' is [[1, 1], [2, 2]]
  # 'constant_values' is 0
  # rank of 't' is 2
  pad(t, paddings) ==> [[0, 0, 0, 0, 0, 0]
                        [0, 0, 1, 1, 0, 0]
                        [0, 0, 2, 2, 0, 0]
                        [0, 0, 0, 0, 0, 0]]
  ```

  Args:
    input: A `Tensor`.
    paddings: A `Tensor`. Must be one of the following types: `int32`, `int64`.
    constant_values: A `Tensor`. Must have the same type as `input`.
    name: A name for the operation (optional).

  Returns:
    A `Tensor`. Has the same type as `input`.
  """
  # Eager fast path first; _FallbackException routes to the Python slow path.
  _ctx = _context._context
  if _ctx is not None and _ctx._eager_context.is_eager:
    try:
      _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
        _ctx._context_handle, _ctx._eager_context.device_name, "PadV2", name,
        _ctx._post_execution_callbacks, input, paddings, constant_values)
      return _result
    except _core._FallbackException:
      try:
        return pad_v2_eager_fallback(
            input, paddings, constant_values, name=name, ctx=_ctx)
      except _core._SymbolicException:
        pass  # Add nodes to the TensorFlow graph.
    except _core._NotOkStatusException as e:
      if name is not None:
        message = e.message + " name: " + name
      else:
        message = e.message
      _six.raise_from(_core._status_to_exception(e.code, message), None)
  # Add nodes to the TensorFlow graph.
  _, _, _op = _op_def_lib._apply_op_helper(
        "PadV2", input=input, paddings=paddings,
        constant_values=constant_values, name=name)
  _result = _op.outputs[:]
  _inputs_flat = _op.inputs
  _attrs = ("T", _op.get_attr("T"), "Tpaddings", _op.get_attr("Tpaddings"))
  _execute.record_gradient(
      "PadV2", _inputs_flat, _attrs, _result, name)
  _result, = _result
  return _result


def pad_v2_eager_fallback(input, paddings, constant_values, name=None, ctx=None):
  r"""This is the slowpath function for Eager mode.
  This is for function pad_v2
  """
  _ctx = ctx if ctx else _context.context()
  # "T" must agree between input and constant_values; match them jointly.
  _attr_T, _inputs_T = _execute.args_to_matching_eager([input, constant_values], _ctx)
  (input, constant_values) = _inputs_T
  _attr_Tpaddings, (paddings,) = _execute.args_to_matching_eager([paddings], _ctx, _dtypes.int32)
  _inputs_flat = [input, paddings, constant_values]
  _attrs = ("T", _attr_T, "Tpaddings", _attr_Tpaddings)
  _result = _execute.execute(b"PadV2", 1, inputs=_inputs_flat, attrs=_attrs,
                             ctx=_ctx, name=name)
  _execute.record_gradient(
      "PadV2", _inputs_flat, _attrs, _result, name)
  _result, = _result
  return _result


def parallel_concat(values, shape, name=None):
  r"""Concatenates a list of `N` tensors along the first dimension.

  The input tensors are all required to have size 1 in the first dimension.

  For example:

  ```
  # 'x' is [[1, 4]]
  # 'y' is [[2, 5]]
  # 'z' is [[3, 6]]
  parallel_concat([x, y, z]) => [[1, 4], [2, 5], [3, 6]]  # Pack along first dim.
  ```

  The difference between concat and parallel_concat is that concat requires
  all of the inputs be computed before the operation will begin but doesn't
  require that the input shapes be known during graph construction.  Parallel
  concat will copy pieces of the input into the output as they become
  available, in some situations this can provide a performance benefit.

  Args:
    values: A list of at least 1 `Tensor` objects with the same type.
      Tensors to be concatenated. All must have size 1 in the first dimension
      and same shape.
    shape: A `tf.TensorShape` or list of `ints`.
      the final shape of the result; should be equal to the shapes of any
      input but with the number of input values in the first dimension.
    name: A name for the operation (optional).

  Returns:
    A `Tensor`. Has the same type as `values`.
  """
  # Eager fast path first; _FallbackException routes to the Python slow path.
  _ctx = _context._context
  if _ctx is not None and _ctx._eager_context.is_eager:
    try:
      _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
        _ctx._context_handle, _ctx._eager_context.device_name,
        "ParallelConcat", name, _ctx._post_execution_callbacks, values,
        "shape", shape)
      return _result
    except _core._FallbackException:
      try:
        return parallel_concat_eager_fallback(
            values, shape=shape, name=name, ctx=_ctx)
      except _core._SymbolicException:
        pass  # Add nodes to the TensorFlow graph.
    except _core._NotOkStatusException as e:
      if name is not None:
        message = e.message + " name: " + name
      else:
        message = e.message
      _six.raise_from(_core._status_to_exception(e.code, message), None)
  # Add nodes to the TensorFlow graph.
  if not isinstance(values, (list, tuple)):
    raise TypeError(
        "Expected list for 'values' argument to "
        "'parallel_concat' Op, not %r." % values)
  _attr_N = len(values)
  shape = _execute.make_shape(shape, "shape")
  _, _, _op = _op_def_lib._apply_op_helper(
        "ParallelConcat", values=values, shape=shape, name=name)
  _result = _op.outputs[:]
  _inputs_flat = _op.inputs
  _attrs = ("N", _op.get_attr("N"), "T", _op.get_attr("T"), "shape",
            _op.get_attr("shape"))
  _execute.record_gradient(
      "ParallelConcat", _inputs_flat, _attrs, _result, name)
  _result, = _result
  return _result


def parallel_concat_eager_fallback(values, shape, name=None, ctx=None):
  r"""This is the slowpath function for Eager mode.
  This is for function parallel_concat
  """
  _ctx = ctx if ctx else _context.context()
  if not isinstance(values, (list, tuple)):
    raise TypeError(
        "Expected list for 'values' argument to "
        "'parallel_concat' Op, not %r." % values)
  _attr_N = len(values)
  shape = _execute.make_shape(shape, "shape")
  _attr_T, values = _execute.args_to_matching_eager(list(values), _ctx)
  _inputs_flat = list(values)
  _attrs = ("N", _attr_N, "T", _attr_T, "shape", shape)
  _result = _execute.execute(b"ParallelConcat", 1, inputs=_inputs_flat,
                             attrs=_attrs, ctx=_ctx, name=name)
  _execute.record_gradient(
      "ParallelConcat", _inputs_flat, _attrs, _result, name)
  _result, = _result
  return _result


def placeholder(dtype, shape=None, name=None):
  r"""A placeholder op for a value that will be fed into the computation.

  N.B. This operation will fail with an error if it is executed. It is
  intended as a way to represent a value that will always be fed, and to
  provide attrs that enable the fed value to be checked at runtime.

  Args:
    dtype: A `tf.DType`. The type of elements in the tensor.
    shape: An optional `tf.TensorShape` or list of `ints`. Defaults to `None`.
      (Optional) The shape of the tensor. If the shape has 0 dimensions, the
      shape is unconstrained.
    name: A name for the operation (optional).

  Returns:
    A `Tensor` of type `dtype`.
  """
  # Eager fast path is attempted even though Placeholder fails when executed;
  # the error surfaces through _NotOkStatusException below.
  _ctx = _context._context
  if _ctx is not None and _ctx._eager_context.is_eager:
    try:
      _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
        _ctx._context_handle, _ctx._eager_context.device_name, "Placeholder",
        name, _ctx._post_execution_callbacks, "dtype", dtype, "shape", shape)
      return _result
    except _core._FallbackException:
      try:
        return placeholder_eager_fallback(
            dtype=dtype, shape=shape, name=name, ctx=_ctx)
      except _core._SymbolicException:
        pass  # Add nodes to the TensorFlow graph.
    except _core._NotOkStatusException as e:
      if name is not None:
        message = e.message + " name: " + name
      else:
        message = e.message
      _six.raise_from(_core._status_to_exception(e.code, message), None)
  # Add nodes to the TensorFlow graph.
  dtype = _execute.make_type(dtype, "dtype")
  if shape is None:
    shape = None  # generated no-op: the parameter default is already None
  shape = _execute.make_shape(shape, "shape")
  _, _, _op = _op_def_lib._apply_op_helper(
        "Placeholder", dtype=dtype, shape=shape, name=name)
  _result = _op.outputs[:]
  _inputs_flat = _op.inputs
  _attrs = ("dtype", _op.get_attr("dtype"), "shape", _op.get_attr("shape"))
  _execute.record_gradient(
      "Placeholder", _inputs_flat, _attrs, _result, name)
  _result, = _result
  return _result


def placeholder_eager_fallback(dtype, shape=None, name=None, ctx=None):
  r"""This is the slowpath function for Eager mode.
  This is for function placeholder
  """
  _ctx = ctx if ctx else _context.context()
  dtype = _execute.make_type(dtype, "dtype")
  if shape is None:
    shape = None  # generated no-op: the parameter default is already None
  shape = _execute.make_shape(shape, "shape")
  # Placeholder takes no tensor inputs; dtype/shape are attrs only.
  _inputs_flat = []
  _attrs = ("dtype", dtype, "shape", shape)
  _result = _execute.execute(b"Placeholder", 1, inputs=_inputs_flat,
                             attrs=_attrs, ctx=_ctx, name=name)
  _execute.record_gradient(
      "Placeholder", _inputs_flat, _attrs, _result, name)
  _result, = _result
  return _result


def placeholder_v2(dtype, shape, name=None):
  r"""A placeholder op for a value that will be fed into the computation.

  N.B. This operation will fail with an error if it is executed. It is
  intended as a way to represent a value that will always be fed, and to
  provide attrs that enable the fed value to be checked at runtime.

  Args:
    dtype: A `tf.DType`. The type of elements in the tensor.
    shape: A `tf.TensorShape` or list of `ints`.
      The shape of the tensor. The shape can be any partially-specified
      shape.  To be unconstrained, pass in a shape with unknown rank.
    name: A name for the operation (optional).

  Returns:
    A `Tensor` of type `dtype`.
  """
  # Eager fast path first; _FallbackException routes to the Python slow path.
  _ctx = _context._context
  if _ctx is not None and _ctx._eager_context.is_eager:
    try:
      _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
        _ctx._context_handle, _ctx._eager_context.device_name,
        "PlaceholderV2", name, _ctx._post_execution_callbacks, "dtype",
        dtype, "shape", shape)
      return _result
    except _core._FallbackException:
      try:
        return placeholder_v2_eager_fallback(
            dtype=dtype, shape=shape, name=name, ctx=_ctx)
      except _core._SymbolicException:
        pass  # Add nodes to the TensorFlow graph.
    except _core._NotOkStatusException as e:
      if name is not None:
        message = e.message + " name: " + name
      else:
        message = e.message
      _six.raise_from(_core._status_to_exception(e.code, message), None)
  # Add nodes to the TensorFlow graph.
  dtype = _execute.make_type(dtype, "dtype")
  shape = _execute.make_shape(shape, "shape")
  _, _, _op = _op_def_lib._apply_op_helper(
        "PlaceholderV2", dtype=dtype, shape=shape, name=name)
  _result = _op.outputs[:]
  _inputs_flat = _op.inputs
  _attrs = ("dtype", _op.get_attr("dtype"), "shape", _op.get_attr("shape"))
  _execute.record_gradient(
      "PlaceholderV2", _inputs_flat, _attrs, _result, name)
  _result, = _result
  return _result


def placeholder_v2_eager_fallback(dtype, shape, name=None, ctx=None):
  r"""This is the slowpath function for Eager mode.
  This is for function placeholder_v2
  """
  _ctx = ctx if ctx else _context.context()
  dtype = _execute.make_type(dtype, "dtype")
  shape = _execute.make_shape(shape, "shape")
  # PlaceholderV2 takes no tensor inputs; dtype/shape are attrs only.
  _inputs_flat = []
  _attrs = ("dtype", dtype, "shape", shape)
  _result = _execute.execute(b"PlaceholderV2", 1, inputs=_inputs_flat,
                             attrs=_attrs, ctx=_ctx, name=name)
  _execute.record_gradient(
      "PlaceholderV2", _inputs_flat, _attrs, _result, name)
  _result, = _result
  return _result


def placeholder_with_default(input, shape, name=None):
  r"""A placeholder op that passes through `input` when its output is not fed.

  Args:
    input: A `Tensor`. The default value to produce when `output` is not fed.
    shape: A `tf.TensorShape` or list of `ints`.
      The (possibly partial) shape of the tensor.
    name: A name for the operation (optional).

  Returns:
    A `Tensor`. Has the same type as `input`.
  """
  # Eager fast path first; _FallbackException routes to the Python slow path.
  _ctx = _context._context
  if _ctx is not None and _ctx._eager_context.is_eager:
    try:
      _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
        _ctx._context_handle, _ctx._eager_context.device_name,
        "PlaceholderWithDefault", name, _ctx._post_execution_callbacks,
        input, "shape", shape)
      return _result
    except _core._FallbackException:
      try:
        return placeholder_with_default_eager_fallback(
            input, shape=shape, name=name, ctx=_ctx)
      except _core._SymbolicException:
        pass  # Add nodes to the TensorFlow graph.
    except _core._NotOkStatusException as e:
      if name is not None:
        message = e.message + " name: " + name
      else:
        message = e.message
      _six.raise_from(_core._status_to_exception(e.code, message), None)
  # Add nodes to the TensorFlow graph.
  shape = _execute.make_shape(shape, "shape")
  _, _, _op = _op_def_lib._apply_op_helper(
        "PlaceholderWithDefault", input=input, shape=shape, name=name)
  _result = _op.outputs[:]
  _inputs_flat = _op.inputs
  _attrs = ("dtype", _op.get_attr("dtype"), "shape", _op.get_attr("shape"))
  _execute.record_gradient(
      "PlaceholderWithDefault", _inputs_flat, _attrs, _result, name)
  _result, = _result
  return _result


def placeholder_with_default_eager_fallback(input, shape, name=None, ctx=None):
  r"""This is the slowpath function for Eager mode.
  This is for function placeholder_with_default
  """
  _ctx = ctx if ctx else _context.context()
  shape = _execute.make_shape(shape, "shape")
  # The "dtype" attr is inferred from the default-value tensor.
  _attr_dtype, (input,) = _execute.args_to_matching_eager([input], _ctx)
  _inputs_flat = [input]
  _attrs = ("dtype", _attr_dtype, "shape", shape)
  _result = _execute.execute(b"PlaceholderWithDefault", 1,
                             inputs=_inputs_flat, attrs=_attrs, ctx=_ctx,
                             name=name)
  _execute.record_gradient(
      "PlaceholderWithDefault", _inputs_flat, _attrs, _result, name)
  _result, = _result
  return _result


def prevent_gradient(input, message="", name=None):
  r"""An identity op that triggers an error if a gradient is requested.

  When executed in a graph, this op outputs its input tensor as-is.

  When building ops to compute gradients, the TensorFlow gradient system
  will return an error when trying to lookup the gradient of this op,
  because no gradient must ever be registered for this function.  This
  op exists to prevent subtle bugs from silently returning unimplemented
  gradients in some corner cases.

  Args:
    input: A `Tensor`. any tensor.
    message: An optional `string`. Defaults to `""`.
      Will be printed in the error when anyone tries to differentiate
      this operation.
    name: A name for the operation (optional).

  Returns:
    A `Tensor`. Has the same type as `input`.
  """
  # Eager fast path first; _FallbackException routes to the Python slow path.
  _ctx = _context._context
  if _ctx is not None and _ctx._eager_context.is_eager:
    try:
      _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
        _ctx._context_handle, _ctx._eager_context.device_name,
        "PreventGradient", name, _ctx._post_execution_callbacks, input,
        "message", message)
      return _result
    except _core._FallbackException:
      try:
        return prevent_gradient_eager_fallback(
            input, message=message, name=name, ctx=_ctx)
      except _core._SymbolicException:
        pass  # Add nodes to the TensorFlow graph.
    except _core._NotOkStatusException as e:
      if name is not None:
        message = e.message + " name: " + name
      else:
        message = e.message
      _six.raise_from(_core._status_to_exception(e.code, message), None)
  # Add nodes to the TensorFlow graph.
  if message is None:
    message = ""
  message = _execute.make_str(message, "message")
  _, _, _op = _op_def_lib._apply_op_helper(
        "PreventGradient", input=input, message=message, name=name)
  _result = _op.outputs[:]
  _inputs_flat = _op.inputs
  _attrs = ("T", _op.get_attr("T"), "message", _op.get_attr("message"))
  _execute.record_gradient(
      "PreventGradient", _inputs_flat, _attrs, _result, name)
  _result, = _result
  return _result


def prevent_gradient_eager_fallback(input, message="", name=None, ctx=None):
  r"""This is the slowpath function for Eager mode.
  This is for function prevent_gradient
  """
  _ctx = ctx if ctx else _context.context()
  if message is None:
    message = ""
  message = _execute.make_str(message, "message")
  _attr_T, (input,) = _execute.args_to_matching_eager([input], _ctx)
  _inputs_flat = [input]
  _attrs = ("T", _attr_T, "message", message)
  _result = _execute.execute(b"PreventGradient", 1, inputs=_inputs_flat,
                             attrs=_attrs, ctx=_ctx, name=name)
  _execute.record_gradient(
      "PreventGradient", _inputs_flat, _attrs, _result, name)
  _result, = _result
  return _result


def quantize_and_dequantize(input, signed_input=True, num_bits=8, range_given=False, input_min=0, input_max=0, name=None):
  r"""Use QuantizeAndDequantizeV2 instead.

  Args:
    input: A `Tensor`. Must be one of the following types: `bfloat16`, `half`, `float32`, `float64`.
    signed_input: An optional `bool`. Defaults to `True`.
    num_bits: An optional `int`. Defaults to `8`.
    range_given: An optional `bool`. Defaults to `False`.
    input_min: An optional `float`. Defaults to `0`.
    input_max: An optional `float`. Defaults to `0`.
    name: A name for the operation (optional).

  Returns:
    A `Tensor`. Has the same type as `input`.
  """
  # Eager fast path first; _FallbackException routes to the Python slow path.
  _ctx = _context._context
  if _ctx is not None and _ctx._eager_context.is_eager:
    try:
      _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
        _ctx._context_handle, _ctx._eager_context.device_name,
        "QuantizeAndDequantize", name, _ctx._post_execution_callbacks, input,
        "signed_input", signed_input, "num_bits", num_bits, "range_given",
        range_given, "input_min", input_min, "input_max", input_max)
      return _result
    except _core._FallbackException:
      try:
        return quantize_and_dequantize_eager_fallback(
            input, signed_input=signed_input, num_bits=num_bits,
            range_given=range_given, input_min=input_min,
            input_max=input_max, name=name, ctx=_ctx)
      except _core._SymbolicException:
        pass  # Add nodes to the TensorFlow graph.
    except _core._NotOkStatusException as e:
      if name is not None:
        message = e.message + " name: " + name
      else:
        message = e.message
      _six.raise_from(_core._status_to_exception(e.code, message), None)
  # Add nodes to the TensorFlow graph.
  if signed_input is None:
    signed_input = True
  signed_input = _execute.make_bool(signed_input, "signed_input")
  if num_bits is None:
    num_bits = 8
  num_bits = _execute.make_int(num_bits, "num_bits")
  if range_given is None:
    range_given = False
  range_given = _execute.make_bool(range_given, "range_given")
  if input_min is None:
    input_min = 0
  input_min = _execute.make_float(input_min, "input_min")
  if input_max is None:
    input_max = 0
  input_max = _execute.make_float(input_max, "input_max")
  _, _, _op = _op_def_lib._apply_op_helper(
        "QuantizeAndDequantize", input=input, signed_input=signed_input,
        num_bits=num_bits, range_given=range_given, input_min=input_min,
        input_max=input_max, name=name)
  _result = _op.outputs[:]
  _inputs_flat = _op.inputs
  _attrs = ("signed_input", _op.get_attr("signed_input"), "num_bits",
            _op.get_attr("num_bits"), "range_given",
            _op.get_attr("range_given"), "input_min",
            _op.get_attr("input_min"), "input_max",
            _op.get_attr("input_max"), "T", _op.get_attr("T"))
  _execute.record_gradient(
      "QuantizeAndDequantize", _inputs_flat, _attrs, _result, name)
  _result, = _result
  return _result


def quantize_and_dequantize_eager_fallback(input, signed_input=True, num_bits=8, range_given=False, input_min=0, input_max=0, name=None, ctx=None):
  r"""This is the slowpath function for Eager mode.
  This is for function quantize_and_dequantize
  """
  _ctx = ctx if ctx else _context.context()
  if signed_input is None:
    signed_input = True
  signed_input = _execute.make_bool(signed_input, "signed_input")
  if num_bits is None:
    num_bits = 8
  num_bits = _execute.make_int(num_bits, "num_bits")
  if range_given is None:
    range_given = False
  range_given = _execute.make_bool(range_given, "range_given")
  if input_min is None:
    input_min = 0
  input_min = _execute.make_float(input_min, "input_min")
  if input_max is None:
    input_max = 0
  input_max = _execute.make_float(input_max, "input_max")
  _attr_T, (input,) = _execute.args_to_matching_eager([input], _ctx)
  _inputs_flat = [input]
  _attrs = ("signed_input", signed_input, "num_bits", num_bits,
            "range_given", range_given, "input_min", input_min, "input_max",
            input_max, "T", _attr_T)
  _result = _execute.execute(b"QuantizeAndDequantize", 1,
                             inputs=_inputs_flat, attrs=_attrs, ctx=_ctx,
                             name=name)
  _execute.record_gradient(
      "QuantizeAndDequantize", _inputs_flat, _attrs, _result, name)
  _result, = _result
  return _result


@_dispatch.add_dispatch_list
@tf_export('quantization.quantize_and_dequantize')
def quantize_and_dequantize_v2(input, input_min, input_max, signed_input=True, num_bits=8, range_given=False, round_mode="HALF_TO_EVEN", name=None):
  r"""Quantizes then dequantizes a tensor.

  This op simulates the precision loss from the quantized forward pass by:

  1. Quantizing the tensor to fixed point numbers, which should match the
     target quantization method when it is used in inference.
  2. Dequantizing it back to floating point numbers for the following ops,
     most likely matmul.

  There are different ways to quantize. This version uses only scaling, so
  0.0 maps to 0.

  From the specified 'num_bits' in the quantized output type, it determines
  minimum and maximum representable quantized values.

  e.g.

  *   [-128, 127] for signed, num_bits = 8, or
  *   [0, 255] for unsigned, num_bits = 8.

  If range_given == False, the initial input_min, input_max will be determined
  automatically as the minimum and maximum values in the input tensor,
  otherwise the specified values of input_min, input_max are used.

  Note: If the input_min, input_max are specified, they do not need to equal
  the actual minimum and maximum values in the tensor. e.g. in some cases it
  may be beneficial to specify these values such that the low probability
  extremes of the input distribution are clipped.

  This op determines the maximum scale_factor that would map the initial
  [input_min, input_max] range to a range that lies within the representable
  quantized range.

  It determines the scale from one of input_min and input_max, then updates
  the other one to maximize the respresentable range.

  e.g.

  *   if the output is signed, num_bits = 8, [input_min, input_max] =
      [-10.0, 5.0]: it would use a scale_factor of -128 / -10.0 = 12.8
      In this case, it would update input_max to be 127 / 12.8 = 9.921875
  *   if the output is signed, num_bits = 8, [input_min, input_max] =
      [-10.0, 10.0]: it would use a scale_factor of 127 / 10.0 = 12.7
      In this case, it would update input_min to be 128.0 / 12.7 = -10.07874
  *   if the output is unsigned, input_min is forced to be 0, and only the
      specified input_max is used.

  After determining the scale_factor and updating the input range, it applies
  the following to each value in the 'input' tensor.

  output = round(clamp(value, input_min, input_max) * scale_factor) / scale_factor.

  The above round function rounds the value based on the given round_mode.

  Args:
    input: A `Tensor`. Must be one of the following types: `bfloat16`, `half`, `float32`, `float64`.
      Tensor to quantize and then dequantize.
    input_min: A `Tensor`. Must have the same type as `input`.
      If `range_given == True`, this specifies the minimum input value that
      needs to be represented, otherwise it is determined from the min value
      of the `input` tensor.
    input_max: A `Tensor`. Must have the same type as `input`.
      If `range_given == True`, this specifies the maximum input value that
      needs to be represented, otherwise it is determined from the max value
      of the `input` tensor.
    signed_input: An optional `bool`. Defaults to `True`.
      Whether the quantization is signed or unsigned. (actually this parameter
      should have been called <b>`signed_output`</b>)
    num_bits: An optional `int`. Defaults to `8`.
      The bitwidth of the quantization.
    range_given: An optional `bool`. Defaults to `False`.
      Whether the range is given or should be determined from the `input`
      tensor.
    round_mode: An optional `string` from: `"HALF_TO_EVEN", "HALF_UP"`. Defaults to `"HALF_TO_EVEN"`.
      The 'round_mode' attribute controls which rounding tie-breaking
      algorithm is used when rounding float values to their quantized
      equivalents. The following rounding modes are currently supported:

      *   HALF_TO_EVEN: this is the default round_mode.
      *   HALF_UP: round towards positive. In this mode 7.5 rounds up to 8
          and -7.5 rounds up to -7.

    name: A name for the operation (optional).

  Returns:
    A `Tensor`. Has the same type as `input`.
  """
  # Eager fast path; on (TypeError, ValueError) the op is offered to the
  # dispatch registry (tf_export'd API hook) before re-raising.
  _ctx = _context._context
  if _ctx is not None and _ctx._eager_context.is_eager:
    try:
      _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
        _ctx._context_handle, _ctx._eager_context.device_name,
        "QuantizeAndDequantizeV2", name, _ctx._post_execution_callbacks,
        input, input_min, input_max, "signed_input", signed_input,
        "num_bits", num_bits, "range_given", range_given, "round_mode",
        round_mode)
      return _result
    except _core._FallbackException:
      try:
        return quantize_and_dequantize_v2_eager_fallback(
            input, input_min, input_max, signed_input=signed_input,
            num_bits=num_bits, range_given=range_given,
            round_mode=round_mode, name=name, ctx=_ctx)
      except _core._SymbolicException:
        pass  # Add nodes to the TensorFlow graph.
      except (TypeError, ValueError):
        result = _dispatch.dispatch(
              quantize_and_dequantize_v2, input=input, input_min=input_min,
              input_max=input_max, signed_input=signed_input,
              num_bits=num_bits, range_given=range_given,
              round_mode=round_mode, name=name)
        if result is not _dispatch.OpDispatcher.NOT_SUPPORTED:
          return result
        raise
    except _core._NotOkStatusException as e:
      if name is not None:
        message = e.message + " name: " + name
      else:
        message = e.message
      _six.raise_from(_core._status_to_exception(e.code, message), None)
  # Add nodes to the TensorFlow graph.
  if signed_input is None:
    signed_input = True
  signed_input = _execute.make_bool(signed_input, "signed_input")
  if num_bits is None:
    num_bits = 8
  num_bits = _execute.make_int(num_bits, "num_bits")
  if range_given is None:
    range_given = False
  range_given = _execute.make_bool(range_given, "range_given")
  if round_mode is None:
    round_mode = "HALF_TO_EVEN"
  round_mode = _execute.make_str(round_mode, "round_mode")
  try:
    _, _, _op = _op_def_lib._apply_op_helper(
        "QuantizeAndDequantizeV2", input=input, input_min=input_min,
        input_max=input_max, signed_input=signed_input, num_bits=num_bits,
        range_given=range_given, round_mode=round_mode, name=name)
  except (TypeError, ValueError):
    # Graph mode also consults the dispatch registry before re-raising.
    result = _dispatch.dispatch(
          quantize_and_dequantize_v2, input=input, input_min=input_min,
          input_max=input_max, signed_input=signed_input, num_bits=num_bits,
          range_given=range_given, round_mode=round_mode, name=name)
    if result is not _dispatch.OpDispatcher.NOT_SUPPORTED:
      return result
    raise
  _result = _op.outputs[:]
  _inputs_flat = _op.inputs
  _attrs = ("signed_input", _op.get_attr("signed_input"), "num_bits",
            _op.get_attr("num_bits"), "range_given",
            _op.get_attr("range_given"), "T", _op.get_attr("T"),
            "round_mode", _op.get_attr("round_mode"))
  _execute.record_gradient(
      "QuantizeAndDequantizeV2", _inputs_flat, _attrs, _result, name)
  _result, = _result
  return _result


def quantize_and_dequantize_v2_eager_fallback(input, input_min, input_max, signed_input=True, num_bits=8, range_given=False, round_mode="HALF_TO_EVEN", name=None, ctx=None):
  r"""This is the slowpath function for Eager mode.
  This is for function quantize_and_dequantize_v2
  """
  _ctx = ctx if ctx else _context.context()
  if signed_input is None:
    signed_input = True
  signed_input = _execute.make_bool(signed_input, "signed_input")
  if num_bits is None:
    num_bits = 8
  num_bits = _execute.make_int(num_bits, "num_bits")
  if range_given is None:
    range_given = False
  range_given = _execute.make_bool(range_given, "range_given")
  if round_mode is None:
    round_mode = "HALF_TO_EVEN"
  round_mode = _execute.make_str(round_mode, "round_mode")
  # input, input_min and input_max must all share the "T" dtype.
  _attr_T, _inputs_T = _execute.args_to_matching_eager([input, input_min, input_max], _ctx)
  (input, input_min, input_max) = _inputs_T
  _inputs_flat = [input, input_min, input_max]
  _attrs = ("signed_input", signed_input, "num_bits", num_bits,
            "range_given", range_given, "T", _attr_T, "round_mode",
            round_mode)
  _result = _execute.execute(b"QuantizeAndDequantizeV2", 1,
                             inputs=_inputs_flat, attrs=_attrs, ctx=_ctx,
                             name=name)
  _execute.record_gradient(
      "QuantizeAndDequantizeV2", _inputs_flat, _attrs, _result, name)
  _result, = _result
  return _result


def quantize_and_dequantize_v3(input, input_min, input_max, num_bits, signed_input=True, range_given=True, name=None):
  r"""Quantizes then dequantizes a tensor.

  This is almost identical to QuantizeAndDequantizeV2, except that num_bits
  is a tensor, so its value can change during training.

  Args:
    input: A `Tensor`. Must be one of the following types: `bfloat16`, `half`, `float32`, `float64`.
    input_min: A `Tensor`. Must have the same type as `input`.
    input_max: A `Tensor`. Must have the same type as `input`.
    num_bits: A `Tensor` of type `int32`.
    signed_input: An optional `bool`. Defaults to `True`.
    range_given: An optional `bool`. Defaults to `True`.
    name: A name for the operation (optional).

  Returns:
    A `Tensor`. Has the same type as `input`.
  """
  # Eager fast path first; _FallbackException routes to the Python slow path.
  _ctx = _context._context
  if _ctx is not None and _ctx._eager_context.is_eager:
    try:
      _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
        _ctx._context_handle, _ctx._eager_context.device_name,
        "QuantizeAndDequantizeV3", name, _ctx._post_execution_callbacks,
        input, input_min, input_max, num_bits, "signed_input", signed_input,
        "range_given", range_given)
      return _result
    except _core._FallbackException:
      try:
        return quantize_and_dequantize_v3_eager_fallback(
            input, input_min, input_max, num_bits,
            signed_input=signed_input, range_given=range_given, name=name,
            ctx=_ctx)
      except _core._SymbolicException:
        pass  # Add nodes to the TensorFlow graph.
    except _core._NotOkStatusException as e:
      if name is not None:
        message = e.message + " name: " + name
      else:
        message = e.message
      _six.raise_from(_core._status_to_exception(e.code, message), None)
  # Add nodes to the TensorFlow graph.
  if signed_input is None:
    signed_input = True
  signed_input = _execute.make_bool(signed_input, "signed_input")
  if range_given is None:
    range_given = True
  range_given = _execute.make_bool(range_given, "range_given")
  _, _, _op = _op_def_lib._apply_op_helper(
        "QuantizeAndDequantizeV3", input=input, input_min=input_min,
        input_max=input_max, num_bits=num_bits, signed_input=signed_input,
        range_given=range_given, name=name)
  _result = _op.outputs[:]
  _inputs_flat = _op.inputs
  _attrs = ("signed_input", _op.get_attr("signed_input"), "range_given",
            _op.get_attr("range_given"), "T", _op.get_attr("T"))
  _execute.record_gradient(
      "QuantizeAndDequantizeV3", _inputs_flat, _attrs, _result, name)
  _result, = _result
  return _result


def quantize_and_dequantize_v3_eager_fallback(input, input_min, input_max, num_bits, signed_input=True, range_given=True, name=None, ctx=None):
  r"""This is the slowpath function for Eager mode.
  This is for function quantize_and_dequantize_v3
  """
  _ctx = ctx if ctx else _context.context()
  if signed_input is None:
    signed_input = True
  signed_input = _execute.make_bool(signed_input, "signed_input")
  if range_given is None:
    range_given = True
  range_given = _execute.make_bool(range_given, "range_given")
  # input, input_min and input_max share "T"; num_bits is a fixed int32
  # tensor input (not an attr, unlike V2).
  _attr_T, _inputs_T = _execute.args_to_matching_eager([input, input_min, input_max], _ctx)
  (input, input_min, input_max) = _inputs_T
  num_bits = _ops.convert_to_tensor(num_bits, _dtypes.int32)
  _inputs_flat = [input, input_min, input_max, num_bits]
  _attrs = ("signed_input", signed_input, "range_given", range_given, "T",
            _attr_T)
  _result = _execute.execute(b"QuantizeAndDequantizeV3", 1,
                             inputs=_inputs_flat, attrs=_attrs, ctx=_ctx,
                             name=name)
  _execute.record_gradient(
      "QuantizeAndDequantizeV3", _inputs_flat, _attrs, _result, name)
  _result, = _result
  return _result


# QuantizeV2 returns three tensors: the quantized output plus the actual
# min/max of the range it used.
_quantize_v2_outputs = ["output", "output_min", "output_max"]
_QuantizeV2Output = _collections.namedtuple(
    "QuantizeV2", _quantize_v2_outputs)


def quantize_v2(input, min_range, max_range, T, mode="MIN_COMBINED", round_mode="HALF_AWAY_FROM_ZERO", name=None):
  r"""Quantize the 'input' tensor of type float to 'output' tensor of type 'T'.

  [min_range, max_range] are scalar floats that specify the range for
  the 'input' data. The 'mode' attribute controls exactly which calculations
  are used to convert the float values to their quantized equivalents.  The
  'round_mode' attribute controls which rounding tie-breaking algorithm is
  used when rounding float values to their quantized equivalents.

  In 'MIN_COMBINED' mode, each value of the tensor will undergo the following:

  ```
  out[i] = (in[i] - min_range) * range(T) / (max_range - min_range)
  if T == qint8: out[i] -= (range(T) + 1) / 2.0
  ```

  here `range(T) = numeric_limits<T>::max() - numeric_limits<T>::min()`

  *MIN_COMBINED Mode Example*

  Assume the input is type float and has a possible range of [0.0, 6.0] and
  the output type is quint8 ([0, 255]). The min_range and max_range values
  should be specified as 0.0 and 6.0. Quantizing from float to quint8 will
  multiply each value of the input by 255/6 and cast to quint8.

  If the output type was qint8 ([-128, 127]), the operation will additionally
  subtract each value by 128 prior to casting, so that the range of values
  aligns with the range of qint8.

  If the mode is 'MIN_FIRST', then this approach is used:

  ```
  num_discrete_values = 1 << (# of bits in T)
  range_adjust = num_discrete_values / (num_discrete_values - 1)
  range = (range_max - range_min) * range_adjust
  range_scale = num_discrete_values / range
  quantized = round(input * range_scale) - round(range_min * range_scale) +
    numeric_limits<T>::min()
  quantized = max(quantized, numeric_limits<T>::min())
  quantized = min(quantized, numeric_limits<T>::max())
  ```

  The biggest difference between this and MIN_COMBINED is that the minimum
  range is rounded first, before it's subtracted from the rounded value. With
  MIN_COMBINED, a small bias is introduced where repeated iterations of
  quantizing and dequantizing will introduce a larger and larger error.

  *SCALED mode Example*

  `SCALED` mode matches the quantization approach used in
  `QuantizeAndDequantize{V2|V3}`.

  If the mode is `SCALED`, we do not use the full range of the output type,
  choosing to elide the lowest possible value for symmetry (e.g., output
  range is -127 to 127, not -128 to 127 for signed 8 bit quantization), so
  that 0.0 maps to 0.

  We first find the range of values in our tensor. The
  range we use is always centered on 0, so we find m such that

  ```c++
  m = max(abs(input_min), abs(input_max))
  ```

  Our input tensor range is then `[-m, m]`.

  Next, we choose our fixed-point quantization buckets, `[min_fixed, max_fixed]`.
If T is signed, this is ``` num_bits = sizeof(T) * 8 [min_fixed, max_fixed] = [-(1 << (num_bits - 1) - 1), (1 << (num_bits - 1)) - 1] ``` Otherwise, if T is unsigned, the fixed-point range is ``` [min_fixed, max_fixed] = [0, (1 << num_bits) - 1] ``` From this we compute our scaling factor, s: ```c++ s = (max_fixed - min_fixed) / (2 * m) ``` Now we can quantize the elements of our tensor: ```c++ result = round(input * s) ``` One thing to watch out for is that the operator may choose to adjust the requested minimum and maximum values slightly during the quantization process, so you should always use the output ports as the range for further calculations. For example, if the requested minimum and maximum values are close to equal, they will be separated by a small epsilon value to prevent ill-formed quantized buffers from being created. Otherwise, you can end up with buffers where all the quantized values map to the same float value, which causes problems for operations that have to perform further calculations on them. Args: input: A `Tensor` of type `float32`. min_range: A `Tensor` of type `float32`. The minimum scalar value possibly produced for the input. max_range: A `Tensor` of type `float32`. The maximum scalar value possibly produced for the input. T: A `tf.DType` from: `tf.qint8, tf.quint8, tf.qint32, tf.qint16, tf.quint16`. mode: An optional `string` from: `"MIN_COMBINED", "MIN_FIRST", "SCALED"`. Defaults to `"MIN_COMBINED"`. round_mode: An optional `string` from: `"HALF_AWAY_FROM_ZERO", "HALF_TO_EVEN"`. Defaults to `"HALF_AWAY_FROM_ZERO"`. name: A name for the operation (optional). Returns: A tuple of `Tensor` objects (output, output_min, output_max). output: A `Tensor` of type `T`. output_min: A `Tensor` of type `float32`. output_max: A `Tensor` of type `float32`. 
""" _ctx = _context._context if _ctx is not None and _ctx._eager_context.is_eager: try: _result = _pywrap_tensorflow.TFE_Py_FastPathExecute( _ctx._context_handle, _ctx._eager_context.device_name, "QuantizeV2", name, _ctx._post_execution_callbacks, input, min_range, max_range, "T", T, "mode", mode, "round_mode", round_mode) _result = _QuantizeV2Output._make(_result) return _result except _core._FallbackException: try: return quantize_v2_eager_fallback( input, min_range, max_range, T=T, mode=mode, round_mode=round_mode, name=name, ctx=_ctx) except _core._SymbolicException: pass # Add nodes to the TensorFlow graph. except _core._NotOkStatusException as e: if name is not None: message = e.message + " name: " + name else: message = e.message _six.raise_from(_core._status_to_exception(e.code, message), None) # Add nodes to the TensorFlow graph. T = _execute.make_type(T, "T") if mode is None: mode = "MIN_COMBINED" mode = _execute.make_str(mode, "mode") if round_mode is None: round_mode = "HALF_AWAY_FROM_ZERO" round_mode = _execute.make_str(round_mode, "round_mode") _, _, _op = _op_def_lib._apply_op_helper( "QuantizeV2", input=input, min_range=min_range, max_range=max_range, T=T, mode=mode, round_mode=round_mode, name=name) _result = _op.outputs[:] _inputs_flat = _op.inputs _attrs = ("T", _op.get_attr("T"), "mode", _op.get_attr("mode"), "round_mode", _op.get_attr("round_mode")) _execute.record_gradient( "QuantizeV2", _inputs_flat, _attrs, _result, name) _result = _QuantizeV2Output._make(_result) return _result def quantize_v2_eager_fallback(input, min_range, max_range, T, mode="MIN_COMBINED", round_mode="HALF_AWAY_FROM_ZERO", name=None, ctx=None): r"""This is the slowpath function for Eager mode. 
This is for function quantize_v2 """ _ctx = ctx if ctx else _context.context() T = _execute.make_type(T, "T") if mode is None: mode = "MIN_COMBINED" mode = _execute.make_str(mode, "mode") if round_mode is None: round_mode = "HALF_AWAY_FROM_ZERO" round_mode = _execute.make_str(round_mode, "round_mode") input = _ops.convert_to_tensor(input, _dtypes.float32) min_range = _ops.convert_to_tensor(min_range, _dtypes.float32) max_range = _ops.convert_to_tensor(max_range, _dtypes.float32) _inputs_flat = [input, min_range, max_range] _attrs = ("T", T, "mode", mode, "round_mode", round_mode) _result = _execute.execute(b"QuantizeV2", 3, inputs=_inputs_flat, attrs=_attrs, ctx=_ctx, name=name) _execute.record_gradient( "QuantizeV2", _inputs_flat, _attrs, _result, name) _result = _QuantizeV2Output._make(_result) return _result _quantized_concat_outputs = ["output", "output_min", "output_max"] _QuantizedConcatOutput = _collections.namedtuple( "QuantizedConcat", _quantized_concat_outputs) @_dispatch.add_dispatch_list @tf_export('quantization.quantized_concat', v1=['quantization.quantized_concat', 'quantized_concat']) @deprecated_endpoints('quantized_concat') def quantized_concat(concat_dim, values, input_mins, input_maxes, name=None): r"""Concatenates quantized tensors along one dimension. Args: concat_dim: A `Tensor` of type `int32`. 0-D. The dimension along which to concatenate. Must be in the range [0, rank(values)). values: A list of at least 2 `Tensor` objects with the same type. The `N` Tensors to concatenate. Their ranks and types must match, and their sizes must match in all dimensions except `concat_dim`. input_mins: A list with the same length as `values` of `Tensor` objects with type `float32`. The minimum scalar values for each of the input tensors. input_maxes: A list with the same length as `values` of `Tensor` objects with type `float32`. The maximum scalar values for each of the input tensors. name: A name for the operation (optional). 
Returns: A tuple of `Tensor` objects (output, output_min, output_max). output: A `Tensor`. Has the same type as `values`. output_min: A `Tensor` of type `float32`. output_max: A `Tensor` of type `float32`. """ _ctx = _context._context if _ctx is not None and _ctx._eager_context.is_eager: try: _result = _pywrap_tensorflow.TFE_Py_FastPathExecute( _ctx._context_handle, _ctx._eager_context.device_name, "QuantizedConcat", name, _ctx._post_execution_callbacks, concat_dim, values, input_mins, input_maxes) _result = _QuantizedConcatOutput._make(_result) return _result except _core._FallbackException: try: return quantized_concat_eager_fallback( concat_dim, values, input_mins, input_maxes, name=name, ctx=_ctx) except _core._SymbolicException: pass # Add nodes to the TensorFlow graph. except (TypeError, ValueError): result = _dispatch.dispatch( quantized_concat, concat_dim=concat_dim, values=values, input_mins=input_mins, input_maxes=input_maxes, name=name) if result is not _dispatch.OpDispatcher.NOT_SUPPORTED: return result raise except _core._NotOkStatusException as e: if name is not None: message = e.message + " name: " + name else: message = e.message _six.raise_from(_core._status_to_exception(e.code, message), None) # Add nodes to the TensorFlow graph. if not isinstance(values, (list, tuple)): raise TypeError( "Expected list for 'values' argument to " "'quantized_concat' Op, not %r." % values) _attr_N = len(values) if not isinstance(input_mins, (list, tuple)): raise TypeError( "Expected list for 'input_mins' argument to " "'quantized_concat' Op, not %r." % input_mins) if len(input_mins) != _attr_N: raise ValueError( "List argument 'input_mins' to 'quantized_concat' Op with length %d " "must match length %d of argument 'values'." % (len(input_mins), _attr_N)) if not isinstance(input_maxes, (list, tuple)): raise TypeError( "Expected list for 'input_maxes' argument to " "'quantized_concat' Op, not %r." 
% input_maxes) if len(input_maxes) != _attr_N: raise ValueError( "List argument 'input_maxes' to 'quantized_concat' Op with length %d " "must match length %d of argument 'values'." % (len(input_maxes), _attr_N)) try: _, _, _op = _op_def_lib._apply_op_helper( "QuantizedConcat", concat_dim=concat_dim, values=values, input_mins=input_mins, input_maxes=input_maxes, name=name) except (TypeError, ValueError): result = _dispatch.dispatch( quantized_concat, concat_dim=concat_dim, values=values, input_mins=input_mins, input_maxes=input_maxes, name=name) if result is not _dispatch.OpDispatcher.NOT_SUPPORTED: return result raise _result = _op.outputs[:] _inputs_flat = _op.inputs _attrs = ("N", _op.get_attr("N"), "T", _op.get_attr("T")) _execute.record_gradient( "QuantizedConcat", _inputs_flat, _attrs, _result, name) _result = _QuantizedConcatOutput._make(_result) return _result def quantized_concat_eager_fallback(concat_dim, values, input_mins, input_maxes, name=None, ctx=None): r"""This is the slowpath function for Eager mode. This is for function quantized_concat """ _ctx = ctx if ctx else _context.context() if not isinstance(values, (list, tuple)): raise TypeError( "Expected list for 'values' argument to " "'quantized_concat' Op, not %r." % values) _attr_N = len(values) if not isinstance(input_mins, (list, tuple)): raise TypeError( "Expected list for 'input_mins' argument to " "'quantized_concat' Op, not %r." % input_mins) if len(input_mins) != _attr_N: raise ValueError( "List argument 'input_mins' to 'quantized_concat' Op with length %d " "must match length %d of argument 'values'." % (len(input_mins), _attr_N)) if not isinstance(input_maxes, (list, tuple)): raise TypeError( "Expected list for 'input_maxes' argument to " "'quantized_concat' Op, not %r." % input_maxes) if len(input_maxes) != _attr_N: raise ValueError( "List argument 'input_maxes' to 'quantized_concat' Op with length %d " "must match length %d of argument 'values'." 
        % (len(input_maxes), _attr_N))
  # Infer the common quantized dtype T from `values`; the range tensors are
  # always float32 regardless of T.
  _attr_T, values = _execute.args_to_matching_eager(list(values), _ctx)
  concat_dim = _ops.convert_to_tensor(concat_dim, _dtypes.int32)
  input_mins = _ops.convert_n_to_tensor(input_mins, _dtypes.float32)
  input_maxes = _ops.convert_n_to_tensor(input_maxes, _dtypes.float32)
  # Flat input layout expected by the op: concat_dim, then the N value
  # tensors, then the N mins, then the N maxes.
  _inputs_flat = [concat_dim] + list(values) + list(input_mins) + list(input_maxes)
  _attrs = ("N", _attr_N, "T", _attr_T)
  # 3 == number of outputs (output, output_min, output_max).
  _result = _execute.execute(b"QuantizedConcat", 3, inputs=_inputs_flat,
                             attrs=_attrs, ctx=_ctx, name=name)
  _execute.record_gradient(
      "QuantizedConcat", _inputs_flat, _attrs, _result, name)
  _result = _QuantizedConcatOutput._make(_result)
  return _result


_quantized_instance_norm_outputs = ["y", "y_min", "y_max"]
_QuantizedInstanceNormOutput = _collections.namedtuple(
    "QuantizedInstanceNorm", _quantized_instance_norm_outputs)


def quantized_instance_norm(x, x_min, x_max, output_range_given=False, given_y_min=0, given_y_max=0, variance_epsilon=1e-05, min_separation=0.001, name=None):
  r"""Quantized Instance normalization.

  Args:
    x: A `Tensor`. Must be one of the following types: `qint8`, `quint8`, `qint32`, `qint16`, `quint16`.
      A 4D input Tensor.
    x_min: A `Tensor` of type `float32`.
      The value represented by the lowest quantized input.
    x_max: A `Tensor` of type `float32`.
      The value represented by the highest quantized input.
    output_range_given: An optional `bool`. Defaults to `False`.
      If True, `given_y_min` and `given_y_max` are used as the output range.
      Otherwise, the implementation computes the output range.
    given_y_min: An optional `float`. Defaults to `0`.
      Output in `y_min` if `output_range_given` is True.
    given_y_max: An optional `float`. Defaults to `0`.
      Output in `y_max` if `output_range_given` is True.
    variance_epsilon: An optional `float`. Defaults to `1e-05`.
      A small float number to avoid dividing by 0.
    min_separation: An optional `float`. Defaults to `0.001`.
      Minimum value of `y_max - y_min`
    name: A name for the operation (optional).
Returns: A tuple of `Tensor` objects (y, y_min, y_max). y: A `Tensor`. Has the same type as `x`. y_min: A `Tensor` of type `float32`. y_max: A `Tensor` of type `float32`. """ _ctx = _context._context if _ctx is not None and _ctx._eager_context.is_eager: try: _result = _pywrap_tensorflow.TFE_Py_FastPathExecute( _ctx._context_handle, _ctx._eager_context.device_name, "QuantizedInstanceNorm", name, _ctx._post_execution_callbacks, x, x_min, x_max, "output_range_given", output_range_given, "given_y_min", given_y_min, "given_y_max", given_y_max, "variance_epsilon", variance_epsilon, "min_separation", min_separation) _result = _QuantizedInstanceNormOutput._make(_result) return _result except _core._FallbackException: try: return quantized_instance_norm_eager_fallback( x, x_min, x_max, output_range_given=output_range_given, given_y_min=given_y_min, given_y_max=given_y_max, variance_epsilon=variance_epsilon, min_separation=min_separation, name=name, ctx=_ctx) except _core._SymbolicException: pass # Add nodes to the TensorFlow graph. except _core._NotOkStatusException as e: if name is not None: message = e.message + " name: " + name else: message = e.message _six.raise_from(_core._status_to_exception(e.code, message), None) # Add nodes to the TensorFlow graph. 
if output_range_given is None: output_range_given = False output_range_given = _execute.make_bool(output_range_given, "output_range_given") if given_y_min is None: given_y_min = 0 given_y_min = _execute.make_float(given_y_min, "given_y_min") if given_y_max is None: given_y_max = 0 given_y_max = _execute.make_float(given_y_max, "given_y_max") if variance_epsilon is None: variance_epsilon = 1e-05 variance_epsilon = _execute.make_float(variance_epsilon, "variance_epsilon") if min_separation is None: min_separation = 0.001 min_separation = _execute.make_float(min_separation, "min_separation") _, _, _op = _op_def_lib._apply_op_helper( "QuantizedInstanceNorm", x=x, x_min=x_min, x_max=x_max, output_range_given=output_range_given, given_y_min=given_y_min, given_y_max=given_y_max, variance_epsilon=variance_epsilon, min_separation=min_separation, name=name) _result = _op.outputs[:] _inputs_flat = _op.inputs _attrs = ("T", _op.get_attr("T"), "output_range_given", _op.get_attr("output_range_given"), "given_y_min", _op.get_attr("given_y_min"), "given_y_max", _op.get_attr("given_y_max"), "variance_epsilon", _op.get_attr("variance_epsilon"), "min_separation", _op.get_attr("min_separation")) _execute.record_gradient( "QuantizedInstanceNorm", _inputs_flat, _attrs, _result, name) _result = _QuantizedInstanceNormOutput._make(_result) return _result def quantized_instance_norm_eager_fallback(x, x_min, x_max, output_range_given=False, given_y_min=0, given_y_max=0, variance_epsilon=1e-05, min_separation=0.001, name=None, ctx=None): r"""This is the slowpath function for Eager mode. 
  This is for function quantized_instance_norm
  """
  _ctx = ctx if ctx else _context.context()
  # Normalize every optional attr to its declared default and canonical
  # Python type before building the attr tuple.
  if output_range_given is None:
    output_range_given = False
  output_range_given = _execute.make_bool(output_range_given, "output_range_given")
  if given_y_min is None:
    given_y_min = 0
  given_y_min = _execute.make_float(given_y_min, "given_y_min")
  if given_y_max is None:
    given_y_max = 0
  given_y_max = _execute.make_float(given_y_max, "given_y_max")
  if variance_epsilon is None:
    variance_epsilon = 1e-05
  variance_epsilon = _execute.make_float(variance_epsilon, "variance_epsilon")
  if min_separation is None:
    min_separation = 0.001
  min_separation = _execute.make_float(min_separation, "min_separation")
  _attr_T, (x,) = _execute.args_to_matching_eager([x], _ctx)
  x_min = _ops.convert_to_tensor(x_min, _dtypes.float32)
  x_max = _ops.convert_to_tensor(x_max, _dtypes.float32)
  _inputs_flat = [x, x_min, x_max]
  _attrs = ("T", _attr_T, "output_range_given", output_range_given,
  "given_y_min", given_y_min, "given_y_max", given_y_max,
  "variance_epsilon", variance_epsilon, "min_separation", min_separation)
  # 3 == number of outputs (y, y_min, y_max).
  _result = _execute.execute(b"QuantizedInstanceNorm", 3,
                             inputs=_inputs_flat, attrs=_attrs, ctx=_ctx,
                             name=name)
  _execute.record_gradient(
      "QuantizedInstanceNorm", _inputs_flat, _attrs, _result, name)
  _result = _QuantizedInstanceNormOutput._make(_result)
  return _result


_quantized_reshape_outputs = ["output", "output_min", "output_max"]
_QuantizedReshapeOutput = _collections.namedtuple(
    "QuantizedReshape", _quantized_reshape_outputs)


def quantized_reshape(tensor, shape, input_min, input_max, name=None):
  r"""Reshapes a quantized tensor as per the Reshape op.

  Args:
    tensor: A `Tensor`.
    shape: A `Tensor`. Must be one of the following types: `int32`, `int64`.
      Defines the shape of the output tensor.
    input_min: A `Tensor` of type `float32`. The minimum value of the input.
    input_max: A `Tensor` of type `float32`. The maximum value of the input.
    name: A name for the operation (optional).
Returns: A tuple of `Tensor` objects (output, output_min, output_max). output: A `Tensor`. Has the same type as `tensor`. output_min: A `Tensor` of type `float32`. output_max: A `Tensor` of type `float32`. """ _ctx = _context._context if _ctx is not None and _ctx._eager_context.is_eager: try: _result = _pywrap_tensorflow.TFE_Py_FastPathExecute( _ctx._context_handle, _ctx._eager_context.device_name, "QuantizedReshape", name, _ctx._post_execution_callbacks, tensor, shape, input_min, input_max) _result = _QuantizedReshapeOutput._make(_result) return _result except _core._FallbackException: try: return quantized_reshape_eager_fallback( tensor, shape, input_min, input_max, name=name, ctx=_ctx) except _core._SymbolicException: pass # Add nodes to the TensorFlow graph. except _core._NotOkStatusException as e: if name is not None: message = e.message + " name: " + name else: message = e.message _six.raise_from(_core._status_to_exception(e.code, message), None) # Add nodes to the TensorFlow graph. _, _, _op = _op_def_lib._apply_op_helper( "QuantizedReshape", tensor=tensor, shape=shape, input_min=input_min, input_max=input_max, name=name) _result = _op.outputs[:] _inputs_flat = _op.inputs _attrs = ("T", _op.get_attr("T"), "Tshape", _op.get_attr("Tshape")) _execute.record_gradient( "QuantizedReshape", _inputs_flat, _attrs, _result, name) _result = _QuantizedReshapeOutput._make(_result) return _result def quantized_reshape_eager_fallback(tensor, shape, input_min, input_max, name=None, ctx=None): r"""This is the slowpath function for Eager mode. 
This is for function quantized_reshape """ _ctx = ctx if ctx else _context.context() _attr_T, (tensor,) = _execute.args_to_matching_eager([tensor], _ctx) _attr_Tshape, (shape,) = _execute.args_to_matching_eager([shape], _ctx, _dtypes.int32) input_min = _ops.convert_to_tensor(input_min, _dtypes.float32) input_max = _ops.convert_to_tensor(input_max, _dtypes.float32) _inputs_flat = [tensor, shape, input_min, input_max] _attrs = ("T", _attr_T, "Tshape", _attr_Tshape) _result = _execute.execute(b"QuantizedReshape", 3, inputs=_inputs_flat, attrs=_attrs, ctx=_ctx, name=name) _execute.record_gradient( "QuantizedReshape", _inputs_flat, _attrs, _result, name) _result = _QuantizedReshapeOutput._make(_result) return _result def rank(input, name=None): r"""Returns the rank of a tensor. This operation returns an integer representing the rank of `input`. For example: ``` # 't' is [[[1, 1, 1], [2, 2, 2]], [[3, 3, 3], [4, 4, 4]]] # shape of tensor 't' is [2, 2, 3] rank(t) ==> 3 ``` **Note**: The rank of a tensor is not the same as the rank of a matrix. The rank of a tensor is the number of indices required to uniquely select each element of the tensor. Rank is also known as "order", "degree", or "ndims." Args: input: A `Tensor`. name: A name for the operation (optional). Returns: A `Tensor` of type `int32`. """ _ctx = _context._context if _ctx is not None and _ctx._eager_context.is_eager: try: _result = _pywrap_tensorflow.TFE_Py_FastPathExecute( _ctx._context_handle, _ctx._eager_context.device_name, "Rank", name, _ctx._post_execution_callbacks, input) return _result except _core._FallbackException: try: return rank_eager_fallback( input, name=name, ctx=_ctx) except _core._SymbolicException: pass # Add nodes to the TensorFlow graph. except _core._NotOkStatusException as e: if name is not None: message = e.message + " name: " + name else: message = e.message _six.raise_from(_core._status_to_exception(e.code, message), None) # Add nodes to the TensorFlow graph. 
  # Graph mode: record a Rank node in the default graph.
  _, _, _op = _op_def_lib._apply_op_helper(
        "Rank", input=input, name=name)
  _result = _op.outputs[:]
  _inputs_flat = _op.inputs
  _attrs = ("T", _op.get_attr("T"))
  _execute.record_gradient(
      "Rank", _inputs_flat, _attrs, _result, name)
  _result, = _result
  return _result


def rank_eager_fallback(input, name=None, ctx=None):
  r"""This is the slowpath function for Eager mode.
  This is for function rank
  """
  _ctx = ctx if ctx else _context.context()
  _attr_T, (input,) = _execute.args_to_matching_eager([input], _ctx)
  _inputs_flat = [input]
  _attrs = ("T", _attr_T)
  # 1 == number of outputs of the Rank op.
  _result = _execute.execute(b"Rank", 1, inputs=_inputs_flat, attrs=_attrs,
                             ctx=_ctx, name=name)
  _execute.record_gradient(
      "Rank", _inputs_flat, _attrs, _result, name)
  _result, = _result
  return _result


def ref_identity(input, name=None):
  r"""Return the same ref tensor as the input ref tensor.

  Args:
    input: A mutable `Tensor`.
    name: A name for the operation (optional).

  Returns:
    A mutable `Tensor`. Has the same type as `input`.
  """
  _ctx = _context._context
  if _ctx is not None and _ctx._eager_context.is_eager:
    # Ref-typed outputs only exist in graph mode, so eager execution is
    # rejected outright.
    raise RuntimeError("ref_identity op does not support eager execution. Arg 'output' is a ref.")
  # Add nodes to the TensorFlow graph.
  _, _, _op = _op_def_lib._apply_op_helper(
        "RefIdentity", input=input, name=name)
  _result = _op.outputs[:]
  _inputs_flat = _op.inputs
  _attrs = ("T", _op.get_attr("T"))
  _execute.record_gradient(
      "RefIdentity", _inputs_flat, _attrs, _result, name)
  _result, = _result
  return _result


def ref_identity_eager_fallback(input, name=None, ctx=None):
  raise RuntimeError("ref_identity op does not support eager execution. Arg 'output' is a ref.")

@_dispatch.add_dispatch_list
@tf_export('reshape', v1=['reshape', 'manip.reshape'])
@deprecated_endpoints('manip.reshape')
def reshape(tensor, shape, name=None):
  r"""Reshapes a tensor.

  Given `tensor`, this operation returns a tensor that has the same values
  as `tensor` with shape `shape`.
If one component of `shape` is the special value -1, the size of that dimension is computed so that the total size remains constant. In particular, a `shape` of `[-1]` flattens into 1-D. At most one component of `shape` can be -1. If `shape` is 1-D or higher, then the operation returns a tensor with shape `shape` filled with the values of `tensor`. In this case, the number of elements implied by `shape` must be the same as the number of elements in `tensor`. For example: ``` # tensor 't' is [1, 2, 3, 4, 5, 6, 7, 8, 9] # tensor 't' has shape [9] reshape(t, [3, 3]) ==> [[1, 2, 3], [4, 5, 6], [7, 8, 9]] # tensor 't' is [[[1, 1], [2, 2]], # [[3, 3], [4, 4]]] # tensor 't' has shape [2, 2, 2] reshape(t, [2, 4]) ==> [[1, 1, 2, 2], [3, 3, 4, 4]] # tensor 't' is [[[1, 1, 1], # [2, 2, 2]], # [[3, 3, 3], # [4, 4, 4]], # [[5, 5, 5], # [6, 6, 6]]] # tensor 't' has shape [3, 2, 3] # pass '[-1]' to flatten 't' reshape(t, [-1]) ==> [1, 1, 1, 2, 2, 2, 3, 3, 3, 4, 4, 4, 5, 5, 5, 6, 6, 6] # -1 can also be used to infer the shape # -1 is inferred to be 9: reshape(t, [2, -1]) ==> [[1, 1, 1, 2, 2, 2, 3, 3, 3], [4, 4, 4, 5, 5, 5, 6, 6, 6]] # -1 is inferred to be 2: reshape(t, [-1, 9]) ==> [[1, 1, 1, 2, 2, 2, 3, 3, 3], [4, 4, 4, 5, 5, 5, 6, 6, 6]] # -1 is inferred to be 3: reshape(t, [ 2, -1, 3]) ==> [[[1, 1, 1], [2, 2, 2], [3, 3, 3]], [[4, 4, 4], [5, 5, 5], [6, 6, 6]]] # tensor 't' is [7] # shape `[]` reshapes to a scalar reshape(t, []) ==> 7 ``` Args: tensor: A `Tensor`. shape: A `Tensor`. Must be one of the following types: `int32`, `int64`. Defines the shape of the output tensor. name: A name for the operation (optional). Returns: A `Tensor`. Has the same type as `tensor`. 
""" _ctx = _context._context if _ctx is not None and _ctx._eager_context.is_eager: try: _result = _pywrap_tensorflow.TFE_Py_FastPathExecute( _ctx._context_handle, _ctx._eager_context.device_name, "Reshape", name, _ctx._post_execution_callbacks, tensor, shape) return _result except _core._FallbackException: try: return reshape_eager_fallback( tensor, shape, name=name, ctx=_ctx) except _core._SymbolicException: pass # Add nodes to the TensorFlow graph. except (TypeError, ValueError): result = _dispatch.dispatch( reshape, tensor=tensor, shape=shape, name=name) if result is not _dispatch.OpDispatcher.NOT_SUPPORTED: return result raise except _core._NotOkStatusException as e: if name is not None: message = e.message + " name: " + name else: message = e.message _six.raise_from(_core._status_to_exception(e.code, message), None) # Add nodes to the TensorFlow graph. try: _, _, _op = _op_def_lib._apply_op_helper( "Reshape", tensor=tensor, shape=shape, name=name) except (TypeError, ValueError): result = _dispatch.dispatch( reshape, tensor=tensor, shape=shape, name=name) if result is not _dispatch.OpDispatcher.NOT_SUPPORTED: return result raise _result = _op.outputs[:] _inputs_flat = _op.inputs _attrs = ("T", _op.get_attr("T"), "Tshape", _op.get_attr("Tshape")) _execute.record_gradient( "Reshape", _inputs_flat, _attrs, _result, name) _result, = _result return _result def reshape_eager_fallback(tensor, shape, name=None, ctx=None): r"""This is the slowpath function for Eager mode. 
  This is for function reshape
  """
  _ctx = ctx if ctx else _context.context()
  _attr_T, (tensor,) = _execute.args_to_matching_eager([tensor], _ctx)
  # Tshape defaults to int32 when `shape` carries no dtype of its own.
  _attr_Tshape, (shape,) = _execute.args_to_matching_eager([shape], _ctx, _dtypes.int32)
  _inputs_flat = [tensor, shape]
  _attrs = ("T", _attr_T, "Tshape", _attr_Tshape)
  # 1 == number of outputs of the Reshape op.
  _result = _execute.execute(b"Reshape", 1, inputs=_inputs_flat,
                             attrs=_attrs, ctx=_ctx, name=name)
  _execute.record_gradient(
      "Reshape", _inputs_flat, _attrs, _result, name)
  _result, = _result
  return _result


def resource_strided_slice_assign(ref, begin, end, strides, value, begin_mask=0, end_mask=0, ellipsis_mask=0, new_axis_mask=0, shrink_axis_mask=0, name=None):
  r"""Assign `value` to the sliced l-value reference of `ref`.

  The values of `value` are assigned to the positions in the variable
  `ref` that are selected by the slice parameters. The slice parameters
  `begin`, `end`, `strides`, etc. work exactly as in `StridedSlice`.

  NOTE this op currently does not support broadcasting and so `value`'s
  shape must be exactly the shape produced by the slice of `ref`.

  Args:
    ref: A `Tensor` of type `resource`.
    begin: A `Tensor`. Must be one of the following types: `int32`, `int64`.
    end: A `Tensor`. Must have the same type as `begin`.
    strides: A `Tensor`. Must have the same type as `begin`.
    value: A `Tensor`.
    begin_mask: An optional `int`. Defaults to `0`.
    end_mask: An optional `int`. Defaults to `0`.
    ellipsis_mask: An optional `int`. Defaults to `0`.
    new_axis_mask: An optional `int`. Defaults to `0`.
    shrink_axis_mask: An optional `int`. Defaults to `0`.
    name: A name for the operation (optional).

  Returns:
    The created Operation.
""" _ctx = _context._context if _ctx is not None and _ctx._eager_context.is_eager: try: _result = _pywrap_tensorflow.TFE_Py_FastPathExecute( _ctx._context_handle, _ctx._eager_context.device_name, "ResourceStridedSliceAssign", name, _ctx._post_execution_callbacks, ref, begin, end, strides, value, "begin_mask", begin_mask, "end_mask", end_mask, "ellipsis_mask", ellipsis_mask, "new_axis_mask", new_axis_mask, "shrink_axis_mask", shrink_axis_mask) return _result except _core._FallbackException: try: return resource_strided_slice_assign_eager_fallback( ref, begin, end, strides, value, begin_mask=begin_mask, end_mask=end_mask, ellipsis_mask=ellipsis_mask, new_axis_mask=new_axis_mask, shrink_axis_mask=shrink_axis_mask, name=name, ctx=_ctx) except _core._SymbolicException: pass # Add nodes to the TensorFlow graph. except _core._NotOkStatusException as e: if name is not None: message = e.message + " name: " + name else: message = e.message _six.raise_from(_core._status_to_exception(e.code, message), None) # Add nodes to the TensorFlow graph. 
  # --- graph-mode tail of resource_strided_slice_assign (its `def` is above
  # this chunk): normalize the five optional mask attrs to ints, then add the
  # op node to the graph.  The op has zero outputs, so the graph path returns
  # the Operation itself.
  if begin_mask is None:
    begin_mask = 0
  begin_mask = _execute.make_int(begin_mask, "begin_mask")
  if end_mask is None:
    end_mask = 0
  end_mask = _execute.make_int(end_mask, "end_mask")
  if ellipsis_mask is None:
    ellipsis_mask = 0
  ellipsis_mask = _execute.make_int(ellipsis_mask, "ellipsis_mask")
  if new_axis_mask is None:
    new_axis_mask = 0
  new_axis_mask = _execute.make_int(new_axis_mask, "new_axis_mask")
  if shrink_axis_mask is None:
    shrink_axis_mask = 0
  shrink_axis_mask = _execute.make_int(shrink_axis_mask, "shrink_axis_mask")
  _, _, _op = _op_def_lib._apply_op_helper(
        "ResourceStridedSliceAssign", ref=ref, begin=begin, end=end,
                                      strides=strides, value=value,
                                      begin_mask=begin_mask,
                                      end_mask=end_mask,
                                      ellipsis_mask=ellipsis_mask,
                                      new_axis_mask=new_axis_mask,
                                      shrink_axis_mask=shrink_axis_mask,
                                      name=name)
  return _op
  # NOTE(review): the two lines below are unreachable generator boilerplate
  # emitted for zero-output ops; kept byte-for-byte (generated file).
  _result = None
  return _result

def resource_strided_slice_assign_eager_fallback(ref, begin, end, strides, value, begin_mask=0, end_mask=0, ellipsis_mask=0, new_axis_mask=0, shrink_axis_mask=0, name=None, ctx=None):
  r"""This is the slowpath function for Eager mode.
  This is for function resource_strided_slice_assign
  """
  _ctx = ctx if ctx else _context.context()
  # Normalize optional mask attrs exactly as the graph path does.
  if begin_mask is None:
    begin_mask = 0
  begin_mask = _execute.make_int(begin_mask, "begin_mask")
  if end_mask is None:
    end_mask = 0
  end_mask = _execute.make_int(end_mask, "end_mask")
  if ellipsis_mask is None:
    ellipsis_mask = 0
  ellipsis_mask = _execute.make_int(ellipsis_mask, "ellipsis_mask")
  if new_axis_mask is None:
    new_axis_mask = 0
  new_axis_mask = _execute.make_int(new_axis_mask, "new_axis_mask")
  if shrink_axis_mask is None:
    shrink_axis_mask = 0
  shrink_axis_mask = _execute.make_int(shrink_axis_mask, "shrink_axis_mask")
  # Infer the T attr from `value` and a single Index attr shared by
  # begin/end/strides; `ref` must be a resource handle.
  _attr_T, (value,) = _execute.args_to_matching_eager([value], _ctx)
  _attr_Index, _inputs_Index = _execute.args_to_matching_eager([begin, end, strides], _ctx)
  (begin, end, strides) = _inputs_Index
  ref = _ops.convert_to_tensor(ref, _dtypes.resource)
  _inputs_flat = [ref, begin, end, strides, value]
  _attrs = ("T", _attr_T, "Index", _attr_Index, "begin_mask", begin_mask,
  "end_mask", end_mask, "ellipsis_mask", ellipsis_mask, "new_axis_mask",
  new_axis_mask, "shrink_axis_mask", shrink_axis_mask)
  # Zero outputs: execute for the side effect on the resource variable.
  _result = _execute.execute(b"ResourceStridedSliceAssign", 0,
                             inputs=_inputs_flat, attrs=_attrs, ctx=_ctx,
                             name=name)
  _result = None
  return _result


# Generated wrapper for the "Reverse" op (eager fast path -> eager fallback
# -> graph mode).
def reverse(tensor, dims, name=None):
  r"""Reverses specific dimensions of a tensor.

  Given a `tensor`, and a `bool` tensor `dims` representing the dimensions
  of `tensor`, this operation reverses each dimension i of `tensor` where
  `dims[i]` is `True`.

  `tensor` can have up to 8 dimensions. The number of dimensions
  of `tensor` must equal the number of elements in `dims`.
  In other words:

  `rank(tensor) = size(dims)`

  For example:

  ```
  # tensor 't' is [[[[ 0,  1,  2,  3],
  #                  [ 4,  5,  6,  7],
  #                  [ 8,  9, 10, 11]],
  #                 [[12, 13, 14, 15],
  #                  [16, 17, 18, 19],
  #                  [20, 21, 22, 23]]]]
  # tensor 't' shape is [1, 2, 3, 4]

  # 'dims' is [False, False, False, True]
  reverse(t, dims) ==> [[[[ 3,  2,  1,  0],
                          [ 7,  6,  5,  4],
                          [ 11, 10,  9,  8]],
                         [[15, 14, 13, 12],
                          [19, 18, 17, 16],
                          [23, 22, 21, 20]]]]

  # 'dims' is [False, True, False, False]
  reverse(t, dims) ==> [[[[12, 13, 14, 15],
                          [16, 17, 18, 19],
                          [20, 21, 22, 23]
                         [[ 0,  1,  2,  3],
                          [ 4,  5,  6,  7],
                          [ 8,  9, 10, 11]]]]

  # 'dims' is [False, False, True, False]
  reverse(t, dims) ==> [[[[8, 9, 10, 11],
                          [4, 5, 6, 7],
                          [0, 1, 2, 3]]
                         [[20, 21, 22, 23],
                          [16, 17, 18, 19],
                          [12, 13, 14, 15]]]]
  ```

  Args:
    tensor: A `Tensor`. Must be one of the following types: `uint8`, `int8`, `uint16`, `int16`, `int32`, `int64`, `bool`, `half`, `float32`, `float64`, `complex64`, `complex128`, `string`.
      Up to 8-D.
    dims: A `Tensor` of type `bool`. 1-D. The dimensions to reverse.
    name: A name for the operation (optional).

  Returns:
    A `Tensor`. Has the same type as `tensor`.
  """
  _ctx = _context._context
  # Eager fast path: call straight into the C layer; on fallback, retry via
  # the Python slow path; on a symbolic tensor, drop through to graph mode.
  if _ctx is not None and _ctx._eager_context.is_eager:
    try:
      _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
        _ctx._context_handle, _ctx._eager_context.device_name, "Reverse",
        name, _ctx._post_execution_callbacks, tensor, dims)
      return _result
    except _core._FallbackException:
      try:
        return reverse_eager_fallback(
            tensor, dims, name=name, ctx=_ctx)
      except _core._SymbolicException:
        pass  # Add nodes to the TensorFlow graph.
    except _core._NotOkStatusException as e:
      if name is not None:
        message = e.message + " name: " + name
      else:
        message = e.message
      _six.raise_from(_core._status_to_exception(e.code, message), None)
  # Add nodes to the TensorFlow graph.
  # Graph mode: build the op node and register its gradient recording.
  _, _, _op = _op_def_lib._apply_op_helper(
        "Reverse", tensor=tensor, dims=dims, name=name)
  _result = _op.outputs[:]
  _inputs_flat = _op.inputs
  _attrs = ("T", _op.get_attr("T"))
  _execute.record_gradient(
      "Reverse", _inputs_flat, _attrs, _result, name)
  _result, = _result
  return _result

def reverse_eager_fallback(tensor, dims, name=None, ctx=None):
  r"""This is the slowpath function for Eager mode.
  This is for function reverse
  """
  _ctx = ctx if ctx else _context.context()
  # Infer T from `tensor`; `dims` is always a bool tensor for this op.
  _attr_T, (tensor,) = _execute.args_to_matching_eager([tensor], _ctx)
  dims = _ops.convert_to_tensor(dims, _dtypes.bool)
  _inputs_flat = [tensor, dims]
  _attrs = ("T", _attr_T)
  _result = _execute.execute(b"Reverse", 1, inputs=_inputs_flat,
                             attrs=_attrs, ctx=_ctx, name=name)
  _execute.record_gradient(
      "Reverse", _inputs_flat, _attrs, _result, name)
  _result, = _result
  return _result


# Generated wrapper for the "ReverseSequence" op.
def reverse_sequence(input, seq_lengths, seq_dim, batch_dim=0, name=None):
  r"""Reverses variable length slices.

  This op first slices `input` along the dimension `batch_dim`, and for each
  slice `i`, reverses the first `seq_lengths[i]` elements along
  the dimension `seq_dim`.

  The elements of `seq_lengths` must obey `seq_lengths[i] <= input.dims[seq_dim]`,
  and `seq_lengths` must be a vector of length `input.dims[batch_dim]`.

  The output slice `i` along dimension `batch_dim` is then given by input
  slice `i`, with the first `seq_lengths[i]` slices along dimension
  `seq_dim` reversed.

  For example:

  ```
  # Given this:
  batch_dim = 0
  seq_dim = 1
  input.dims = (4, 8, ...)
  seq_lengths = [7, 2, 3, 5]

  # then slices of input are reversed on seq_dim, but only up to seq_lengths:
  output[0, 0:7, :, ...] = input[0, 7:0:-1, :, ...]
  output[1, 0:2, :, ...] = input[1, 2:0:-1, :, ...]
  output[2, 0:3, :, ...] = input[2, 3:0:-1, :, ...]
  output[3, 0:5, :, ...] = input[3, 5:0:-1, :, ...]

  # while entries past seq_lens are copied through:
  output[0, 7:, :, ...] = input[0, 7:, :, ...]
  output[1, 2:, :, ...] = input[1, 2:, :, ...]
  output[2, 3:, :, ...]
  = input[2, 3:, :, ...]
  output[3, 2:, :, ...] = input[3, 2:, :, ...]
  ```

  In contrast, if:

  ```
  # Given this:
  batch_dim = 2
  seq_dim = 0
  input.dims = (8, ?, 4, ...)
  seq_lengths = [7, 2, 3, 5]

  # then slices of input are reversed on seq_dim, but only up to seq_lengths:
  output[0:7, :, 0, :, ...] = input[7:0:-1, :, 0, :, ...]
  output[0:2, :, 1, :, ...] = input[2:0:-1, :, 1, :, ...]
  output[0:3, :, 2, :, ...] = input[3:0:-1, :, 2, :, ...]
  output[0:5, :, 3, :, ...] = input[5:0:-1, :, 3, :, ...]

  # while entries past seq_lens are copied through:
  output[7:, :, 0, :, ...] = input[7:, :, 0, :, ...]
  output[2:, :, 1, :, ...] = input[2:, :, 1, :, ...]
  output[3:, :, 2, :, ...] = input[3:, :, 2, :, ...]
  output[2:, :, 3, :, ...] = input[2:, :, 3, :, ...]
  ```

  Args:
    input: A `Tensor`. The input to reverse.
    seq_lengths: A `Tensor`. Must be one of the following types: `int32`, `int64`.
      1-D with length `input.dims(batch_dim)` and
      `max(seq_lengths) <= input.dims(seq_dim)`
    seq_dim: An `int`. The dimension which is partially reversed.
    batch_dim: An optional `int`. Defaults to `0`.
      The dimension along which reversal is performed.
    name: A name for the operation (optional).

  Returns:
    A `Tensor`. Has the same type as `input`.
  """
  _ctx = _context._context
  # Eager fast path; attrs (`seq_dim`, `batch_dim`) are passed inline as
  # name/value pairs to the C fast-path call.
  if _ctx is not None and _ctx._eager_context.is_eager:
    try:
      _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
        _ctx._context_handle, _ctx._eager_context.device_name,
        "ReverseSequence", name, _ctx._post_execution_callbacks, input,
        seq_lengths, "seq_dim", seq_dim, "batch_dim", batch_dim)
      return _result
    except _core._FallbackException:
      try:
        return reverse_sequence_eager_fallback(
            input, seq_lengths, seq_dim=seq_dim, batch_dim=batch_dim,
            name=name, ctx=_ctx)
      except _core._SymbolicException:
        pass  # Add nodes to the TensorFlow graph.
    except _core._NotOkStatusException as e:
      if name is not None:
        message = e.message + " name: " + name
      else:
        message = e.message
      _six.raise_from(_core._status_to_exception(e.code, message), None)
  # Add nodes to the TensorFlow graph.
  # Graph mode: `seq_dim` is required, `batch_dim` defaults to 0.
  seq_dim = _execute.make_int(seq_dim, "seq_dim")
  if batch_dim is None:
    batch_dim = 0
  batch_dim = _execute.make_int(batch_dim, "batch_dim")
  _, _, _op = _op_def_lib._apply_op_helper(
        "ReverseSequence", input=input, seq_lengths=seq_lengths,
                           seq_dim=seq_dim, batch_dim=batch_dim, name=name)
  _result = _op.outputs[:]
  _inputs_flat = _op.inputs
  _attrs = ("seq_dim", _op.get_attr("seq_dim"), "batch_dim",
            _op.get_attr("batch_dim"), "T", _op.get_attr("T"), "Tlen",
            _op.get_attr("Tlen"))
  _execute.record_gradient(
      "ReverseSequence", _inputs_flat, _attrs, _result, name)
  _result, = _result
  return _result

def reverse_sequence_eager_fallback(input, seq_lengths, seq_dim, batch_dim=0, name=None, ctx=None):
  r"""This is the slowpath function for Eager mode.
  This is for function reverse_sequence
  """
  _ctx = ctx if ctx else _context.context()
  seq_dim = _execute.make_int(seq_dim, "seq_dim")
  if batch_dim is None:
    batch_dim = 0
  batch_dim = _execute.make_int(batch_dim, "batch_dim")
  # Infer T from `input`; Tlen defaults to int64 when seq_lengths is not
  # already a tensor.
  _attr_T, (input,) = _execute.args_to_matching_eager([input], _ctx)
  _attr_Tlen, (seq_lengths,) = _execute.args_to_matching_eager([seq_lengths], _ctx, _dtypes.int64)
  _inputs_flat = [input, seq_lengths]
  _attrs = ("seq_dim", seq_dim, "batch_dim", batch_dim, "T", _attr_T, "Tlen",
  _attr_Tlen)
  _result = _execute.execute(b"ReverseSequence", 1, inputs=_inputs_flat,
                             attrs=_attrs, ctx=_ctx, name=name)
  _execute.record_gradient(
      "ReverseSequence", _inputs_flat, _attrs, _result, name)
  _result, = _result
  return _result


# Generated wrapper for the "ReverseV2" op; participates in the dispatch
# mechanism (on TypeError/ValueError, registered dispatchers get a chance
# before the error propagates) and is exported under several endpoints.
@_dispatch.add_dispatch_list
@tf_export('reverse', v1=['reverse', 'manip.reverse', 'reverse_v2'])
@deprecated_endpoints('manip.reverse', 'reverse_v2')
def reverse_v2(tensor, axis, name=None):
  r"""Reverses specific dimensions of a tensor.
  NOTE `tf.reverse` has now changed behavior in preparation for 1.0.
  `tf.reverse_v2` is currently an alias that will be deprecated before TF 1.0.

  Given a `tensor`, and a `int32` tensor `axis` representing the set of
  dimensions of `tensor` to reverse. This operation reverses each dimension
  `i` for which there exists `j` s.t. `axis[j] == i`.

  `tensor` can have up to 8 dimensions. The number of dimensions specified
  in `axis` may be 0 or more entries. If an index is specified more than
  once, a InvalidArgument error is raised.

  For example:

  ```
  # tensor 't' is [[[[ 0,  1,  2,  3],
  #                  [ 4,  5,  6,  7],
  #                  [ 8,  9, 10, 11]],
  #                 [[12, 13, 14, 15],
  #                  [16, 17, 18, 19],
  #                  [20, 21, 22, 23]]]]
  # tensor 't' shape is [1, 2, 3, 4]

  # 'dims' is [3] or 'dims' is [-1]
  reverse(t, dims) ==> [[[[ 3,  2,  1,  0],
                          [ 7,  6,  5,  4],
                          [ 11, 10,  9,  8]],
                         [[15, 14, 13, 12],
                          [19, 18, 17, 16],
                          [23, 22, 21, 20]]]]

  # 'dims' is '[1]' (or 'dims' is '[-3]')
  reverse(t, dims) ==> [[[[12, 13, 14, 15],
                          [16, 17, 18, 19],
                          [20, 21, 22, 23]
                         [[ 0,  1,  2,  3],
                          [ 4,  5,  6,  7],
                          [ 8,  9, 10, 11]]]]

  # 'dims' is '[2]' (or 'dims' is '[-2]')
  reverse(t, dims) ==> [[[[8, 9, 10, 11],
                          [4, 5, 6, 7],
                          [0, 1, 2, 3]]
                         [[20, 21, 22, 23],
                          [16, 17, 18, 19],
                          [12, 13, 14, 15]]]]
  ```

  Args:
    tensor: A `Tensor`. Must be one of the following types: `uint8`, `int8`, `uint16`, `int16`, `int32`, `int64`, `bool`, `bfloat16`, `half`, `float32`, `float64`, `complex64`, `complex128`, `string`.
      Up to 8-D.
    axis: A `Tensor`. Must be one of the following types: `int32`, `int64`.
      1-D. The indices of the dimensions to reverse. Must be in the range
      `[-rank(tensor), rank(tensor))`.
    name: A name for the operation (optional).

  Returns:
    A `Tensor`. Has the same type as `tensor`.
  """
  _ctx = _context._context
  # Eager fast path; TypeError/ValueError are offered to registered
  # dispatchers before re-raising (dispatch-decorated op).
  if _ctx is not None and _ctx._eager_context.is_eager:
    try:
      _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
        _ctx._context_handle, _ctx._eager_context.device_name, "ReverseV2",
        name, _ctx._post_execution_callbacks, tensor, axis)
      return _result
    except _core._FallbackException:
      try:
        return reverse_v2_eager_fallback(
            tensor, axis, name=name, ctx=_ctx)
      except _core._SymbolicException:
        pass  # Add nodes to the TensorFlow graph.
      except (TypeError, ValueError):
        result = _dispatch.dispatch(
              reverse_v2, tensor=tensor, axis=axis, name=name)
        if result is not _dispatch.OpDispatcher.NOT_SUPPORTED:
          return result
        raise
    except _core._NotOkStatusException as e:
      if name is not None:
        message = e.message + " name: " + name
      else:
        message = e.message
      _six.raise_from(_core._status_to_exception(e.code, message), None)
  # Add nodes to the TensorFlow graph.
  # Graph mode, also guarded by the dispatch mechanism.
  try:
    _, _, _op = _op_def_lib._apply_op_helper(
        "ReverseV2", tensor=tensor, axis=axis, name=name)
  except (TypeError, ValueError):
    result = _dispatch.dispatch(
          reverse_v2, tensor=tensor, axis=axis, name=name)
    if result is not _dispatch.OpDispatcher.NOT_SUPPORTED:
      return result
    raise
  _result = _op.outputs[:]
  _inputs_flat = _op.inputs
  _attrs = ("Tidx", _op.get_attr("Tidx"), "T", _op.get_attr("T"))
  _execute.record_gradient(
      "ReverseV2", _inputs_flat, _attrs, _result, name)
  _result, = _result
  return _result

def reverse_v2_eager_fallback(tensor, axis, name=None, ctx=None):
  r"""This is the slowpath function for Eager mode.
  This is for function reverse_v2
  """
  _ctx = ctx if ctx else _context.context()
  # Tidx defaults to int32 for `axis`; T is inferred from `tensor`.
  _attr_Tidx, (axis,) = _execute.args_to_matching_eager([axis], _ctx, _dtypes.int32)
  _attr_T, (tensor,) = _execute.args_to_matching_eager([tensor], _ctx)
  _inputs_flat = [tensor, axis]
  _attrs = ("Tidx", _attr_Tidx, "T", _attr_T)
  _result = _execute.execute(b"ReverseV2", 1, inputs=_inputs_flat,
                             attrs=_attrs, ctx=_ctx, name=name)
  _execute.record_gradient(
      "ReverseV2", _inputs_flat, _attrs, _result, name)
  _result, = _result
  return _result


# Generated wrapper for the "ScatterNd" op (dispatch-decorated, multi-endpoint
# export).
@_dispatch.add_dispatch_list
@tf_export('scatter_nd', v1=['scatter_nd', 'manip.scatter_nd'])
@deprecated_endpoints('manip.scatter_nd')
def scatter_nd(indices, updates, shape, name=None):
  r"""Scatter `updates` into a new tensor according to `indices`.

  Creates a new tensor by applying sparse `updates` to individual values or
  slices within a tensor (initially zero for numeric, empty for string) of
  the given `shape` according to indices.  This operator is the inverse of the
  `tf.gather_nd` operator which extracts values or slices from a given tensor.

  This operation is similar to tensor_scatter_add, except that the tensor is
  zero-initialized. Calling `tf.scatter_nd(indices, values, shape)` is identical
  to `tensor_scatter_add(tf.zeros(shape, values.dtype), indices, values)`

  If `indices` contains duplicates, then their updates are accumulated (summed).

  **WARNING**: The order in which updates are applied is nondeterministic, so the
  output will be nondeterministic if `indices` contains duplicates -- because
  of some numerical approximation issues, numbers summed in different order
  may yield different results.

  `indices` is an integer tensor containing indices into a new tensor of shape
  `shape`.  The last dimension of `indices` can be at most the rank of `shape`:

      indices.shape[-1] <= shape.rank

  The last dimension of `indices` corresponds to indices into elements
  (if `indices.shape[-1] = shape.rank`) or slices
  (if `indices.shape[-1] < shape.rank`) along dimension `indices.shape[-1]` of
  `shape`.  `updates` is a tensor with shape

      indices.shape[:-1] + shape[indices.shape[-1]:]

  The simplest form of scatter is to insert individual elements in a tensor by
  index. For example, say we want to insert 4 scattered elements in a rank-1
  tensor with 8 elements.

  <div style="width:70%; margin:auto; margin-bottom:10px; margin-top:20px;">
  <img style="width:100%" src="https://www.tensorflow.org/images/ScatterNd1.png" alt>
  </div>

  In Python, this scatter operation would look like this:

  ```python
      indices = tf.constant([[4], [3], [1], [7]])
      updates = tf.constant([9, 10, 11, 12])
      shape = tf.constant([8])
      scatter = tf.scatter_nd(indices, updates, shape)
      with tf.Session() as sess:
        print(sess.run(scatter))
  ```

  The resulting tensor would look like this:

      [0, 11, 0, 10, 9, 0, 0, 12]

  We can also, insert entire slices of a higher rank tensor all at once. For
  example, if we wanted to insert two slices in the first dimension of a
  rank-3 tensor with two matrices of new values.

  <div style="width:70%; margin:auto; margin-bottom:10px; margin-top:20px;">
  <img style="width:100%" src="https://www.tensorflow.org/images/ScatterNd2.png" alt>
  </div>

  In Python, this scatter operation would look like this:

  ```python
      indices = tf.constant([[0], [2]])
      updates = tf.constant([[[5, 5, 5, 5], [6, 6, 6, 6],
                              [7, 7, 7, 7], [8, 8, 8, 8]],
                             [[5, 5, 5, 5], [6, 6, 6, 6],
                              [7, 7, 7, 7], [8, 8, 8, 8]]])
      shape = tf.constant([4, 4, 4])
      scatter = tf.scatter_nd(indices, updates, shape)
      with tf.Session() as sess:
        print(sess.run(scatter))
  ```

  The resulting tensor would look like this:

      [[[5, 5, 5, 5], [6, 6, 6, 6], [7, 7, 7, 7], [8, 8, 8, 8]],
       [[0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0]],
       [[5, 5, 5, 5], [6, 6, 6, 6], [7, 7, 7, 7], [8, 8, 8, 8]],
       [[0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0]]]

  Note that on CPU, if an out of bound index is found, an error is returned.
  On GPU, if an out of bound index is found, the index is ignored.

  Args:
    indices: A `Tensor`. Must be one of the following types: `int32`, `int64`.
      Index tensor.
    updates: A `Tensor`. Updates to scatter into output.
    shape: A `Tensor`. Must have the same type as `indices`.
      1-D. The shape of the resulting tensor.
    name: A name for the operation (optional).

  Returns:
    A `Tensor`. Has the same type as `updates`.
  """
  _ctx = _context._context
  # Eager fast path; falls back to the Python slow path, and drops through to
  # graph mode on symbolic inputs.
  if _ctx is not None and _ctx._eager_context.is_eager:
    try:
      _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
        _ctx._context_handle, _ctx._eager_context.device_name, "ScatterNd",
        name, _ctx._post_execution_callbacks, indices, updates, shape)
      return _result
    except _core._FallbackException:
      try:
        return scatter_nd_eager_fallback(
            indices, updates, shape, name=name, ctx=_ctx)
      except _core._SymbolicException:
        pass  # Add nodes to the TensorFlow graph.
      except (TypeError, ValueError):
        result = _dispatch.dispatch(
              scatter_nd, indices=indices, updates=updates, shape=shape,
              name=name)
        if result is not _dispatch.OpDispatcher.NOT_SUPPORTED:
          return result
        raise
    except _core._NotOkStatusException as e:
      if name is not None:
        message = e.message + " name: " + name
      else:
        message = e.message
      _six.raise_from(_core._status_to_exception(e.code, message), None)
  # Add nodes to the TensorFlow graph.
  # Graph mode, guarded by the dispatch mechanism on TypeError/ValueError.
  try:
    _, _, _op = _op_def_lib._apply_op_helper(
        "ScatterNd", indices=indices, updates=updates, shape=shape,
                     name=name)
  except (TypeError, ValueError):
    result = _dispatch.dispatch(
          scatter_nd, indices=indices, updates=updates, shape=shape,
          name=name)
    if result is not _dispatch.OpDispatcher.NOT_SUPPORTED:
      return result
    raise
  _result = _op.outputs[:]
  _inputs_flat = _op.inputs
  _attrs = ("T", _op.get_attr("T"), "Tindices", _op.get_attr("Tindices"))
  _execute.record_gradient(
      "ScatterNd", _inputs_flat, _attrs, _result, name)
  _result, = _result
  return _result

def scatter_nd_eager_fallback(indices, updates, shape, name=None, ctx=None):
  r"""This is the slowpath function for Eager mode.
  This is for function scatter_nd
  """
  _ctx = ctx if ctx else _context.context()
  # T comes from `updates`; indices and shape must share one Tindices dtype.
  _attr_T, (updates,) = _execute.args_to_matching_eager([updates], _ctx)
  _attr_Tindices, _inputs_Tindices = _execute.args_to_matching_eager([indices, shape], _ctx)
  (indices, shape) = _inputs_Tindices
  _inputs_flat = [indices, updates, shape]
  _attrs = ("T", _attr_T, "Tindices", _attr_Tindices)
  _result = _execute.execute(b"ScatterNd", 1, inputs=_inputs_flat,
                             attrs=_attrs, ctx=_ctx, name=name)
  _execute.record_gradient(
      "ScatterNd", _inputs_flat, _attrs, _result, name)
  _result, = _result
  return _result


# Generated wrapper for the "ScatterNdNonAliasingAdd" op.
def scatter_nd_non_aliasing_add(input, indices, updates, name=None):
  r"""Applies sparse addition to `input` using individual values or slices

  from `updates` according to indices `indices`.  The updates are non-aliasing:
  `input` is only modified in-place if no other operations will use it.
  Otherwise, a copy of `input` is made.  This operation has a gradient with
  respect to both `input` and `updates`.

  `input` is a `Tensor` with rank `P` and `indices` is a `Tensor` of rank `Q`.

  `indices` must be integer tensor, containing indices into `input`.
  It must be shape \\([d_0, ..., d_{Q-2}, K]\\) where `0 < K <= P`.

  The innermost dimension of `indices` (with length `K`) corresponds to
  indices into elements (if `K = P`) or `(P-K)`-dimensional slices
  (if `K < P`) along the `K`th dimension of `input`.

  `updates` is `Tensor` of rank `Q-1+P-K` with shape:

  $$[d_0, ..., d_{Q-2}, input.shape[K], ..., input.shape[P-1]].$$

  For example, say we want to add 4 scattered elements to a rank-1 tensor to 8
  elements. In Python, that addition would look like this:

      input = tf.constant([1, 2, 3, 4, 5, 6, 7, 8])
      indices = tf.constant([[4], [3], [1], [7]])
      updates = tf.constant([9, 10, 11, 12])
      output = tf.scatter_nd_non_aliasing_add(input, indices, updates)
      with tf.Session() as sess:
        print(sess.run(output))

  The resulting value `output` would look like this:

      [1, 13, 3, 14, 14, 6, 7, 20]

  See `tf.scatter_nd` for more details about how to make updates to slices.

  Args:
    input: A `Tensor`. Must be one of the following types: `float32`, `float64`, `int32`, `uint8`, `int16`, `int8`, `complex64`, `int64`, `qint8`, `quint8`, `qint32`, `bfloat16`, `uint16`, `complex128`, `half`, `uint32`, `uint64`, `bool`.
      A Tensor.
    indices: A `Tensor`. Must be one of the following types: `int32`, `int64`.
      A Tensor. Must be one of the following types: `int32`, `int64`.
      A tensor of indices into `input`.
    updates: A `Tensor`. Must have the same type as `input`.
      A Tensor. Must have the same type as ref. A tensor of updated values
      to add to `input`.
    name: A name for the operation (optional).

  Returns:
    A `Tensor`. Has the same type as `input`.
""" _ctx = _context._context if _ctx is not None and _ctx._eager_context.is_eager: try: _result = _pywrap_tensorflow.TFE_Py_FastPathExecute( _ctx._context_handle, _ctx._eager_context.device_name, "ScatterNdNonAliasingAdd", name, _ctx._post_execution_callbacks, input, indices, updates) return _result except _core._FallbackException: try: return scatter_nd_non_aliasing_add_eager_fallback( input, indices, updates, name=name, ctx=_ctx) except _core._SymbolicException: pass # Add nodes to the TensorFlow graph. except _core._NotOkStatusException as e: if name is not None: message = e.message + " name: " + name else: message = e.message _six.raise_from(_core._status_to_exception(e.code, message), None) # Add nodes to the TensorFlow graph. _, _, _op = _op_def_lib._apply_op_helper( "ScatterNdNonAliasingAdd", input=input, indices=indices, updates=updates, name=name) _result = _op.outputs[:] _inputs_flat = _op.inputs _attrs = ("T", _op.get_attr("T"), "Tindices", _op.get_attr("Tindices")) _execute.record_gradient( "ScatterNdNonAliasingAdd", _inputs_flat, _attrs, _result, name) _result, = _result return _result def scatter_nd_non_aliasing_add_eager_fallback(input, indices, updates, name=None, ctx=None): r"""This is the slowpath function for Eager mode. This is for function scatter_nd_non_aliasing_add """ _ctx = ctx if ctx else _context.context() _attr_T, _inputs_T = _execute.args_to_matching_eager([input, updates], _ctx) (input, updates) = _inputs_T _attr_Tindices, (indices,) = _execute.args_to_matching_eager([indices], _ctx) _inputs_flat = [input, indices, updates] _attrs = ("T", _attr_T, "Tindices", _attr_Tindices) _result = _execute.execute(b"ScatterNdNonAliasingAdd", 1, inputs=_inputs_flat, attrs=_attrs, ctx=_ctx, name=name) _execute.record_gradient( "ScatterNdNonAliasingAdd", _inputs_flat, _attrs, _result, name) _result, = _result return _result def shape(input, out_type=_dtypes.int32, name=None): r"""Returns the shape of a tensor. 
This operation returns a 1-D integer tensor representing the shape of `input`. For example: ``` # 't' is [[[1, 1, 1], [2, 2, 2]], [[3, 3, 3], [4, 4, 4]]] shape(t) ==> [2, 2, 3] ``` Args: input: A `Tensor`. out_type: An optional `tf.DType` from: `tf.int32, tf.int64`. Defaults to `tf.int32`. name: A name for the operation (optional). Returns: A `Tensor` of type `out_type`. """ _ctx = _context._context if _ctx is not None and _ctx._eager_context.is_eager: try: _result = _pywrap_tensorflow.TFE_Py_FastPathExecute( _ctx._context_handle, _ctx._eager_context.device_name, "Shape", name, _ctx._post_execution_callbacks, input, "out_type", out_type) return _result except _core._FallbackException: try: return shape_eager_fallback( input, out_type=out_type, name=name, ctx=_ctx) except _core._SymbolicException: pass # Add nodes to the TensorFlow graph. except _core._NotOkStatusException as e: if name is not None: message = e.message + " name: " + name else: message = e.message _six.raise_from(_core._status_to_exception(e.code, message), None) # Add nodes to the TensorFlow graph. if out_type is None: out_type = _dtypes.int32 out_type = _execute.make_type(out_type, "out_type") _, _, _op = _op_def_lib._apply_op_helper( "Shape", input=input, out_type=out_type, name=name) _result = _op.outputs[:] _inputs_flat = _op.inputs _attrs = ("T", _op.get_attr("T"), "out_type", _op.get_attr("out_type")) _execute.record_gradient( "Shape", _inputs_flat, _attrs, _result, name) _result, = _result return _result def shape_eager_fallback(input, out_type=_dtypes.int32, name=None, ctx=None): r"""This is the slowpath function for Eager mode. 
  This is for function shape
  """
  _ctx = ctx if ctx else _context.context()
  if out_type is None:
    out_type = _dtypes.int32
  out_type = _execute.make_type(out_type, "out_type")
  _attr_T, (input,) = _execute.args_to_matching_eager([input], _ctx)
  _inputs_flat = [input]
  _attrs = ("T", _attr_T, "out_type", out_type)
  _result = _execute.execute(b"Shape", 1, inputs=_inputs_flat, attrs=_attrs,
                             ctx=_ctx, name=name)
  _execute.record_gradient(
      "Shape", _inputs_flat, _attrs, _result, name)
  _result, = _result
  return _result


# Generated wrapper for the "ShapeN" op; takes a homogeneous list of tensors
# and returns a list of shape tensors (N outputs, no single-result unpack).
def shape_n(input, out_type=_dtypes.int32, name=None):
  r"""Returns shape of tensors.

  This operation returns N 1-D integer tensors representing shape of `input[i]s`.

  Args:
    input: A list of at least 1 `Tensor` objects with the same type.
    out_type: An optional `tf.DType` from: `tf.int32, tf.int64`. Defaults to `tf.int32`.
    name: A name for the operation (optional).

  Returns:
    A list with the same length as `input` of `Tensor` objects with type `out_type`.
  """
  _ctx = _context._context
  if _ctx is not None and _ctx._eager_context.is_eager:
    try:
      _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
        _ctx._context_handle, _ctx._eager_context.device_name, "ShapeN",
        name, _ctx._post_execution_callbacks, input, "out_type", out_type)
      return _result
    except _core._FallbackException:
      try:
        return shape_n_eager_fallback(
            input, out_type=out_type, name=name, ctx=_ctx)
      except _core._SymbolicException:
        pass  # Add nodes to the TensorFlow graph.
    except _core._NotOkStatusException as e:
      if name is not None:
        message = e.message + " name: " + name
      else:
        message = e.message
      _six.raise_from(_core._status_to_exception(e.code, message), None)
  # Add nodes to the TensorFlow graph.
  # The N attr is derived from the length of the input list, which must
  # therefore be a real list/tuple.
  if not isinstance(input, (list, tuple)):
    raise TypeError(
        "Expected list for 'input' argument to "
        "'shape_n' Op, not %r." % input)
  _attr_N = len(input)
  if out_type is None:
    out_type = _dtypes.int32
  out_type = _execute.make_type(out_type, "out_type")
  _, _, _op = _op_def_lib._apply_op_helper(
        "ShapeN", input=input, out_type=out_type, name=name)
  _result = _op.outputs[:]
  _inputs_flat = _op.inputs
  _attrs = ("N", _op.get_attr("N"), "T", _op.get_attr("T"), "out_type",
            _op.get_attr("out_type"))
  _execute.record_gradient(
      "ShapeN", _inputs_flat, _attrs, _result, name)
  return _result

def shape_n_eager_fallback(input, out_type=_dtypes.int32, name=None, ctx=None):
  r"""This is the slowpath function for Eager mode.
  This is for function shape_n
  """
  _ctx = ctx if ctx else _context.context()
  if not isinstance(input, (list, tuple)):
    raise TypeError(
        "Expected list for 'input' argument to "
        "'shape_n' Op, not %r." % input)
  _attr_N = len(input)
  if out_type is None:
    out_type = _dtypes.int32
  out_type = _execute.make_type(out_type, "out_type")
  # All list elements must coerce to a single matching T dtype.
  _attr_T, input = _execute.args_to_matching_eager(list(input), _ctx)
  _inputs_flat = list(input)
  _attrs = ("N", _attr_N, "T", _attr_T, "out_type", out_type)
  # N outputs: the result stays a list.
  _result = _execute.execute(b"ShapeN", _attr_N, inputs=_inputs_flat,
                             attrs=_attrs, ctx=_ctx, name=name)
  _execute.record_gradient(
      "ShapeN", _inputs_flat, _attrs, _result, name)
  return _result


# Generated wrapper for the "Size" op.
def size(input, out_type=_dtypes.int32, name=None):
  r"""Returns the size of a tensor.

  This operation returns an integer representing the number of elements in
  `input`.

  For example:

  ```
  # 't' is [[[1, 1, 1], [2, 2, 2]], [[3, 3, 3], [4, 4, 4]]]
  size(t) ==> 12
  ```

  Args:
    input: A `Tensor`.
    out_type: An optional `tf.DType` from: `tf.int32, tf.int64`. Defaults to `tf.int32`.
    name: A name for the operation (optional).

  Returns:
    A `Tensor` of type `out_type`.
""" _ctx = _context._context if _ctx is not None and _ctx._eager_context.is_eager: try: _result = _pywrap_tensorflow.TFE_Py_FastPathExecute( _ctx._context_handle, _ctx._eager_context.device_name, "Size", name, _ctx._post_execution_callbacks, input, "out_type", out_type) return _result except _core._FallbackException: try: return size_eager_fallback( input, out_type=out_type, name=name, ctx=_ctx) except _core._SymbolicException: pass # Add nodes to the TensorFlow graph. except _core._NotOkStatusException as e: if name is not None: message = e.message + " name: " + name else: message = e.message _six.raise_from(_core._status_to_exception(e.code, message), None) # Add nodes to the TensorFlow graph. if out_type is None: out_type = _dtypes.int32 out_type = _execute.make_type(out_type, "out_type") _, _, _op = _op_def_lib._apply_op_helper( "Size", input=input, out_type=out_type, name=name) _result = _op.outputs[:] _inputs_flat = _op.inputs _attrs = ("T", _op.get_attr("T"), "out_type", _op.get_attr("out_type")) _execute.record_gradient( "Size", _inputs_flat, _attrs, _result, name) _result, = _result return _result def size_eager_fallback(input, out_type=_dtypes.int32, name=None, ctx=None): r"""This is the slowpath function for Eager mode. This is for function size """ _ctx = ctx if ctx else _context.context() if out_type is None: out_type = _dtypes.int32 out_type = _execute.make_type(out_type, "out_type") _attr_T, (input,) = _execute.args_to_matching_eager([input], _ctx) _inputs_flat = [input] _attrs = ("T", _attr_T, "out_type", out_type) _result = _execute.execute(b"Size", 1, inputs=_inputs_flat, attrs=_attrs, ctx=_ctx, name=name) _execute.record_gradient( "Size", _inputs_flat, _attrs, _result, name) _result, = _result return _result def _slice(input, begin, size, name=None): r"""Return a slice from 'input'. The output tensor is a tensor with dimensions described by 'size' whose values are extracted from 'input' starting at the offsets in 'begin'. 
*Requirements*: 0 <= begin[i] <= begin[i] + size[i] <= Di for i in [0, n) Args: input: A `Tensor`. begin: A `Tensor`. Must be one of the following types: `int32`, `int64`. begin[i] specifies the offset into the 'i'th dimension of 'input' to slice from. size: A `Tensor`. Must have the same type as `begin`. size[i] specifies the number of elements of the 'i'th dimension of 'input' to slice. If size[i] is -1, all remaining elements in dimension i are included in the slice (i.e. this is equivalent to setting size[i] = input.dim_size(i) - begin[i]). name: A name for the operation (optional). Returns: A `Tensor`. Has the same type as `input`. """ _ctx = _context._context if _ctx is not None and _ctx._eager_context.is_eager: try: _result = _pywrap_tensorflow.TFE_Py_FastPathExecute( _ctx._context_handle, _ctx._eager_context.device_name, "Slice", name, _ctx._post_execution_callbacks, input, begin, size) return _result except _core._FallbackException: try: return _slice_eager_fallback( input, begin, size, name=name, ctx=_ctx) except _core._SymbolicException: pass # Add nodes to the TensorFlow graph. except _core._NotOkStatusException as e: if name is not None: message = e.message + " name: " + name else: message = e.message _six.raise_from(_core._status_to_exception(e.code, message), None) # Add nodes to the TensorFlow graph. _, _, _op = _op_def_lib._apply_op_helper( "Slice", input=input, begin=begin, size=size, name=name) _result = _op.outputs[:] _inputs_flat = _op.inputs _attrs = ("T", _op.get_attr("T"), "Index", _op.get_attr("Index")) _execute.record_gradient( "Slice", _inputs_flat, _attrs, _result, name) _result, = _result return _result def _slice_eager_fallback(input, begin, size, name=None, ctx=None): r"""This is the slowpath function for Eager mode. 
  This is for function _slice
  """
  _ctx = ctx if ctx else _context.context()
  # T from `input`; begin and size must share one Index dtype.
  _attr_T, (input,) = _execute.args_to_matching_eager([input], _ctx)
  _attr_Index, _inputs_Index = _execute.args_to_matching_eager([begin, size], _ctx)
  (begin, size) = _inputs_Index
  _inputs_flat = [input, begin, size]
  _attrs = ("T", _attr_T, "Index", _attr_Index)
  _result = _execute.execute(b"Slice", 1, inputs=_inputs_flat, attrs=_attrs,
                             ctx=_ctx, name=name)
  _execute.record_gradient(
      "Slice", _inputs_flat, _attrs, _result, name)
  _result, = _result
  return _result


# Generated wrapper for the "Snapshot" op (identity-like copy).
def snapshot(input, name=None):
  r"""Returns a copy of the input tensor.

  Args:
    input: A `Tensor`.
    name: A name for the operation (optional).

  Returns:
    A `Tensor`. Has the same type as `input`.
  """
  _ctx = _context._context
  if _ctx is not None and _ctx._eager_context.is_eager:
    try:
      _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
        _ctx._context_handle, _ctx._eager_context.device_name, "Snapshot",
        name, _ctx._post_execution_callbacks, input)
      return _result
    except _core._FallbackException:
      try:
        return snapshot_eager_fallback(
            input, name=name, ctx=_ctx)
      except _core._SymbolicException:
        pass  # Add nodes to the TensorFlow graph.
    except _core._NotOkStatusException as e:
      if name is not None:
        message = e.message + " name: " + name
      else:
        message = e.message
      _six.raise_from(_core._status_to_exception(e.code, message), None)
  # Add nodes to the TensorFlow graph.
  _, _, _op = _op_def_lib._apply_op_helper(
        "Snapshot", input=input, name=name)
  _result = _op.outputs[:]
  _inputs_flat = _op.inputs
  _attrs = ("T", _op.get_attr("T"))
  _execute.record_gradient(
      "Snapshot", _inputs_flat, _attrs, _result, name)
  _result, = _result
  return _result

def snapshot_eager_fallback(input, name=None, ctx=None):
  r"""This is the slowpath function for Eager mode.
  This is for function snapshot
  """
  _ctx = ctx if ctx else _context.context()
  _attr_T, (input,) = _execute.args_to_matching_eager([input], _ctx)
  _inputs_flat = [input]
  _attrs = ("T", _attr_T)
  _result = _execute.execute(b"Snapshot", 1, inputs=_inputs_flat,
                             attrs=_attrs, ctx=_ctx, name=name)
  _execute.record_gradient(
      "Snapshot", _inputs_flat, _attrs, _result, name)
  _result, = _result
  return _result


# Generated wrapper for the legacy "SpaceToBatch" op (continues past this
# chunk).
def space_to_batch(input, paddings, block_size, name=None):
  r"""SpaceToBatch for 4-D tensors of type T.

  This is a legacy version of the more general SpaceToBatchND.

  Zero-pads and then rearranges (permutes) blocks of spatial data into batch.
  More specifically, this op outputs a copy of the input tensor where values from
  the `height` and `width` dimensions are moved to the `batch` dimension. After
  the zero-padding, both `height` and `width` of the input must be divisible by the
  block size.

  Args:
    input: A `Tensor`. 4-D with shape `[batch, height, width, depth]`.
    paddings: A `Tensor`. Must be one of the following types: `int32`, `int64`.
      2-D tensor of non-negative integers with shape `[2, 2]`. It specifies
        the padding of the input with zeros across the spatial dimensions as follows:

            paddings = [[pad_top, pad_bottom], [pad_left, pad_right]]

        The effective spatial dimensions of the zero-padded input tensor will be:

            height_pad = pad_top + height + pad_bottom
            width_pad = pad_left + width + pad_right

      The attr `block_size` must be greater than one. It indicates the block size.

        * Non-overlapping blocks of size `block_size x block size` in the height and
          width dimensions are rearranged into the batch dimension at each location.
        * The batch of the output tensor is `batch * block_size * block_size`.
        * Both height_pad and width_pad must be divisible by block_size.
The shape of the output will be: [batch*block_size*block_size, height_pad/block_size, width_pad/block_size, depth] Some examples: (1) For the following input of shape `[1, 2, 2, 1]` and block_size of 2: ``` x = [[[[1], [2]], [[3], [4]]]] ``` The output tensor has shape `[4, 1, 1, 1]` and value: ``` [[[[1]]], [[[2]]], [[[3]]], [[[4]]]] ``` (2) For the following input of shape `[1, 2, 2, 3]` and block_size of 2: ``` x = [[[[1, 2, 3], [4, 5, 6]], [[7, 8, 9], [10, 11, 12]]]] ``` The output tensor has shape `[4, 1, 1, 3]` and value: ``` [[[1, 2, 3]], [[4, 5, 6]], [[7, 8, 9]], [[10, 11, 12]]] ``` (3) For the following input of shape `[1, 4, 4, 1]` and block_size of 2: ``` x = [[[[1], [2], [3], [4]], [[5], [6], [7], [8]], [[9], [10], [11], [12]], [[13], [14], [15], [16]]]] ``` The output tensor has shape `[4, 2, 2, 1]` and value: ``` x = [[[[1], [3]], [[9], [11]]], [[[2], [4]], [[10], [12]]], [[[5], [7]], [[13], [15]]], [[[6], [8]], [[14], [16]]]] ``` (4) For the following input of shape `[2, 2, 4, 1]` and block_size of 2: ``` x = [[[[1], [2], [3], [4]], [[5], [6], [7], [8]]], [[[9], [10], [11], [12]], [[13], [14], [15], [16]]]] ``` The output tensor has shape `[8, 1, 2, 1]` and value: ``` x = [[[[1], [3]]], [[[9], [11]]], [[[2], [4]]], [[[10], [12]]], [[[5], [7]]], [[[13], [15]]], [[[6], [8]]], [[[14], [16]]]] ``` Among others, this operation is useful for reducing atrous convolution into regular convolution. block_size: An `int` that is `>= 2`. name: A name for the operation (optional). Returns: A `Tensor`. Has the same type as `input`. 
""" _ctx = _context._context if _ctx is not None and _ctx._eager_context.is_eager: try: _result = _pywrap_tensorflow.TFE_Py_FastPathExecute( _ctx._context_handle, _ctx._eager_context.device_name, "SpaceToBatch", name, _ctx._post_execution_callbacks, input, paddings, "block_size", block_size) return _result except _core._FallbackException: try: return space_to_batch_eager_fallback( input, paddings, block_size=block_size, name=name, ctx=_ctx) except _core._SymbolicException: pass # Add nodes to the TensorFlow graph. except _core._NotOkStatusException as e: if name is not None: message = e.message + " name: " + name else: message = e.message _six.raise_from(_core._status_to_exception(e.code, message), None) # Add nodes to the TensorFlow graph. block_size = _execute.make_int(block_size, "block_size") _, _, _op = _op_def_lib._apply_op_helper( "SpaceToBatch", input=input, paddings=paddings, block_size=block_size, name=name) _result = _op.outputs[:] _inputs_flat = _op.inputs _attrs = ("T", _op.get_attr("T"), "Tpaddings", _op.get_attr("Tpaddings"), "block_size", _op.get_attr("block_size")) _execute.record_gradient( "SpaceToBatch", _inputs_flat, _attrs, _result, name) _result, = _result return _result def space_to_batch_eager_fallback(input, paddings, block_size, name=None, ctx=None): r"""This is the slowpath function for Eager mode. 
This is for function space_to_batch """ _ctx = ctx if ctx else _context.context() block_size = _execute.make_int(block_size, "block_size") _attr_T, (input,) = _execute.args_to_matching_eager([input], _ctx) _attr_Tpaddings, (paddings,) = _execute.args_to_matching_eager([paddings], _ctx, _dtypes.int32) _inputs_flat = [input, paddings] _attrs = ("T", _attr_T, "Tpaddings", _attr_Tpaddings, "block_size", block_size) _result = _execute.execute(b"SpaceToBatch", 1, inputs=_inputs_flat, attrs=_attrs, ctx=_ctx, name=name) _execute.record_gradient( "SpaceToBatch", _inputs_flat, _attrs, _result, name) _result, = _result return _result @_dispatch.add_dispatch_list @tf_export('space_to_batch_nd', v1=['space_to_batch_nd', 'manip.space_to_batch_nd']) @deprecated_endpoints('manip.space_to_batch_nd') def space_to_batch_nd(input, block_shape, paddings, name=None): r"""SpaceToBatch for N-D tensors of type T. This operation divides "spatial" dimensions `[1, ..., M]` of the input into a grid of blocks of shape `block_shape`, and interleaves these blocks with the "batch" dimension (0) such that in the output, the spatial dimensions `[1, ..., M]` correspond to the position within the grid, and the batch dimension combines both the position within a spatial block and the original batch position. Prior to division into blocks, the spatial dimensions of the input are optionally zero padded according to `paddings`. See below for a precise description. Args: input: A `Tensor`. N-D with shape `input_shape = [batch] + spatial_shape + remaining_shape`, where spatial_shape has `M` dimensions. block_shape: A `Tensor`. Must be one of the following types: `int32`, `int64`. 1-D with shape `[M]`, all values must be >= 1. paddings: A `Tensor`. Must be one of the following types: `int32`, `int64`. 2-D with shape `[M, 2]`, all values must be >= 0. `paddings[i] = [pad_start, pad_end]` specifies the padding for input dimension `i + 1`, which corresponds to spatial dimension `i`. 
It is required that `block_shape[i]` divides `input_shape[i + 1] + pad_start + pad_end`. This operation is equivalent to the following steps: 1. Zero-pad the start and end of dimensions `[1, ..., M]` of the input according to `paddings` to produce `padded` of shape `padded_shape`. 2. Reshape `padded` to `reshaped_padded` of shape: [batch] + [padded_shape[1] / block_shape[0], block_shape[0], ..., padded_shape[M] / block_shape[M-1], block_shape[M-1]] + remaining_shape 3. Permute dimensions of `reshaped_padded` to produce `permuted_reshaped_padded` of shape: block_shape + [batch] + [padded_shape[1] / block_shape[0], ..., padded_shape[M] / block_shape[M-1]] + remaining_shape 4. Reshape `permuted_reshaped_padded` to flatten `block_shape` into the batch dimension, producing an output tensor of shape: [batch * prod(block_shape)] + [padded_shape[1] / block_shape[0], ..., padded_shape[M] / block_shape[M-1]] + remaining_shape Some examples: (1) For the following input of shape `[1, 2, 2, 1]`, `block_shape = [2, 2]`, and `paddings = [[0, 0], [0, 0]]`: ``` x = [[[[1], [2]], [[3], [4]]]] ``` The output tensor has shape `[4, 1, 1, 1]` and value: ``` [[[[1]]], [[[2]]], [[[3]]], [[[4]]]] ``` (2) For the following input of shape `[1, 2, 2, 3]`, `block_shape = [2, 2]`, and `paddings = [[0, 0], [0, 0]]`: ``` x = [[[[1, 2, 3], [4, 5, 6]], [[7, 8, 9], [10, 11, 12]]]] ``` The output tensor has shape `[4, 1, 1, 3]` and value: ``` [[[1, 2, 3]], [[4, 5, 6]], [[7, 8, 9]], [[10, 11, 12]]] ``` (3) For the following input of shape `[1, 4, 4, 1]`, `block_shape = [2, 2]`, and `paddings = [[0, 0], [0, 0]]`: ``` x = [[[[1], [2], [3], [4]], [[5], [6], [7], [8]], [[9], [10], [11], [12]], [[13], [14], [15], [16]]]] ``` The output tensor has shape `[4, 2, 2, 1]` and value: ``` x = [[[[1], [3]], [[9], [11]]], [[[2], [4]], [[10], [12]]], [[[5], [7]], [[13], [15]]], [[[6], [8]], [[14], [16]]]] ``` (4) For the following input of shape `[2, 2, 4, 1]`, block_shape = `[2, 2]`, and paddings = `[[0, 0], [2, 
0]]`: ``` x = [[[[1], [2], [3], [4]], [[5], [6], [7], [8]]], [[[9], [10], [11], [12]], [[13], [14], [15], [16]]]] ``` The output tensor has shape `[8, 1, 3, 1]` and value: ``` x = [[[[0], [1], [3]]], [[[0], [9], [11]]], [[[0], [2], [4]]], [[[0], [10], [12]]], [[[0], [5], [7]]], [[[0], [13], [15]]], [[[0], [6], [8]]], [[[0], [14], [16]]]] ``` Among others, this operation is useful for reducing atrous convolution into regular convolution. name: A name for the operation (optional). Returns: A `Tensor`. Has the same type as `input`. """ _ctx = _context._context if _ctx is not None and _ctx._eager_context.is_eager: try: _result = _pywrap_tensorflow.TFE_Py_FastPathExecute( _ctx._context_handle, _ctx._eager_context.device_name, "SpaceToBatchND", name, _ctx._post_execution_callbacks, input, block_shape, paddings) return _result except _core._FallbackException: try: return space_to_batch_nd_eager_fallback( input, block_shape, paddings, name=name, ctx=_ctx) except _core._SymbolicException: pass # Add nodes to the TensorFlow graph. except (TypeError, ValueError): result = _dispatch.dispatch( space_to_batch_nd, input=input, block_shape=block_shape, paddings=paddings, name=name) if result is not _dispatch.OpDispatcher.NOT_SUPPORTED: return result raise except _core._NotOkStatusException as e: if name is not None: message = e.message + " name: " + name else: message = e.message _six.raise_from(_core._status_to_exception(e.code, message), None) # Add nodes to the TensorFlow graph. 
  try:
    _, _, _op = _op_def_lib._apply_op_helper(
        "SpaceToBatchND", input=input, block_shape=block_shape,
                          paddings=paddings, name=name)
  except (TypeError, ValueError):
    # Graph construction failed; give registered dispatch handlers a chance
    # to handle the call before re-raising.
    result = _dispatch.dispatch(
          space_to_batch_nd, input=input, block_shape=block_shape,
                             paddings=paddings, name=name)
    if result is not _dispatch.OpDispatcher.NOT_SUPPORTED:
      return result
    raise
  _result = _op.outputs[:]
  _inputs_flat = _op.inputs
  _attrs = ("T", _op.get_attr("T"), "Tblock_shape",
            _op.get_attr("Tblock_shape"), "Tpaddings",
            _op.get_attr("Tpaddings"))
  _execute.record_gradient(
      "SpaceToBatchND", _inputs_flat, _attrs, _result, name)
  _result, = _result
  return _result


def space_to_batch_nd_eager_fallback(input, block_shape, paddings, name=None, ctx=None):
  r"""This is the slowpath function for Eager mode.
  This is for function space_to_batch_nd
  """
  # Slow path: resolve dtypes in Python (block_shape/paddings default to
  # int32), then execute the op by name.
  _ctx = ctx if ctx else _context.context()
  _attr_T, (input,) = _execute.args_to_matching_eager([input], _ctx)
  _attr_Tblock_shape, (block_shape,) = _execute.args_to_matching_eager([block_shape], _ctx, _dtypes.int32)
  _attr_Tpaddings, (paddings,) = _execute.args_to_matching_eager([paddings], _ctx, _dtypes.int32)
  _inputs_flat = [input, block_shape, paddings]
  _attrs = ("T", _attr_T, "Tblock_shape", _attr_Tblock_shape, "Tpaddings",
            _attr_Tpaddings)
  _result = _execute.execute(b"SpaceToBatchND", 1, inputs=_inputs_flat,
                             attrs=_attrs, ctx=_ctx, name=name)
  _execute.record_gradient(
      "SpaceToBatchND", _inputs_flat, _attrs, _result, name)
  _result, = _result
  return _result


def space_to_depth(input, block_size, data_format="NHWC", name=None):
  r"""SpaceToDepth for tensors of type T.

  Rearranges blocks of spatial data, into depth. More specifically,
  this op outputs a copy of the input tensor where values from the `height`
  and `width` dimensions are moved to the `depth` dimension.
  The attr `block_size` indicates the input block size.

    * Non-overlapping blocks of size `block_size x block size` are rearranged
      into depth at each location.
* The depth of the output tensor is `block_size * block_size * input_depth`. * The Y, X coordinates within each block of the input become the high order component of the output channel index. * The input tensor's height and width must be divisible by block_size. The `data_format` attr specifies the layout of the input and output tensors with the following options: "NHWC": `[ batch, height, width, channels ]` "NCHW": `[ batch, channels, height, width ]` "NCHW_VECT_C": `qint8 [ batch, channels / 4, height, width, 4 ]` It is useful to consider the operation as transforming a 6-D Tensor. e.g. for data_format = NHWC, Each element in the input tensor can be specified via 6 coordinates, ordered by decreasing memory layout significance as: n,oY,bY,oX,bX,iC (where n=batch index, oX, oY means X or Y coordinates within the output image, bX, bY means coordinates within the input block, iC means input channels). The output would be a transpose to the following layout: n,oY,oX,bY,bX,iC This operation is useful for resizing the activations between convolutions (but keeping all data), e.g. instead of pooling. It is also useful for training purely convolutional models. For example, given an input of shape `[1, 2, 2, 1]`, data_format = "NHWC" and block_size = 2: ``` x = [[[[1], [2]], [[3], [4]]]] ``` This operation will output a tensor of shape `[1, 1, 1, 4]`: ``` [[[[1, 2, 3, 4]]]] ``` Here, the input has a batch of 1 and each batch element has shape `[2, 2, 1]`, the corresponding output will have a single element (i.e. width and height are both 1) and will have a depth of 4 channels (1 * block_size * block_size). The output element shape is `[1, 1, 4]`. For an input tensor with larger depth, here of shape `[1, 2, 2, 3]`, e.g. 
``` x = [[[[1, 2, 3], [4, 5, 6]], [[7, 8, 9], [10, 11, 12]]]] ``` This operation, for block_size of 2, will return the following tensor of shape `[1, 1, 1, 12]` ``` [[[[1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12]]]] ``` Similarly, for the following input of shape `[1 4 4 1]`, and a block size of 2: ``` x = [[[[1], [2], [5], [6]], [[3], [4], [7], [8]], [[9], [10], [13], [14]], [[11], [12], [15], [16]]]] ``` the operator will return the following tensor of shape `[1 2 2 4]`: ``` x = [[[[1, 2, 3, 4], [5, 6, 7, 8]], [[9, 10, 11, 12], [13, 14, 15, 16]]]] ``` Args: input: A `Tensor`. block_size: An `int` that is `>= 2`. The size of the spatial block. data_format: An optional `string` from: `"NHWC", "NCHW", "NCHW_VECT_C"`. Defaults to `"NHWC"`. name: A name for the operation (optional). Returns: A `Tensor`. Has the same type as `input`. """ _ctx = _context._context if _ctx is not None and _ctx._eager_context.is_eager: try: _result = _pywrap_tensorflow.TFE_Py_FastPathExecute( _ctx._context_handle, _ctx._eager_context.device_name, "SpaceToDepth", name, _ctx._post_execution_callbacks, input, "block_size", block_size, "data_format", data_format) return _result except _core._FallbackException: try: return space_to_depth_eager_fallback( input, block_size=block_size, data_format=data_format, name=name, ctx=_ctx) except _core._SymbolicException: pass # Add nodes to the TensorFlow graph. except _core._NotOkStatusException as e: if name is not None: message = e.message + " name: " + name else: message = e.message _six.raise_from(_core._status_to_exception(e.code, message), None) # Add nodes to the TensorFlow graph. 
  block_size = _execute.make_int(block_size, "block_size")
  if data_format is None:
    data_format = "NHWC"
  data_format = _execute.make_str(data_format, "data_format")
  _, _, _op = _op_def_lib._apply_op_helper(
        "SpaceToDepth", input=input, block_size=block_size,
                        data_format=data_format, name=name)
  _result = _op.outputs[:]
  _inputs_flat = _op.inputs
  _attrs = ("T", _op.get_attr("T"), "block_size", _op.get_attr("block_size"),
            "data_format", _op.get_attr("data_format"))
  _execute.record_gradient(
      "SpaceToDepth", _inputs_flat, _attrs, _result, name)
  _result, = _result
  return _result


def space_to_depth_eager_fallback(input, block_size, data_format="NHWC", name=None, ctx=None):
  r"""This is the slowpath function for Eager mode.
  This is for function space_to_depth
  """
  # Slow path: normalize attrs and resolve the input dtype in Python, then
  # execute the op by name.
  _ctx = ctx if ctx else _context.context()
  block_size = _execute.make_int(block_size, "block_size")
  if data_format is None:
    data_format = "NHWC"
  data_format = _execute.make_str(data_format, "data_format")
  _attr_T, (input,) = _execute.args_to_matching_eager([input], _ctx)
  _inputs_flat = [input]
  _attrs = ("T", _attr_T, "block_size", block_size, "data_format",
            data_format)
  _result = _execute.execute(b"SpaceToDepth", 1, inputs=_inputs_flat,
                             attrs=_attrs, ctx=_ctx, name=name)
  _execute.record_gradient(
      "SpaceToDepth", _inputs_flat, _attrs, _result, name)
  _result, = _result
  return _result


def split(axis, value, num_split, name=None):
  r"""Splits a tensor into `num_split` tensors along one dimension.

  Args:
    axis: A `Tensor` of type `int32`.
      0-D.  The dimension along which to split.  Must be in the range
      `[-rank(value), rank(value))`.
    value: A `Tensor`. The tensor to split.
    num_split: An `int` that is `>= 1`.
      The number of ways to split.  Must evenly divide
      `value.shape[split_dim]`.
    name: A name for the operation (optional).

  Returns:
    A list of `num_split` `Tensor` objects with the same type as `value`.
  """
  _ctx = _context._context
  if _ctx is not None and _ctx._eager_context.is_eager:
    # Eager fast path: run the kernel directly through the C extension.
    try:
      _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
        _ctx._context_handle, _ctx._eager_context.device_name, "Split", name,
        _ctx._post_execution_callbacks, axis, value, "num_split", num_split)
      return _result
    except _core._FallbackException:
      try:
        return split_eager_fallback(
            axis, value, num_split=num_split, name=name, ctx=_ctx)
      except _core._SymbolicException:
        pass  # Add nodes to the TensorFlow graph.
    except _core._NotOkStatusException as e:
      if name is not None:
        message = e.message + " name: " + name
      else:
        message = e.message
      _six.raise_from(_core._status_to_exception(e.code, message), None)
  # Add nodes to the TensorFlow graph.
  num_split = _execute.make_int(num_split, "num_split")
  _, _, _op = _op_def_lib._apply_op_helper(
        "Split", split_dim=axis, value=value, num_split=num_split, name=name)
  _result = _op.outputs[:]
  _inputs_flat = _op.inputs
  _attrs = ("num_split", _op.get_attr("num_split"), "T", _op.get_attr("T"))
  _execute.record_gradient(
      "Split", _inputs_flat, _attrs, _result, name)
  # Multiple outputs: return the full list (no single-result unpacking).
  return _result


def split_eager_fallback(axis, value, num_split, name=None, ctx=None):
  r"""This is the slowpath function for Eager mode.
  This is for function split
  """
  _ctx = ctx if ctx else _context.context()
  num_split = _execute.make_int(num_split, "num_split")
  _attr_T, (value,) = _execute.args_to_matching_eager([value], _ctx)
  axis = _ops.convert_to_tensor(axis, _dtypes.int32)
  _inputs_flat = [axis, value]
  _attrs = ("num_split", num_split, "T", _attr_T)
  _result = _execute.execute(b"Split", num_split, inputs=_inputs_flat,
                             attrs=_attrs, ctx=_ctx, name=name)
  _execute.record_gradient(
      "Split", _inputs_flat, _attrs, _result, name)
  return _result


def split_v(value, size_splits, axis, num_split, name=None):
  r"""Splits a tensor into `num_split` tensors along one dimension.

  Args:
    value: A `Tensor`. The tensor to split.
    size_splits: A `Tensor`. Must be one of the following types: `int32`, `int64`.
      list containing the sizes of each output tensor along the split
      dimension. Must sum to the dimension of value along split_dim.
      Can contain one -1 indicating that dimension is to be inferred.
    axis: A `Tensor` of type `int32`.
      0-D.  The dimension along which to split.  Must be in the range
      `[-rank(value), rank(value))`.
    num_split: An `int` that is `>= 1`.
    name: A name for the operation (optional).

  Returns:
    A list of `num_split` `Tensor` objects with the same type as `value`.
  """
  _ctx = _context._context
  if _ctx is not None and _ctx._eager_context.is_eager:
    try:
      _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
        _ctx._context_handle, _ctx._eager_context.device_name, "SplitV",
        name, _ctx._post_execution_callbacks, value, size_splits, axis,
        "num_split", num_split)
      return _result
    except _core._FallbackException:
      try:
        return split_v_eager_fallback(
            value, size_splits, axis, num_split=num_split, name=name,
            ctx=_ctx)
      except _core._SymbolicException:
        pass  # Add nodes to the TensorFlow graph.
    except _core._NotOkStatusException as e:
      if name is not None:
        message = e.message + " name: " + name
      else:
        message = e.message
      _six.raise_from(_core._status_to_exception(e.code, message), None)
  # Add nodes to the TensorFlow graph.
  num_split = _execute.make_int(num_split, "num_split")
  _, _, _op = _op_def_lib._apply_op_helper(
        "SplitV", value=value, size_splits=size_splits, split_dim=axis,
                  num_split=num_split, name=name)
  _result = _op.outputs[:]
  _inputs_flat = _op.inputs
  _attrs = ("num_split", _op.get_attr("num_split"), "T", _op.get_attr("T"),
            "Tlen", _op.get_attr("Tlen"))
  _execute.record_gradient(
      "SplitV", _inputs_flat, _attrs, _result, name)
  return _result


def split_v_eager_fallback(value, size_splits, axis, num_split, name=None, ctx=None):
  r"""This is the slowpath function for Eager mode.
  This is for function split_v
  """
  _ctx = ctx if ctx else _context.context()
  num_split = _execute.make_int(num_split, "num_split")
  _attr_T, (value,) = _execute.args_to_matching_eager([value], _ctx)
  _attr_Tlen, (size_splits,) = _execute.args_to_matching_eager([size_splits], _ctx, _dtypes.int64)
  axis = _ops.convert_to_tensor(axis, _dtypes.int32)
  _inputs_flat = [value, size_splits, axis]
  _attrs = ("num_split", num_split, "T", _attr_T, "Tlen", _attr_Tlen)
  _result = _execute.execute(b"SplitV", num_split, inputs=_inputs_flat,
                             attrs=_attrs, ctx=_ctx, name=name)
  _execute.record_gradient(
      "SplitV", _inputs_flat, _attrs, _result, name)
  return _result


def squeeze(input, axis=[], name=None):
  r"""Removes dimensions of size 1 from the shape of a tensor.

  Given a tensor `input`, this operation returns a tensor of the same type with
  all dimensions of size 1 removed. If you don't want to remove all size 1
  dimensions, you can remove specific size 1 dimensions by specifying
  `axis`.

  For example:

  ```
  # 't' is a tensor of shape [1, 2, 1, 3, 1, 1]
  shape(squeeze(t)) ==> [2, 3]
  ```

  Or, to remove specific size 1 dimensions:

  ```
  # 't' is a tensor of shape [1, 2, 1, 3, 1, 1]
  shape(squeeze(t, [2, 4])) ==> [1, 2, 3, 1]
  ```

  Args:
    input: A `Tensor`. The `input` to squeeze.
    axis: An optional list of `ints`. Defaults to `[]`.
      If specified, only squeezes the dimensions listed. The dimension
      index starts at 0. It is an error to squeeze a dimension that is not 1.
      Must be in the range `[-rank(input), rank(input))`.
    name: A name for the operation (optional).

  Returns:
    A `Tensor`. Has the same type as `input`.
  """
  # NOTE(review): the mutable default `axis=[]` is only rebound below, never
  # mutated in place, so the shared-default pitfall does not bite here.
  _ctx = _context._context
  if _ctx is not None and _ctx._eager_context.is_eager:
    try:
      _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
        _ctx._context_handle, _ctx._eager_context.device_name, "Squeeze",
        name, _ctx._post_execution_callbacks, input, "squeeze_dims", axis)
      return _result
    except _core._FallbackException:
      try:
        return squeeze_eager_fallback(
            input, axis=axis, name=name, ctx=_ctx)
      except _core._SymbolicException:
        pass  # Add nodes to the TensorFlow graph.
    except _core._NotOkStatusException as e:
      if name is not None:
        message = e.message + " name: " + name
      else:
        message = e.message
      _six.raise_from(_core._status_to_exception(e.code, message), None)
  # Add nodes to the TensorFlow graph.
  if axis is None:
    axis = []
  if not isinstance(axis, (list, tuple)):
    raise TypeError(
        "Expected list for 'axis' argument to "
        "'squeeze' Op, not %r." % axis)
  axis = [_execute.make_int(_i, "axis") for _i in axis]
  _, _, _op = _op_def_lib._apply_op_helper(
        "Squeeze", input=input, squeeze_dims=axis, name=name)
  _result = _op.outputs[:]
  _inputs_flat = _op.inputs
  _attrs = ("T", _op.get_attr("T"), "squeeze_dims",
            _op.get_attr("squeeze_dims"))
  _execute.record_gradient(
      "Squeeze", _inputs_flat, _attrs, _result, name)
  _result, = _result
  return _result


def squeeze_eager_fallback(input, axis=[], name=None, ctx=None):
  r"""This is the slowpath function for Eager mode.
  This is for function squeeze
  """
  _ctx = ctx if ctx else _context.context()
  if axis is None:
    axis = []
  if not isinstance(axis, (list, tuple)):
    raise TypeError(
        "Expected list for 'axis' argument to "
        "'squeeze' Op, not %r." % axis)
  axis = [_execute.make_int(_i, "axis") for _i in axis]
  _attr_T, (input,) = _execute.args_to_matching_eager([input], _ctx)
  _inputs_flat = [input]
  _attrs = ("T", _attr_T, "squeeze_dims", axis)
  _result = _execute.execute(b"Squeeze", 1, inputs=_inputs_flat,
                             attrs=_attrs, ctx=_ctx, name=name)
  _execute.record_gradient(
      "Squeeze", _inputs_flat, _attrs, _result, name)
  _result, = _result
  return _result


@_dispatch.add_dispatch_list
@tf_export('stop_gradient')
def stop_gradient(input, name=None):
  r"""Stops gradient computation.

  When executed in a graph, this op outputs its input tensor as-is.

  When building ops to compute gradients, this op prevents the contribution of
  its inputs to be taken into account.  Normally, the gradient generator adds ops
  to a graph to compute the derivatives of a specified 'loss' by recursively
  finding out inputs that contributed to its computation.  If you insert this op
  in the graph it inputs are masked from the gradient generator.  They are not
  taken into account for computing gradients.

  This is useful any time you want to compute a value with TensorFlow but need
  to pretend that the value was a constant. Some examples include:

  *  The *EM* algorithm where the *M-step* should not involve backpropagation
     through the output of the *E-step*.
  *  Contrastive divergence training of Boltzmann machines where, when
     differentiating the energy function, the training must not backpropagate
     through the graph that generated the samples from the model.
  *  Adversarial training, where no backprop should happen through the adversarial
     example generation process.

  Args:
    input: A `Tensor`.
    name: A name for the operation (optional).

  Returns:
    A `Tensor`. Has the same type as `input`.
  """
  _ctx = _context._context
  if _ctx is not None and _ctx._eager_context.is_eager:
    try:
      _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
        _ctx._context_handle, _ctx._eager_context.device_name,
        "StopGradient", name, _ctx._post_execution_callbacks, input)
      return _result
    except _core._FallbackException:
      try:
        return stop_gradient_eager_fallback(
            input, name=name, ctx=_ctx)
      except _core._SymbolicException:
        pass  # Add nodes to the TensorFlow graph.
      except (TypeError, ValueError):
        # Let registered dispatch handlers try the call before re-raising.
        result = _dispatch.dispatch(
              stop_gradient, input=input, name=name)
        if result is not _dispatch.OpDispatcher.NOT_SUPPORTED:
          return result
        raise
    except _core._NotOkStatusException as e:
      if name is not None:
        message = e.message + " name: " + name
      else:
        message = e.message
      _six.raise_from(_core._status_to_exception(e.code, message), None)
  # Add nodes to the TensorFlow graph.
  try:
    _, _, _op = _op_def_lib._apply_op_helper(
        "StopGradient", input=input, name=name)
  except (TypeError, ValueError):
    # Graph construction failed; give registered dispatch handlers a chance
    # to handle the call before re-raising.
    result = _dispatch.dispatch(
          stop_gradient, input=input, name=name)
    if result is not _dispatch.OpDispatcher.NOT_SUPPORTED:
      return result
    raise
  _result = _op.outputs[:]
  _inputs_flat = _op.inputs
  _attrs = ("T", _op.get_attr("T"))
  _execute.record_gradient(
      "StopGradient", _inputs_flat, _attrs, _result, name)
  _result, = _result
  return _result


def stop_gradient_eager_fallback(input, name=None, ctx=None):
  r"""This is the slowpath function for Eager mode.
This is for function stop_gradient """ _ctx = ctx if ctx else _context.context() _attr_T, (input,) = _execute.args_to_matching_eager([input], _ctx) _inputs_flat = [input] _attrs = ("T", _attr_T) _result = _execute.execute(b"StopGradient", 1, inputs=_inputs_flat, attrs=_attrs, ctx=_ctx, name=name) _execute.record_gradient( "StopGradient", _inputs_flat, _attrs, _result, name) _result, = _result return _result def strided_slice(input, begin, end, strides, begin_mask=0, end_mask=0, ellipsis_mask=0, new_axis_mask=0, shrink_axis_mask=0, name=None): r"""Return a strided slice from `input`. Note, most python users will want to use the Python `Tensor.__getitem__` or `Variable.__getitem__` rather than this op directly. The goal of this op is to produce a new tensor with a subset of the elements from the `n` dimensional `input` tensor. The subset is chosen using a sequence of `m` sparse range specifications encoded into the arguments of this function. Note, in some cases `m` could be equal to `n`, but this need not be the case. Each range specification entry can be one of the following: - An ellipsis (...). Ellipses are used to imply zero or more dimensions of full-dimension selection and are produced using `ellipsis_mask`. For example, `foo[...]` is the identity slice. - A new axis. This is used to insert a new shape=1 dimension and is produced using `new_axis_mask`. For example, `foo[:, ...]` where `foo` is shape `(3, 4)` produces a `(1, 3, 4)` tensor. - A range `begin:end:stride`. This is used to specify how much to choose from a given dimension. `stride` can be any integer but 0. `begin` is an integer which represents the index of the first value to select while `end` represents the index of the last value to select. The number of values selected in each dimension is `end - begin` if `stride > 0` and `begin - end` if `stride < 0`. `begin` and `end` can be negative where `-1` is the last element, `-2` is the second to last. 
`begin_mask` controls whether to replace the explicitly given `begin` with an implicit effective value of `0` if `stride > 0` and `-1` if `stride < 0`. `end_mask` is analogous but produces the number required to create the largest open interval. For example, given a shape `(3,)` tensor `foo[:]`, the effective `begin` and `end` are `0` and `3`. Do not assume this is equivalent to `foo[0:-1]` which has an effective `begin` and `end` of `0` and `2`. Another example is `foo[-2::-1]` which reverses the first dimension of a tensor while dropping the last two (in the original order elements). For example `foo = [1,2,3,4]; foo[-2::-1]` is `[4,3]`. - A single index. This is used to keep only elements that have a given index. For example (`foo[2, :]` on a shape `(5,6)` tensor produces a shape `(6,)` tensor. This is encoded in `begin` and `end` and `shrink_axis_mask`. Each conceptual range specification is encoded in the op's argument. This encoding is best understand by considering a non-trivial example. In particular, `foo[1, 2:4, None, ..., :-3:-1, :]` will be encoded as ``` begin = [1, 2, x, x, 0, x] # x denotes don't care (usually 0) end = [2, 4, x, x, -3, x] strides = [1, 1, x, x, -1, 1] begin_mask = 1<<4 | 1 << 5 = 48 end_mask = 1<<5 = 32 ellipsis_mask = 1<<3 = 8 new_axis_mask = 1<<2 4 shrink_axis_mask = 1<<0 ``` In this case if `foo.shape` is (5, 5, 5, 5, 5, 5) the final shape of the slice becomes (2, 1, 5, 5, 2, 5). Let us walk step by step through each argument specification. 1. The first argument in the example slice is turned into `begin = 1` and `end = begin + 1 = 2`. To disambiguate from the original spec `2:4` we also set the appropriate bit in `shrink_axis_mask`. 2. `2:4` is contributes 2, 4, 1 to begin, end, and stride. All masks have zero bits contributed. 3. None is a synonym for `tf.newaxis`. This means insert a dimension of size 1 dimension in the final shape. Dummy values are contributed to begin, end and stride, while the new_axis_mask bit is set. 4. 
`...` grab the full ranges from as many dimensions as needed to fully specify a slice for every dimension of the input shape. 5. `:-3:-1` shows the use of negative indices. A negative index `i` associated with a dimension that has shape `s` is converted to a positive index `s + i`. So `-1` becomes `s-1` (i.e. the last element). This conversion is done internally so begin, end and strides receive x, -3, and -1. The appropriate begin_mask bit is set to indicate the start range is the full range (ignoring the x). 6. `:` indicates that the entire contents of the corresponding dimension is selected. This is equivalent to `::` or `0::1`. begin, end, and strides receive 0, 0, and 1, respectively. The appropriate bits in `begin_mask` and `end_mask` are also set. *Requirements*: `0 != strides[i] for i in [0, m)` `ellipsis_mask must be a power of two (only one ellipsis)` Args: input: A `Tensor`. begin: A `Tensor`. Must be one of the following types: `int32`, `int64`. `begin[k]` specifies the offset into the `k`th range specification. The exact dimension this corresponds to will be determined by context. Out-of-bounds values will be silently clamped. If the `k`th bit of `begin_mask` then `begin[k]` is ignored and the full range of the appropriate dimension is used instead. Negative values causes indexing to start from the highest element e.g. If `foo==[1,2,3]` then `foo[-1]==3`. end: A `Tensor`. Must have the same type as `begin`. `end[i]` is like `begin` with the exception that `end_mask` is used to determine full ranges. strides: A `Tensor`. Must have the same type as `begin`. `strides[i]` specifies the increment in the `i`th specification after extracting a given element. Negative indices will reverse the original order. Out or range values are clamped to `[0,dim[i]) if slice[i]>0` or `[-1,dim[i]-1] if slice[i] < 0` begin_mask: An optional `int`. Defaults to `0`. a bitmask where a bit i being 1 means to ignore the begin value and instead use the largest interval possible. 
At runtime begin[i] will be replaced with `[0, n-1)` if `stride[i] > 0` or `[-1, n-1]` if `stride[i] < 0` end_mask: An optional `int`. Defaults to `0`. analogous to `begin_mask` ellipsis_mask: An optional `int`. Defaults to `0`. a bitmask where bit `i` being 1 means the `i`th position is actually an ellipsis. One bit at most can be 1. If `ellipsis_mask == 0`, then an implicit ellipsis mask of `1 << (m+1)` is provided. This means that `foo[3:5] == foo[3:5, ...]`. An ellipsis implicitly creates as many range specifications as necessary to fully specify the sliced range for every dimension. For example for a 4-dimensional tensor `foo` the slice `foo[2, ..., 5:8]` implies `foo[2, :, :, 5:8]`. new_axis_mask: An optional `int`. Defaults to `0`. a bitmask where bit `i` being 1 means the `i`th specification creates a new shape 1 dimension. For example `foo[:4, tf.newaxis, :2]` would produce a shape `(4, 1, 2)` tensor. shrink_axis_mask: An optional `int`. Defaults to `0`. a bitmask where bit `i` implies that the `i`th specification should shrink the dimensionality. begin and end must imply a slice of size 1 in the dimension. For example in python one might do `foo[:, 3, :]` which would result in `shrink_axis_mask` being 2. name: A name for the operation (optional). Returns: A `Tensor`. Has the same type as `input`. 
""" _ctx = _context._context if _ctx is not None and _ctx._eager_context.is_eager: try: _result = _pywrap_tensorflow.TFE_Py_FastPathExecute( _ctx._context_handle, _ctx._eager_context.device_name, "StridedSlice", name, _ctx._post_execution_callbacks, input, begin, end, strides, "begin_mask", begin_mask, "end_mask", end_mask, "ellipsis_mask", ellipsis_mask, "new_axis_mask", new_axis_mask, "shrink_axis_mask", shrink_axis_mask) return _result except _core._FallbackException: try: return strided_slice_eager_fallback( input, begin, end, strides, begin_mask=begin_mask, end_mask=end_mask, ellipsis_mask=ellipsis_mask, new_axis_mask=new_axis_mask, shrink_axis_mask=shrink_axis_mask, name=name, ctx=_ctx) except _core._SymbolicException: pass # Add nodes to the TensorFlow graph. except _core._NotOkStatusException as e: if name is not None: message = e.message + " name: " + name else: message = e.message _six.raise_from(_core._status_to_exception(e.code, message), None) # Add nodes to the TensorFlow graph. 
  # --- strided_slice, graph-mode tail (the `def strided_slice(...)` header
  # and its eager fast path are above this chunk). Each optional *_mask attr
  # is defaulted to 0 and validated as a Python int. ---
  if begin_mask is None:
    begin_mask = 0
  begin_mask = _execute.make_int(begin_mask, "begin_mask")
  if end_mask is None:
    end_mask = 0
  end_mask = _execute.make_int(end_mask, "end_mask")
  if ellipsis_mask is None:
    ellipsis_mask = 0
  ellipsis_mask = _execute.make_int(ellipsis_mask, "ellipsis_mask")
  if new_axis_mask is None:
    new_axis_mask = 0
  new_axis_mask = _execute.make_int(new_axis_mask, "new_axis_mask")
  if shrink_axis_mask is None:
    shrink_axis_mask = 0
  shrink_axis_mask = _execute.make_int(shrink_axis_mask, "shrink_axis_mask")
  # Create the StridedSlice node in the current graph.
  _, _, _op = _op_def_lib._apply_op_helper(
        "StridedSlice", input=input, begin=begin, end=end, strides=strides,
                        begin_mask=begin_mask, end_mask=end_mask,
                        ellipsis_mask=ellipsis_mask,
                        new_axis_mask=new_axis_mask,
                        shrink_axis_mask=shrink_axis_mask, name=name)
  _result = _op.outputs[:]
  _inputs_flat = _op.inputs
  # Attr values are read back from the created op so the recorded gradient
  # sees the resolved (post type-inference) values.
  _attrs = ("T", _op.get_attr("T"), "Index", _op.get_attr("Index"),
            "begin_mask", _op.get_attr("begin_mask"), "end_mask",
            _op.get_attr("end_mask"), "ellipsis_mask",
            _op.get_attr("ellipsis_mask"), "new_axis_mask",
            _op.get_attr("new_axis_mask"), "shrink_axis_mask",
            _op.get_attr("shrink_axis_mask"))
  _execute.record_gradient(
      "StridedSlice", _inputs_flat, _attrs, _result, name)
  _result, = _result
  return _result


def strided_slice_eager_fallback(input, begin, end, strides, begin_mask=0, end_mask=0, ellipsis_mask=0, new_axis_mask=0, shrink_axis_mask=0, name=None, ctx=None):
  r"""This is the slowpath function for Eager mode.

  This is for function strided_slice
  """
  _ctx = ctx if ctx else _context.context()
  # Default + validate the mask attrs exactly as the graph path does.
  if begin_mask is None:
    begin_mask = 0
  begin_mask = _execute.make_int(begin_mask, "begin_mask")
  if end_mask is None:
    end_mask = 0
  end_mask = _execute.make_int(end_mask, "end_mask")
  if ellipsis_mask is None:
    ellipsis_mask = 0
  ellipsis_mask = _execute.make_int(ellipsis_mask, "ellipsis_mask")
  if new_axis_mask is None:
    new_axis_mask = 0
  new_axis_mask = _execute.make_int(new_axis_mask, "new_axis_mask")
  if shrink_axis_mask is None:
    shrink_axis_mask = 0
  shrink_axis_mask = _execute.make_int(shrink_axis_mask, "shrink_axis_mask")
  # Infer the T attr from `input`, and one shared Index dtype for
  # begin/end/strides (must agree on int32 or int64).
  _attr_T, (input,) = _execute.args_to_matching_eager([input], _ctx)
  _attr_Index, _inputs_Index = _execute.args_to_matching_eager([begin, end, strides], _ctx)
  (begin, end, strides) = _inputs_Index
  _inputs_flat = [input, begin, end, strides]
  _attrs = ("T", _attr_T, "Index", _attr_Index, "begin_mask", begin_mask,
            "end_mask", end_mask, "ellipsis_mask", ellipsis_mask,
            "new_axis_mask", new_axis_mask, "shrink_axis_mask",
            shrink_axis_mask)
  _result = _execute.execute(b"StridedSlice", 1, inputs=_inputs_flat,
                             attrs=_attrs, ctx=_ctx, name=name)
  _execute.record_gradient(
      "StridedSlice", _inputs_flat, _attrs, _result, name)
  _result, = _result
  return _result


def strided_slice_assign(ref, begin, end, strides, value, begin_mask=0, end_mask=0, ellipsis_mask=0, new_axis_mask=0, shrink_axis_mask=0, name=None):
  r"""Assign `value` to the sliced l-value reference of `ref`.

  The values of `value` are assigned to the positions in the variable
  `ref` that are selected by the slice parameters. The slice parameters
  `begin`, `end`, `strides`, etc. work exactly as in `StridedSlice`.

  NOTE this op currently does not support broadcasting and so `value`'s
  shape must be exactly the shape produced by the slice of `ref`.

  Args:
    ref: A mutable `Tensor`.
    begin: A `Tensor`. Must be one of the following types: `int32`, `int64`.
    end: A `Tensor`. Must have the same type as `begin`.
    strides: A `Tensor`. Must have the same type as `begin`.
    value: A `Tensor`. Must have the same type as `ref`.
    begin_mask: An optional `int`. Defaults to `0`.
    end_mask: An optional `int`. Defaults to `0`.
    ellipsis_mask: An optional `int`. Defaults to `0`.
    new_axis_mask: An optional `int`. Defaults to `0`.
    shrink_axis_mask: An optional `int`. Defaults to `0`.
    name: A name for the operation (optional).

  Returns:
    A mutable `Tensor`. Has the same type as `ref`.
  """
  _ctx = _context._context
  if _ctx is not None and _ctx._eager_context.is_eager:
    # Ref-typed outputs only exist in graph mode, so there is no eager path.
    raise RuntimeError("strided_slice_assign op does not support eager execution. Arg 'output_ref' is a ref.")
  # Add nodes to the TensorFlow graph.
  if begin_mask is None:
    begin_mask = 0
  begin_mask = _execute.make_int(begin_mask, "begin_mask")
  if end_mask is None:
    end_mask = 0
  end_mask = _execute.make_int(end_mask, "end_mask")
  if ellipsis_mask is None:
    ellipsis_mask = 0
  ellipsis_mask = _execute.make_int(ellipsis_mask, "ellipsis_mask")
  if new_axis_mask is None:
    new_axis_mask = 0
  new_axis_mask = _execute.make_int(new_axis_mask, "new_axis_mask")
  if shrink_axis_mask is None:
    shrink_axis_mask = 0
  shrink_axis_mask = _execute.make_int(shrink_axis_mask, "shrink_axis_mask")
  _, _, _op = _op_def_lib._apply_op_helper(
        "StridedSliceAssign", ref=ref, begin=begin, end=end, strides=strides,
                              value=value, begin_mask=begin_mask,
                              end_mask=end_mask, ellipsis_mask=ellipsis_mask,
                              new_axis_mask=new_axis_mask,
                              shrink_axis_mask=shrink_axis_mask, name=name)
  _result = _op.outputs[:]
  _inputs_flat = _op.inputs
  _attrs = ("T", _op.get_attr("T"), "Index", _op.get_attr("Index"),
            "begin_mask", _op.get_attr("begin_mask"), "end_mask",
            _op.get_attr("end_mask"), "ellipsis_mask",
            _op.get_attr("ellipsis_mask"), "new_axis_mask",
            _op.get_attr("new_axis_mask"), "shrink_axis_mask",
            _op.get_attr("shrink_axis_mask"))
  _execute.record_gradient(
      "StridedSliceAssign", _inputs_flat, _attrs, _result, name)
  _result, = _result
  return _result


def strided_slice_assign_eager_fallback(ref, begin, end, strides, value, begin_mask=0, end_mask=0, ellipsis_mask=0, new_axis_mask=0, shrink_axis_mask=0, name=None, ctx=None):
  # Same restriction as strided_slice_assign: ref outputs cannot be
  # represented in eager mode, so the fallback unconditionally raises too.
  raise RuntimeError("strided_slice_assign op does not support eager execution. Arg 'output_ref' is a ref.")


def strided_slice_grad(shape, begin, end, strides, dy, begin_mask=0, end_mask=0, ellipsis_mask=0, new_axis_mask=0, shrink_axis_mask=0, name=None):
  r"""Returns the gradient of `StridedSlice`.

  Since `StridedSlice` cuts out pieces of its `input` which is size
  `shape`, its gradient will have the same shape (which is passed here
  as `shape`). The gradient will be zero in any element that the slice
  does not select.

  Arguments are the same as StridedSlice with the exception that
  `dy` is the input gradient to be propagated and `shape` is the
  shape of `StridedSlice`'s `input`.

  Args:
    shape: A `Tensor`. Must be one of the following types: `int32`, `int64`.
    begin: A `Tensor`. Must have the same type as `shape`.
    end: A `Tensor`. Must have the same type as `shape`.
    strides: A `Tensor`. Must have the same type as `shape`.
    dy: A `Tensor`.
    begin_mask: An optional `int`. Defaults to `0`.
    end_mask: An optional `int`. Defaults to `0`.
    ellipsis_mask: An optional `int`. Defaults to `0`.
    new_axis_mask: An optional `int`. Defaults to `0`.
    shrink_axis_mask: An optional `int`. Defaults to `0`.
    name: A name for the operation (optional).

  Returns:
    A `Tensor`. Has the same type as `dy`.
  """
  _ctx = _context._context
  if _ctx is not None and _ctx._eager_context.is_eager:
    # Eager fast path: dispatch straight to the C layer; fall back to the
    # Python slow path (or graph building) on failure.
    try:
      _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
        _ctx._context_handle, _ctx._eager_context.device_name,
        "StridedSliceGrad", name, _ctx._post_execution_callbacks, shape,
        begin, end, strides, dy, "begin_mask", begin_mask, "end_mask",
        end_mask, "ellipsis_mask", ellipsis_mask, "new_axis_mask",
        new_axis_mask, "shrink_axis_mask", shrink_axis_mask)
      return _result
    except _core._FallbackException:
      try:
        return strided_slice_grad_eager_fallback(
            shape, begin, end, strides, dy, begin_mask=begin_mask,
            end_mask=end_mask, ellipsis_mask=ellipsis_mask,
            new_axis_mask=new_axis_mask, shrink_axis_mask=shrink_axis_mask,
            name=name, ctx=_ctx)
      except _core._SymbolicException:
        pass  # Add nodes to the TensorFlow graph.
    except _core._NotOkStatusException as e:
      if name is not None:
        message = e.message + " name: " + name
      else:
        message = e.message
      _six.raise_from(_core._status_to_exception(e.code, message), None)
  # Add nodes to the TensorFlow graph.
  if begin_mask is None:
    begin_mask = 0
  begin_mask = _execute.make_int(begin_mask, "begin_mask")
  if end_mask is None:
    end_mask = 0
  end_mask = _execute.make_int(end_mask, "end_mask")
  if ellipsis_mask is None:
    ellipsis_mask = 0
  ellipsis_mask = _execute.make_int(ellipsis_mask, "ellipsis_mask")
  if new_axis_mask is None:
    new_axis_mask = 0
  new_axis_mask = _execute.make_int(new_axis_mask, "new_axis_mask")
  if shrink_axis_mask is None:
    shrink_axis_mask = 0
  shrink_axis_mask = _execute.make_int(shrink_axis_mask, "shrink_axis_mask")
  _, _, _op = _op_def_lib._apply_op_helper(
        "StridedSliceGrad", shape=shape, begin=begin, end=end,
                            strides=strides, dy=dy, begin_mask=begin_mask,
                            end_mask=end_mask, ellipsis_mask=ellipsis_mask,
                            new_axis_mask=new_axis_mask,
                            shrink_axis_mask=shrink_axis_mask, name=name)
  _result = _op.outputs[:]
  _inputs_flat = _op.inputs
  _attrs = ("T", _op.get_attr("T"), "Index", _op.get_attr("Index"),
            "begin_mask", _op.get_attr("begin_mask"), "end_mask",
            _op.get_attr("end_mask"), "ellipsis_mask",
            _op.get_attr("ellipsis_mask"), "new_axis_mask",
            _op.get_attr("new_axis_mask"), "shrink_axis_mask",
            _op.get_attr("shrink_axis_mask"))
  _execute.record_gradient(
      "StridedSliceGrad", _inputs_flat, _attrs, _result, name)
  _result, = _result
  return _result


def strided_slice_grad_eager_fallback(shape, begin, end, strides, dy, begin_mask=0, end_mask=0, ellipsis_mask=0, new_axis_mask=0, shrink_axis_mask=0, name=None, ctx=None):
  r"""This is the slowpath function for Eager mode.

  This is for function strided_slice_grad
  """
  _ctx = ctx if ctx else _context.context()
  if begin_mask is None:
    begin_mask = 0
  begin_mask = _execute.make_int(begin_mask, "begin_mask")
  if end_mask is None:
    end_mask = 0
  end_mask = _execute.make_int(end_mask, "end_mask")
  if ellipsis_mask is None:
    ellipsis_mask = 0
  ellipsis_mask = _execute.make_int(ellipsis_mask, "ellipsis_mask")
  if new_axis_mask is None:
    new_axis_mask = 0
  new_axis_mask = _execute.make_int(new_axis_mask, "new_axis_mask")
  if shrink_axis_mask is None:
    shrink_axis_mask = 0
  shrink_axis_mask = _execute.make_int(shrink_axis_mask, "shrink_axis_mask")
  # T comes from `dy`; shape/begin/end/strides must share one Index dtype.
  _attr_T, (dy,) = _execute.args_to_matching_eager([dy], _ctx)
  _attr_Index, _inputs_Index = _execute.args_to_matching_eager([shape, begin, end, strides], _ctx)
  (shape, begin, end, strides) = _inputs_Index
  _inputs_flat = [shape, begin, end, strides, dy]
  _attrs = ("T", _attr_T, "Index", _attr_Index, "begin_mask", begin_mask,
            "end_mask", end_mask, "ellipsis_mask", ellipsis_mask,
            "new_axis_mask", new_axis_mask, "shrink_axis_mask",
            shrink_axis_mask)
  _result = _execute.execute(b"StridedSliceGrad", 1, inputs=_inputs_flat,
                             attrs=_attrs, ctx=_ctx, name=name)
  _execute.record_gradient(
      "StridedSliceGrad", _inputs_flat, _attrs, _result, name)
  _result, = _result
  return _result


@_dispatch.add_dispatch_list
@tf_export('tensor_scatter_add')
def tensor_scatter_add(tensor, indices, updates, name=None):
  r"""Adds sparse `updates` to an existing tensor according to `indices`.

  This operation creates a new tensor by adding sparse `updates` to the
  passed in `tensor`.
  This operation is very similar to `tf.scatter_nd_add`, except that the
  updates are added onto an existing tensor (as opposed to a variable).
  If the memory for the existing tensor cannot be re-used, a copy is made
  and updated.

  `indices` is an integer tensor containing indices into a new tensor of
  shape `shape`.
  The last dimension of `indices` can be at most the rank of `shape`:

      indices.shape[-1] <= shape.rank

  The last dimension of `indices` corresponds to indices into elements
  (if `indices.shape[-1] = shape.rank`) or slices
  (if `indices.shape[-1] < shape.rank`) along dimension
  `indices.shape[-1]` of `shape`.  `updates` is a tensor with shape

      indices.shape[:-1] + shape[indices.shape[-1]:]

  The simplest form of tensor_scatter_add is to add individual elements
  to a tensor by index. For example, say we want to add 4 elements in a
  rank-1 tensor with 8 elements.

  In Python, this scatter add operation would look like this:

  ```python
      indices = tf.constant([[4], [3], [1], [7]])
      updates = tf.constant([9, 10, 11, 12])
      tensor = tf.ones([8], dtype=tf.int32)
      updated = tf.tensor_scatter_add(tensor, indices, updates)
      with tf.Session() as sess:
        print(sess.run(updated))
  ```

  The resulting tensor would look like this:

      [1, 12, 1, 11, 10, 1, 1, 13]

  We can also insert entire slices of a higher rank tensor all at once.
  For example, if we wanted to insert two slices in the first dimension
  of a rank-3 tensor with two matrices of new values.

  In Python, this scatter add operation would look like this:

  ```python
      indices = tf.constant([[0], [2]])
      updates = tf.constant([[[5, 5, 5, 5], [6, 6, 6, 6],
                              [7, 7, 7, 7], [8, 8, 8, 8]],
                             [[5, 5, 5, 5], [6, 6, 6, 6],
                              [7, 7, 7, 7], [8, 8, 8, 8]]])
      tensor = tf.ones([4, 4, 4])
      updated = tf.tensor_scatter_add(tensor, indices, updates)
      with tf.Session() as sess:
        print(sess.run(updated))
  ```

  The resulting tensor would look like this:

      [[[6, 6, 6, 6], [7, 7, 7, 7], [8, 8, 8, 8], [9, 9, 9, 9]],
       [[1, 1, 1, 1], [1, 1, 1, 1], [1, 1, 1, 1], [1, 1, 1, 1]],
       [[6, 6, 6, 6], [7, 7, 7, 7], [8, 8, 8, 8], [9, 9, 9, 9]],
       [[1, 1, 1, 1], [1, 1, 1, 1], [1, 1, 1, 1], [1, 1, 1, 1]]]

  Note that on CPU, if an out of bound index is found, an error is
  returned. On GPU, if an out of bound index is found, the index is
  ignored.

  Args:
    tensor: A `Tensor`. Tensor to copy/update.
    indices: A `Tensor`. Must be one of the following types: `int32`,
      `int64`. Index tensor.
    updates: A `Tensor`. Must have the same type as `tensor`.
      Updates to scatter into output.
    name: A name for the operation (optional).

  Returns:
    A `Tensor`. Has the same type as `tensor`.
  """
  _ctx = _context._context
  if _ctx is not None and _ctx._eager_context.is_eager:
    # Eager fast path; on TypeError/ValueError try the registered Python
    # dispatchers before re-raising.
    try:
      _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
        _ctx._context_handle, _ctx._eager_context.device_name,
        "TensorScatterAdd", name, _ctx._post_execution_callbacks, tensor,
        indices, updates)
      return _result
    except _core._FallbackException:
      try:
        return tensor_scatter_add_eager_fallback(
            tensor, indices, updates, name=name, ctx=_ctx)
      except _core._SymbolicException:
        pass  # Add nodes to the TensorFlow graph.
      except (TypeError, ValueError):
        result = _dispatch.dispatch(
              tensor_scatter_add, tensor=tensor, indices=indices,
              updates=updates, name=name)
        if result is not _dispatch.OpDispatcher.NOT_SUPPORTED:
          return result
        raise
    except _core._NotOkStatusException as e:
      if name is not None:
        message = e.message + " name: " + name
      else:
        message = e.message
      _six.raise_from(_core._status_to_exception(e.code, message), None)
  # Add nodes to the TensorFlow graph.
  try:
    _, _, _op = _op_def_lib._apply_op_helper(
        "TensorScatterAdd", tensor=tensor, indices=indices,
                            updates=updates, name=name)
  except (TypeError, ValueError):
    result = _dispatch.dispatch(
          tensor_scatter_add, tensor=tensor, indices=indices,
          updates=updates, name=name)
    if result is not _dispatch.OpDispatcher.NOT_SUPPORTED:
      return result
    raise
  _result = _op.outputs[:]
  _inputs_flat = _op.inputs
  _attrs = ("T", _op.get_attr("T"), "Tindices", _op.get_attr("Tindices"))
  _execute.record_gradient(
      "TensorScatterAdd", _inputs_flat, _attrs, _result, name)
  _result, = _result
  return _result


def tensor_scatter_add_eager_fallback(tensor, indices, updates, name=None, ctx=None):
  r"""This is the slowpath function for Eager mode.

  This is for function tensor_scatter_add
  """
  _ctx = ctx if ctx else _context.context()
  # T is inferred jointly from tensor/updates; Tindices from indices.
  _attr_T, _inputs_T = _execute.args_to_matching_eager([tensor, updates], _ctx)
  (tensor, updates) = _inputs_T
  _attr_Tindices, (indices,) = _execute.args_to_matching_eager([indices], _ctx)
  _inputs_flat = [tensor, indices, updates]
  _attrs = ("T", _attr_T, "Tindices", _attr_Tindices)
  _result = _execute.execute(b"TensorScatterAdd", 1, inputs=_inputs_flat,
                             attrs=_attrs, ctx=_ctx, name=name)
  _execute.record_gradient(
      "TensorScatterAdd", _inputs_flat, _attrs, _result, name)
  _result, = _result
  return _result


@_dispatch.add_dispatch_list
@tf_export('tensor_scatter_sub')
def tensor_scatter_sub(tensor, indices, updates, name=None):
  r"""Subtracts sparse `updates` from an existing tensor according to `indices`.

  This operation creates a new tensor by subtracting sparse `updates`
  from the passed in `tensor`.
  This operation is very similar to `tf.scatter_nd_sub`, except that the
  updates are subtracted from an existing tensor (as opposed to a
  variable). If the memory for the existing tensor cannot be re-used, a
  copy is made and updated.

  `indices` is an integer tensor containing indices into a new tensor of
  shape `shape`.

  The last dimension of `indices` can be at most the rank of `shape`:

      indices.shape[-1] <= shape.rank

  The last dimension of `indices` corresponds to indices into elements
  (if `indices.shape[-1] = shape.rank`) or slices
  (if `indices.shape[-1] < shape.rank`) along dimension
  `indices.shape[-1]` of `shape`.  `updates` is a tensor with shape

      indices.shape[:-1] + shape[indices.shape[-1]:]

  The simplest form of tensor_scatter_sub is to subtract individual
  elements from a tensor by index. For example, say we want to insert 4
  scattered elements in a rank-1 tensor with 8 elements.

  In Python, this scatter subtract operation would look like this:

  ```python
      indices = tf.constant([[4], [3], [1], [7]])
      updates = tf.constant([9, 10, 11, 12])
      tensor = tf.ones([8], dtype=tf.int32)
      updated = tf.tensor_scatter_sub(tensor, indices, updates)
      with tf.Session() as sess:
        print(sess.run(updated))
  ```

  The resulting tensor would look like this:

      [1, -10, 1, -9, -8, 1, 1, -11]

  We can also insert entire slices of a higher rank tensor all at once.
  For example, if we wanted to insert two slices in the first dimension
  of a rank-3 tensor with two matrices of new values.

  In Python, this scatter add operation would look like this:

  ```python
      indices = tf.constant([[0], [2]])
      updates = tf.constant([[[5, 5, 5, 5], [6, 6, 6, 6],
                              [7, 7, 7, 7], [8, 8, 8, 8]],
                             [[5, 5, 5, 5], [6, 6, 6, 6],
                              [7, 7, 7, 7], [8, 8, 8, 8]]])
      tensor = tf.ones([4, 4, 4])
      updated = tf.tensor_scatter_sub(tensor, indices, updates)
      with tf.Session() as sess:
        print(sess.run(updated))
  ```

  The resulting tensor would look like this:

      [[[-4, -4, -4, -4], [-5, -5, -5, -5], [-6, -6, -6, -6], [-7, -7, -7, -7]],
       [[1, 1, 1, 1], [1, 1, 1, 1], [1, 1, 1, 1], [1, 1, 1, 1]],
       [[-4, -4, -4, -4], [-5, -5, -5, -5], [-6, -6, -6, -6], [-7, -7, -7, -7]],
       [[1, 1, 1, 1], [1, 1, 1, 1], [1, 1, 1, 1], [1, 1, 1, 1]]]

  Note that on CPU, if an out of bound index is found, an error is
  returned. On GPU, if an out of bound index is found, the index is
  ignored.

  Args:
    tensor: A `Tensor`. Tensor to copy/update.
    indices: A `Tensor`. Must be one of the following types: `int32`,
      `int64`. Index tensor.
    updates: A `Tensor`. Must have the same type as `tensor`.
      Updates to scatter into output.
    name: A name for the operation (optional).

  Returns:
    A `Tensor`. Has the same type as `tensor`.
  """
  _ctx = _context._context
  if _ctx is not None and _ctx._eager_context.is_eager:
    try:
      _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
        _ctx._context_handle, _ctx._eager_context.device_name,
        "TensorScatterSub", name, _ctx._post_execution_callbacks, tensor,
        indices, updates)
      return _result
    except _core._FallbackException:
      try:
        return tensor_scatter_sub_eager_fallback(
            tensor, indices, updates, name=name, ctx=_ctx)
      except _core._SymbolicException:
        pass  # Add nodes to the TensorFlow graph.
      except (TypeError, ValueError):
        result = _dispatch.dispatch(
              tensor_scatter_sub, tensor=tensor, indices=indices,
              updates=updates, name=name)
        if result is not _dispatch.OpDispatcher.NOT_SUPPORTED:
          return result
        raise
    except _core._NotOkStatusException as e:
      if name is not None:
        message = e.message + " name: " + name
      else:
        message = e.message
      _six.raise_from(_core._status_to_exception(e.code, message), None)
  # Add nodes to the TensorFlow graph.
  try:
    _, _, _op = _op_def_lib._apply_op_helper(
        "TensorScatterSub", tensor=tensor, indices=indices,
                            updates=updates, name=name)
  except (TypeError, ValueError):
    result = _dispatch.dispatch(
          tensor_scatter_sub, tensor=tensor, indices=indices,
          updates=updates, name=name)
    if result is not _dispatch.OpDispatcher.NOT_SUPPORTED:
      return result
    raise
  _result = _op.outputs[:]
  _inputs_flat = _op.inputs
  _attrs = ("T", _op.get_attr("T"), "Tindices", _op.get_attr("Tindices"))
  _execute.record_gradient(
      "TensorScatterSub", _inputs_flat, _attrs, _result, name)
  _result, = _result
  return _result


def tensor_scatter_sub_eager_fallback(tensor, indices, updates, name=None, ctx=None):
  r"""This is the slowpath function for Eager mode.

  This is for function tensor_scatter_sub
  """
  _ctx = ctx if ctx else _context.context()
  _attr_T, _inputs_T = _execute.args_to_matching_eager([tensor, updates], _ctx)
  (tensor, updates) = _inputs_T
  _attr_Tindices, (indices,) = _execute.args_to_matching_eager([indices], _ctx)
  _inputs_flat = [tensor, indices, updates]
  _attrs = ("T", _attr_T, "Tindices", _attr_Tindices)
  _result = _execute.execute(b"TensorScatterSub", 1, inputs=_inputs_flat,
                             attrs=_attrs, ctx=_ctx, name=name)
  _execute.record_gradient(
      "TensorScatterSub", _inputs_flat, _attrs, _result, name)
  _result, = _result
  return _result


@_dispatch.add_dispatch_list
@tf_export('tensor_scatter_update')
def tensor_scatter_update(tensor, indices, updates, name=None):
  r"""Scatter `updates` into an existing tensor according to `indices`.

  This operation creates a new tensor by applying sparse `updates` to
  the passed in `tensor`.
  This operation is very similar to `tf.scatter_nd`, except that the
  updates are scattered onto an existing tensor (as opposed to a
  zero-tensor). If the memory for the existing tensor cannot be re-used,
  a copy is made and updated.

  If `indices` contains duplicates, then their updates are accumulated
  (summed).

  **WARNING**: The order in which updates are applied is nondeterministic,
  so the output will be nondeterministic if `indices` contains duplicates
  -- because of some numerical approximation issues, numbers summed in
  different order may yield different results.

  `indices` is an integer tensor containing indices into a new tensor of
  shape `shape`.

  The last dimension of `indices` can be at most the rank of `shape`:

      indices.shape[-1] <= shape.rank

  The last dimension of `indices` corresponds to indices into elements
  (if `indices.shape[-1] = shape.rank`) or slices
  (if `indices.shape[-1] < shape.rank`) along dimension
  `indices.shape[-1]` of `shape`.
`updates` is a tensor with shape indices.shape[:-1] + shape[indices.shape[-1]:] The simplest form of scatter is to insert individual elements in a tensor by index. For example, say we want to insert 4 scattered elements in a rank-1 tensor with 8 elements. <div style="width:70%; margin:auto; margin-bottom:10px; margin-top:20px;"> <img style="width:100%" src="https://www.tensorflow.org/images/ScatterNd1.png" alt> </div> In Python, this scatter operation would look like this: ```python indices = tf.constant([[4], [3], [1], [7]]) updates = tf.constant([9, 10, 11, 12]) tensor = tf.ones([8], dtype=tf.int32) updated = tf.tensor_scatter_update(tensor, indices, updates) with tf.Session() as sess: print(sess.run(scatter)) ``` The resulting tensor would look like this: [1, 11, 1, 10, 9, 1, 1, 12] We can also, insert entire slices of a higher rank tensor all at once. For example, if we wanted to insert two slices in the first dimension of a rank-3 tensor with two matrices of new values. In Python, this scatter operation would look like this: ```python indices = tf.constant([[0], [2]]) updates = tf.constant([[[5, 5, 5, 5], [6, 6, 6, 6], [7, 7, 7, 7], [8, 8, 8, 8]], [[5, 5, 5, 5], [6, 6, 6, 6], [7, 7, 7, 7], [8, 8, 8, 8]]]) tensor = tf.ones([4, 4, 4]) updated = tf.tensor_scatter_update(tensor, indices, updates) with tf.Session() as sess: print(sess.run(scatter)) ``` The resulting tensor would look like this: [[[5, 5, 5, 5], [6, 6, 6, 6], [7, 7, 7, 7], [8, 8, 8, 8]], [[1, 1, 1, 1], [1, 1, 1, 1], [1, 1, 1, 1], [1, 1, 1, 1]], [[5, 5, 5, 5], [6, 6, 6, 6], [7, 7, 7, 7], [8, 8, 8, 8]], [[1, 1, 1, 1], [1, 1, 1, 1], [1, 1, 1, 1], [1, 1, 1, 1]]] Note that on CPU, if an out of bound index is found, an error is returned. On GPU, if an out of bound index is found, the index is ignored. Args: tensor: A `Tensor`. Tensor to copy/update. indices: A `Tensor`. Must be one of the following types: `int32`, `int64`. Index tensor. updates: A `Tensor`. Must have the same type as `tensor`. 
Updates to scatter into output. name: A name for the operation (optional). Returns: A `Tensor`. Has the same type as `tensor`. """ _ctx = _context._context if _ctx is not None and _ctx._eager_context.is_eager: try: _result = _pywrap_tensorflow.TFE_Py_FastPathExecute( _ctx._context_handle, _ctx._eager_context.device_name, "TensorScatterUpdate", name, _ctx._post_execution_callbacks, tensor, indices, updates) return _result except _core._FallbackException: try: return tensor_scatter_update_eager_fallback( tensor, indices, updates, name=name, ctx=_ctx) except _core._SymbolicException: pass # Add nodes to the TensorFlow graph. except (TypeError, ValueError): result = _dispatch.dispatch( tensor_scatter_update, tensor=tensor, indices=indices, updates=updates, name=name) if result is not _dispatch.OpDispatcher.NOT_SUPPORTED: return result raise except _core._NotOkStatusException as e: if name is not None: message = e.message + " name: " + name else: message = e.message _six.raise_from(_core._status_to_exception(e.code, message), None) # Add nodes to the TensorFlow graph. try: _, _, _op = _op_def_lib._apply_op_helper( "TensorScatterUpdate", tensor=tensor, indices=indices, updates=updates, name=name) except (TypeError, ValueError): result = _dispatch.dispatch( tensor_scatter_update, tensor=tensor, indices=indices, updates=updates, name=name) if result is not _dispatch.OpDispatcher.NOT_SUPPORTED: return result raise _result = _op.outputs[:] _inputs_flat = _op.inputs _attrs = ("T", _op.get_attr("T"), "Tindices", _op.get_attr("Tindices")) _execute.record_gradient( "TensorScatterUpdate", _inputs_flat, _attrs, _result, name) _result, = _result return _result def tensor_scatter_update_eager_fallback(tensor, indices, updates, name=None, ctx=None): r"""This is the slowpath function for Eager mode. 
This is for function tensor_scatter_update """ _ctx = ctx if ctx else _context.context() _attr_T, _inputs_T = _execute.args_to_matching_eager([tensor, updates], _ctx) (tensor, updates) = _inputs_T _attr_Tindices, (indices,) = _execute.args_to_matching_eager([indices], _ctx) _inputs_flat = [tensor, indices, updates] _attrs = ("T", _attr_T, "Tindices", _attr_Tindices) _result = _execute.execute(b"TensorScatterUpdate", 1, inputs=_inputs_flat, attrs=_attrs, ctx=_ctx, name=name) _execute.record_gradient( "TensorScatterUpdate", _inputs_flat, _attrs, _result, name) _result, = _result return _result @_dispatch.add_dispatch_list @tf_export('tile', v1=['tile', 'manip.tile']) @deprecated_endpoints('manip.tile') def tile(input, multiples, name=None): r"""Constructs a tensor by tiling a given tensor. This operation creates a new tensor by replicating `input` `multiples` times. The output tensor's i'th dimension has `input.dims(i) * multiples[i]` elements, and the values of `input` are replicated `multiples[i]` times along the 'i'th dimension. For example, tiling `[a b c d]` by `[2]` produces `[a b c d a b c d]`. Args: input: A `Tensor`. 1-D or higher. multiples: A `Tensor`. Must be one of the following types: `int32`, `int64`. 1-D. Length must be the same as the number of dimensions in `input` name: A name for the operation (optional). Returns: A `Tensor`. Has the same type as `input`. """ _ctx = _context._context if _ctx is not None and _ctx._eager_context.is_eager: try: _result = _pywrap_tensorflow.TFE_Py_FastPathExecute( _ctx._context_handle, _ctx._eager_context.device_name, "Tile", name, _ctx._post_execution_callbacks, input, multiples) return _result except _core._FallbackException: try: return tile_eager_fallback( input, multiples, name=name, ctx=_ctx) except _core._SymbolicException: pass # Add nodes to the TensorFlow graph. 
except (TypeError, ValueError): result = _dispatch.dispatch( tile, input=input, multiples=multiples, name=name) if result is not _dispatch.OpDispatcher.NOT_SUPPORTED: return result raise except _core._NotOkStatusException as e: if name is not None: message = e.message + " name: " + name else: message = e.message _six.raise_from(_core._status_to_exception(e.code, message), None) # Add nodes to the TensorFlow graph. try: _, _, _op = _op_def_lib._apply_op_helper( "Tile", input=input, multiples=multiples, name=name) except (TypeError, ValueError): result = _dispatch.dispatch( tile, input=input, multiples=multiples, name=name) if result is not _dispatch.OpDispatcher.NOT_SUPPORTED: return result raise _result = _op.outputs[:] _inputs_flat = _op.inputs _attrs = ("T", _op.get_attr("T"), "Tmultiples", _op.get_attr("Tmultiples")) _execute.record_gradient( "Tile", _inputs_flat, _attrs, _result, name) _result, = _result return _result def tile_eager_fallback(input, multiples, name=None, ctx=None): r"""This is the slowpath function for Eager mode. This is for function tile """ _ctx = ctx if ctx else _context.context() _attr_T, (input,) = _execute.args_to_matching_eager([input], _ctx) _attr_Tmultiples, (multiples,) = _execute.args_to_matching_eager([multiples], _ctx, _dtypes.int32) _inputs_flat = [input, multiples] _attrs = ("T", _attr_T, "Tmultiples", _attr_Tmultiples) _result = _execute.execute(b"Tile", 1, inputs=_inputs_flat, attrs=_attrs, ctx=_ctx, name=name) _execute.record_gradient( "Tile", _inputs_flat, _attrs, _result, name) _result, = _result return _result def tile_grad(input, multiples, name=None): r"""Returns the gradient of `Tile`. Since `Tile` takes an input and repeats the input `multiples` times along each dimension, `TileGrad` takes in `multiples` and aggregates each repeated tile of `input` into `output`. Args: input: A `Tensor`. multiples: A `Tensor` of type `int32`. name: A name for the operation (optional). Returns: A `Tensor`. Has the same type as `input`. 
  """
  # Fast path: when executing eagerly, call straight into the C API.
  _ctx = _context._context
  if _ctx is not None and _ctx._eager_context.is_eager:
    try:
      _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
        _ctx._context_handle, _ctx._eager_context.device_name, "TileGrad",
        name, _ctx._post_execution_callbacks, input, multiples)
      return _result
    except _core._FallbackException:
      try:
        return tile_grad_eager_fallback(
            input, multiples, name=name, ctx=_ctx)
      except _core._SymbolicException:
        pass  # Add nodes to the TensorFlow graph.
    except _core._NotOkStatusException as e:
      # Convert the C++ status into a Python exception, tagging the
      # message with the op name for easier debugging.
      if name is not None:
        message = e.message + " name: " + name
      else:
        message = e.message
      _six.raise_from(_core._status_to_exception(e.code, message), None)
  # Add nodes to the TensorFlow graph.
  _, _, _op = _op_def_lib._apply_op_helper(
        "TileGrad", input=input, multiples=multiples, name=name)
  _result = _op.outputs[:]
  _inputs_flat = _op.inputs
  _attrs = ("T", _op.get_attr("T"))
  _execute.record_gradient(
      "TileGrad", _inputs_flat, _attrs, _result, name)
  _result, = _result
  return _result


def tile_grad_eager_fallback(input, multiples, name=None, ctx=None):
  r"""This is the slowpath function for Eager mode.
  This is for function tile_grad
  """
  # NOTE(review): machine-generated eager fallback; unlike Tile, the
  # `multiples` argument here is hard-converted to an int32 tensor.
  _ctx = ctx if ctx else _context.context()
  _attr_T, (input,) = _execute.args_to_matching_eager([input], _ctx)
  multiples = _ops.convert_to_tensor(multiples, _dtypes.int32)
  _inputs_flat = [input, multiples]
  _attrs = ("T", _attr_T)
  _result = _execute.execute(b"TileGrad", 1, inputs=_inputs_flat,
                             attrs=_attrs, ctx=_ctx, name=name)
  _execute.record_gradient(
      "TileGrad", _inputs_flat, _attrs, _result, name)
  _result, = _result
  return _result


def transpose(x, perm, name=None):
  r"""Shuffle dimensions of x according to a permutation.

  The output `y` has the same rank as `x`. The shapes of `x` and `y` satisfy:
    `y.shape[i] == x.shape[perm[i]] for i in [0, 1, ..., rank(x) - 1]`

  Args:
    x: A `Tensor`.
    perm: A `Tensor`. Must be one of the following types: `int32`, `int64`.
    name: A name for the operation (optional).

  Returns:
    A `Tensor`. Has the same type as `x`.
  """
  # Fast path: when executing eagerly, call straight into the C API.
  _ctx = _context._context
  if _ctx is not None and _ctx._eager_context.is_eager:
    try:
      _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
        _ctx._context_handle, _ctx._eager_context.device_name, "Transpose",
        name, _ctx._post_execution_callbacks, x, perm)
      return _result
    except _core._FallbackException:
      try:
        return transpose_eager_fallback(
            x, perm, name=name, ctx=_ctx)
      except _core._SymbolicException:
        pass  # Add nodes to the TensorFlow graph.
    except _core._NotOkStatusException as e:
      # Convert the C++ status into a Python exception, tagging the
      # message with the op name for easier debugging.
      if name is not None:
        message = e.message + " name: " + name
      else:
        message = e.message
      _six.raise_from(_core._status_to_exception(e.code, message), None)
  # Add nodes to the TensorFlow graph.
  _, _, _op = _op_def_lib._apply_op_helper(
        "Transpose", x=x, perm=perm, name=name)
  _result = _op.outputs[:]
  _inputs_flat = _op.inputs
  _attrs = ("T", _op.get_attr("T"), "Tperm", _op.get_attr("Tperm"))
  _execute.record_gradient(
      "Transpose", _inputs_flat, _attrs, _result, name)
  _result, = _result
  return _result


def transpose_eager_fallback(x, perm, name=None, ctx=None):
  r"""This is the slowpath function for Eager mode.
  This is for function transpose
  """
  # NOTE(review): machine-generated eager fallback -- resolves the dtype
  # attrs in Python and executes the "Transpose" kernel directly.
  # `perm` defaults to int32 when its dtype cannot be inferred.
  _ctx = ctx if ctx else _context.context()
  _attr_T, (x,) = _execute.args_to_matching_eager([x], _ctx)
  _attr_Tperm, (perm,) = _execute.args_to_matching_eager([perm], _ctx, _dtypes.int32)
  _inputs_flat = [x, perm]
  _attrs = ("T", _attr_T, "Tperm", _attr_Tperm)
  _result = _execute.execute(b"Transpose", 1, inputs=_inputs_flat,
                             attrs=_attrs, ctx=_ctx, name=name)
  _execute.record_gradient(
      "Transpose", _inputs_flat, _attrs, _result, name)
  _result, = _result
  return _result


# Output container for the multi-output "Unique" op: (y, idx).
_unique_outputs = ["y", "idx"]
_UniqueOutput = _collections.namedtuple(
    "Unique", _unique_outputs)


def unique(x, out_idx=_dtypes.int32, name=None):
  r"""Finds unique elements in a 1-D tensor.

  This operation returns a tensor `y` containing all of the unique elements of `x`
  sorted in the same order that they occur in `x`.
This operation also returns a tensor `idx` the same size as `x` that contains the index of each value of `x` in the unique output `y`. In other words: `y[idx[i]] = x[i] for i in [0, 1,...,rank(x) - 1]` For example: ``` # tensor 'x' is [1, 1, 2, 4, 4, 4, 7, 8, 8] y, idx = unique(x) y ==> [1, 2, 4, 7, 8] idx ==> [0, 0, 1, 2, 2, 2, 3, 4, 4] ``` Args: x: A `Tensor`. 1-D. out_idx: An optional `tf.DType` from: `tf.int32, tf.int64`. Defaults to `tf.int32`. name: A name for the operation (optional). Returns: A tuple of `Tensor` objects (y, idx). y: A `Tensor`. Has the same type as `x`. idx: A `Tensor` of type `out_idx`. """ _ctx = _context._context if _ctx is not None and _ctx._eager_context.is_eager: try: _result = _pywrap_tensorflow.TFE_Py_FastPathExecute( _ctx._context_handle, _ctx._eager_context.device_name, "Unique", name, _ctx._post_execution_callbacks, x, "out_idx", out_idx) _result = _UniqueOutput._make(_result) return _result except _core._FallbackException: try: return unique_eager_fallback( x, out_idx=out_idx, name=name, ctx=_ctx) except _core._SymbolicException: pass # Add nodes to the TensorFlow graph. except _core._NotOkStatusException as e: if name is not None: message = e.message + " name: " + name else: message = e.message _six.raise_from(_core._status_to_exception(e.code, message), None) # Add nodes to the TensorFlow graph. if out_idx is None: out_idx = _dtypes.int32 out_idx = _execute.make_type(out_idx, "out_idx") _, _, _op = _op_def_lib._apply_op_helper( "Unique", x=x, out_idx=out_idx, name=name) _result = _op.outputs[:] _inputs_flat = _op.inputs _attrs = ("T", _op.get_attr("T"), "out_idx", _op.get_attr("out_idx")) _execute.record_gradient( "Unique", _inputs_flat, _attrs, _result, name) _result = _UniqueOutput._make(_result) return _result def unique_eager_fallback(x, out_idx=_dtypes.int32, name=None, ctx=None): r"""This is the slowpath function for Eager mode. 
This is for function unique """ _ctx = ctx if ctx else _context.context() if out_idx is None: out_idx = _dtypes.int32 out_idx = _execute.make_type(out_idx, "out_idx") _attr_T, (x,) = _execute.args_to_matching_eager([x], _ctx) _inputs_flat = [x] _attrs = ("T", _attr_T, "out_idx", out_idx) _result = _execute.execute(b"Unique", 2, inputs=_inputs_flat, attrs=_attrs, ctx=_ctx, name=name) _execute.record_gradient( "Unique", _inputs_flat, _attrs, _result, name) _result = _UniqueOutput._make(_result) return _result _unique_v2_outputs = ["y", "idx"] _UniqueV2Output = _collections.namedtuple( "UniqueV2", _unique_v2_outputs) def unique_v2(x, axis, out_idx=_dtypes.int32, name=None): r"""Finds unique elements along an axis of a tensor. This operation either returns a tensor `y` containing unique elements along the `axis` of a tensor. The returned unique elements is sorted in the same order as they occur along `axis` in `x`. This operation also returns a tensor `idx` that is the same size as the number of the elements in `x` along the `axis` dimension. It contains the index in the unique output `y`. In other words, for an `1-D` tensor `x` with `axis = None: `y[idx[i]] = x[i] for i in [0, 1,...,rank(x) - 1]` For example: ``` # tensor 'x' is [1, 1, 2, 4, 4, 4, 7, 8, 8] y, idx = unique(x) y ==> [1, 2, 4, 7, 8] idx ==> [0, 0, 1, 2, 2, 2, 3, 4, 4] ``` For an `2-D` tensor `x` with `axis = 0`: ``` # tensor 'x' is [[1, 0, 0], # [1, 0, 0], # [2, 0, 0]] y, idx = unique(x, axis=0) y ==> [[1, 0, 0], [2, 0, 0]] idx ==> [0, 0, 1] ``` For an `2-D` tensor `x` with `axis = 1`: ``` # tensor 'x' is [[1, 0, 0], # [1, 0, 0], # [2, 0, 0]] y, idx = unique(x, axis=1) y ==> [[1, 0], [1, 0], [2, 0]] idx ==> [0, 1, 1] ``` Args: x: A `Tensor`. A `Tensor`. axis: A `Tensor`. Must be one of the following types: `int32`, `int64`. A `Tensor` of type `int32` (default: None). The axis of the Tensor to find the unique elements. out_idx: An optional `tf.DType` from: `tf.int32, tf.int64`. Defaults to `tf.int32`. 
name: A name for the operation (optional). Returns: A tuple of `Tensor` objects (y, idx). y: A `Tensor`. Has the same type as `x`. idx: A `Tensor` of type `out_idx`. """ _ctx = _context._context if _ctx is not None and _ctx._eager_context.is_eager: try: _result = _pywrap_tensorflow.TFE_Py_FastPathExecute( _ctx._context_handle, _ctx._eager_context.device_name, "UniqueV2", name, _ctx._post_execution_callbacks, x, axis, "out_idx", out_idx) _result = _UniqueV2Output._make(_result) return _result except _core._FallbackException: try: return unique_v2_eager_fallback( x, axis, out_idx=out_idx, name=name, ctx=_ctx) except _core._SymbolicException: pass # Add nodes to the TensorFlow graph. except _core._NotOkStatusException as e: if name is not None: message = e.message + " name: " + name else: message = e.message _six.raise_from(_core._status_to_exception(e.code, message), None) # Add nodes to the TensorFlow graph. if out_idx is None: out_idx = _dtypes.int32 out_idx = _execute.make_type(out_idx, "out_idx") _, _, _op = _op_def_lib._apply_op_helper( "UniqueV2", x=x, axis=axis, out_idx=out_idx, name=name) _result = _op.outputs[:] _inputs_flat = _op.inputs _attrs = ("T", _op.get_attr("T"), "Taxis", _op.get_attr("Taxis"), "out_idx", _op.get_attr("out_idx")) _execute.record_gradient( "UniqueV2", _inputs_flat, _attrs, _result, name) _result = _UniqueV2Output._make(_result) return _result def unique_v2_eager_fallback(x, axis, out_idx=_dtypes.int32, name=None, ctx=None): r"""This is the slowpath function for Eager mode. 
This is for function unique_v2 """ _ctx = ctx if ctx else _context.context() if out_idx is None: out_idx = _dtypes.int32 out_idx = _execute.make_type(out_idx, "out_idx") _attr_T, (x,) = _execute.args_to_matching_eager([x], _ctx) _attr_Taxis, (axis,) = _execute.args_to_matching_eager([axis], _ctx, _dtypes.int64) _inputs_flat = [x, axis] _attrs = ("T", _attr_T, "Taxis", _attr_Taxis, "out_idx", out_idx) _result = _execute.execute(b"UniqueV2", 2, inputs=_inputs_flat, attrs=_attrs, ctx=_ctx, name=name) _execute.record_gradient( "UniqueV2", _inputs_flat, _attrs, _result, name) _result = _UniqueV2Output._make(_result) return _result _unique_with_counts_outputs = ["y", "idx", "count"] _UniqueWithCountsOutput = _collections.namedtuple( "UniqueWithCounts", _unique_with_counts_outputs) def unique_with_counts(x, out_idx=_dtypes.int32, name=None): r"""Finds unique elements in a 1-D tensor. This operation returns a tensor `y` containing all of the unique elements of `x` sorted in the same order that they occur in `x`. This operation also returns a tensor `idx` the same size as `x` that contains the index of each value of `x` in the unique output `y`. Finally, it returns a third tensor `count` that contains the count of each element of `y` in `x`. In other words: `y[idx[i]] = x[i] for i in [0, 1,...,rank(x) - 1]` For example: ``` # tensor 'x' is [1, 1, 2, 4, 4, 4, 7, 8, 8] y, idx, count = unique_with_counts(x) y ==> [1, 2, 4, 7, 8] idx ==> [0, 0, 1, 2, 2, 2, 3, 4, 4] count ==> [2, 1, 3, 1, 2] ``` Args: x: A `Tensor`. 1-D. out_idx: An optional `tf.DType` from: `tf.int32, tf.int64`. Defaults to `tf.int32`. name: A name for the operation (optional). Returns: A tuple of `Tensor` objects (y, idx, count). y: A `Tensor`. Has the same type as `x`. idx: A `Tensor` of type `out_idx`. count: A `Tensor` of type `out_idx`. 
""" _ctx = _context._context if _ctx is not None and _ctx._eager_context.is_eager: try: _result = _pywrap_tensorflow.TFE_Py_FastPathExecute( _ctx._context_handle, _ctx._eager_context.device_name, "UniqueWithCounts", name, _ctx._post_execution_callbacks, x, "out_idx", out_idx) _result = _UniqueWithCountsOutput._make(_result) return _result except _core._FallbackException: try: return unique_with_counts_eager_fallback( x, out_idx=out_idx, name=name, ctx=_ctx) except _core._SymbolicException: pass # Add nodes to the TensorFlow graph. except _core._NotOkStatusException as e: if name is not None: message = e.message + " name: " + name else: message = e.message _six.raise_from(_core._status_to_exception(e.code, message), None) # Add nodes to the TensorFlow graph. if out_idx is None: out_idx = _dtypes.int32 out_idx = _execute.make_type(out_idx, "out_idx") _, _, _op = _op_def_lib._apply_op_helper( "UniqueWithCounts", x=x, out_idx=out_idx, name=name) _result = _op.outputs[:] _inputs_flat = _op.inputs _attrs = ("T", _op.get_attr("T"), "out_idx", _op.get_attr("out_idx")) _execute.record_gradient( "UniqueWithCounts", _inputs_flat, _attrs, _result, name) _result = _UniqueWithCountsOutput._make(_result) return _result def unique_with_counts_eager_fallback(x, out_idx=_dtypes.int32, name=None, ctx=None): r"""This is the slowpath function for Eager mode. 
This is for function unique_with_counts """ _ctx = ctx if ctx else _context.context() if out_idx is None: out_idx = _dtypes.int32 out_idx = _execute.make_type(out_idx, "out_idx") _attr_T, (x,) = _execute.args_to_matching_eager([x], _ctx) _inputs_flat = [x] _attrs = ("T", _attr_T, "out_idx", out_idx) _result = _execute.execute(b"UniqueWithCounts", 3, inputs=_inputs_flat, attrs=_attrs, ctx=_ctx, name=name) _execute.record_gradient( "UniqueWithCounts", _inputs_flat, _attrs, _result, name) _result = _UniqueWithCountsOutput._make(_result) return _result _unique_with_counts_v2_outputs = ["y", "idx", "count"] _UniqueWithCountsV2Output = _collections.namedtuple( "UniqueWithCountsV2", _unique_with_counts_v2_outputs) def unique_with_counts_v2(x, axis, out_idx=_dtypes.int32, name=None): r"""Finds unique elements along an axis of a tensor. This operation either returns a tensor `y` containing unique elements along the `axis` of a tensor. The returned unique elements is sorted in the same order as they occur along `axis` in `x`. This operation also returns a tensor `idx` and a tensor `count` that are the same size as the number of the elements in `x` along the `axis` dimension. The `idx` contains the index in the unique output `y` and the `count` contains the count in the unique output `y`. 
In other words, for an `1-D` tensor `x` with `axis = None: `y[idx[i]] = x[i] for i in [0, 1,...,rank(x) - 1]` For example: ``` # tensor 'x' is [1, 1, 2, 4, 4, 4, 7, 8, 8] y, idx, count = unique_with_counts(x) y ==> [1, 2, 4, 7, 8] idx ==> [0, 0, 1, 2, 2, 2, 3, 4, 4] count ==> [2, 1, 3, 1, 2] ``` For an `2-D` tensor `x` with `axis = 0`: ``` # tensor 'x' is [[1, 0, 0], # [1, 0, 0], # [2, 0, 0]] y, idx, count = unique_with_counts(x, axis=0) y ==> [[1, 0, 0], [2, 0, 0]] idx ==> [0, 0, 1] count ==> [2, 1] ``` For an `2-D` tensor `x` with `axis = 1`: ``` # tensor 'x' is [[1, 0, 0], # [1, 0, 0], # [2, 0, 0]] y, idx, count = unique_with_counts(x, axis=1) y ==> [[1, 0], [1, 0], [2, 0]] idx ==> [0, 1, 1] count ==> [1, 2] ``` Args: x: A `Tensor`. A `Tensor`. axis: A `Tensor`. Must be one of the following types: `int32`, `int64`. A `Tensor` of type `int32` (default: None). The axis of the Tensor to find the unique elements. out_idx: An optional `tf.DType` from: `tf.int32, tf.int64`. Defaults to `tf.int32`. name: A name for the operation (optional). Returns: A tuple of `Tensor` objects (y, idx, count). y: A `Tensor`. Has the same type as `x`. idx: A `Tensor` of type `out_idx`. count: A `Tensor` of type `out_idx`. """ _ctx = _context._context if _ctx is not None and _ctx._eager_context.is_eager: try: _result = _pywrap_tensorflow.TFE_Py_FastPathExecute( _ctx._context_handle, _ctx._eager_context.device_name, "UniqueWithCountsV2", name, _ctx._post_execution_callbacks, x, axis, "out_idx", out_idx) _result = _UniqueWithCountsV2Output._make(_result) return _result except _core._FallbackException: try: return unique_with_counts_v2_eager_fallback( x, axis, out_idx=out_idx, name=name, ctx=_ctx) except _core._SymbolicException: pass # Add nodes to the TensorFlow graph. 
except _core._NotOkStatusException as e: if name is not None: message = e.message + " name: " + name else: message = e.message _six.raise_from(_core._status_to_exception(e.code, message), None) # Add nodes to the TensorFlow graph. if out_idx is None: out_idx = _dtypes.int32 out_idx = _execute.make_type(out_idx, "out_idx") _, _, _op = _op_def_lib._apply_op_helper( "UniqueWithCountsV2", x=x, axis=axis, out_idx=out_idx, name=name) _result = _op.outputs[:] _inputs_flat = _op.inputs _attrs = ("T", _op.get_attr("T"), "Taxis", _op.get_attr("Taxis"), "out_idx", _op.get_attr("out_idx")) _execute.record_gradient( "UniqueWithCountsV2", _inputs_flat, _attrs, _result, name) _result = _UniqueWithCountsV2Output._make(_result) return _result def unique_with_counts_v2_eager_fallback(x, axis, out_idx=_dtypes.int32, name=None, ctx=None): r"""This is the slowpath function for Eager mode. This is for function unique_with_counts_v2 """ _ctx = ctx if ctx else _context.context() if out_idx is None: out_idx = _dtypes.int32 out_idx = _execute.make_type(out_idx, "out_idx") _attr_T, (x,) = _execute.args_to_matching_eager([x], _ctx) _attr_Taxis, (axis,) = _execute.args_to_matching_eager([axis], _ctx, _dtypes.int64) _inputs_flat = [x, axis] _attrs = ("T", _attr_T, "Taxis", _attr_Taxis, "out_idx", out_idx) _result = _execute.execute(b"UniqueWithCountsV2", 3, inputs=_inputs_flat, attrs=_attrs, ctx=_ctx, name=name) _execute.record_gradient( "UniqueWithCountsV2", _inputs_flat, _attrs, _result, name) _result = _UniqueWithCountsV2Output._make(_result) return _result def unpack(value, num, axis=0, name=None): r"""Unpacks a given dimension of a rank-`R` tensor into `num` rank-`(R-1)` tensors. Unpacks `num` tensors from `value` by chipping it along the `axis` dimension. For example, given a tensor of shape `(A, B, C, D)`; If `axis == 0` then the i'th tensor in `output` is the slice `value[i, :, :, :]` and each tensor in `output` will have shape `(B, C, D)`. 
(Note that the dimension unpacked along is gone, unlike `split`). If `axis == 1` then the i'th tensor in `output` is the slice `value[:, i, :, :]` and each tensor in `output` will have shape `(A, C, D)`. Etc. This is the opposite of `pack`. Args: value: A `Tensor`. 1-D or higher, with `axis` dimension size equal to `num`. num: An `int` that is `>= 0`. axis: An optional `int`. Defaults to `0`. Dimension along which to unpack. Negative values wrap around, so the valid range is `[-R, R)`. name: A name for the operation (optional). Returns: A list of `num` `Tensor` objects with the same type as `value`. """ _ctx = _context._context if _ctx is not None and _ctx._eager_context.is_eager: try: _result = _pywrap_tensorflow.TFE_Py_FastPathExecute( _ctx._context_handle, _ctx._eager_context.device_name, "Unpack", name, _ctx._post_execution_callbacks, value, "num", num, "axis", axis) return _result except _core._FallbackException: try: return unpack_eager_fallback( value, num=num, axis=axis, name=name, ctx=_ctx) except _core._SymbolicException: pass # Add nodes to the TensorFlow graph. except _core._NotOkStatusException as e: if name is not None: message = e.message + " name: " + name else: message = e.message _six.raise_from(_core._status_to_exception(e.code, message), None) # Add nodes to the TensorFlow graph. num = _execute.make_int(num, "num") if axis is None: axis = 0 axis = _execute.make_int(axis, "axis") _, _, _op = _op_def_lib._apply_op_helper( "Unpack", value=value, num=num, axis=axis, name=name) _result = _op.outputs[:] _inputs_flat = _op.inputs _attrs = ("num", _op.get_attr("num"), "T", _op.get_attr("T"), "axis", _op.get_attr("axis")) _execute.record_gradient( "Unpack", _inputs_flat, _attrs, _result, name) return _result def unpack_eager_fallback(value, num, axis=0, name=None, ctx=None): r"""This is the slowpath function for Eager mode. 
This is for function unpack """ _ctx = ctx if ctx else _context.context() num = _execute.make_int(num, "num") if axis is None: axis = 0 axis = _execute.make_int(axis, "axis") _attr_T, (value,) = _execute.args_to_matching_eager([value], _ctx) _inputs_flat = [value] _attrs = ("num", num, "T", _attr_T, "axis", axis) _result = _execute.execute(b"Unpack", num, inputs=_inputs_flat, attrs=_attrs, ctx=_ctx, name=name) _execute.record_gradient( "Unpack", _inputs_flat, _attrs, _result, name) return _result @_dispatch.add_dispatch_list @tf_export('unravel_index') def unravel_index(indices, dims, name=None): r"""Converts a flat index or array of flat indices into a tuple of coordinate arrays. @compatibility(numpy) Equivalent to np.unravel_index @end_compatibility Args: indices: A `Tensor`. Must be one of the following types: `int32`, `int64`. An 0-D or 1-D `int` Tensor whose elements are indices into the flattened version of an array of dimensions dims. dims: A `Tensor`. Must have the same type as `indices`. An 1-D `int` Tensor. The shape of the array to use for unraveling indices. name: A name for the operation (optional). Returns: A `Tensor`. Has the same type as `indices`. """ _ctx = _context._context if _ctx is not None and _ctx._eager_context.is_eager: try: _result = _pywrap_tensorflow.TFE_Py_FastPathExecute( _ctx._context_handle, _ctx._eager_context.device_name, "UnravelIndex", name, _ctx._post_execution_callbacks, indices, dims) return _result except _core._FallbackException: try: return unravel_index_eager_fallback( indices, dims, name=name, ctx=_ctx) except _core._SymbolicException: pass # Add nodes to the TensorFlow graph. 
except (TypeError, ValueError): result = _dispatch.dispatch( unravel_index, indices=indices, dims=dims, name=name) if result is not _dispatch.OpDispatcher.NOT_SUPPORTED: return result raise except _core._NotOkStatusException as e: if name is not None: message = e.message + " name: " + name else: message = e.message _six.raise_from(_core._status_to_exception(e.code, message), None) # Add nodes to the TensorFlow graph. try: _, _, _op = _op_def_lib._apply_op_helper( "UnravelIndex", indices=indices, dims=dims, name=name) except (TypeError, ValueError): result = _dispatch.dispatch( unravel_index, indices=indices, dims=dims, name=name) if result is not _dispatch.OpDispatcher.NOT_SUPPORTED: return result raise _result = _op.outputs[:] _inputs_flat = _op.inputs _attrs = ("Tidx", _op.get_attr("Tidx")) _execute.record_gradient( "UnravelIndex", _inputs_flat, _attrs, _result, name) _result, = _result return _result def unravel_index_eager_fallback(indices, dims, name=None, ctx=None): r"""This is the slowpath function for Eager mode. This is for function unravel_index """ _ctx = ctx if ctx else _context.context() _attr_Tidx, _inputs_Tidx = _execute.args_to_matching_eager([indices, dims], _ctx, _dtypes.int32) (indices, dims) = _inputs_Tidx _inputs_flat = [indices, dims] _attrs = ("Tidx", _attr_Tidx) _result = _execute.execute(b"UnravelIndex", 1, inputs=_inputs_flat, attrs=_attrs, ctx=_ctx, name=name) _execute.record_gradient( "UnravelIndex", _inputs_flat, _attrs, _result, name) _result, = _result return _result def upper_bound(sorted_inputs, values, out_type=_dtypes.int32, name=None): r"""Applies upper_bound(sorted_search_values, values) along each row. Each set of rows with the same index in (sorted_inputs, values) is treated independently. The resulting row is the equivalent of calling `np.searchsorted(sorted_inputs, values, side='right')`. The result is not a global index to the entire `Tensor`, but rather just the index in the last dimension. 
A 2-D example: sorted_sequence = [[0, 3, 9, 9, 10], [1, 2, 3, 4, 5]] values = [[2, 4, 9], [0, 2, 6]] result = UpperBound(sorted_sequence, values) result == [[1, 2, 4], [0, 2, 5]] Args: sorted_inputs: A `Tensor`. 2-D Tensor where each row is ordered. values: A `Tensor`. Must have the same type as `sorted_inputs`. 2-D Tensor with the same numbers of rows as `sorted_search_values`. Contains the values that will be searched for in `sorted_search_values`. out_type: An optional `tf.DType` from: `tf.int32, tf.int64`. Defaults to `tf.int32`. name: A name for the operation (optional). Returns: A `Tensor` of type `out_type`. """ _ctx = _context._context if _ctx is not None and _ctx._eager_context.is_eager: try: _result = _pywrap_tensorflow.TFE_Py_FastPathExecute( _ctx._context_handle, _ctx._eager_context.device_name, "UpperBound", name, _ctx._post_execution_callbacks, sorted_inputs, values, "out_type", out_type) return _result except _core._FallbackException: try: return upper_bound_eager_fallback( sorted_inputs, values, out_type=out_type, name=name, ctx=_ctx) except _core._SymbolicException: pass # Add nodes to the TensorFlow graph. except _core._NotOkStatusException as e: if name is not None: message = e.message + " name: " + name else: message = e.message _six.raise_from(_core._status_to_exception(e.code, message), None) # Add nodes to the TensorFlow graph. if out_type is None: out_type = _dtypes.int32 out_type = _execute.make_type(out_type, "out_type") _, _, _op = _op_def_lib._apply_op_helper( "UpperBound", sorted_inputs=sorted_inputs, values=values, out_type=out_type, name=name) _result = _op.outputs[:] _inputs_flat = _op.inputs _attrs = ("T", _op.get_attr("T"), "out_type", _op.get_attr("out_type")) _execute.record_gradient( "UpperBound", _inputs_flat, _attrs, _result, name) _result, = _result return _result def upper_bound_eager_fallback(sorted_inputs, values, out_type=_dtypes.int32, name=None, ctx=None): r"""This is the slowpath function for Eager mode. 
This is for function upper_bound """ _ctx = ctx if ctx else _context.context() if out_type is None: out_type = _dtypes.int32 out_type = _execute.make_type(out_type, "out_type") _attr_T, _inputs_T = _execute.args_to_matching_eager([sorted_inputs, values], _ctx) (sorted_inputs, values) = _inputs_T _inputs_flat = [sorted_inputs, values] _attrs = ("T", _attr_T, "out_type", out_type) _result = _execute.execute(b"UpperBound", 1, inputs=_inputs_flat, attrs=_attrs, ctx=_ctx, name=name) _execute.record_gradient( "UpperBound", _inputs_flat, _attrs, _result, name) _result, = _result return _result def where(condition, name=None): r"""Returns locations of nonzero / true values in a tensor. This operation returns the coordinates of true elements in `condition`. The coordinates are returned in a 2-D tensor where the first dimension (rows) represents the number of true elements, and the second dimension (columns) represents the coordinates of the true elements. Keep in mind, the shape of the output tensor can vary depending on how many true values there are in `condition`. Indices are output in row-major order. For example: ``` # 'input' tensor is [[True, False] # [True, False]] # 'input' has two true values, so output has two coordinates. # 'input' has rank of 2, so coordinates have two indices. where(input) ==> [[0, 0], [1, 0]] # `condition` tensor is [[[True, False] # [True, False]] # [[False, True] # [False, True]] # [[False, False] # [False, True]]] # 'input' has 5 true values, so output has 5 coordinates. # 'input' has rank of 3, so coordinates have three indices. where(input) ==> [[0, 0, 0], [0, 1, 0], [1, 0, 1], [1, 1, 1], [2, 1, 1]] # `condition` tensor is [[[1.5, 0.0] # [-0.5, 0.0]] # [[0.0, 0.25] # [0.0, 0.75]] # [[0.0, 0.0] # [0.0, 0.01]]] # 'input' has 5 nonzero values, so output has 5 coordinates. # 'input' has rank of 3, so coordinates have three indices. 
where(input) ==> [[0, 0, 0], [0, 1, 0], [1, 0, 1], [1, 1, 1], [2, 1, 1]] # `condition` tensor is [[[1.5 + 0.0j, 0.0 + 0.0j] # [0.0 + 0.5j, 0.0 + 0.0j]] # [[0.0 + 0.0j, 0.25 + 1.5j] # [0.0 + 0.0j, 0.75 + 0.0j]] # [[0.0 + 0.0j, 0.0 + 0.0j] # [0.0 + 0.0j, 0.01 + 0.0j]]] # 'input' has 5 nonzero magnitude values, so output has 5 coordinates. # 'input' has rank of 3, so coordinates have three indices. where(input) ==> [[0, 0, 0], [0, 1, 0], [1, 0, 1], [1, 1, 1], [2, 1, 1]] ``` Args: condition: A `Tensor`. Must be one of the following types: `float32`, `float64`, `int32`, `uint8`, `int16`, `int8`, `complex64`, `int64`, `qint8`, `quint8`, `qint32`, `bfloat16`, `uint16`, `complex128`, `half`, `uint32`, `uint64`, `bool`. name: A name for the operation (optional). Returns: A `Tensor` of type `int64`. """ _ctx = _context._context if _ctx is not None and _ctx._eager_context.is_eager: try: _result = _pywrap_tensorflow.TFE_Py_FastPathExecute( _ctx._context_handle, _ctx._eager_context.device_name, "Where", name, _ctx._post_execution_callbacks, condition) return _result except _core._FallbackException: try: return where_eager_fallback( condition, name=name, ctx=_ctx) except _core._SymbolicException: pass # Add nodes to the TensorFlow graph. except _core._NotOkStatusException as e: if name is not None: message = e.message + " name: " + name else: message = e.message _six.raise_from(_core._status_to_exception(e.code, message), None) # Add nodes to the TensorFlow graph. _, _, _op = _op_def_lib._apply_op_helper( "Where", input=condition, name=name) _result = _op.outputs[:] _inputs_flat = _op.inputs _attrs = ("T", _op.get_attr("T")) _execute.record_gradient( "Where", _inputs_flat, _attrs, _result, name) _result, = _result return _result def where_eager_fallback(condition, name=None, ctx=None): r"""This is the slowpath function for Eager mode. 
  This is for function where
  """
  # NOTE(review): machine-generated eager fallback; `condition` defaults
  # to bool when its dtype cannot be inferred.
  _ctx = ctx if ctx else _context.context()
  _attr_T, (condition,) = _execute.args_to_matching_eager([condition], _ctx, _dtypes.bool)
  _inputs_flat = [condition]
  _attrs = ("T", _attr_T)
  _result = _execute.execute(b"Where", 1, inputs=_inputs_flat, attrs=_attrs,
                             ctx=_ctx, name=name)
  _execute.record_gradient(
      "Where", _inputs_flat, _attrs, _result, name)
  _result, = _result
  return _result


def zeros_like(x, name=None):
  r"""Returns a tensor of zeros with the same shape and type as x.

  Args:
    x: A `Tensor`. a tensor of type T.
    name: A name for the operation (optional).

  Returns:
    A `Tensor`. Has the same type as `x`.
  """
  # Fast path: when executing eagerly, call straight into the C API.
  _ctx = _context._context
  if _ctx is not None and _ctx._eager_context.is_eager:
    try:
      _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
        _ctx._context_handle, _ctx._eager_context.device_name, "ZerosLike",
        name, _ctx._post_execution_callbacks, x)
      return _result
    except _core._FallbackException:
      try:
        return zeros_like_eager_fallback(
            x, name=name, ctx=_ctx)
      except _core._SymbolicException:
        pass  # Add nodes to the TensorFlow graph.
    except _core._NotOkStatusException as e:
      # Convert the C++ status into a Python exception, tagging the
      # message with the op name for easier debugging.
      if name is not None:
        message = e.message + " name: " + name
      else:
        message = e.message
      _six.raise_from(_core._status_to_exception(e.code, message), None)
  # Add nodes to the TensorFlow graph.
  _, _, _op = _op_def_lib._apply_op_helper(
        "ZerosLike", x=x, name=name)
  _result = _op.outputs[:]
  _inputs_flat = _op.inputs
  _attrs = ("T", _op.get_attr("T"))
  _execute.record_gradient(
      "ZerosLike", _inputs_flat, _attrs, _result, name)
  _result, = _result
  return _result


def zeros_like_eager_fallback(x, name=None, ctx=None):
  r"""This is the slowpath function for Eager mode.
This is for function zeros_like """ _ctx = ctx if ctx else _context.context() _attr_T, (x,) = _execute.args_to_matching_eager([x], _ctx) _inputs_flat = [x] _attrs = ("T", _attr_T) _result = _execute.execute(b"ZerosLike", 1, inputs=_inputs_flat, attrs=_attrs, ctx=_ctx, name=name) _execute.record_gradient( "ZerosLike", _inputs_flat, _attrs, _result, name) _result, = _result return _result def _InitOpDefLibrary(op_list_proto_bytes): op_list = _op_def_pb2.OpList() op_list.ParseFromString(op_list_proto_bytes) _op_def_registry.register_op_list(op_list) op_def_lib = _op_def_library.OpDefLibrary() op_def_lib.add_op_list(op_list) return op_def_lib # op { # name: "BatchMatrixBandPart" # input_arg { # name: "input" # type_attr: "T" # } # input_arg { # name: "num_lower" # type: DT_INT64 # } # input_arg { # name: "num_upper" # type: DT_INT64 # } # output_arg { # name: "band" # type_attr: "T" # } # attr { # name: "T" # type: "type" # } # deprecation { # version: 14 # explanation: "Use MatrixBandPart" # } # } # op { # name: "BatchMatrixDiag" # input_arg { # name: "diagonal" # type_attr: "T" # } # output_arg { # name: "output" # type_attr: "T" # } # attr { # name: "T" # type: "type" # } # deprecation { # version: 14 # explanation: "Use MatrixDiag" # } # } # op { # name: "BatchMatrixDiagPart" # input_arg { # name: "input" # type_attr: "T" # } # output_arg { # name: "diagonal" # type_attr: "T" # } # attr { # name: "T" # type: "type" # } # deprecation { # version: 14 # explanation: "Use MatrixDiagPart" # } # } # op { # name: "BatchMatrixSetDiag" # input_arg { # name: "input" # type_attr: "T" # } # input_arg { # name: "diagonal" # type_attr: "T" # } # output_arg { # name: "output" # type_attr: "T" # } # attr { # name: "T" # type: "type" # } # deprecation { # version: 14 # explanation: "Use MatrixSetDiag" # } # } # op { # name: "BatchToSpace" # input_arg { # name: "input" # type_attr: "T" # } # input_arg { # name: "crops" # type_attr: "Tidx" # } # output_arg { # name: "output" # 
type_attr: "T" # } # attr { # name: "T" # type: "type" # } # attr { # name: "block_size" # type: "int" # has_minimum: true # minimum: 2 # } # attr { # name: "Tidx" # type: "type" # default_value { # type: DT_INT32 # } # allowed_values { # list { # type: DT_INT32 # type: DT_INT64 # } # } # } # } # op { # name: "BatchToSpaceND" # input_arg { # name: "input" # type_attr: "T" # } # input_arg { # name: "block_shape" # type_attr: "Tblock_shape" # } # input_arg { # name: "crops" # type_attr: "Tcrops" # } # output_arg { # name: "output" # type_attr: "T" # } # attr { # name: "T" # type: "type" # } # attr { # name: "Tblock_shape" # type: "type" # default_value { # type: DT_INT32 # } # allowed_values { # list { # type: DT_INT32 # type: DT_INT64 # } # } # } # attr { # name: "Tcrops" # type: "type" # default_value { # type: DT_INT32 # } # allowed_values { # list { # type: DT_INT32 # type: DT_INT64 # } # } # } # } # op { # name: "Bitcast" # input_arg { # name: "input" # type_attr: "T" # } # output_arg { # name: "output" # type_attr: "type" # } # attr { # name: "T" # type: "type" # allowed_values { # list { # type: DT_BFLOAT16 # type: DT_HALF # type: DT_FLOAT # type: DT_DOUBLE # type: DT_INT64 # type: DT_INT32 # type: DT_UINT8 # type: DT_UINT16 # type: DT_UINT32 # type: DT_UINT64 # type: DT_INT8 # type: DT_INT16 # type: DT_COMPLEX64 # type: DT_COMPLEX128 # type: DT_QINT8 # type: DT_QUINT8 # type: DT_QINT16 # type: DT_QUINT16 # type: DT_QINT32 # } # } # } # attr { # name: "type" # type: "type" # allowed_values { # list { # type: DT_BFLOAT16 # type: DT_HALF # type: DT_FLOAT # type: DT_DOUBLE # type: DT_INT64 # type: DT_INT32 # type: DT_UINT8 # type: DT_UINT16 # type: DT_UINT32 # type: DT_UINT64 # type: DT_INT8 # type: DT_INT16 # type: DT_COMPLEX64 # type: DT_COMPLEX128 # type: DT_QINT8 # type: DT_QUINT8 # type: DT_QINT16 # type: DT_QUINT16 # type: DT_QINT32 # } # } # } # } # op { # name: "BroadcastArgs" # input_arg { # name: "s0" # type_attr: "T" # } # input_arg { # name: "s1" # 
type_attr: "T" # } # output_arg { # name: "r0" # type_attr: "T" # } # attr { # name: "T" # type: "type" # default_value { # type: DT_INT32 # } # allowed_values { # list { # type: DT_INT32 # type: DT_INT64 # } # } # } # } # op { # name: "BroadcastGradientArgs" # input_arg { # name: "s0" # type_attr: "T" # } # input_arg { # name: "s1" # type_attr: "T" # } # output_arg { # name: "r0" # type_attr: "T" # } # output_arg { # name: "r1" # type_attr: "T" # } # attr { # name: "T" # type: "type" # default_value { # type: DT_INT32 # } # allowed_values { # list { # type: DT_INT32 # type: DT_INT64 # } # } # } # } # op { # name: "BroadcastTo" # input_arg { # name: "input" # type_attr: "T" # } # input_arg { # name: "shape" # type_attr: "Tidx" # } # output_arg { # name: "output" # type_attr: "T" # } # attr { # name: "T" # type: "type" # } # attr { # name: "Tidx" # type: "type" # default_value { # type: DT_INT32 # } # allowed_values { # list { # type: DT_INT32 # type: DT_INT64 # } # } # } # } # op { # name: "CheckNumerics" # input_arg { # name: "tensor" # type_attr: "T" # } # output_arg { # name: "output" # type_attr: "T" # } # attr { # name: "T" # type: "type" # allowed_values { # list { # type: DT_BFLOAT16 # type: DT_HALF # type: DT_FLOAT # type: DT_DOUBLE # } # } # } # attr { # name: "message" # type: "string" # } # } # op { # name: "Concat" # input_arg { # name: "concat_dim" # type: DT_INT32 # } # input_arg { # name: "values" # type_attr: "T" # number_attr: "N" # } # output_arg { # name: "output" # type_attr: "T" # } # attr { # name: "N" # type: "int" # has_minimum: true # minimum: 2 # } # attr { # name: "T" # type: "type" # } # } # op { # name: "ConcatOffset" # input_arg { # name: "concat_dim" # type: DT_INT32 # } # input_arg { # name: "shape" # type: DT_INT32 # number_attr: "N" # } # output_arg { # name: "offset" # type: DT_INT32 # number_attr: "N" # } # attr { # name: "N" # type: "int" # has_minimum: true # minimum: 2 # } # } # op { # name: "ConcatV2" # input_arg { # name: 
"values" # type_attr: "T" # number_attr: "N" # } # input_arg { # name: "axis" # type_attr: "Tidx" # } # output_arg { # name: "output" # type_attr: "T" # } # attr { # name: "N" # type: "int" # has_minimum: true # minimum: 2 # } # attr { # name: "T" # type: "type" # } # attr { # name: "Tidx" # type: "type" # default_value { # type: DT_INT32 # } # allowed_values { # list { # type: DT_INT32 # type: DT_INT64 # } # } # } # } # op { # name: "ConjugateTranspose" # input_arg { # name: "x" # type_attr: "T" # } # input_arg { # name: "perm" # type_attr: "Tperm" # } # output_arg { # name: "y" # type_attr: "T" # } # attr { # name: "T" # type: "type" # } # attr { # name: "Tperm" # type: "type" # default_value { # type: DT_INT32 # } # allowed_values { # list { # type: DT_INT32 # type: DT_INT64 # } # } # } # } # op { # name: "Const" # output_arg { # name: "output" # type_attr: "dtype" # } # attr { # name: "value" # type: "tensor" # } # attr { # name: "dtype" # type: "type" # } # } # op { # name: "DebugGradientIdentity" # input_arg { # name: "input" # type_attr: "T" # } # output_arg { # name: "output" # type_attr: "T" # } # attr { # name: "T" # type: "type" # } # allows_uninitialized_input: true # } # op { # name: "DebugGradientRefIdentity" # input_arg { # name: "input" # type_attr: "T" # is_ref: true # } # output_arg { # name: "output" # type_attr: "T" # is_ref: true # } # attr { # name: "T" # type: "type" # } # allows_uninitialized_input: true # } # op { # name: "DeepCopy" # input_arg { # name: "x" # type_attr: "T" # } # output_arg { # name: "y" # type_attr: "T" # } # attr { # name: "T" # type: "type" # } # is_stateful: true # } # op { # name: "DepthToSpace" # input_arg { # name: "input" # type_attr: "T" # } # output_arg { # name: "output" # type_attr: "T" # } # attr { # name: "T" # type: "type" # } # attr { # name: "block_size" # type: "int" # has_minimum: true # minimum: 2 # } # attr { # name: "data_format" # type: "string" # default_value { # s: "NHWC" # } # allowed_values { # 
list { # s: "NHWC" # s: "NCHW" # s: "NCHW_VECT_C" # } # } # } # } # op { # name: "Dequantize" # input_arg { # name: "input" # type_attr: "T" # } # input_arg { # name: "min_range" # type: DT_FLOAT # } # input_arg { # name: "max_range" # type: DT_FLOAT # } # output_arg { # name: "output" # type: DT_FLOAT # } # attr { # name: "T" # type: "type" # allowed_values { # list { # type: DT_QINT8 # type: DT_QUINT8 # type: DT_QINT32 # type: DT_QINT16 # type: DT_QUINT16 # } # } # } # attr { # name: "mode" # type: "string" # default_value { # s: "MIN_COMBINED" # } # allowed_values { # list { # s: "MIN_COMBINED" # s: "MIN_FIRST" # s: "SCALED" # } # } # } # } # op { # name: "Diag" # input_arg { # name: "diagonal" # type_attr: "T" # } # output_arg { # name: "output" # type_attr: "T" # } # attr { # name: "T" # type: "type" # allowed_values { # list { # type: DT_BFLOAT16 # type: DT_HALF # type: DT_FLOAT # type: DT_DOUBLE # type: DT_INT32 # type: DT_INT64 # type: DT_COMPLEX64 # type: DT_COMPLEX128 # } # } # } # } # op { # name: "DiagPart" # input_arg { # name: "input" # type_attr: "T" # } # output_arg { # name: "diagonal" # type_attr: "T" # } # attr { # name: "T" # type: "type" # allowed_values { # list { # type: DT_BFLOAT16 # type: DT_HALF # type: DT_FLOAT # type: DT_DOUBLE # type: DT_INT32 # type: DT_INT64 # type: DT_COMPLEX64 # type: DT_COMPLEX128 # } # } # } # } # op { # name: "EditDistance" # input_arg { # name: "hypothesis_indices" # type: DT_INT64 # } # input_arg { # name: "hypothesis_values" # type_attr: "T" # } # input_arg { # name: "hypothesis_shape" # type: DT_INT64 # } # input_arg { # name: "truth_indices" # type: DT_INT64 # } # input_arg { # name: "truth_values" # type_attr: "T" # } # input_arg { # name: "truth_shape" # type: DT_INT64 # } # output_arg { # name: "output" # type: DT_FLOAT # } # attr { # name: "normalize" # type: "bool" # default_value { # b: true # } # } # attr { # name: "T" # type: "type" # } # } # op { # name: "Empty" # input_arg { # name: "shape" # type: 
DT_INT32 # } # output_arg { # name: "output" # type_attr: "dtype" # } # attr { # name: "dtype" # type: "type" # } # attr { # name: "init" # type: "bool" # default_value { # b: false # } # } # is_stateful: true # } # op { # name: "EnsureShape" # input_arg { # name: "input" # type_attr: "T" # } # output_arg { # name: "output" # type_attr: "T" # } # attr { # name: "shape" # type: "shape" # } # attr { # name: "T" # type: "type" # } # } # op { # name: "ExpandDims" # input_arg { # name: "input" # type_attr: "T" # } # input_arg { # name: "dim" # type_attr: "Tdim" # } # output_arg { # name: "output" # type_attr: "T" # } # attr { # name: "T" # type: "type" # } # attr { # name: "Tdim" # type: "type" # default_value { # type: DT_INT32 # } # allowed_values { # list { # type: DT_INT32 # type: DT_INT64 # } # } # } # } # op { # name: "ExtractImagePatches" # input_arg { # name: "images" # type_attr: "T" # } # output_arg { # name: "patches" # type_attr: "T" # } # attr { # name: "ksizes" # type: "list(int)" # has_minimum: true # minimum: 4 # } # attr { # name: "strides" # type: "list(int)" # has_minimum: true # minimum: 4 # } # attr { # name: "rates" # type: "list(int)" # has_minimum: true # minimum: 4 # } # attr { # name: "T" # type: "type" # allowed_values { # list { # type: DT_FLOAT # type: DT_DOUBLE # type: DT_INT32 # type: DT_UINT8 # type: DT_INT16 # type: DT_INT8 # type: DT_INT64 # type: DT_BFLOAT16 # type: DT_UINT16 # type: DT_HALF # type: DT_UINT32 # type: DT_UINT64 # } # } # } # attr { # name: "padding" # type: "string" # allowed_values { # list { # s: "SAME" # s: "VALID" # } # } # } # } # op { # name: "ExtractVolumePatches" # input_arg { # name: "input" # type_attr: "T" # } # output_arg { # name: "patches" # type_attr: "T" # } # attr { # name: "ksizes" # type: "list(int)" # has_minimum: true # minimum: 5 # } # attr { # name: "strides" # type: "list(int)" # has_minimum: true # minimum: 5 # } # attr { # name: "T" # type: "type" # allowed_values { # list { # type: DT_FLOAT # 
type: DT_DOUBLE # type: DT_INT32 # type: DT_UINT8 # type: DT_INT16 # type: DT_INT8 # type: DT_INT64 # type: DT_BFLOAT16 # type: DT_UINT16 # type: DT_HALF # type: DT_UINT32 # type: DT_UINT64 # } # } # } # attr { # name: "padding" # type: "string" # allowed_values { # list { # s: "SAME" # s: "VALID" # } # } # } # } # op { # name: "FakeQuantWithMinMaxArgs" # input_arg { # name: "inputs" # type: DT_FLOAT # } # output_arg { # name: "outputs" # type: DT_FLOAT # } # attr { # name: "min" # type: "float" # default_value { # f: -6 # } # } # attr { # name: "max" # type: "float" # default_value { # f: 6 # } # } # attr { # name: "num_bits" # type: "int" # default_value { # i: 8 # } # } # attr { # name: "narrow_range" # type: "bool" # default_value { # b: false # } # } # } # op { # name: "FakeQuantWithMinMaxArgsGradient" # input_arg { # name: "gradients" # type: DT_FLOAT # } # input_arg { # name: "inputs" # type: DT_FLOAT # } # output_arg { # name: "backprops" # type: DT_FLOAT # } # attr { # name: "min" # type: "float" # default_value { # f: -6 # } # } # attr { # name: "max" # type: "float" # default_value { # f: 6 # } # } # attr { # name: "num_bits" # type: "int" # default_value { # i: 8 # } # } # attr { # name: "narrow_range" # type: "bool" # default_value { # b: false # } # } # } # op { # name: "FakeQuantWithMinMaxVars" # input_arg { # name: "inputs" # type: DT_FLOAT # } # input_arg { # name: "min" # type: DT_FLOAT # } # input_arg { # name: "max" # type: DT_FLOAT # } # output_arg { # name: "outputs" # type: DT_FLOAT # } # attr { # name: "num_bits" # type: "int" # default_value { # i: 8 # } # } # attr { # name: "narrow_range" # type: "bool" # default_value { # b: false # } # } # } # op { # name: "FakeQuantWithMinMaxVarsGradient" # input_arg { # name: "gradients" # type: DT_FLOAT # } # input_arg { # name: "inputs" # type: DT_FLOAT # } # input_arg { # name: "min" # type: DT_FLOAT # } # input_arg { # name: "max" # type: DT_FLOAT # } # output_arg { # name: "backprops_wrt_input" # 
type: DT_FLOAT # } # output_arg { # name: "backprop_wrt_min" # type: DT_FLOAT # } # output_arg { # name: "backprop_wrt_max" # type: DT_FLOAT # } # attr { # name: "num_bits" # type: "int" # default_value { # i: 8 # } # } # attr { # name: "narrow_range" # type: "bool" # default_value { # b: false # } # } # } # op { # name: "FakeQuantWithMinMaxVarsPerChannel" # input_arg { # name: "inputs" # type: DT_FLOAT # } # input_arg { # name: "min" # type: DT_FLOAT # } # input_arg { # name: "max" # type: DT_FLOAT # } # output_arg { # name: "outputs" # type: DT_FLOAT # } # attr { # name: "num_bits" # type: "int" # default_value { # i: 8 # } # } # attr { # name: "narrow_range" # type: "bool" # default_value { # b: false # } # } # } # op { # name: "FakeQuantWithMinMaxVarsPerChannelGradient" # input_arg { # name: "gradients" # type: DT_FLOAT # } # input_arg { # name: "inputs" # type: DT_FLOAT # } # input_arg { # name: "min" # type: DT_FLOAT # } # input_arg { # name: "max" # type: DT_FLOAT # } # output_arg { # name: "backprops_wrt_input" # type: DT_FLOAT # } # output_arg { # name: "backprop_wrt_min" # type: DT_FLOAT # } # output_arg { # name: "backprop_wrt_max" # type: DT_FLOAT # } # attr { # name: "num_bits" # type: "int" # default_value { # i: 8 # } # } # attr { # name: "narrow_range" # type: "bool" # default_value { # b: false # } # } # } # op { # name: "Fill" # input_arg { # name: "dims" # type_attr: "index_type" # } # input_arg { # name: "value" # type_attr: "T" # } # output_arg { # name: "output" # type_attr: "T" # } # attr { # name: "T" # type: "type" # } # attr { # name: "index_type" # type: "type" # default_value { # type: DT_INT32 # } # allowed_values { # list { # type: DT_INT32 # type: DT_INT64 # } # } # } # } # op { # name: "Gather" # input_arg { # name: "params" # type_attr: "Tparams" # } # input_arg { # name: "indices" # type_attr: "Tindices" # } # output_arg { # name: "output" # type_attr: "Tparams" # } # attr { # name: "validate_indices" # type: "bool" # default_value 
{ # b: true # } # } # attr { # name: "Tparams" # type: "type" # } # attr { # name: "Tindices" # type: "type" # allowed_values { # list { # type: DT_INT32 # type: DT_INT64 # } # } # } # } # op { # name: "GatherNd" # input_arg { # name: "params" # type_attr: "Tparams" # } # input_arg { # name: "indices" # type_attr: "Tindices" # } # output_arg { # name: "output" # type_attr: "Tparams" # } # attr { # name: "Tparams" # type: "type" # } # attr { # name: "Tindices" # type: "type" # allowed_values { # list { # type: DT_INT32 # type: DT_INT64 # } # } # } # } # op { # name: "GatherV2" # input_arg { # name: "params" # type_attr: "Tparams" # } # input_arg { # name: "indices" # type_attr: "Tindices" # } # input_arg { # name: "axis" # type_attr: "Taxis" # } # output_arg { # name: "output" # type_attr: "Tparams" # } # attr { # name: "Tparams" # type: "type" # } # attr { # name: "Tindices" # type: "type" # allowed_values { # list { # type: DT_INT32 # type: DT_INT64 # } # } # } # attr { # name: "Taxis" # type: "type" # allowed_values { # list { # type: DT_INT32 # type: DT_INT64 # } # } # } # } # op { # name: "GuaranteeConst" # input_arg { # name: "input" # type_attr: "T" # } # output_arg { # name: "output" # type_attr: "T" # } # attr { # name: "T" # type: "type" # } # is_stateful: true # } # op { # name: "Identity" # input_arg { # name: "input" # type_attr: "T" # } # output_arg { # name: "output" # type_attr: "T" # } # attr { # name: "T" # type: "type" # } # } # op { # name: "IdentityN" # input_arg { # name: "input" # type_list_attr: "T" # } # output_arg { # name: "output" # type_list_attr: "T" # } # attr { # name: "T" # type: "list(type)" # has_minimum: true # minimum: 1 # } # } # op { # name: "ImmutableConst" # output_arg { # name: "tensor" # type_attr: "dtype" # } # attr { # name: "dtype" # type: "type" # } # attr { # name: "shape" # type: "shape" # } # attr { # name: "memory_region_name" # type: "string" # } # } # op { # name: "InplaceAdd" # input_arg { # name: "x" # 
type_attr: "T" # } # input_arg { # name: "i" # type: DT_INT32 # } # input_arg { # name: "v" # type_attr: "T" # } # output_arg { # name: "y" # type_attr: "T" # } # attr { # name: "T" # type: "type" # } # } # op { # name: "InplaceSub" # input_arg { # name: "x" # type_attr: "T" # } # input_arg { # name: "i" # type: DT_INT32 # } # input_arg { # name: "v" # type_attr: "T" # } # output_arg { # name: "y" # type_attr: "T" # } # attr { # name: "T" # type: "type" # } # } # op { # name: "InplaceUpdate" # input_arg { # name: "x" # type_attr: "T" # } # input_arg { # name: "i" # type: DT_INT32 # } # input_arg { # name: "v" # type_attr: "T" # } # output_arg { # name: "y" # type_attr: "T" # } # attr { # name: "T" # type: "type" # } # } # op { # name: "InvertPermutation" # input_arg { # name: "x" # type_attr: "T" # } # output_arg { # name: "y" # type_attr: "T" # } # attr { # name: "T" # type: "type" # default_value { # type: DT_INT32 # } # allowed_values { # list { # type: DT_INT32 # type: DT_INT64 # } # } # } # } # op { # name: "ListDiff" # input_arg { # name: "x" # type_attr: "T" # } # input_arg { # name: "y" # type_attr: "T" # } # output_arg { # name: "out" # type_attr: "T" # } # output_arg { # name: "idx" # type_attr: "out_idx" # } # attr { # name: "T" # type: "type" # } # attr { # name: "out_idx" # type: "type" # default_value { # type: DT_INT32 # } # allowed_values { # list { # type: DT_INT32 # type: DT_INT64 # } # } # } # } # op { # name: "LowerBound" # input_arg { # name: "sorted_inputs" # type_attr: "T" # } # input_arg { # name: "values" # type_attr: "T" # } # output_arg { # name: "output" # type_attr: "out_type" # } # attr { # name: "T" # type: "type" # } # attr { # name: "out_type" # type: "type" # default_value { # type: DT_INT32 # } # allowed_values { # list { # type: DT_INT32 # type: DT_INT64 # } # } # } # } # op { # name: "MatrixBandPart" # input_arg { # name: "input" # type_attr: "T" # } # input_arg { # name: "num_lower" # type_attr: "Tindex" # } # input_arg { # 
name: "num_upper" # type_attr: "Tindex" # } # output_arg { # name: "band" # type_attr: "T" # } # attr { # name: "T" # type: "type" # } # attr { # name: "Tindex" # type: "type" # default_value { # type: DT_INT64 # } # allowed_values { # list { # type: DT_INT32 # type: DT_INT64 # } # } # } # } # op { # name: "MatrixDiag" # input_arg { # name: "diagonal" # type_attr: "T" # } # output_arg { # name: "output" # type_attr: "T" # } # attr { # name: "T" # type: "type" # } # } # op { # name: "MatrixDiagPart" # input_arg { # name: "input" # type_attr: "T" # } # output_arg { # name: "diagonal" # type_attr: "T" # } # attr { # name: "T" # type: "type" # } # } # op { # name: "MatrixSetDiag" # input_arg { # name: "input" # type_attr: "T" # } # input_arg { # name: "diagonal" # type_attr: "T" # } # output_arg { # name: "output" # type_attr: "T" # } # attr { # name: "T" # type: "type" # } # } # op { # name: "MirrorPad" # input_arg { # name: "input" # type_attr: "T" # } # input_arg { # name: "paddings" # type_attr: "Tpaddings" # } # output_arg { # name: "output" # type_attr: "T" # } # attr { # name: "T" # type: "type" # } # attr { # name: "Tpaddings" # type: "type" # default_value { # type: DT_INT32 # } # allowed_values { # list { # type: DT_INT32 # type: DT_INT64 # } # } # } # attr { # name: "mode" # type: "string" # allowed_values { # list { # s: "REFLECT" # s: "SYMMETRIC" # } # } # } # } # op { # name: "MirrorPadGrad" # input_arg { # name: "input" # type_attr: "T" # } # input_arg { # name: "paddings" # type_attr: "Tpaddings" # } # output_arg { # name: "output" # type_attr: "T" # } # attr { # name: "T" # type: "type" # } # attr { # name: "Tpaddings" # type: "type" # default_value { # type: DT_INT32 # } # allowed_values { # list { # type: DT_INT32 # type: DT_INT64 # } # } # } # attr { # name: "mode" # type: "string" # allowed_values { # list { # s: "REFLECT" # s: "SYMMETRIC" # } # } # } # } # op { # name: "OneHot" # input_arg { # name: "indices" # type_attr: "TI" # } # input_arg { # 
name: "depth" # type: DT_INT32 # } # input_arg { # name: "on_value" # type_attr: "T" # } # input_arg { # name: "off_value" # type_attr: "T" # } # output_arg { # name: "output" # type_attr: "T" # } # attr { # name: "axis" # type: "int" # default_value { # i: -1 # } # } # attr { # name: "T" # type: "type" # } # attr { # name: "TI" # type: "type" # default_value { # type: DT_INT64 # } # allowed_values { # list { # type: DT_UINT8 # type: DT_INT32 # type: DT_INT64 # } # } # } # } # op { # name: "OnesLike" # input_arg { # name: "x" # type_attr: "T" # } # output_arg { # name: "y" # type_attr: "T" # } # attr { # name: "T" # type: "type" # allowed_values { # list { # type: DT_BFLOAT16 # type: DT_HALF # type: DT_FLOAT # type: DT_DOUBLE # type: DT_INT8 # type: DT_UINT8 # type: DT_INT16 # type: DT_UINT16 # type: DT_INT32 # type: DT_INT64 # type: DT_COMPLEX64 # type: DT_COMPLEX128 # type: DT_BOOL # } # } # } # } # op { # name: "Pack" # input_arg { # name: "values" # type_attr: "T" # number_attr: "N" # } # output_arg { # name: "output" # type_attr: "T" # } # attr { # name: "N" # type: "int" # has_minimum: true # minimum: 1 # } # attr { # name: "T" # type: "type" # } # attr { # name: "axis" # type: "int" # default_value { # i: 0 # } # } # } # op { # name: "Pad" # input_arg { # name: "input" # type_attr: "T" # } # input_arg { # name: "paddings" # type_attr: "Tpaddings" # } # output_arg { # name: "output" # type_attr: "T" # } # attr { # name: "T" # type: "type" # } # attr { # name: "Tpaddings" # type: "type" # default_value { # type: DT_INT32 # } # allowed_values { # list { # type: DT_INT32 # type: DT_INT64 # } # } # } # } # op { # name: "PadV2" # input_arg { # name: "input" # type_attr: "T" # } # input_arg { # name: "paddings" # type_attr: "Tpaddings" # } # input_arg { # name: "constant_values" # type_attr: "T" # } # output_arg { # name: "output" # type_attr: "T" # } # attr { # name: "T" # type: "type" # } # attr { # name: "Tpaddings" # type: "type" # default_value { # type: 
DT_INT32 # } # allowed_values { # list { # type: DT_INT32 # type: DT_INT64 # } # } # } # } # op { # name: "ParallelConcat" # input_arg { # name: "values" # type_attr: "T" # number_attr: "N" # } # output_arg { # name: "output" # type_attr: "T" # } # attr { # name: "N" # type: "int" # has_minimum: true # minimum: 1 # } # attr { # name: "T" # type: "type" # } # attr { # name: "shape" # type: "shape" # } # } # op { # name: "Placeholder" # output_arg { # name: "output" # type_attr: "dtype" # } # attr { # name: "dtype" # type: "type" # } # attr { # name: "shape" # type: "shape" # default_value { # shape { # unknown_rank: true # } # } # } # } # op { # name: "PlaceholderV2" # output_arg { # name: "output" # type_attr: "dtype" # } # attr { # name: "dtype" # type: "type" # } # attr { # name: "shape" # type: "shape" # } # deprecation { # version: 23 # explanation: "Placeholder now behaves the same as PlaceholderV2." # } # } # op { # name: "PlaceholderWithDefault" # input_arg { # name: "input" # type_attr: "dtype" # } # output_arg { # name: "output" # type_attr: "dtype" # } # attr { # name: "dtype" # type: "type" # } # attr { # name: "shape" # type: "shape" # } # } # op { # name: "PreventGradient" # input_arg { # name: "input" # type_attr: "T" # } # output_arg { # name: "output" # type_attr: "T" # } # attr { # name: "T" # type: "type" # } # attr { # name: "message" # type: "string" # default_value { # s: "" # } # } # } # op { # name: "QuantizeAndDequantize" # input_arg { # name: "input" # type_attr: "T" # } # output_arg { # name: "output" # type_attr: "T" # } # attr { # name: "signed_input" # type: "bool" # default_value { # b: true # } # } # attr { # name: "num_bits" # type: "int" # default_value { # i: 8 # } # } # attr { # name: "range_given" # type: "bool" # default_value { # b: false # } # } # attr { # name: "input_min" # type: "float" # default_value { # f: 0 # } # } # attr { # name: "input_max" # type: "float" # default_value { # f: 0 # } # } # attr { # name: "T" # type: 
"type" # allowed_values { # list { # type: DT_BFLOAT16 # type: DT_HALF # type: DT_FLOAT # type: DT_DOUBLE # } # } # } # deprecation { # version: 22 # explanation: "Replaced by QuantizeAndDequantizeV2" # } # } # op { # name: "QuantizeAndDequantizeV2" # input_arg { # name: "input" # type_attr: "T" # } # input_arg { # name: "input_min" # type_attr: "T" # } # input_arg { # name: "input_max" # type_attr: "T" # } # output_arg { # name: "output" # type_attr: "T" # } # attr { # name: "signed_input" # type: "bool" # default_value { # b: true # } # } # attr { # name: "num_bits" # type: "int" # default_value { # i: 8 # } # } # attr { # name: "range_given" # type: "bool" # default_value { # b: false # } # } # attr { # name: "T" # type: "type" # allowed_values { # list { # type: DT_BFLOAT16 # type: DT_HALF # type: DT_FLOAT # type: DT_DOUBLE # } # } # } # attr { # name: "round_mode" # type: "string" # default_value { # s: "HALF_TO_EVEN" # } # allowed_values { # list { # s: "HALF_TO_EVEN" # s: "HALF_UP" # } # } # } # } # op { # name: "QuantizeAndDequantizeV3" # input_arg { # name: "input" # type_attr: "T" # } # input_arg { # name: "input_min" # type_attr: "T" # } # input_arg { # name: "input_max" # type_attr: "T" # } # input_arg { # name: "num_bits" # type: DT_INT32 # } # output_arg { # name: "output" # type_attr: "T" # } # attr { # name: "signed_input" # type: "bool" # default_value { # b: true # } # } # attr { # name: "range_given" # type: "bool" # default_value { # b: true # } # } # attr { # name: "T" # type: "type" # allowed_values { # list { # type: DT_BFLOAT16 # type: DT_HALF # type: DT_FLOAT # type: DT_DOUBLE # } # } # } # } # op { # name: "QuantizeV2" # input_arg { # name: "input" # type: DT_FLOAT # } # input_arg { # name: "min_range" # type: DT_FLOAT # } # input_arg { # name: "max_range" # type: DT_FLOAT # } # output_arg { # name: "output" # type_attr: "T" # } # output_arg { # name: "output_min" # type: DT_FLOAT # } # output_arg { # name: "output_max" # type: DT_FLOAT # 
} # attr { # name: "T" # type: "type" # allowed_values { # list { # type: DT_QINT8 # type: DT_QUINT8 # type: DT_QINT32 # type: DT_QINT16 # type: DT_QUINT16 # } # } # } # attr { # name: "mode" # type: "string" # default_value { # s: "MIN_COMBINED" # } # allowed_values { # list { # s: "MIN_COMBINED" # s: "MIN_FIRST" # s: "SCALED" # } # } # } # attr { # name: "round_mode" # type: "string" # default_value { # s: "HALF_AWAY_FROM_ZERO" # } # allowed_values { # list { # s: "HALF_AWAY_FROM_ZERO" # s: "HALF_TO_EVEN" # } # } # } # } # op { # name: "QuantizedConcat" # input_arg { # name: "concat_dim" # type: DT_INT32 # } # input_arg { # name: "values" # type_attr: "T" # number_attr: "N" # } # input_arg { # name: "input_mins" # type: DT_FLOAT # number_attr: "N" # } # input_arg { # name: "input_maxes" # type: DT_FLOAT # number_attr: "N" # } # output_arg { # name: "output" # type_attr: "T" # } # output_arg { # name: "output_min" # type: DT_FLOAT # } # output_arg { # name: "output_max" # type: DT_FLOAT # } # attr { # name: "N" # type: "int" # has_minimum: true # minimum: 2 # } # attr { # name: "T" # type: "type" # } # } # op { # name: "QuantizedInstanceNorm" # input_arg { # name: "x" # type_attr: "T" # } # input_arg { # name: "x_min" # type: DT_FLOAT # } # input_arg { # name: "x_max" # type: DT_FLOAT # } # output_arg { # name: "y" # type_attr: "T" # } # output_arg { # name: "y_min" # type: DT_FLOAT # } # output_arg { # name: "y_max" # type: DT_FLOAT # } # attr { # name: "T" # type: "type" # allowed_values { # list { # type: DT_QINT8 # type: DT_QUINT8 # type: DT_QINT32 # type: DT_QINT16 # type: DT_QUINT16 # } # } # } # attr { # name: "output_range_given" # type: "bool" # default_value { # b: false # } # } # attr { # name: "given_y_min" # type: "float" # default_value { # f: 0 # } # } # attr { # name: "given_y_max" # type: "float" # default_value { # f: 0 # } # } # attr { # name: "variance_epsilon" # type: "float" # default_value { # f: 1e-05 # } # } # attr { # name: 
"min_separation" # type: "float" # default_value { # f: 0.001 # } # } # } # op { # name: "QuantizedReshape" # input_arg { # name: "tensor" # type_attr: "T" # } # input_arg { # name: "shape" # type_attr: "Tshape" # } # input_arg { # name: "input_min" # type: DT_FLOAT # } # input_arg { # name: "input_max" # type: DT_FLOAT # } # output_arg { # name: "output" # type_attr: "T" # } # output_arg { # name: "output_min" # type: DT_FLOAT # } # output_arg { # name: "output_max" # type: DT_FLOAT # } # attr { # name: "T" # type: "type" # } # attr { # name: "Tshape" # type: "type" # default_value { # type: DT_INT32 # } # allowed_values { # list { # type: DT_INT32 # type: DT_INT64 # } # } # } # } # op { # name: "Rank" # input_arg { # name: "input" # type_attr: "T" # } # output_arg { # name: "output" # type: DT_INT32 # } # attr { # name: "T" # type: "type" # } # } # op { # name: "RefIdentity" # input_arg { # name: "input" # type_attr: "T" # is_ref: true # } # output_arg { # name: "output" # type_attr: "T" # is_ref: true # } # attr { # name: "T" # type: "type" # } # allows_uninitialized_input: true # } # op { # name: "Reshape" # input_arg { # name: "tensor" # type_attr: "T" # } # input_arg { # name: "shape" # type_attr: "Tshape" # } # output_arg { # name: "output" # type_attr: "T" # } # attr { # name: "T" # type: "type" # } # attr { # name: "Tshape" # type: "type" # default_value { # type: DT_INT32 # } # allowed_values { # list { # type: DT_INT32 # type: DT_INT64 # } # } # } # } # op { # name: "ResourceStridedSliceAssign" # input_arg { # name: "ref" # type: DT_RESOURCE # } # input_arg { # name: "begin" # type_attr: "Index" # } # input_arg { # name: "end" # type_attr: "Index" # } # input_arg { # name: "strides" # type_attr: "Index" # } # input_arg { # name: "value" # type_attr: "T" # } # attr { # name: "T" # type: "type" # } # attr { # name: "Index" # type: "type" # allowed_values { # list { # type: DT_INT32 # type: DT_INT64 # } # } # } # attr { # name: "begin_mask" # type: "int" # 
default_value { # i: 0 # } # } # attr { # name: "end_mask" # type: "int" # default_value { # i: 0 # } # } # attr { # name: "ellipsis_mask" # type: "int" # default_value { # i: 0 # } # } # attr { # name: "new_axis_mask" # type: "int" # default_value { # i: 0 # } # } # attr { # name: "shrink_axis_mask" # type: "int" # default_value { # i: 0 # } # } # is_stateful: true # } # op { # name: "Reverse" # input_arg { # name: "tensor" # type_attr: "T" # } # input_arg { # name: "dims" # type: DT_BOOL # } # output_arg { # name: "output" # type_attr: "T" # } # attr { # name: "T" # type: "type" # allowed_values { # list { # type: DT_UINT8 # type: DT_INT8 # type: DT_UINT16 # type: DT_INT16 # type: DT_INT32 # type: DT_INT64 # type: DT_BOOL # type: DT_HALF # type: DT_FLOAT # type: DT_DOUBLE # type: DT_COMPLEX64 # type: DT_COMPLEX128 # type: DT_STRING # } # } # } # } # op { # name: "ReverseSequence" # input_arg { # name: "input" # type_attr: "T" # } # input_arg { # name: "seq_lengths" # type_attr: "Tlen" # } # output_arg { # name: "output" # type_attr: "T" # } # attr { # name: "seq_dim" # type: "int" # } # attr { # name: "batch_dim" # type: "int" # default_value { # i: 0 # } # } # attr { # name: "T" # type: "type" # } # attr { # name: "Tlen" # type: "type" # default_value { # type: DT_INT64 # } # allowed_values { # list { # type: DT_INT32 # type: DT_INT64 # } # } # } # } # op { # name: "ReverseV2" # input_arg { # name: "tensor" # type_attr: "T" # } # input_arg { # name: "axis" # type_attr: "Tidx" # } # output_arg { # name: "output" # type_attr: "T" # } # attr { # name: "Tidx" # type: "type" # default_value { # type: DT_INT32 # } # allowed_values { # list { # type: DT_INT32 # type: DT_INT64 # } # } # } # attr { # name: "T" # type: "type" # allowed_values { # list { # type: DT_UINT8 # type: DT_INT8 # type: DT_UINT16 # type: DT_INT16 # type: DT_INT32 # type: DT_INT64 # type: DT_BOOL # type: DT_BFLOAT16 # type: DT_HALF # type: DT_FLOAT # type: DT_DOUBLE # type: DT_COMPLEX64 # type: 
DT_COMPLEX128 # type: DT_STRING # } # } # } # } # op { # name: "ScatterNd" # input_arg { # name: "indices" # type_attr: "Tindices" # } # input_arg { # name: "updates" # type_attr: "T" # } # input_arg { # name: "shape" # type_attr: "Tindices" # } # output_arg { # name: "output" # type_attr: "T" # } # attr { # name: "T" # type: "type" # } # attr { # name: "Tindices" # type: "type" # allowed_values { # list { # type: DT_INT32 # type: DT_INT64 # } # } # } # } # op { # name: "ScatterNdNonAliasingAdd" # input_arg { # name: "input" # type_attr: "T" # } # input_arg { # name: "indices" # type_attr: "Tindices" # } # input_arg { # name: "updates" # type_attr: "T" # } # output_arg { # name: "output" # type_attr: "T" # } # attr { # name: "T" # type: "type" # allowed_values { # list { # type: DT_FLOAT # type: DT_DOUBLE # type: DT_INT32 # type: DT_UINT8 # type: DT_INT16 # type: DT_INT8 # type: DT_COMPLEX64 # type: DT_INT64 # type: DT_QINT8 # type: DT_QUINT8 # type: DT_QINT32 # type: DT_BFLOAT16 # type: DT_UINT16 # type: DT_COMPLEX128 # type: DT_HALF # type: DT_UINT32 # type: DT_UINT64 # type: DT_BOOL # } # } # } # attr { # name: "Tindices" # type: "type" # allowed_values { # list { # type: DT_INT32 # type: DT_INT64 # } # } # } # } # op { # name: "Shape" # input_arg { # name: "input" # type_attr: "T" # } # output_arg { # name: "output" # type_attr: "out_type" # } # attr { # name: "T" # type: "type" # } # attr { # name: "out_type" # type: "type" # default_value { # type: DT_INT32 # } # allowed_values { # list { # type: DT_INT32 # type: DT_INT64 # } # } # } # } # op { # name: "ShapeN" # input_arg { # name: "input" # type_attr: "T" # number_attr: "N" # } # output_arg { # name: "output" # type_attr: "out_type" # number_attr: "N" # } # attr { # name: "N" # type: "int" # has_minimum: true # minimum: 1 # } # attr { # name: "T" # type: "type" # } # attr { # name: "out_type" # type: "type" # default_value { # type: DT_INT32 # } # allowed_values { # list { # type: DT_INT32 # type: DT_INT64 
# } # } # } # } # op { # name: "Size" # input_arg { # name: "input" # type_attr: "T" # } # output_arg { # name: "output" # type_attr: "out_type" # } # attr { # name: "T" # type: "type" # } # attr { # name: "out_type" # type: "type" # default_value { # type: DT_INT32 # } # allowed_values { # list { # type: DT_INT32 # type: DT_INT64 # } # } # } # } # op { # name: "Slice" # input_arg { # name: "input" # type_attr: "T" # } # input_arg { # name: "begin" # type_attr: "Index" # } # input_arg { # name: "size" # type_attr: "Index" # } # output_arg { # name: "output" # type_attr: "T" # } # attr { # name: "T" # type: "type" # } # attr { # name: "Index" # type: "type" # allowed_values { # list { # type: DT_INT32 # type: DT_INT64 # } # } # } # } # op { # name: "Snapshot" # input_arg { # name: "input" # type_attr: "T" # } # output_arg { # name: "output" # type_attr: "T" # } # attr { # name: "T" # type: "type" # } # } # op { # name: "SpaceToBatch" # input_arg { # name: "input" # type_attr: "T" # } # input_arg { # name: "paddings" # type_attr: "Tpaddings" # } # output_arg { # name: "output" # type_attr: "T" # } # attr { # name: "T" # type: "type" # } # attr { # name: "Tpaddings" # type: "type" # default_value { # type: DT_INT32 # } # allowed_values { # list { # type: DT_INT32 # type: DT_INT64 # } # } # } # attr { # name: "block_size" # type: "int" # has_minimum: true # minimum: 2 # } # } # op { # name: "SpaceToBatchND" # input_arg { # name: "input" # type_attr: "T" # } # input_arg { # name: "block_shape" # type_attr: "Tblock_shape" # } # input_arg { # name: "paddings" # type_attr: "Tpaddings" # } # output_arg { # name: "output" # type_attr: "T" # } # attr { # name: "T" # type: "type" # } # attr { # name: "Tblock_shape" # type: "type" # default_value { # type: DT_INT32 # } # allowed_values { # list { # type: DT_INT32 # type: DT_INT64 # } # } # } # attr { # name: "Tpaddings" # type: "type" # default_value { # type: DT_INT32 # } # allowed_values { # list { # type: DT_INT32 # type: 
DT_INT64 # } # } # } # } # op { # name: "SpaceToDepth" # input_arg { # name: "input" # type_attr: "T" # } # output_arg { # name: "output" # type_attr: "T" # } # attr { # name: "T" # type: "type" # } # attr { # name: "block_size" # type: "int" # has_minimum: true # minimum: 2 # } # attr { # name: "data_format" # type: "string" # default_value { # s: "NHWC" # } # allowed_values { # list { # s: "NHWC" # s: "NCHW" # s: "NCHW_VECT_C" # } # } # } # } # op { # name: "Split" # input_arg { # name: "split_dim" # type: DT_INT32 # } # input_arg { # name: "value" # type_attr: "T" # } # output_arg { # name: "output" # type_attr: "T" # number_attr: "num_split" # } # attr { # name: "num_split" # type: "int" # has_minimum: true # minimum: 1 # } # attr { # name: "T" # type: "type" # } # } # op { # name: "SplitV" # input_arg { # name: "value" # type_attr: "T" # } # input_arg { # name: "size_splits" # type_attr: "Tlen" # } # input_arg { # name: "split_dim" # type: DT_INT32 # } # output_arg { # name: "output" # type_attr: "T" # number_attr: "num_split" # } # attr { # name: "num_split" # type: "int" # has_minimum: true # minimum: 1 # } # attr { # name: "T" # type: "type" # } # attr { # name: "Tlen" # type: "type" # default_value { # type: DT_INT64 # } # allowed_values { # list { # type: DT_INT32 # type: DT_INT64 # } # } # } # } # op { # name: "Squeeze" # input_arg { # name: "input" # type_attr: "T" # } # output_arg { # name: "output" # type_attr: "T" # } # attr { # name: "T" # type: "type" # } # attr { # name: "squeeze_dims" # type: "list(int)" # default_value { # list { # } # } # has_minimum: true # } # } # op { # name: "StopGradient" # input_arg { # name: "input" # type_attr: "T" # } # output_arg { # name: "output" # type_attr: "T" # } # attr { # name: "T" # type: "type" # } # } # op { # name: "StridedSlice" # input_arg { # name: "input" # type_attr: "T" # } # input_arg { # name: "begin" # type_attr: "Index" # } # input_arg { # name: "end" # type_attr: "Index" # } # input_arg { # 
name: "strides" # type_attr: "Index" # } # output_arg { # name: "output" # type_attr: "T" # } # attr { # name: "T" # type: "type" # } # attr { # name: "Index" # type: "type" # allowed_values { # list { # type: DT_INT32 # type: DT_INT64 # } # } # } # attr { # name: "begin_mask" # type: "int" # default_value { # i: 0 # } # } # attr { # name: "end_mask" # type: "int" # default_value { # i: 0 # } # } # attr { # name: "ellipsis_mask" # type: "int" # default_value { # i: 0 # } # } # attr { # name: "new_axis_mask" # type: "int" # default_value { # i: 0 # } # } # attr { # name: "shrink_axis_mask" # type: "int" # default_value { # i: 0 # } # } # } # op { # name: "StridedSliceAssign" # input_arg { # name: "ref" # type_attr: "T" # is_ref: true # } # input_arg { # name: "begin" # type_attr: "Index" # } # input_arg { # name: "end" # type_attr: "Index" # } # input_arg { # name: "strides" # type_attr: "Index" # } # input_arg { # name: "value" # type_attr: "T" # } # output_arg { # name: "output_ref" # type_attr: "T" # is_ref: true # } # attr { # name: "T" # type: "type" # } # attr { # name: "Index" # type: "type" # allowed_values { # list { # type: DT_INT32 # type: DT_INT64 # } # } # } # attr { # name: "begin_mask" # type: "int" # default_value { # i: 0 # } # } # attr { # name: "end_mask" # type: "int" # default_value { # i: 0 # } # } # attr { # name: "ellipsis_mask" # type: "int" # default_value { # i: 0 # } # } # attr { # name: "new_axis_mask" # type: "int" # default_value { # i: 0 # } # } # attr { # name: "shrink_axis_mask" # type: "int" # default_value { # i: 0 # } # } # } # op { # name: "StridedSliceGrad" # input_arg { # name: "shape" # type_attr: "Index" # } # input_arg { # name: "begin" # type_attr: "Index" # } # input_arg { # name: "end" # type_attr: "Index" # } # input_arg { # name: "strides" # type_attr: "Index" # } # input_arg { # name: "dy" # type_attr: "T" # } # output_arg { # name: "output" # type_attr: "T" # } # attr { # name: "T" # type: "type" # } # attr { # name: 
"Index" # type: "type" # allowed_values { # list { # type: DT_INT32 # type: DT_INT64 # } # } # } # attr { # name: "begin_mask" # type: "int" # default_value { # i: 0 # } # } # attr { # name: "end_mask" # type: "int" # default_value { # i: 0 # } # } # attr { # name: "ellipsis_mask" # type: "int" # default_value { # i: 0 # } # } # attr { # name: "new_axis_mask" # type: "int" # default_value { # i: 0 # } # } # attr { # name: "shrink_axis_mask" # type: "int" # default_value { # i: 0 # } # } # } # op { # name: "TensorScatterAdd" # input_arg { # name: "tensor" # type_attr: "T" # } # input_arg { # name: "indices" # type_attr: "Tindices" # } # input_arg { # name: "updates" # type_attr: "T" # } # output_arg { # name: "output" # type_attr: "T" # } # attr { # name: "T" # type: "type" # } # attr { # name: "Tindices" # type: "type" # allowed_values { # list { # type: DT_INT32 # type: DT_INT64 # } # } # } # } # op { # name: "TensorScatterSub" # input_arg { # name: "tensor" # type_attr: "T" # } # input_arg { # name: "indices" # type_attr: "Tindices" # } # input_arg { # name: "updates" # type_attr: "T" # } # output_arg { # name: "output" # type_attr: "T" # } # attr { # name: "T" # type: "type" # } # attr { # name: "Tindices" # type: "type" # allowed_values { # list { # type: DT_INT32 # type: DT_INT64 # } # } # } # } # op { # name: "TensorScatterUpdate" # input_arg { # name: "tensor" # type_attr: "T" # } # input_arg { # name: "indices" # type_attr: "Tindices" # } # input_arg { # name: "updates" # type_attr: "T" # } # output_arg { # name: "output" # type_attr: "T" # } # attr { # name: "T" # type: "type" # } # attr { # name: "Tindices" # type: "type" # allowed_values { # list { # type: DT_INT32 # type: DT_INT64 # } # } # } # } # op { # name: "Tile" # input_arg { # name: "input" # type_attr: "T" # } # input_arg { # name: "multiples" # type_attr: "Tmultiples" # } # output_arg { # name: "output" # type_attr: "T" # } # attr { # name: "T" # type: "type" # } # attr { # name: "Tmultiples" # 
type: "type" # default_value { # type: DT_INT32 # } # allowed_values { # list { # type: DT_INT32 # type: DT_INT64 # } # } # } # } # op { # name: "TileGrad" # input_arg { # name: "input" # type_attr: "T" # } # input_arg { # name: "multiples" # type: DT_INT32 # } # output_arg { # name: "output" # type_attr: "T" # } # attr { # name: "T" # type: "type" # } # deprecation { # version: 3 # explanation: "TileGrad has been replaced with reduce_sum" # } # } # op { # name: "Transpose" # input_arg { # name: "x" # type_attr: "T" # } # input_arg { # name: "perm" # type_attr: "Tperm" # } # output_arg { # name: "y" # type_attr: "T" # } # attr { # name: "T" # type: "type" # } # attr { # name: "Tperm" # type: "type" # default_value { # type: DT_INT32 # } # allowed_values { # list { # type: DT_INT32 # type: DT_INT64 # } # } # } # } # op { # name: "Unique" # input_arg { # name: "x" # type_attr: "T" # } # output_arg { # name: "y" # type_attr: "T" # } # output_arg { # name: "idx" # type_attr: "out_idx" # } # attr { # name: "T" # type: "type" # } # attr { # name: "out_idx" # type: "type" # default_value { # type: DT_INT32 # } # allowed_values { # list { # type: DT_INT32 # type: DT_INT64 # } # } # } # } # op { # name: "UniqueV2" # input_arg { # name: "x" # type_attr: "T" # } # input_arg { # name: "axis" # type_attr: "Taxis" # } # output_arg { # name: "y" # type_attr: "T" # } # output_arg { # name: "idx" # type_attr: "out_idx" # } # attr { # name: "T" # type: "type" # } # attr { # name: "Taxis" # type: "type" # default_value { # type: DT_INT64 # } # allowed_values { # list { # type: DT_INT32 # type: DT_INT64 # } # } # } # attr { # name: "out_idx" # type: "type" # default_value { # type: DT_INT32 # } # allowed_values { # list { # type: DT_INT32 # type: DT_INT64 # } # } # } # } # op { # name: "UniqueWithCounts" # input_arg { # name: "x" # type_attr: "T" # } # output_arg { # name: "y" # type_attr: "T" # } # output_arg { # name: "idx" # type_attr: "out_idx" # } # output_arg { # name: "count" # 
type_attr: "out_idx" # } # attr { # name: "T" # type: "type" # } # attr { # name: "out_idx" # type: "type" # default_value { # type: DT_INT32 # } # allowed_values { # list { # type: DT_INT32 # type: DT_INT64 # } # } # } # } # op { # name: "UniqueWithCountsV2" # input_arg { # name: "x" # type_attr: "T" # } # input_arg { # name: "axis" # type_attr: "Taxis" # } # output_arg { # name: "y" # type_attr: "T" # } # output_arg { # name: "idx" # type_attr: "out_idx" # } # output_arg { # name: "count" # type_attr: "out_idx" # } # attr { # name: "T" # type: "type" # } # attr { # name: "Taxis" # type: "type" # default_value { # type: DT_INT64 # } # allowed_values { # list { # type: DT_INT32 # type: DT_INT64 # } # } # } # attr { # name: "out_idx" # type: "type" # default_value { # type: DT_INT32 # } # allowed_values { # list { # type: DT_INT32 # type: DT_INT64 # } # } # } # } # op { # name: "Unpack" # input_arg { # name: "value" # type_attr: "T" # } # output_arg { # name: "output" # type_attr: "T" # number_attr: "num" # } # attr { # name: "num" # type: "int" # has_minimum: true # } # attr { # name: "T" # type: "type" # } # attr { # name: "axis" # type: "int" # default_value { # i: 0 # } # } # } # op { # name: "UnravelIndex" # input_arg { # name: "indices" # type_attr: "Tidx" # } # input_arg { # name: "dims" # type_attr: "Tidx" # } # output_arg { # name: "output" # type_attr: "Tidx" # } # attr { # name: "Tidx" # type: "type" # default_value { # type: DT_INT32 # } # allowed_values { # list { # type: DT_INT32 # type: DT_INT64 # } # } # } # } # op { # name: "UpperBound" # input_arg { # name: "sorted_inputs" # type_attr: "T" # } # input_arg { # name: "values" # type_attr: "T" # } # output_arg { # name: "output" # type_attr: "out_type" # } # attr { # name: "T" # type: "type" # } # attr { # name: "out_type" # type: "type" # default_value { # type: DT_INT32 # } # allowed_values { # list { # type: DT_INT32 # type: DT_INT64 # } # } # } # } # op { # name: "Where" # input_arg { # name: 
"input" # type_attr: "T" # } # output_arg { # name: "index" # type: DT_INT64 # } # attr { # name: "T" # type: "type" # default_value { # type: DT_BOOL # } # allowed_values { # list { # type: DT_FLOAT # type: DT_DOUBLE # type: DT_INT32 # type: DT_UINT8 # type: DT_INT16 # type: DT_INT8 # type: DT_COMPLEX64 # type: DT_INT64 # type: DT_QINT8 # type: DT_QUINT8 # type: DT_QINT32 # type: DT_BFLOAT16 # type: DT_UINT16 # type: DT_COMPLEX128 # type: DT_HALF # type: DT_UINT32 # type: DT_UINT64 # type: DT_BOOL # } # } # } # } # op { # name: "ZerosLike" # input_arg { # name: "x" # type_attr: "T" # } # output_arg { # name: "y" # type_attr: "T" # } # attr { # name: "T" # type: "type" # } # } _op_def_lib = _InitOpDefLibrary(b"\nm\n\023BatchMatrixBandPart\022\n\n\005input\"\001T\022\r\n\tnum_lower\030\t\022\r\n\tnum_upper\030\t\032\t\n\004band\"\001T\"\t\n\001T\022\004typeB\026\010\016\022\022Use MatrixBandPart\nL\n\017BatchMatrixDiag\022\r\n\010diagonal\"\001T\032\013\n\006output\"\001T\"\t\n\001T\022\004typeB\022\010\016\022\016Use MatrixDiag\nS\n\023BatchMatrixDiagPart\022\n\n\005input\"\001T\032\r\n\010diagonal\"\001T\"\t\n\001T\022\004typeB\026\010\016\022\022Use MatrixDiagPart\n^\n\022BatchMatrixSetDiag\022\n\n\005input\"\001T\022\r\n\010diagonal\"\001T\032\013\n\006output\"\001T\"\t\n\001T\022\004typeB\025\010\016\022\021Use MatrixSetDiag\nr\n\014BatchToSpace\022\n\n\005input\"\001T\022\r\n\005crops\"\004Tidx\032\013\n\006output\"\001T\"\t\n\001T\022\004type\"\025\n\nblock_size\022\003int(\0010\002\"\030\n\004Tidx\022\004type\032\0020\003:\006\n\0042\002\003\t\n\240\001\n\016BatchToSpaceND\022\n\n\005input\"\001T\022\033\n\013block_shape\"\014Tblock_shape\022\017\n\005crops\"\006Tcrops\032\013\n\006output\"\001T\"\t\n\001T\022\004type\" 
\n\014Tblock_shape\022\004type\032\0020\003:\006\n\0042\002\003\t\"\032\n\006Tcrops\022\004type\032\0020\003:\006\n\0042\002\003\t\np\n\007Bitcast\022\n\n\005input\"\001T\032\016\n\006output\"\004type\"\"\n\001T\022\004type:\027\n\0252\023\016\023\001\002\t\003\004\021\026\027\006\005\010\022\013\014\017\020\r\"%\n\004type\022\004type:\027\n\0252\023\016\023\001\002\t\003\004\021\026\027\006\005\010\022\013\014\017\020\r\nA\n\rBroadcastArgs\022\007\n\002s0\"\001T\022\007\n\002s1\"\001T\032\007\n\002r0\"\001T\"\025\n\001T\022\004type\032\0020\003:\006\n\0042\002\003\t\nR\n\025BroadcastGradientArgs\022\007\n\002s0\"\001T\022\007\n\002s1\"\001T\032\007\n\002r0\"\001T\032\007\n\002r1\"\001T\"\025\n\001T\022\004type\032\0020\003:\006\n\0042\002\003\t\nZ\n\013BroadcastTo\022\n\n\005input\"\001T\022\r\n\005shape\"\004Tidx\032\013\n\006output\"\001T\"\t\n\001T\022\004type\"\030\n\004Tidx\022\004type\032\0020\003:\006\n\0042\002\003\t\nQ\n\rCheckNumerics\022\013\n\006tensor\"\001T\032\013\n\006output\"\001T\"\023\n\001T\022\004type:\010\n\0062\004\016\023\001\002\"\021\n\007message\022\006string\nN\n\006Concat\022\016\n\nconcat_dim\030\003\022\016\n\006values\"\001T*\001N\032\013\n\006output\"\001T\"\014\n\001N\022\003int(\0010\002\"\t\n\001T\022\004type\nI\n\014ConcatOffset\022\016\n\nconcat_dim\030\003\022\014\n\005shape\030\003*\001N\032\r\n\006offset\030\003*\001N\"\014\n\001N\022\003int(\0010\002\nh\n\010ConcatV2\022\016\n\006values\"\001T*\001N\022\014\n\004axis\"\004Tidx\032\013\n\006output\"\001T\"\014\n\001N\022\003int(\0010\002\"\t\n\001T\022\004type\"\030\n\004Tidx\022\004type\032\0020\003:\006\n\0042\002\003\t\nY\n\022ConjugateTranspose\022\006\n\001x\"\001T\022\r\n\004perm\"\005Tperm\032\006\n\001y\"\001T\"\t\n\001T\022\004type\"\031\n\005Tperm\022\004type\032\0020\003:\006\n\0042\002\003\t\n8\n\005Const\032\017\n\006output\"\005dtype\"\017\n\005value\022\006tensor\"\r\n\005dtype\022\004type\n>\n\025DebugGradientIdentity\022\n\n\005input\"\001T\032\013\n\006outp
ut\"\001T\"\t\n\001T\022\004type\230\001\001\nG\n\030DebugGradientRefIdentity\022\r\n\005input\"\001T\200\001\001\032\016\n\006output\"\001T\200\001\001\"\t\n\001T\022\004type\230\001\001\n(\n\010DeepCopy\022\006\n\001x\"\001T\032\006\n\001y\"\001T\"\t\n\001T\022\004type\210\001\001\n\205\001\n\014DepthToSpace\022\n\n\005input\"\001T\032\013\n\006output\"\001T\"\t\n\001T\022\004type\"\025\n\nblock_size\022\003int(\0010\002\":\n\013data_format\022\006string\032\006\022\004NHWC:\033\n\031\022\004NHWC\022\004NCHW\022\013NCHW_VECT_C\n\235\001\n\nDequantize\022\n\n\005input\"\001T\022\r\n\tmin_range\030\001\022\r\n\tmax_range\030\001\032\n\n\006output\030\001\"\024\n\001T\022\004type:\t\n\0072\005\013\014\r\017\020\"C\n\004mode\022\006string\032\016\022\014MIN_COMBINED:#\n!\022\014MIN_COMBINED\022\tMIN_FIRST\022\006SCALED\n;\n\004Diag\022\r\n\010diagonal\"\001T\032\013\n\006output\"\001T\"\027\n\001T\022\004type:\014\n\n2\010\016\023\001\002\003\t\010\022\n>\n\010DiagPart\022\n\n\005input\"\001T\032\r\n\010diagonal\"\001T\"\027\n\001T\022\004type:\014\n\n2\010\016\023\001\002\003\t\010\022\n\271\001\n\014EditDistance\022\026\n\022hypothesis_indices\030\t\022\026\n\021hypothesis_values\"\001T\022\024\n\020hypothesis_shape\030\t\022\021\n\rtruth_indices\030\t\022\021\n\014truth_values\"\001T\022\017\n\013truth_shape\030\t\032\n\n\006output\030\001\"\025\n\tnormalize\022\004bool\032\002(\001\"\t\n\001T\022\004type\nG\n\005Empty\022\t\n\005shape\030\003\032\017\n\006output\"\005dtype\"\r\n\005dtype\022\004type\"\020\n\004init\022\004bool\032\002(\000\210\001\001\nA\n\013EnsureShape\022\n\n\005input\"\001T\032\013\n\006output\"\001T\"\016\n\005shape\022\005shape\"\t\n\001T\022\004type\nW\n\nExpandDims\022\n\n\005input\"\001T\022\013\n\003dim\"\004Tdim\032\013\n\006output\"\001T\"\t\n\001T\022\004type\"\030\n\004Tdim\022\004type\032\0020\003:\006\n\0042\002\003\t\n\274\001\n\023ExtractImagePatches\022\013\n\006images\"\001T\032\014\n\007patches\"\001T\"\027\n\006ksizes\022\tli
st(int)(\0010\004\"\030\n\007strides\022\tlist(int)(\0010\004\"\026\n\005rates\022\tlist(int)(\0010\004\"\033\n\001T\022\004type:\020\n\0162\014\001\002\003\004\005\006\t\016\021\023\026\027\"\"\n\007padding\022\006string:\017\n\r\022\004SAME\022\005VALID\n\244\001\n\024ExtractVolumePatches\022\n\n\005input\"\001T\032\014\n\007patches\"\001T\"\027\n\006ksizes\022\tlist(int)(\0010\005\"\030\n\007strides\022\tlist(int)(\0010\005\"\033\n\001T\022\004type:\020\n\0162\014\001\002\003\004\005\006\t\016\021\023\026\027\"\"\n\007padding\022\006string:\017\n\r\022\004SAME\022\005VALID\n\213\001\n\027FakeQuantWithMinMaxArgs\022\n\n\006inputs\030\001\032\013\n\007outputs\030\001\"\023\n\003min\022\005float\032\005%\000\000\300\300\"\023\n\003max\022\005float\032\005%\000\000\300@\"\023\n\010num_bits\022\003int\032\002\030\010\"\030\n\014narrow_range\022\004bool\032\002(\000\n\244\001\n\037FakeQuantWithMinMaxArgsGradient\022\r\n\tgradients\030\001\022\n\n\006inputs\030\001\032\r\n\tbackprops\030\001\"\023\n\003min\022\005float\032\005%\000\000\300\300\"\023\n\003max\022\005float\032\005%\000\000\300@\"\023\n\010num_bits\022\003int\032\002\030\010\"\030\n\014narrow_range\022\004bool\032\002(\000\ns\n\027FakeQuantWithMinMaxVars\022\n\n\006inputs\030\001\022\007\n\003min\030\001\022\007\n\003max\030\001\032\013\n\007outputs\030\001\"\023\n\010num_bits\022\003int\032\002\030\010\"\030\n\014narrow_range\022\004bool\032\002(\000\n\302\001\n\037FakeQuantWithMinMaxVarsGradient\022\r\n\tgradients\030\001\022\n\n\006inputs\030\001\022\007\n\003min\030\001\022\007\n\003max\030\001\032\027\n\023backprops_wrt_input\030\001\032\024\n\020backprop_wrt_min\030\001\032\024\n\020backprop_wrt_max\030\001\"\023\n\010num_bits\022\003int\032\002\030\010\"\030\n\014narrow_range\022\004bool\032\002(\000\n}\n!FakeQuantWithMinMaxVarsPerChannel\022\n\n\006inputs\030\001\022\007\n\003min\030\001\022\007\n\003max\030\001\032\013\n\007outputs\030\001\"\023\n\010num_bits\022\003int\032\002\030\010\"\030\n\014
narrow_range\022\004bool\032\002(\000\n\314\001\n)FakeQuantWithMinMaxVarsPerChannelGradient\022\r\n\tgradients\030\001\022\n\n\006inputs\030\001\022\007\n\003min\030\001\022\007\n\003max\030\001\032\027\n\023backprops_wrt_input\030\001\032\024\n\020backprop_wrt_min\030\001\032\024\n\020backprop_wrt_max\030\001\"\023\n\010num_bits\022\003int\032\002\030\010\"\030\n\014narrow_range\022\004bool\032\002(\000\n^\n\004Fill\022\022\n\004dims\"\nindex_type\022\n\n\005value\"\001T\032\013\n\006output\"\001T\"\t\n\001T\022\004type\"\036\n\nindex_type\022\004type\032\0020\003:\006\n\0042\002\003\t\n\214\001\n\006Gather\022\021\n\006params\"\007Tparams\022\023\n\007indices\"\010Tindices\032\021\n\006output\"\007Tparams\"\034\n\020validate_indices\022\004bool\032\002(\001\"\017\n\007Tparams\022\004type\"\030\n\010Tindices\022\004type:\006\n\0042\002\003\t\np\n\010GatherNd\022\021\n\006params\"\007Tparams\022\023\n\007indices\"\010Tindices\032\021\n\006output\"\007Tparams\"\017\n\007Tparams\022\004type\"\030\n\010Tindices\022\004type:\006\n\0042\002\003\t\n\226\001\n\010GatherV2\022\021\n\006params\"\007Tparams\022\023\n\007indices\"\010Tindices\022\r\n\004axis\"\005Taxis\032\021\n\006output\"\007Tparams\"\017\n\007Tparams\022\004type\"\030\n\010Tindices\022\004type:\006\n\0042\002\003\t\"\025\n\005Taxis\022\004type:\006\n\0042\002\003\t\n7\n\016GuaranteeConst\022\n\n\005input\"\001T\032\013\n\006output\"\001T\"\t\n\001T\022\004type\210\001\001\n.\n\010Identity\022\n\n\005input\"\001T\032\013\n\006output\"\001T\"\t\n\001T\022\004type\n9\n\tIdentityN\022\n\n\005input2\001T\032\013\n\006output2\001T\"\023\n\001T\022\nlist(type)(\0010\001\n^\n\016ImmutableConst\032\017\n\006tensor\"\005dtype\"\r\n\005dtype\022\004type\"\016\n\005shape\022\005shape\"\034\n\022memory_region_name\022\006string\n6\n\nInplaceAdd\022\006\n\001x\"\001T\022\005\n\001i\030\003\022\006\n\001v\"\001T\032\006\n\001y\"\001T\"\t\n\001T\022\004type\n6\n\nInplaceSub\022\006\n\001x\"\001T\022\005\n\001i\030\003\022\
006\n\001v\"\001T\032\006\n\001y\"\001T\"\t\n\001T\022\004type\n9\n\rInplaceUpdate\022\006\n\001x\"\001T\022\005\n\001i\030\003\022\006\n\001v\"\001T\032\006\n\001y\"\001T\"\t\n\001T\022\004type\n:\n\021InvertPermutation\022\006\n\001x\"\001T\032\006\n\001y\"\001T\"\025\n\001T\022\004type\032\0020\003:\006\n\0042\002\003\t\n\\\n\010ListDiff\022\006\n\001x\"\001T\022\006\n\001y\"\001T\032\010\n\003out\"\001T\032\016\n\003idx\"\007out_idx\"\t\n\001T\022\004type\"\033\n\007out_idx\022\004type\032\0020\003:\006\n\0042\002\003\t\nj\n\nLowerBound\022\022\n\rsorted_inputs\"\001T\022\013\n\006values\"\001T\032\022\n\006output\"\010out_type\"\t\n\001T\022\004type\"\034\n\010out_type\022\004type\032\0020\003:\006\n\0042\002\003\t\nx\n\016MatrixBandPart\022\n\n\005input\"\001T\022\023\n\tnum_lower\"\006Tindex\022\023\n\tnum_upper\"\006Tindex\032\t\n\004band\"\001T\"\t\n\001T\022\004type\"\032\n\006Tindex\022\004type\032\0020\t:\006\n\0042\002\003\t\n3\n\nMatrixDiag\022\r\n\010diagonal\"\001T\032\013\n\006output\"\001T\"\t\n\001T\022\004type\n6\n\016MatrixDiagPart\022\n\n\005input\"\001T\032\r\n\010diagonal\"\001T\"\t\n\001T\022\004type\nB\n\rMatrixSetDiag\022\n\n\005input\"\001T\022\r\n\010diagonal\"\001T\032\013\n\006output\"\001T\"\t\n\001T\022\004type\n\215\001\n\tMirrorPad\022\n\n\005input\"\001T\022\025\n\010paddings\"\tTpaddings\032\013\n\006output\"\001T\"\t\n\001T\022\004type\"\035\n\tTpaddings\022\004type\032\0020\003:\006\n\0042\002\003\t\"&\n\004mode\022\006string:\026\n\024\022\007REFLECT\022\tSYMMETRIC\n\221\001\n\rMirrorPadGrad\022\n\n\005input\"\001T\022\025\n\010paddings\"\tTpaddings\032\013\n\006output\"\001T\"\t\n\001T\022\004type\"\035\n\tTpaddings\022\004type\032\0020\003:\006\n\0042\002\003\t\"&\n\004mode\022\006string:\026\n\024\022\007REFLECT\022\tSYMMETRIC\n\214\001\n\006OneHot\022\r\n\007indices\"\002TI\022\t\n\005depth\030\003\022\r\n\010on_value\"\001T\022\016\n\toff_value\"\001T\032\013\n\006output\"\001T\"\030\n\004axis\022\003int\032\013\030\377\3
77\377\377\377\377\377\377\377\001\"\t\n\001T\022\004type\"\027\n\002TI\022\004type\032\0020\t:\007\n\0052\003\004\003\t\n8\n\010OnesLike\022\006\n\001x\"\001T\032\006\n\001y\"\001T\"\034\n\001T\022\004type:\021\n\0172\r\016\023\001\002\006\004\005\021\003\t\010\022\n\nM\n\004Pack\022\016\n\006values\"\001T*\001N\032\013\n\006output\"\001T\"\014\n\001N\022\003int(\0010\001\"\t\n\001T\022\004type\"\017\n\004axis\022\003int\032\002\030\000\n_\n\003Pad\022\n\n\005input\"\001T\022\025\n\010paddings\"\tTpaddings\032\013\n\006output\"\001T\"\t\n\001T\022\004type\"\035\n\tTpaddings\022\004type\032\0020\003:\006\n\0042\002\003\t\nw\n\005PadV2\022\n\n\005input\"\001T\022\025\n\010paddings\"\tTpaddings\022\024\n\017constant_values\"\001T\032\013\n\006output\"\001T\"\t\n\001T\022\004type\"\035\n\tTpaddings\022\004type\032\0020\003:\006\n\0042\002\003\t\nV\n\016ParallelConcat\022\016\n\006values\"\001T*\001N\032\013\n\006output\"\001T\"\014\n\001N\022\003int(\0010\001\"\t\n\001T\022\004type\"\016\n\005shape\022\005shape\nC\n\013Placeholder\032\017\n\006output\"\005dtype\"\r\n\005dtype\022\004type\"\024\n\005shape\022\005shape\032\004:\002\030\001\nw\n\rPlaceholderV2\032\017\n\006output\"\005dtype\"\r\n\005dtype\022\004type\"\016\n\005shape\022\005shapeB6\010\027\0222Placeholder now behaves the same as 
PlaceholderV2.\nX\n\026PlaceholderWithDefault\022\016\n\005input\"\005dtype\032\017\n\006output\"\005dtype\"\r\n\005dtype\022\004type\"\016\n\005shape\022\005shape\nL\n\017PreventGradient\022\n\n\005input\"\001T\032\013\n\006output\"\001T\"\t\n\001T\022\004type\"\025\n\007message\022\006string\032\002\022\000\n\354\001\n\025QuantizeAndDequantize\022\n\n\005input\"\001T\032\013\n\006output\"\001T\"\030\n\014signed_input\022\004bool\032\002(\001\"\023\n\010num_bits\022\003int\032\002\030\010\"\027\n\013range_given\022\004bool\032\002(\000\"\031\n\tinput_min\022\005float\032\005%\000\000\000\000\"\031\n\tinput_max\022\005float\032\005%\000\000\000\000\"\023\n\001T\022\004type:\010\n\0062\004\016\023\001\002B\'\010\026\022#Replaced by QuantizeAndDequantizeV2\n\360\001\n\027QuantizeAndDequantizeV2\022\n\n\005input\"\001T\022\016\n\tinput_min\"\001T\022\016\n\tinput_max\"\001T\032\013\n\006output\"\001T\"\030\n\014signed_input\022\004bool\032\002(\001\"\023\n\010num_bits\022\003int\032\002\030\010\"\027\n\013range_given\022\004bool\032\002(\000\"\023\n\001T\022\004type:\010\n\0062\004\016\023\001\002\"?\n\nround_mode\022\006string\032\016\022\014HALF_TO_EVEN:\031\n\027\022\014HALF_TO_EVEN\022\007HALF_UP\n\250\001\n\027QuantizeAndDequantizeV3\022\n\n\005input\"\001T\022\016\n\tinput_min\"\001T\022\016\n\tinput_max\"\001T\022\014\n\010num_bits\030\003\032\013\n\006output\"\001T\"\030\n\014signed_input\022\004bool\032\002(\001\"\027\n\013range_given\022\004bool\032\002(\001\"\023\n\001T\022\004type:\010\n\0062\004\016\023\001\002\n\221\002\n\nQuantizeV2\022\t\n\005input\030\001\022\r\n\tmin_range\030\001\022\r\n\tmax_range\030\001\032\013\n\006output\"\001T\032\016\n\noutput_min\030\001\032\016\n\noutput_max\030\001\"\024\n\001T\022\004type:\t\n\0072\005\013\014\r\017\020\"C\n\004mode\022\006string\032\016\022\014MIN_COMBINED:#\n!\022\014MIN_COMBINED\022\tMIN_FIRST\022\006SCALED\"R\n\nround_mode\022\006string\032\025\022\023HALF_AWAY_FROM_ZERO:%\n#\022\023HALF_AWAY_FROM_ZERO
\022\014HALF_TO_EVEN\n\236\001\n\017QuantizedConcat\022\016\n\nconcat_dim\030\003\022\016\n\006values\"\001T*\001N\022\021\n\ninput_mins\030\001*\001N\022\022\n\013input_maxes\030\001*\001N\032\013\n\006output\"\001T\032\016\n\noutput_min\030\001\032\016\n\noutput_max\030\001\"\014\n\001N\022\003int(\0010\002\"\t\n\001T\022\004type\n\205\002\n\025QuantizedInstanceNorm\022\006\n\001x\"\001T\022\t\n\005x_min\030\001\022\t\n\005x_max\030\001\032\006\n\001y\"\001T\032\t\n\005y_min\030\001\032\t\n\005y_max\030\001\"\024\n\001T\022\004type:\t\n\0072\005\013\014\r\017\020\"\036\n\022output_range_given\022\004bool\032\002(\000\"\033\n\013given_y_min\022\005float\032\005%\000\000\000\000\"\033\n\013given_y_max\022\005float\032\005%\000\000\000\000\" \n\020variance_epsilon\022\005float\032\005%\254\305\'7\"\036\n\016min_separation\022\005float\032\005%o\022\203:\n\242\001\n\020QuantizedReshape\022\013\n\006tensor\"\001T\022\017\n\005shape\"\006Tshape\022\r\n\tinput_min\030\001\022\r\n\tinput_max\030\001\032\013\n\006output\"\001T\032\016\n\noutput_min\030\001\032\016\n\noutput_max\030\001\"\t\n\001T\022\004type\"\032\n\006Tshape\022\004type\032\0020\003:\006\n\0042\002\003\t\n)\n\004Rank\022\n\n\005input\"\001T\032\n\n\006output\030\003\"\t\n\001T\022\004type\n:\n\013RefIdentity\022\r\n\005input\"\001T\200\001\001\032\016\n\006output\"\001T\200\001\001\"\t\n\001T\022\004type\230\001\001\n[\n\007Reshape\022\013\n\006tensor\"\001T\022\017\n\005shape\"\006Tshape\032\013\n\006output\"\001T\"\t\n\001T\022\004type\"\032\n\006Tshape\022\004type\032\0020\003:\006\n\0042\002\003\t\n\203\002\n\032ResourceStridedSliceAssign\022\007\n\003ref\030\024\022\016\n\005begin\"\005Index\022\014\n\003end\"\005Index\022\020\n\007strides\"\005Index\022\n\n\005value\"\001T\"\t\n\001T\022\004type\"\025\n\005Index\022\004type:\006\n\0042\002\003\t\"\025\n\nbegin_mask\022\003int\032\002\030\000\"\023\n\010end_mask\022\003int\032\002\030\000\"\030\n\rellipsis_mask\022\003int\032\002\030\000\"\030\n\rnew
_axis_mask\022\003int\032\002\030\000\"\033\n\020shrink_axis_mask\022\003int\032\002\030\000\210\001\001\nK\n\007Reverse\022\013\n\006tensor\"\001T\022\010\n\004dims\030\n\032\013\n\006output\"\001T\"\034\n\001T\022\004type:\021\n\0172\r\004\006\021\005\003\t\n\023\001\002\010\022\007\n\212\001\n\017ReverseSequence\022\n\n\005input\"\001T\022\023\n\013seq_lengths\"\004Tlen\032\013\n\006output\"\001T\"\016\n\007seq_dim\022\003int\"\024\n\tbatch_dim\022\003int\032\002\030\000\"\t\n\001T\022\004type\"\030\n\004Tlen\022\004type\032\0020\t:\006\n\0042\002\003\t\nl\n\tReverseV2\022\013\n\006tensor\"\001T\022\014\n\004axis\"\004Tidx\032\013\n\006output\"\001T\"\030\n\004Tidx\022\004type\032\0020\003:\006\n\0042\002\003\t\"\035\n\001T\022\004type:\022\n\0202\016\004\006\021\005\003\t\n\016\023\001\002\010\022\007\ns\n\tScatterNd\022\023\n\007indices\"\010Tindices\022\014\n\007updates\"\001T\022\021\n\005shape\"\010Tindices\032\013\n\006output\"\001T\"\t\n\001T\022\004type\"\030\n\010Tindices\022\004type:\006\n\0042\002\003\t\n\222\001\n\027ScatterNdNonAliasingAdd\022\n\n\005input\"\001T\022\023\n\007indices\"\010Tindices\022\014\n\007updates\"\001T\032\013\n\006output\"\001T\"!\n\001T\022\004type:\026\n\0242\022\001\002\003\004\005\006\010\t\013\014\r\016\021\022\023\026\027\n\"\030\n\010Tindices\022\004type:\006\n\0042\002\003\t\nP\n\005Shape\022\n\n\005input\"\001T\032\022\n\006output\"\010out_type\"\t\n\001T\022\004type\"\034\n\010out_type\022\004type\032\0020\003:\006\n\0042\002\003\t\ne\n\006ShapeN\022\r\n\005input\"\001T*\001N\032\025\n\006output\"\010out_type*\001N\"\014\n\001N\022\003int(\0010\001\"\t\n\001T\022\004type\"\034\n\010out_type\022\004type\032\0020\003:\006\n\0042\002\003\t\nO\n\004Size\022\n\n\005input\"\001T\032\022\n\006output\"\010out_type\"\t\n\001T\022\004type\"\034\n\010out_type\022\004type\032\0020\003:\006\n\0042\002\003\t\na\n\005Slice\022\n\n\005input\"\001T\022\016\n\005begin\"\005Index\022\r\n\004size\"\005Index\032\013\n\006output\"\001T\"\
t\n\001T\022\004type\"\025\n\005Index\022\004type:\006\n\0042\002\003\t\n.\n\010Snapshot\022\n\n\005input\"\001T\032\013\n\006output\"\001T\"\t\n\001T\022\004type\n\177\n\014SpaceToBatch\022\n\n\005input\"\001T\022\025\n\010paddings\"\tTpaddings\032\013\n\006output\"\001T\"\t\n\001T\022\004type\"\035\n\tTpaddings\022\004type\032\0020\003:\006\n\0042\002\003\t\"\025\n\nblock_size\022\003int(\0010\002\n\251\001\n\016SpaceToBatchND\022\n\n\005input\"\001T\022\033\n\013block_shape\"\014Tblock_shape\022\025\n\010paddings\"\tTpaddings\032\013\n\006output\"\001T\"\t\n\001T\022\004type\" \n\014Tblock_shape\022\004type\032\0020\003:\006\n\0042\002\003\t\"\035\n\tTpaddings\022\004type\032\0020\003:\006\n\0042\002\003\t\n\205\001\n\014SpaceToDepth\022\n\n\005input\"\001T\032\013\n\006output\"\001T\"\t\n\001T\022\004type\"\025\n\nblock_size\022\003int(\0010\002\":\n\013data_format\022\006string\032\006\022\004NHWC:\033\n\031\022\004NHWC\022\004NCHW\022\013NCHW_VECT_C\n[\n\005Split\022\r\n\tsplit_dim\030\003\022\n\n\005value\"\001T\032\026\n\006output\"\001T*\tnum_split\"\024\n\tnum_split\022\003int(\0010\001\"\t\n\001T\022\004type\n\213\001\n\006SplitV\022\n\n\005value\"\001T\022\023\n\013size_splits\"\004Tlen\022\r\n\tsplit_dim\030\003\032\026\n\006output\"\001T*\tnum_split\"\024\n\tnum_split\022\003int(\0010\001\"\t\n\001T\022\004type\"\030\n\004Tlen\022\004type\032\0020\t:\006\n\0042\002\003\t\nN\n\007Squeeze\022\n\n\005input\"\001T\032\013\n\006output\"\001T\"\t\n\001T\022\004type\"\037\n\014squeeze_dims\022\tlist(int)\032\002\n\000(\001\n2\n\014StopGradient\022\n\n\005input\"\001T\032\013\n\006output\"\001T\"\t\n\001T\022\004type\n\366\001\n\014StridedSlice\022\n\n\005input\"\001T\022\016\n\005begin\"\005Index\022\014\n\003end\"\005Index\022\020\n\007strides\"\005Index\032\013\n\006output\"\001T\"\t\n\001T\022\004type\"\025\n\005Index\022\004type:\006\n\0042\002\003\t\"\025\n\nbegin_mask\022\003int\032\002\030\000\"\023\n\010end_mask\022\003int\032\002\030\000\"\030\n\rell
ipsis_mask\022\003int\032\002\030\000\"\030\n\rnew_axis_mask\022\003int\032\002\030\000\"\033\n\020shrink_axis_mask\022\003int\032\002\030\000\n\220\002\n\022StridedSliceAssign\022\013\n\003ref\"\001T\200\001\001\022\016\n\005begin\"\005Index\022\014\n\003end\"\005Index\022\020\n\007strides\"\005Index\022\n\n\005value\"\001T\032\022\n\noutput_ref\"\001T\200\001\001\"\t\n\001T\022\004type\"\025\n\005Index\022\004type:\006\n\0042\002\003\t\"\025\n\nbegin_mask\022\003int\032\002\030\000\"\023\n\010end_mask\022\003int\032\002\030\000\"\030\n\rellipsis_mask\022\003int\032\002\030\000\"\030\n\rnew_axis_mask\022\003int\032\002\030\000\"\033\n\020shrink_axis_mask\022\003int\032\002\030\000\n\207\002\n\020StridedSliceGrad\022\016\n\005shape\"\005Index\022\016\n\005begin\"\005Index\022\014\n\003end\"\005Index\022\020\n\007strides\"\005Index\022\007\n\002dy\"\001T\032\013\n\006output\"\001T\"\t\n\001T\022\004type\"\025\n\005Index\022\004type:\006\n\0042\002\003\t\"\025\n\nbegin_mask\022\003int\032\002\030\000\"\023\n\010end_mask\022\003int\032\002\030\000\"\030\n\rellipsis_mask\022\003int\032\002\030\000\"\030\n\rnew_axis_mask\022\003int\032\002\030\000\"\033\n\020shrink_axis_mask\022\003int\032\002\030\000\nt\n\020TensorScatterAdd\022\013\n\006tensor\"\001T\022\023\n\007indices\"\010Tindices\022\014\n\007updates\"\001T\032\013\n\006output\"\001T\"\t\n\001T\022\004type\"\030\n\010Tindices\022\004type:\006\n\0042\002\003\t\nt\n\020TensorScatterSub\022\013\n\006tensor\"\001T\022\023\n\007indices\"\010Tindices\022\014\n\007updates\"\001T\032\013\n\006output\"\001T\"\t\n\001T\022\004type\"\030\n\010Tindices\022\004type:\006\n\0042\002\003\t\nw\n\023TensorScatterUpdate\022\013\n\006tensor\"\001T\022\023\n\007indices\"\010Tindices\022\014\n\007updates\"\001T\032\013\n\006output\"\001T\"\t\n\001T\022\004type\"\030\n\010Tindices\022\004type:\006\n\0042\002\003\t\nc\n\004Tile\022\n\n\005input\"\001T\022\027\n\tmultiples\"\nTmultiples\032\013\n\006output\"\001T\"\t\n\001T\022\004type\"\
036\n\nTmultiples\022\004type\032\0020\003:\006\n\0042\002\003\t\nm\n\010TileGrad\022\n\n\005input\"\001T\022\r\n\tmultiples\030\003\032\013\n\006output\"\001T\"\t\n\001T\022\004typeB.\010\003\022*TileGrad has been replaced with reduce_sum\nP\n\tTranspose\022\006\n\001x\"\001T\022\r\n\004perm\"\005Tperm\032\006\n\001y\"\001T\"\t\n\001T\022\004type\"\031\n\005Tperm\022\004type\032\0020\003:\006\n\0042\002\003\t\nP\n\006Unique\022\006\n\001x\"\001T\032\006\n\001y\"\001T\032\016\n\003idx\"\007out_idx\"\t\n\001T\022\004type\"\033\n\007out_idx\022\004type\032\0020\003:\006\n\0042\002\003\t\n|\n\010UniqueV2\022\006\n\001x\"\001T\022\r\n\004axis\"\005Taxis\032\006\n\001y\"\001T\032\016\n\003idx\"\007out_idx\"\t\n\001T\022\004type\"\031\n\005Taxis\022\004type\032\0020\t:\006\n\0042\002\003\t\"\033\n\007out_idx\022\004type\032\0020\003:\006\n\0042\002\003\t\nl\n\020UniqueWithCounts\022\006\n\001x\"\001T\032\006\n\001y\"\001T\032\016\n\003idx\"\007out_idx\032\020\n\005count\"\007out_idx\"\t\n\001T\022\004type\"\033\n\007out_idx\022\004type\032\0020\003:\006\n\0042\002\003\t\n\230\001\n\022UniqueWithCountsV2\022\006\n\001x\"\001T\022\r\n\004axis\"\005Taxis\032\006\n\001y\"\001T\032\016\n\003idx\"\007out_idx\032\020\n\005count\"\007out_idx\"\t\n\001T\022\004type\"\031\n\005Taxis\022\004type\032\0020\t:\006\n\0042\002\003\t\"\033\n\007out_idx\022\004type\032\0020\003:\006\n\0042\002\003\t\nP\n\006Unpack\022\n\n\005value\"\001T\032\020\n\006output\"\001T*\003num\"\014\n\003num\022\003int(\001\"\t\n\001T\022\004type\"\017\n\004axis\022\003int\032\002\030\000\nW\n\014UnravelIndex\022\017\n\007indices\"\004Tidx\022\014\n\004dims\"\004Tidx\032\016\n\006output\"\004Tidx\"\030\n\004Tidx\022\004type\032\0020\003:\006\n\0042\002\003\t\nj\n\nUpperBound\022\022\n\rsorted_inputs\"\001T\022\013\n\006values\"\001T\032\022\n\006output\"\010out_type\"\t\n\001T\022\004type\"\034\n\010out_type\022\004type\032\0020\003:\006\n\0042\002\003\t\nE\n\005Where\022\n\n\005input\"\001T\032\t\n\005index\03
0\t\"%\n\001T\022\004type\032\0020\n:\026\n\0242\022\001\002\003\004\005\006\010\t\013\014\r\016\021\022\023\026\027\n\n&\n\tZerosLike\022\006\n\001x\"\001T\032\006\n\001y\"\001T\"\t\n\001T\022\004type")
[ "yuxuan@mit.edu" ]
yuxuan@mit.edu
6cab586c5fe18fc1b1339548aefc8d5ae4987248
77191e8a70c99048e319c35446f8538cb2242618
/train_val.py
d7bac05ead774ccb5c5d03bf3e8e93d806ab3c8d
[]
no_license
alexander-rakhlin/embl_project
d5a19ff25dc53ce82b48bb107c379b5ad9a49cd5
de3b1accf888ce4ecbb3f060182d39b095e42f65
refs/heads/master
2020-07-23T19:27:15.422250
2019-12-04T16:18:08
2019-12-04T16:18:08
137,078,576
0
0
null
null
null
null
UTF-8
Python
false
false
5,396
py
"""Training and inference entry points for the cell-crop classifier.

NOTE(review): depends on the project modules ``datagen``, ``models`` and
``utils`` plus the Keras API; behaviour is unchanged from the original script.
"""
from datagen import Iterator
from models import m46c, r34, xcpt
import numpy as np
import pandas as pd
import pickle
from pathlib import Path
from keras.callbacks import ModelCheckpoint, CSVLogger
from utils import double_division_tracks, curated_tracks

# ---- experiment configuration ------------------------------------------------
ROOT = Path('data/Timelapse_2019')
INTENSITY_COLS = ['GFP_20', 'Cy3_20']               # intensity columns carried through prediction
CHANNEL_ROOTS = [ROOT / d for d in ['DAPI', 'BF']]  # one image root per input channel
TARGET_COLUMN = 'sq20_cls2x2'
CROP_SZ = 48                                        # side of the square crop fed to the net
LR = 1e-3
MODEL = r34
CLASSES = 4
CHANNELS = 2
BATCH_SIZE = 32
EPOCHS = 20
VERBOSE = False
INIT_WEIGHTS = None  # 'checkpoints/checkpoint.r34.sz48.03-0.68.hdf5'
MODEL_CHECKPOINT = f'checkpoints/checkpoint.{MODEL.__name__}.sz{CROP_SZ}.{{epoch:02d}}-{{val_acc:.2f}}.hdf5'
CSV_LOGGER = CSVLogger(f'logs/{MODEL.__name__}.sz{CROP_SZ}.log', append=True)
FRAMES = range(0, 200, 1)

# Hold out every double-division track for validation; train on the rest.
VAL_TRACKS = list(double_division_tracks)
TRAIN_TRACKS = [t for t in curated_tracks if t not in VAL_TRACKS]

CELL_DF = pd.read_csv(ROOT / 'statistics_mean_std.csv')
DESCRIPTORS_PATH = ROOT / f'descriptors_all.{MODEL.__name__}.sz{CROP_SZ}.pkl'


def filter_cells(cell_df, frames='all', tracks='all'):
    """Return the rows of *cell_df* restricted to *frames* and *tracks*.

    The sentinel string ``'all'`` disables the corresponding filter.
    """
    if frames != 'all':
        cell_df = cell_df.loc[cell_df['FRAME'].isin(frames)]
    if tracks != 'all':
        cell_df = cell_df.loc[cell_df['TRACK_ID'].isin(tracks)]
    return cell_df


def train_test_split(cell_df, frames, train_tracks, test_tracks):
    """Split *cell_df* (within *frames*) into train/test partitions by TRACK_ID."""
    in_frames = cell_df['FRAME'].isin(frames)
    train_df = cell_df.loc[in_frames & cell_df['TRACK_ID'].isin(train_tracks)]
    test_df = cell_df.loc[in_frames & cell_df['TRACK_ID'].isin(test_tracks)]
    return train_df, test_df


def train():
    """Fit MODEL on the curated tracks, validating on the held-out tracks."""
    train_df, val_df = train_test_split(CELL_DF, FRAMES, TRAIN_TRACKS, VAL_TRACKS)
    model = MODEL(channels=CHANNELS, lr=LR, include_top=True, classes=CLASSES,
                  weights=INIT_WEIGHTS)

    train_gen = Iterator(CHANNEL_ROOTS, train_df, CROP_SZ, shuffle=True, seed=None,
                         infinite_loop=True, batch_size=BATCH_SIZE, classes=CLASSES,
                         target_column=TARGET_COLUMN, intensity_cols=INTENSITY_COLS,
                         output_intensities=False, output_df_index=False,
                         verbose=VERBOSE, gen_id='train')
    val_gen = Iterator(CHANNEL_ROOTS, val_df, CROP_SZ, shuffle=False, seed=None,
                       infinite_loop=False, batch_size=BATCH_SIZE, classes=CLASSES,
                       target_column=TARGET_COLUMN, intensity_cols=INTENSITY_COLS,
                       output_intensities=False, output_df_index=False,
                       verbose=VERBOSE, gen_id='val')

    # The validation generator is finite: drain it once into a single (x, y) pair.
    xs, ys = zip(*val_gen)
    validation_data = np.concatenate(xs), np.concatenate(ys)

    callbacks = [ModelCheckpoint(MODEL_CHECKPOINT, monitor='val_acc', save_best_only=True),
                 CSV_LOGGER]
    model.fit_generator(
        train_gen,
        steps_per_epoch=len(train_df) // BATCH_SIZE,
        epochs=EPOCHS,
        validation_data=validation_data,
        validation_steps=len(val_df) // BATCH_SIZE,
        workers=3,
        callbacks=callbacks
    )


def predict(weights_path, df, descriptors_path):
    """Run the trained network over *df* in one pass and pickle the outputs.

    Pickles the tuple (labels, descriptors, intensities, df_index) to
    *descriptors_path*.
    """
    model = MODEL(channels=CHANNELS, include_top=True, classes=CLASSES,
                  weights=weights_path)
    gen = Iterator(CHANNEL_ROOTS, df, CROP_SZ, shuffle=False, seed=None,
                   infinite_loop=False, batch_size=BATCH_SIZE, classes=CLASSES,
                   target_column=TARGET_COLUMN, intensity_cols=INTENSITY_COLS,
                   output_intensities=True, output_df_index=True, verbose=True,
                   gen_id='val')
    xs, ys, ints, idxs = zip(*gen)
    x = np.concatenate(xs)
    y = np.concatenate(ys)
    intensities = np.concatenate(ints)
    df_index = np.concatenate(idxs)
    descriptors = model.predict(x, batch_size=BATCH_SIZE)
    with open(descriptors_path, 'wb') as f:
        pickle.dump((y, descriptors, intensities, df_index), f)


def predict_batched(weights_path, df, descriptors_path):
    """Like predict(), but scores one batch at a time to bound memory use."""
    model = MODEL(channels=CHANNELS, include_top=True, classes=CLASSES,
                  weights=weights_path)
    gen = Iterator(CHANNEL_ROOTS, df, CROP_SZ, shuffle=False, seed=None,
                   infinite_loop=False, batch_size=BATCH_SIZE, classes=CLASSES,
                   target_column=TARGET_COLUMN, intensity_cols=INTENSITY_COLS,
                   output_intensities=True, output_df_index=True, verbose=True,
                   gen_id='val')
    y = []
    df_index = []
    intensities = []
    descriptors = []
    for x_b, y_b, int_b, idx_b in gen:
        descriptors.extend(model.predict_on_batch(x_b))
        y.extend(y_b)
        df_index.extend(idx_b)
        intensities.extend(int_b)
    y = np.array(y)
    df_index = np.array(df_index)
    intensities = np.array(intensities)
    descriptors = np.array(descriptors)
    with open(descriptors_path, 'wb') as f:
        pickle.dump((y, descriptors, intensities, df_index), f)


if __name__ == '__main__':
    # train()
    # _, pred_df = train_test_split(CELL_DF, frames=range(200), train_tracks=TRAIN_TRACKS, test_tracks=VAL_TRACKS)
    pred_df = filter_cells(CELL_DF, frames='all')
    predict_batched('checkpoints/checkpoint.r34.sz48.03-0.73.hdf5', pred_df,
                    DESCRIPTORS_PATH)
[ "rakhlin@gmx.net" ]
rakhlin@gmx.net
63d551ef4891915d2717ca602c39ab55c4d0e968
56b817cae5fdace44692046a3e2615dba611bb56
/WebcamSekilAlgilama.py
ae74a95b14e9cc6c333878733a123b0afa0af746
[]
no_license
ozgurnrttn/OpenCV
b2b29239502780df882626fc37e5c8ddc3d90f7f
c161bf7cbbcfc2ab996a85a1660a8e3c727c0e73
refs/heads/master
2023-05-04T08:12:21.474066
2021-05-20T13:10:07
2021-05-20T13:10:07
369,151,138
0
0
null
null
null
null
UTF-8
Python
false
false
2,783
py
"""Webcam shape detector: threshold by HSV colour, then label polygons.

Fixes over the original: the on-screen labels "Nanogon" and "Decaton" were
misspellings of "Nonagon" and "Decagon"; a failed camera grab (ret False)
previously crashed cv2.flip(None, 1) and is now handled; the long if/elif
chain over the vertex count is replaced by a lookup table.
"""
import cv2
import numpy as np


def nothing(x):
    """No-op callback; cv2.createTrackbar requires one."""
    pass


# Polygon name by approximated vertex count; anything not listed (i.e. more
# than 10 vertices) is treated as a circle, as in the original chain's else.
SHAPE_NAMES = {
    3: "Triangle",
    4: "Rectangle",
    5: "Pentagon",
    6: "Hexagon",
    7: "Heptagon",
    8: "Octagon",
    9: "Nonagon",
    10: "Decagon",
}

cap = cv2.VideoCapture(0)

# Sliders for the lower/upper HSV bounds used to build the colour mask.
cv2.namedWindow("Settings")
cv2.createTrackbar("Lower-Hue", "Settings", 0, 180, nothing)
cv2.createTrackbar("Lower-Saturation", "Settings", 0, 255, nothing)
cv2.createTrackbar("Lower-Value", "Settings", 0, 255, nothing)
cv2.createTrackbar("Upper-Hue", "Settings", 0, 180, nothing)
cv2.createTrackbar("Upper-Saturation", "Settings", 0, 255, nothing)
cv2.createTrackbar("Upper-Value", "Settings", 0, 255, nothing)
# Start with the upper bound wide open so the whole frame passes initially.
cv2.setTrackbarPos("Upper-Hue", "Settings", 180)
cv2.setTrackbarPos("Upper-Saturation", "Settings", 255)
cv2.setTrackbarPos("Upper-Value", "Settings", 255)

font = cv2.FONT_HERSHEY_SIMPLEX

while True:
    ret, frame = cap.read()
    if not ret:
        # Camera unplugged or no frame available: stop instead of crashing.
        break
    frame = cv2.flip(frame, 1)  # mirror for a natural webcam view
    hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)

    lh = cv2.getTrackbarPos("Lower-Hue", "Settings")
    ls = cv2.getTrackbarPos("Lower-Saturation", "Settings")
    lv = cv2.getTrackbarPos("Lower-Value", "Settings")
    uh = cv2.getTrackbarPos("Upper-Hue", "Settings")
    us = cv2.getTrackbarPos("Upper-Saturation", "Settings")
    uv = cv2.getTrackbarPos("Upper-Value", "Settings")

    lower_color = np.array([lh, ls, lv])
    upper_color = np.array([uh, us, uv])

    mask = cv2.inRange(hsv, lower_color, upper_color)
    kernel = np.ones((5, 5), np.uint8)
    mask = cv2.erode(mask, kernel)  # erode away speckle noise before contouring

    contours, _ = cv2.findContours(mask, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
    for cnt in contours:
        area = cv2.contourArea(cnt)
        if area <= 400:
            continue  # skip tiny contours (noise)
        # Approximate the contour with a coarse polygon; the vertex count
        # then identifies the shape.
        epsilon = 0.02 * cv2.arcLength(cnt, True)
        approx = cv2.approxPolyDP(cnt, epsilon, True)
        x = approx.ravel()[0]
        y = approx.ravel()[1]
        cv2.drawContours(frame, [approx], 0, (0, 0, 0), 5)
        label = SHAPE_NAMES.get(len(approx), "Circle")
        cv2.putText(frame, label, (x, y), font, 1, (0, 0, 0))

    cv2.imshow("frame", frame)
    cv2.imshow("mask", mask)
    if cv2.waitKey(3) & 0xFF == ord('q'):
        break

cap.release()
cv2.destroyAllWindows()
[ "ozturk96.zrzr@gmail.com" ]
ozturk96.zrzr@gmail.com
3be06eb873cdd1760ff6e5f63aa67790705e4936
70cdf0741a22c678401a306229003bf036ffe5a6
/ocbind/interfaces/interface/routed_vlan/ipv4/state/counters/__init__.py
fc80550902dca33ef1415b13bb6c12c3d63fa5ce
[]
no_license
zsblevins/nanog81-hackathon
5001e034339d6b0c6452ae2474f06916bcd715cf
1b64fd207dd69837f947094fbd6d6c1cea3a1070
refs/heads/main
2023-03-03T09:39:28.460000
2021-02-15T13:41:38
2021-02-15T13:41:38
336,698,856
2
0
null
null
null
null
UTF-8
Python
false
false
42,316
py
# -*- coding: utf-8 -*- from operator import attrgetter from pyangbind.lib.yangtypes import RestrictedPrecisionDecimalType from pyangbind.lib.yangtypes import RestrictedClassType from pyangbind.lib.yangtypes import TypedListType from pyangbind.lib.yangtypes import YANGBool from pyangbind.lib.yangtypes import YANGListType from pyangbind.lib.yangtypes import YANGDynClass from pyangbind.lib.yangtypes import ReferenceType from pyangbind.lib.base import PybindBase from collections import OrderedDict from decimal import Decimal from bitarray import bitarray import six # PY3 support of some PY2 keywords (needs improved) if six.PY3: import builtins as __builtin__ long = int elif six.PY2: import __builtin__ class counters(PybindBase): """ This class was auto-generated by the PythonClass plugin for PYANG from YANG module openconfig-interfaces - based on the path /interfaces/interface/routed-vlan/ipv4/state/counters. Each member element of the container is represented as a class variable - with a specific YANG type. YANG Description: Packet and byte counters for IP transmission and reception for the address family. 
""" __slots__ = ('_path_helper', '_extmethods', '__in_pkts','__in_octets','__in_error_pkts','__in_forwarded_pkts','__in_forwarded_octets','__in_discarded_pkts','__out_pkts','__out_octets','__out_error_pkts','__out_forwarded_pkts','__out_forwarded_octets','__out_discarded_pkts',) _yang_name = 'counters' _pybind_generated_by = 'container' def __init__(self, *args, **kwargs): self._path_helper = False self._extmethods = False self.__in_pkts = YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..18446744073709551615']}, int_size=64), is_leaf=True, yang_name="in-pkts", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/interfaces/ip', defining_module='openconfig-if-ip', yang_type='oc-yang:counter64', is_config=False) self.__in_octets = YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..18446744073709551615']}, int_size=64), is_leaf=True, yang_name="in-octets", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/interfaces/ip', defining_module='openconfig-if-ip', yang_type='oc-yang:counter64', is_config=False) self.__in_error_pkts = YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..18446744073709551615']}, int_size=64), is_leaf=True, yang_name="in-error-pkts", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/interfaces/ip', defining_module='openconfig-if-ip', yang_type='oc-yang:counter64', is_config=False) self.__in_forwarded_pkts = YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..18446744073709551615']}, int_size=64), is_leaf=True, yang_name="in-forwarded-pkts", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, 
namespace='http://openconfig.net/yang/interfaces/ip', defining_module='openconfig-if-ip', yang_type='oc-yang:counter64', is_config=False) self.__in_forwarded_octets = YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..18446744073709551615']}, int_size=64), is_leaf=True, yang_name="in-forwarded-octets", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/interfaces/ip', defining_module='openconfig-if-ip', yang_type='oc-yang:counter64', is_config=False) self.__in_discarded_pkts = YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..18446744073709551615']}, int_size=64), is_leaf=True, yang_name="in-discarded-pkts", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/interfaces/ip', defining_module='openconfig-if-ip', yang_type='oc-yang:counter64', is_config=False) self.__out_pkts = YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..18446744073709551615']}, int_size=64), is_leaf=True, yang_name="out-pkts", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/interfaces/ip', defining_module='openconfig-if-ip', yang_type='oc-yang:counter64', is_config=False) self.__out_octets = YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..18446744073709551615']}, int_size=64), is_leaf=True, yang_name="out-octets", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/interfaces/ip', defining_module='openconfig-if-ip', yang_type='oc-yang:counter64', is_config=False) self.__out_error_pkts = YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..18446744073709551615']}, int_size=64), is_leaf=True, 
yang_name="out-error-pkts", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/interfaces/ip', defining_module='openconfig-if-ip', yang_type='oc-yang:counter64', is_config=False) self.__out_forwarded_pkts = YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..18446744073709551615']}, int_size=64), is_leaf=True, yang_name="out-forwarded-pkts", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/interfaces/ip', defining_module='openconfig-if-ip', yang_type='oc-yang:counter64', is_config=False) self.__out_forwarded_octets = YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..18446744073709551615']}, int_size=64), is_leaf=True, yang_name="out-forwarded-octets", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/interfaces/ip', defining_module='openconfig-if-ip', yang_type='oc-yang:counter64', is_config=False) self.__out_discarded_pkts = YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..18446744073709551615']}, int_size=64), is_leaf=True, yang_name="out-discarded-pkts", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/interfaces/ip', defining_module='openconfig-if-ip', yang_type='oc-yang:counter64', is_config=False) load = kwargs.pop("load", None) if args: if len(args) > 1: raise TypeError("cannot create a YANG container with >1 argument") all_attr = True for e in self._pyangbind_elements: if not hasattr(args[0], e): all_attr = False break if not all_attr: raise ValueError("Supplied object did not have the correct attributes") for e in self._pyangbind_elements: nobj = getattr(args[0], e) if nobj._changed() is False: continue setmethod = getattr(self, 
"_set_%s" % e) if load is None: setmethod(getattr(args[0], e)) else: setmethod(getattr(args[0], e), load=load) def _path(self): if hasattr(self, "_parent"): return self._parent._path()+[self._yang_name] else: return ['interfaces', 'interface', 'routed-vlan', 'ipv4', 'state', 'counters'] def _get_in_pkts(self): """ Getter method for in_pkts, mapped from YANG variable /interfaces/interface/routed_vlan/ipv4/state/counters/in_pkts (oc-yang:counter64) YANG Description: The total number of IP packets received for the specified address family, including those received in error """ return self.__in_pkts def _set_in_pkts(self, v, load=False): """ Setter method for in_pkts, mapped from YANG variable /interfaces/interface/routed_vlan/ipv4/state/counters/in_pkts (oc-yang:counter64) If this variable is read-only (config: false) in the source YANG file, then _set_in_pkts is considered as a private method. Backends looking to populate this variable should do so via calling thisObj._set_in_pkts() directly. 
YANG Description: The total number of IP packets received for the specified address family, including those received in error """ if hasattr(v, "_utype"): v = v._utype(v) try: t = YANGDynClass(v,base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..18446744073709551615']}, int_size=64), is_leaf=True, yang_name="in-pkts", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/interfaces/ip', defining_module='openconfig-if-ip', yang_type='oc-yang:counter64', is_config=False) except (TypeError, ValueError): raise ValueError({ 'error-string': """in_pkts must be of a type compatible with oc-yang:counter64""", 'defined-type': "oc-yang:counter64", 'generated-type': """YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..18446744073709551615']}, int_size=64), is_leaf=True, yang_name="in-pkts", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/interfaces/ip', defining_module='openconfig-if-ip', yang_type='oc-yang:counter64', is_config=False)""", }) self.__in_pkts = t if hasattr(self, '_set'): self._set() def _unset_in_pkts(self): self.__in_pkts = YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..18446744073709551615']}, int_size=64), is_leaf=True, yang_name="in-pkts", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/interfaces/ip', defining_module='openconfig-if-ip', yang_type='oc-yang:counter64', is_config=False) def _get_in_octets(self): """ Getter method for in_octets, mapped from YANG variable /interfaces/interface/routed_vlan/ipv4/state/counters/in_octets (oc-yang:counter64) YANG Description: The total number of octets received in input IP packets for the specified address family, including those received in error. 
""" return self.__in_octets def _set_in_octets(self, v, load=False): """ Setter method for in_octets, mapped from YANG variable /interfaces/interface/routed_vlan/ipv4/state/counters/in_octets (oc-yang:counter64) If this variable is read-only (config: false) in the source YANG file, then _set_in_octets is considered as a private method. Backends looking to populate this variable should do so via calling thisObj._set_in_octets() directly. YANG Description: The total number of octets received in input IP packets for the specified address family, including those received in error. """ if hasattr(v, "_utype"): v = v._utype(v) try: t = YANGDynClass(v,base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..18446744073709551615']}, int_size=64), is_leaf=True, yang_name="in-octets", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/interfaces/ip', defining_module='openconfig-if-ip', yang_type='oc-yang:counter64', is_config=False) except (TypeError, ValueError): raise ValueError({ 'error-string': """in_octets must be of a type compatible with oc-yang:counter64""", 'defined-type': "oc-yang:counter64", 'generated-type': """YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..18446744073709551615']}, int_size=64), is_leaf=True, yang_name="in-octets", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/interfaces/ip', defining_module='openconfig-if-ip', yang_type='oc-yang:counter64', is_config=False)""", }) self.__in_octets = t if hasattr(self, '_set'): self._set() def _unset_in_octets(self): self.__in_octets = YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..18446744073709551615']}, int_size=64), is_leaf=True, yang_name="in-octets", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, 
namespace='http://openconfig.net/yang/interfaces/ip', defining_module='openconfig-if-ip', yang_type='oc-yang:counter64', is_config=False) def _get_in_error_pkts(self): """ Getter method for in_error_pkts, mapped from YANG variable /interfaces/interface/routed_vlan/ipv4/state/counters/in_error_pkts (oc-yang:counter64) YANG Description: Number of IP packets discarded due to errors for the specified address family, including errors in the IP header, no route found to the IP destination, invalid address, unknown protocol, etc. """ return self.__in_error_pkts def _set_in_error_pkts(self, v, load=False): """ Setter method for in_error_pkts, mapped from YANG variable /interfaces/interface/routed_vlan/ipv4/state/counters/in_error_pkts (oc-yang:counter64) If this variable is read-only (config: false) in the source YANG file, then _set_in_error_pkts is considered as a private method. Backends looking to populate this variable should do so via calling thisObj._set_in_error_pkts() directly. YANG Description: Number of IP packets discarded due to errors for the specified address family, including errors in the IP header, no route found to the IP destination, invalid address, unknown protocol, etc. 
""" if hasattr(v, "_utype"): v = v._utype(v) try: t = YANGDynClass(v,base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..18446744073709551615']}, int_size=64), is_leaf=True, yang_name="in-error-pkts", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/interfaces/ip', defining_module='openconfig-if-ip', yang_type='oc-yang:counter64', is_config=False) except (TypeError, ValueError): raise ValueError({ 'error-string': """in_error_pkts must be of a type compatible with oc-yang:counter64""", 'defined-type': "oc-yang:counter64", 'generated-type': """YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..18446744073709551615']}, int_size=64), is_leaf=True, yang_name="in-error-pkts", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/interfaces/ip', defining_module='openconfig-if-ip', yang_type='oc-yang:counter64', is_config=False)""", }) self.__in_error_pkts = t if hasattr(self, '_set'): self._set() def _unset_in_error_pkts(self): self.__in_error_pkts = YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..18446744073709551615']}, int_size=64), is_leaf=True, yang_name="in-error-pkts", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/interfaces/ip', defining_module='openconfig-if-ip', yang_type='oc-yang:counter64', is_config=False) def _get_in_forwarded_pkts(self): """ Getter method for in_forwarded_pkts, mapped from YANG variable /interfaces/interface/routed_vlan/ipv4/state/counters/in_forwarded_pkts (oc-yang:counter64) YANG Description: The number of input packets for which the device was not their final IP destination and for which the device attempted to find a route to forward them to that final destination. 
""" return self.__in_forwarded_pkts def _set_in_forwarded_pkts(self, v, load=False): """ Setter method for in_forwarded_pkts, mapped from YANG variable /interfaces/interface/routed_vlan/ipv4/state/counters/in_forwarded_pkts (oc-yang:counter64) If this variable is read-only (config: false) in the source YANG file, then _set_in_forwarded_pkts is considered as a private method. Backends looking to populate this variable should do so via calling thisObj._set_in_forwarded_pkts() directly. YANG Description: The number of input packets for which the device was not their final IP destination and for which the device attempted to find a route to forward them to that final destination. """ if hasattr(v, "_utype"): v = v._utype(v) try: t = YANGDynClass(v,base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..18446744073709551615']}, int_size=64), is_leaf=True, yang_name="in-forwarded-pkts", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/interfaces/ip', defining_module='openconfig-if-ip', yang_type='oc-yang:counter64', is_config=False) except (TypeError, ValueError): raise ValueError({ 'error-string': """in_forwarded_pkts must be of a type compatible with oc-yang:counter64""", 'defined-type': "oc-yang:counter64", 'generated-type': """YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..18446744073709551615']}, int_size=64), is_leaf=True, yang_name="in-forwarded-pkts", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/interfaces/ip', defining_module='openconfig-if-ip', yang_type='oc-yang:counter64', is_config=False)""", }) self.__in_forwarded_pkts = t if hasattr(self, '_set'): self._set() def _unset_in_forwarded_pkts(self): self.__in_forwarded_pkts = YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..18446744073709551615']}, 
int_size=64), is_leaf=True, yang_name="in-forwarded-pkts", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/interfaces/ip', defining_module='openconfig-if-ip', yang_type='oc-yang:counter64', is_config=False) def _get_in_forwarded_octets(self): """ Getter method for in_forwarded_octets, mapped from YANG variable /interfaces/interface/routed_vlan/ipv4/state/counters/in_forwarded_octets (oc-yang:counter64) YANG Description: The number of octets received in input IP packets for the specified address family for which the device was not their final IP destination and for which the device attempted to find a route to forward them to that final destination. """ return self.__in_forwarded_octets def _set_in_forwarded_octets(self, v, load=False): """ Setter method for in_forwarded_octets, mapped from YANG variable /interfaces/interface/routed_vlan/ipv4/state/counters/in_forwarded_octets (oc-yang:counter64) If this variable is read-only (config: false) in the source YANG file, then _set_in_forwarded_octets is considered as a private method. Backends looking to populate this variable should do so via calling thisObj._set_in_forwarded_octets() directly. YANG Description: The number of octets received in input IP packets for the specified address family for which the device was not their final IP destination and for which the device attempted to find a route to forward them to that final destination. 
""" if hasattr(v, "_utype"): v = v._utype(v) try: t = YANGDynClass(v,base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..18446744073709551615']}, int_size=64), is_leaf=True, yang_name="in-forwarded-octets", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/interfaces/ip', defining_module='openconfig-if-ip', yang_type='oc-yang:counter64', is_config=False) except (TypeError, ValueError): raise ValueError({ 'error-string': """in_forwarded_octets must be of a type compatible with oc-yang:counter64""", 'defined-type': "oc-yang:counter64", 'generated-type': """YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..18446744073709551615']}, int_size=64), is_leaf=True, yang_name="in-forwarded-octets", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/interfaces/ip', defining_module='openconfig-if-ip', yang_type='oc-yang:counter64', is_config=False)""", }) self.__in_forwarded_octets = t if hasattr(self, '_set'): self._set() def _unset_in_forwarded_octets(self): self.__in_forwarded_octets = YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..18446744073709551615']}, int_size=64), is_leaf=True, yang_name="in-forwarded-octets", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/interfaces/ip', defining_module='openconfig-if-ip', yang_type='oc-yang:counter64', is_config=False) def _get_in_discarded_pkts(self): """ Getter method for in_discarded_pkts, mapped from YANG variable /interfaces/interface/routed_vlan/ipv4/state/counters/in_discarded_pkts (oc-yang:counter64) YANG Description: The number of input IP packets for the specified address family, for which no problems were encountered to prevent their continued processing, but were discarded (e.g., for 
lack of buffer space). """ return self.__in_discarded_pkts def _set_in_discarded_pkts(self, v, load=False): """ Setter method for in_discarded_pkts, mapped from YANG variable /interfaces/interface/routed_vlan/ipv4/state/counters/in_discarded_pkts (oc-yang:counter64) If this variable is read-only (config: false) in the source YANG file, then _set_in_discarded_pkts is considered as a private method. Backends looking to populate this variable should do so via calling thisObj._set_in_discarded_pkts() directly. YANG Description: The number of input IP packets for the specified address family, for which no problems were encountered to prevent their continued processing, but were discarded (e.g., for lack of buffer space). """ if hasattr(v, "_utype"): v = v._utype(v) try: t = YANGDynClass(v,base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..18446744073709551615']}, int_size=64), is_leaf=True, yang_name="in-discarded-pkts", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/interfaces/ip', defining_module='openconfig-if-ip', yang_type='oc-yang:counter64', is_config=False) except (TypeError, ValueError): raise ValueError({ 'error-string': """in_discarded_pkts must be of a type compatible with oc-yang:counter64""", 'defined-type': "oc-yang:counter64", 'generated-type': """YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..18446744073709551615']}, int_size=64), is_leaf=True, yang_name="in-discarded-pkts", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/interfaces/ip', defining_module='openconfig-if-ip', yang_type='oc-yang:counter64', is_config=False)""", }) self.__in_discarded_pkts = t if hasattr(self, '_set'): self._set() def _unset_in_discarded_pkts(self): self.__in_discarded_pkts = YANGDynClass(base=RestrictedClassType(base_type=long, 
restriction_dict={'range': ['0..18446744073709551615']}, int_size=64), is_leaf=True, yang_name="in-discarded-pkts", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/interfaces/ip', defining_module='openconfig-if-ip', yang_type='oc-yang:counter64', is_config=False) def _get_out_pkts(self): """ Getter method for out_pkts, mapped from YANG variable /interfaces/interface/routed_vlan/ipv4/state/counters/out_pkts (oc-yang:counter64) YANG Description: The total number of IP packets for the specified address family that the device supplied to the lower layers for transmission. This includes packets generated locally and those forwarded by the device. """ return self.__out_pkts def _set_out_pkts(self, v, load=False): """ Setter method for out_pkts, mapped from YANG variable /interfaces/interface/routed_vlan/ipv4/state/counters/out_pkts (oc-yang:counter64) If this variable is read-only (config: false) in the source YANG file, then _set_out_pkts is considered as a private method. Backends looking to populate this variable should do so via calling thisObj._set_out_pkts() directly. YANG Description: The total number of IP packets for the specified address family that the device supplied to the lower layers for transmission. This includes packets generated locally and those forwarded by the device. 
""" if hasattr(v, "_utype"): v = v._utype(v) try: t = YANGDynClass(v,base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..18446744073709551615']}, int_size=64), is_leaf=True, yang_name="out-pkts", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/interfaces/ip', defining_module='openconfig-if-ip', yang_type='oc-yang:counter64', is_config=False) except (TypeError, ValueError): raise ValueError({ 'error-string': """out_pkts must be of a type compatible with oc-yang:counter64""", 'defined-type': "oc-yang:counter64", 'generated-type': """YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..18446744073709551615']}, int_size=64), is_leaf=True, yang_name="out-pkts", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/interfaces/ip', defining_module='openconfig-if-ip', yang_type='oc-yang:counter64', is_config=False)""", }) self.__out_pkts = t if hasattr(self, '_set'): self._set() def _unset_out_pkts(self): self.__out_pkts = YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..18446744073709551615']}, int_size=64), is_leaf=True, yang_name="out-pkts", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/interfaces/ip', defining_module='openconfig-if-ip', yang_type='oc-yang:counter64', is_config=False) def _get_out_octets(self): """ Getter method for out_octets, mapped from YANG variable /interfaces/interface/routed_vlan/ipv4/state/counters/out_octets (oc-yang:counter64) YANG Description: The total number of octets in IP packets for the specified address family that the device supplied to the lower layers for transmission. This includes packets generated locally and those forwarded by the device. 
""" return self.__out_octets def _set_out_octets(self, v, load=False): """ Setter method for out_octets, mapped from YANG variable /interfaces/interface/routed_vlan/ipv4/state/counters/out_octets (oc-yang:counter64) If this variable is read-only (config: false) in the source YANG file, then _set_out_octets is considered as a private method. Backends looking to populate this variable should do so via calling thisObj._set_out_octets() directly. YANG Description: The total number of octets in IP packets for the specified address family that the device supplied to the lower layers for transmission. This includes packets generated locally and those forwarded by the device. """ if hasattr(v, "_utype"): v = v._utype(v) try: t = YANGDynClass(v,base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..18446744073709551615']}, int_size=64), is_leaf=True, yang_name="out-octets", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/interfaces/ip', defining_module='openconfig-if-ip', yang_type='oc-yang:counter64', is_config=False) except (TypeError, ValueError): raise ValueError({ 'error-string': """out_octets must be of a type compatible with oc-yang:counter64""", 'defined-type': "oc-yang:counter64", 'generated-type': """YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..18446744073709551615']}, int_size=64), is_leaf=True, yang_name="out-octets", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/interfaces/ip', defining_module='openconfig-if-ip', yang_type='oc-yang:counter64', is_config=False)""", }) self.__out_octets = t if hasattr(self, '_set'): self._set() def _unset_out_octets(self): self.__out_octets = YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..18446744073709551615']}, int_size=64), is_leaf=True, yang_name="out-octets", 
parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/interfaces/ip', defining_module='openconfig-if-ip', yang_type='oc-yang:counter64', is_config=False) def _get_out_error_pkts(self): """ Getter method for out_error_pkts, mapped from YANG variable /interfaces/interface/routed_vlan/ipv4/state/counters/out_error_pkts (oc-yang:counter64) YANG Description: Number of IP packets for the specified address family locally generated and discarded due to errors, including no route found to the IP destination. """ return self.__out_error_pkts def _set_out_error_pkts(self, v, load=False): """ Setter method for out_error_pkts, mapped from YANG variable /interfaces/interface/routed_vlan/ipv4/state/counters/out_error_pkts (oc-yang:counter64) If this variable is read-only (config: false) in the source YANG file, then _set_out_error_pkts is considered as a private method. Backends looking to populate this variable should do so via calling thisObj._set_out_error_pkts() directly. YANG Description: Number of IP packets for the specified address family locally generated and discarded due to errors, including no route found to the IP destination. 
""" if hasattr(v, "_utype"): v = v._utype(v) try: t = YANGDynClass(v,base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..18446744073709551615']}, int_size=64), is_leaf=True, yang_name="out-error-pkts", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/interfaces/ip', defining_module='openconfig-if-ip', yang_type='oc-yang:counter64', is_config=False) except (TypeError, ValueError): raise ValueError({ 'error-string': """out_error_pkts must be of a type compatible with oc-yang:counter64""", 'defined-type': "oc-yang:counter64", 'generated-type': """YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..18446744073709551615']}, int_size=64), is_leaf=True, yang_name="out-error-pkts", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/interfaces/ip', defining_module='openconfig-if-ip', yang_type='oc-yang:counter64', is_config=False)""", }) self.__out_error_pkts = t if hasattr(self, '_set'): self._set() def _unset_out_error_pkts(self): self.__out_error_pkts = YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..18446744073709551615']}, int_size=64), is_leaf=True, yang_name="out-error-pkts", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/interfaces/ip', defining_module='openconfig-if-ip', yang_type='oc-yang:counter64', is_config=False) def _get_out_forwarded_pkts(self): """ Getter method for out_forwarded_pkts, mapped from YANG variable /interfaces/interface/routed_vlan/ipv4/state/counters/out_forwarded_pkts (oc-yang:counter64) YANG Description: The number of packets for which this entity was not their final IP destination and for which it was successful in finding a path to their final destination. 
""" return self.__out_forwarded_pkts def _set_out_forwarded_pkts(self, v, load=False): """ Setter method for out_forwarded_pkts, mapped from YANG variable /interfaces/interface/routed_vlan/ipv4/state/counters/out_forwarded_pkts (oc-yang:counter64) If this variable is read-only (config: false) in the source YANG file, then _set_out_forwarded_pkts is considered as a private method. Backends looking to populate this variable should do so via calling thisObj._set_out_forwarded_pkts() directly. YANG Description: The number of packets for which this entity was not their final IP destination and for which it was successful in finding a path to their final destination. """ if hasattr(v, "_utype"): v = v._utype(v) try: t = YANGDynClass(v,base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..18446744073709551615']}, int_size=64), is_leaf=True, yang_name="out-forwarded-pkts", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/interfaces/ip', defining_module='openconfig-if-ip', yang_type='oc-yang:counter64', is_config=False) except (TypeError, ValueError): raise ValueError({ 'error-string': """out_forwarded_pkts must be of a type compatible with oc-yang:counter64""", 'defined-type': "oc-yang:counter64", 'generated-type': """YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..18446744073709551615']}, int_size=64), is_leaf=True, yang_name="out-forwarded-pkts", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/interfaces/ip', defining_module='openconfig-if-ip', yang_type='oc-yang:counter64', is_config=False)""", }) self.__out_forwarded_pkts = t if hasattr(self, '_set'): self._set() def _unset_out_forwarded_pkts(self): self.__out_forwarded_pkts = YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..18446744073709551615']}, int_size=64), 
is_leaf=True, yang_name="out-forwarded-pkts", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/interfaces/ip', defining_module='openconfig-if-ip', yang_type='oc-yang:counter64', is_config=False) def _get_out_forwarded_octets(self): """ Getter method for out_forwarded_octets, mapped from YANG variable /interfaces/interface/routed_vlan/ipv4/state/counters/out_forwarded_octets (oc-yang:counter64) YANG Description: The number of octets in packets for which this entity was not their final IP destination and for which it was successful in finding a path to their final destination. """ return self.__out_forwarded_octets def _set_out_forwarded_octets(self, v, load=False): """ Setter method for out_forwarded_octets, mapped from YANG variable /interfaces/interface/routed_vlan/ipv4/state/counters/out_forwarded_octets (oc-yang:counter64) If this variable is read-only (config: false) in the source YANG file, then _set_out_forwarded_octets is considered as a private method. Backends looking to populate this variable should do so via calling thisObj._set_out_forwarded_octets() directly. YANG Description: The number of octets in packets for which this entity was not their final IP destination and for which it was successful in finding a path to their final destination. 
""" if hasattr(v, "_utype"): v = v._utype(v) try: t = YANGDynClass(v,base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..18446744073709551615']}, int_size=64), is_leaf=True, yang_name="out-forwarded-octets", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/interfaces/ip', defining_module='openconfig-if-ip', yang_type='oc-yang:counter64', is_config=False) except (TypeError, ValueError): raise ValueError({ 'error-string': """out_forwarded_octets must be of a type compatible with oc-yang:counter64""", 'defined-type': "oc-yang:counter64", 'generated-type': """YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..18446744073709551615']}, int_size=64), is_leaf=True, yang_name="out-forwarded-octets", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/interfaces/ip', defining_module='openconfig-if-ip', yang_type='oc-yang:counter64', is_config=False)""", }) self.__out_forwarded_octets = t if hasattr(self, '_set'): self._set() def _unset_out_forwarded_octets(self): self.__out_forwarded_octets = YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..18446744073709551615']}, int_size=64), is_leaf=True, yang_name="out-forwarded-octets", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/interfaces/ip', defining_module='openconfig-if-ip', yang_type='oc-yang:counter64', is_config=False) def _get_out_discarded_pkts(self): """ Getter method for out_discarded_pkts, mapped from YANG variable /interfaces/interface/routed_vlan/ipv4/state/counters/out_discarded_pkts (oc-yang:counter64) YANG Description: The number of output IP packets for the specified address family for which no problem was encountered to prevent their transmission to their destination, but were 
discarded (e.g., for lack of buffer space). """ return self.__out_discarded_pkts def _set_out_discarded_pkts(self, v, load=False): """ Setter method for out_discarded_pkts, mapped from YANG variable /interfaces/interface/routed_vlan/ipv4/state/counters/out_discarded_pkts (oc-yang:counter64) If this variable is read-only (config: false) in the source YANG file, then _set_out_discarded_pkts is considered as a private method. Backends looking to populate this variable should do so via calling thisObj._set_out_discarded_pkts() directly. YANG Description: The number of output IP packets for the specified address family for which no problem was encountered to prevent their transmission to their destination, but were discarded (e.g., for lack of buffer space). """ if hasattr(v, "_utype"): v = v._utype(v) try: t = YANGDynClass(v,base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..18446744073709551615']}, int_size=64), is_leaf=True, yang_name="out-discarded-pkts", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/interfaces/ip', defining_module='openconfig-if-ip', yang_type='oc-yang:counter64', is_config=False) except (TypeError, ValueError): raise ValueError({ 'error-string': """out_discarded_pkts must be of a type compatible with oc-yang:counter64""", 'defined-type': "oc-yang:counter64", 'generated-type': """YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..18446744073709551615']}, int_size=64), is_leaf=True, yang_name="out-discarded-pkts", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/interfaces/ip', defining_module='openconfig-if-ip', yang_type='oc-yang:counter64', is_config=False)""", }) self.__out_discarded_pkts = t if hasattr(self, '_set'): self._set() def _unset_out_discarded_pkts(self): self.__out_discarded_pkts = 
YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..18446744073709551615']}, int_size=64), is_leaf=True, yang_name="out-discarded-pkts", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/interfaces/ip', defining_module='openconfig-if-ip', yang_type='oc-yang:counter64', is_config=False) in_pkts = __builtin__.property(_get_in_pkts) in_octets = __builtin__.property(_get_in_octets) in_error_pkts = __builtin__.property(_get_in_error_pkts) in_forwarded_pkts = __builtin__.property(_get_in_forwarded_pkts) in_forwarded_octets = __builtin__.property(_get_in_forwarded_octets) in_discarded_pkts = __builtin__.property(_get_in_discarded_pkts) out_pkts = __builtin__.property(_get_out_pkts) out_octets = __builtin__.property(_get_out_octets) out_error_pkts = __builtin__.property(_get_out_error_pkts) out_forwarded_pkts = __builtin__.property(_get_out_forwarded_pkts) out_forwarded_octets = __builtin__.property(_get_out_forwarded_octets) out_discarded_pkts = __builtin__.property(_get_out_discarded_pkts) _pyangbind_elements = OrderedDict([('in_pkts', in_pkts), ('in_octets', in_octets), ('in_error_pkts', in_error_pkts), ('in_forwarded_pkts', in_forwarded_pkts), ('in_forwarded_octets', in_forwarded_octets), ('in_discarded_pkts', in_discarded_pkts), ('out_pkts', out_pkts), ('out_octets', out_octets), ('out_error_pkts', out_error_pkts), ('out_forwarded_pkts', out_forwarded_pkts), ('out_forwarded_octets', out_forwarded_octets), ('out_discarded_pkts', out_discarded_pkts), ])
[ "zblevins@netflix.com" ]
zblevins@netflix.com
1bc2bad1c8d403cdc99de557444a6e0a0f503eb2
fe3759747f709a41e5ff3acf78872dd6b74f772a
/samples/openapi3/client/petstore/python-experimental/petstore_api/model/animal.py
81432c292c6459b54e18f5be8a654084c4f140d5
[ "Apache-2.0" ]
permissive
Januson/openapi-generator
c50e3b52765e41adba9712d745918cea39dfa490
5b6b4c9d4829b57716741dc35b3f1033e5483784
refs/heads/master
2022-10-19T04:16:38.042495
2022-04-23T08:42:21
2022-04-23T08:42:21
238,659,737
0
0
Apache-2.0
2023-09-05T01:01:23
2020-02-06T10:12:38
Java
UTF-8
Python
false
false
2,359
py
# coding: utf-8 """ OpenAPI Petstore This spec is mainly for testing Petstore server and contains fake endpoints, models. Please do not use this for any other purpose. Special characters: \" \\ # noqa: E501 The version of the OpenAPI document: 1.0.0 Generated by: https://openapi-generator.tech """ import re # noqa: F401 import sys # noqa: F401 import typing # noqa: F401 from frozendict import frozendict # noqa: F401 import decimal # noqa: F401 from datetime import date, datetime # noqa: F401 from frozendict import frozendict # noqa: F401 from petstore_api.schemas import ( # noqa: F401 AnyTypeSchema, ComposedSchema, DictSchema, ListSchema, StrSchema, IntSchema, Int32Schema, Int64Schema, Float32Schema, Float64Schema, NumberSchema, UUIDSchema, DateSchema, DateTimeSchema, DecimalSchema, BoolSchema, BinarySchema, NoneSchema, none_type, Configuration, Unset, unset, ComposedBase, ListBase, DictBase, NoneBase, StrBase, IntBase, Int32Base, Int64Base, Float32Base, Float64Base, NumberBase, UUIDBase, DateBase, DateTimeBase, BoolBase, BinaryBase, Schema, _SchemaValidator, _SchemaTypeChecker, _SchemaEnumMaker ) class Animal( DictSchema ): """NOTE: This class is auto generated by OpenAPI Generator. Ref: https://openapi-generator.tech Do not edit the class manually. """ _required_property_names = set(( 'className', )) className = StrSchema color = StrSchema @classmethod @property def _discriminator(cls): return { 'className': { 'Cat': Cat, 'Dog': Dog, } } def __new__( cls, *args: typing.Union[dict, frozendict, ], className: className, color: typing.Union[color, Unset] = unset, _configuration: typing.Optional[Configuration] = None, **kwargs: typing.Type[Schema], ) -> 'Animal': return super().__new__( cls, *args, className=className, color=color, _configuration=_configuration, **kwargs, ) from petstore_api.model.cat import Cat from petstore_api.model.dog import Dog
[ "noreply@github.com" ]
noreply@github.com
8ccc55335ccb38651211ca0b39eaa69cc7912ec9
d2f94613b342c143554e0d1649244f71f634cfe2
/TOPSIDE/MATE_2019/auto-ROV.py
2bed4354a8a84a479f8e9a820a2de5f20a9c80aa
[]
no_license
jacksonsugar/ROV_2019
f79f8757289d35c94b71466922505889a68ed851
186e70864da354195ebc4aeb2d9336c1072b8a74
refs/heads/master
2022-03-11T10:48:02.454006
2019-10-24T15:59:13
2019-10-24T15:59:13
216,714,409
1
0
null
null
null
null
UTF-8
Python
false
false
5,070
py
#!/usr/bin/env python import numpy as np import cv2 import math import gi import os import rospy from std_msgs.msg import String import imutils from skimage import exposure from geometry_msgs.msg import Twist import math font = cv2.FONT_HERSHEY_COMPLEX i = 0 depth = 0 boundaries = [([17, 15, 100], [50, 56, 200])] #lower = [160, 100, 40] #upper = [179, 255, 255] lower = [1, 100, 40] upper = [30, 255, 255] # Capture Video and set resolution pipeline_string = "udpsrc port=5000 ! application/x-rtp,encoding-name=JPEG,payload=26 ! rtpjpegdepay ! jpegdec ! videoconvert ! appsink " video_capture = cv2.VideoCapture(pipeline_string, cv2.CAP_GSTREAMER) bigline = 0 def callback(data): rospy.loginfo('%s', data.data) global depth #global i depth = data.data #depth = float(depth) def listener(): # In ROS, nodes are uniquely named. If two nodes with the same # name are launched, the previous one is kicked off. The # anonymous=True flag means that rospy will choose a unique # name for our 'listener' node so that multiple listeners can # run simultaneously. 
rospy.init_node('listener', anonymous=True) rospy.Subscriber('pressure', String, callback) # spin() simply keeps python from exiting until this node is stopped # in this case we will avoid this at all costs #rospy.spin() if __name__ == '__main__': while video_capture.isOpened(): # Where the feature detection goes ret, img = video_capture.read() imCopy = img.copy() img = cv2.GaussianBlur(img, (5, 5), 0) roi = img[100:380, 200:600] if not ret: print('empty frame') hsv = cv2.cvtColor(roi, cv2.COLOR_BGR2HSV) lower_red = np.array(lower) upper_red = np.array(upper) mask = cv2.inRange(hsv, lower_red, upper_red) output = cv2.bitwise_and(roi, roi, mask = mask) cv2.line(img,(400,600),(400,0),(0,255,0),2) cv2.line(img,(380,300),(420,300),(0,255,0),2) forward = 0 turn = 0 updown = 0 edges = cv2.Canny(mask, 75, 150) lines = cv2.HoughLinesP(edges, 1, np.pi/180, 50, maxLineGap=50) if lines is not None: bigline = 0 for line in lines: x1, y1, x2, y2 = line[0] x_diff = x2 - x1 y_diff = y2 - y1 length = abs(math.tan(x_diff/y_diff)) if length >= bigline: bigline = length x1big, y1big, x2big, y2big = line[0] angle = math.atan2(y2big - y1big, x2big - x1big) * 180 print angle if angle < 245 and angle > 0: print "LEFT" turn = .2 elif angle > -245 and angle < 0: print "RIGHT" turn = -.2 else: print "STRAIGHT" forward = .2 print "X1big = %s" % x1big cv2.line(roi, (x1big, y1big), (x2big, y2big), (255, 0, 0), 5) listener() if depth != 0 and i == 0: depth1 = float(depth) print depth1 i = i + 1 else: pass ## This is where we show everyone the magic depth_gui = "Depth: %s" % depth cv2.putText(roi, depth_gui, (0, 10), font, .5, (0,0,255)) #Rcolor_filtered = np.hstack([img, output]) cv2.imshow("Crop", roi) cv2.imshow("Normal View", img) #cv2.imshow("AUTOROV", Rcolor_filtered) if cv2.waitKey(1) & 0xFF == ord('q'): break # When everything done, release the capture pub = rospy.Publisher("/cmd_vel", Twist, queue_size=1) # Create Twist message & add linear x and angular z from left joystick twist = 
Twist() twist.linear.x = forward # Move forward backward twist.linear.z = updown twist.angular.z = turn # Turn left right # record values to log file and screen rospy.loginfo("Linear.x: %f :: Linear.z: %f :: Angular.z: %f ", twist.linear.x, twist.linear.z, twist.angular.z) # publish cmd_vel move command to ROV pub.publish(twist) # When everything done, release the capture pub = rospy.Publisher("/cmd_vel", Twist, queue_size=1) # Create Twist message & add linear x and angular z from left joystick twist = Twist() twist.linear.x = 0 # Move forward backward twist.linear.z = 0 twist.angular.z = 0 # Turn left right # record values to log file and screen rospy.loginfo("Linear.x: %f :: Linear.z: %f :: Angular.z: %f ", twist.linear.x, twist.linear.z, twist.angular.z) # publish cmd_vel move command to ROV pub.publish(twist) video_capture.release() cv2.destroyAllWindows() ''' lower_red = np.array([110,50,50]) upper_red = np.array([130,255,255]) '''
[ "noreply@github.com" ]
noreply@github.com
7c90c47186ac7a9a198dd4e1dcb44cefa65e5230
d00cebec4173f9e57b3f23342fe3260ecbc90fc9
/code wars/challenges.py
3948e4e455925bf8627d23d275d1b508e75bf627
[]
no_license
crispad/algorthims_pratice
22cb45bdf149f100ad69f6fbafe3f694b7ccf95a
f968617e53700e1965f5b0f8ae99b8481a4b410c
refs/heads/master
2022-11-13T14:23:51.303678
2020-06-16T21:29:51
2020-06-16T21:29:51
255,792,067
0
0
null
null
null
null
UTF-8
Python
false
false
1,087
py
# Create fibonacci sequence with n elements and output it in a list. If n == 0 return empty list. # 0 1 1 2 3 5 8 11 19 ...etc # [0, 1, 1, 2, 3, 5, 8, 11, 19] def fib_sequence(n): base = [0, 1] if n == 0: return None if n == 1: return base[0] if n == 2: return base if n > 2: counter = 2 while counter < n: base.append(base[counter - 1] + base[counter - 2]) counter += 1 return base #print(fib_sequence(7)) #2 # Create a tribonacci sequence with n elements and output it in a list. If n == 0 return empty list. # 0 1 1 2 4 7 13 24 ... etc # [0, 1, 1, 2, 4, 7, 13, 24] def trib_sequence(n): base = [0,1,1] if n == 0: return [] if n == 1: return [base[0]] if n == 2: return [base[0] + base[1]] if n == 3: return base if n > 3: counter = 3 while counter < n: base.append(base[counter - 3] + base[counter - 2] + base[counter - 1]) counter += 1 return base print(trib_sequence(4))
[ "crispad06@gmail.com" ]
crispad06@gmail.com
833980c8158fa0f25d3ae7485542f3655bc24ef9
18e48f22f88fe80ce54d12fdbf9d05a7ca5bd65a
/0x04-python-more_data_structures/3-common_elements.py
bdcdc3ae779109657e7ebde98aeb7e93558c88ae
[]
no_license
SantiagoHerreG/holbertonschool-higher_level_programming
426c4bc9bc080a81b72d2f740c8ed2eb365023eb
ca2612ef3be92a60764d584cf39de3a2ba310f84
refs/heads/master
2020-07-22T19:33:48.507287
2020-02-14T04:34:00
2020-02-14T04:34:00
207,305,022
0
0
null
null
null
null
UTF-8
Python
false
false
105
py
#!/usr/bin/python3 def common_elements(set_1, set_2): new_set = set_1 & set_2 return (new_set)
[ "888@holbertonschool.com" ]
888@holbertonschool.com
ddbc0c95647448fd2b5ee0f7983a9b7eda1fc03c
7b054cd92eece331d2494b1ecbd6ebae76deed55
/ecommerce/urls.py
748d4606d02d80bf738204eb134afca866a6623b
[]
no_license
Tanmoy-Sarkar/Django-Ecommerce-Website
f23ec5a60b64f6b3f614bb9dd7aced2c694d1d75
24810153dcb51ae1d57c1b182b59cb40fbb8a3d2
refs/heads/master
2023-07-03T17:23:58.746887
2021-07-31T08:21:35
2021-07-31T08:21:35
389,021,036
0
0
null
null
null
null
UTF-8
Python
false
false
948
py
"""ecommerce URL Configuration The `urlpatterns` list routes URLs to views. For more information please see: https://docs.djangoproject.com/en/3.2/topics/http/urls/ Examples: Function views 1. Add an import: from my_app import views 2. Add a URL to urlpatterns: path('', views.home, name='home') Class-based views 1. Add an import: from other_app.views import Home 2. Add a URL to urlpatterns: path('', Home.as_view(), name='home') Including another URLconf 1. Import the include() function: from django.urls import include, path 2. Add a URL to urlpatterns: path('blog/', include('blog.urls')) """ from django.contrib import admin from django.urls import path,include from django.conf.urls.static import static from django.conf import settings urlpatterns = [ path('admin/', admin.site.urls), path('',include('store.urls')), ] urlpatterns += static(settings.MEDIA_URL,document_root=settings.MEDIA_ROOT)
[ "tanmoy9920@gmail.com" ]
tanmoy9920@gmail.com
ab36bce5acedddf55bb70f404d5acbce4d262f7c
ad5e88b23c8599400184a364c4c7efedadd5b7dc
/manage.py
8cf1c1a4208251828bb326d711c94deb5be8d6d3
[]
no_license
jy-yuan/edu-satellite-platform
3cb8f77df79ef79bc88e7b42aea4ec969c113d33
89e7d19cd5814602a4faa2423a2df91ce5511e2d
refs/heads/master
2021-07-03T10:33:58.692047
2021-05-10T08:04:43
2021-05-10T08:04:43
229,178,255
2
1
null
2021-05-10T07:59:42
2019-12-20T02:52:35
JavaScript
UTF-8
Python
false
false
617
py
#!/usr/bin/env python """Django's command-line utility for administrative tasks.""" import os import sys def main(): os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'dbtest.settings') try: from django.core.management import execute_from_command_line except ImportError as exc: raise ImportError( "Couldn't import Django. Are you sure it's installed and " "available on your PYTHONPATH environment variable? Did you " "forget to activate a virtual environment?") from exc execute_from_command_line(sys.argv) if __name__ == '__main__': main()
[ "1462936118@qq.com" ]
1462936118@qq.com
034ed03c88bab1e13251ff4cd083aed6ff1c7b3a
1eb04e960be4b5b2c3b752596311d8652e217dba
/python/datatype-set,boolean.py
e67652d55a7860785104808e702c899f0b0142ec
[]
no_license
vyshakhRemesh/rev
69de99899a1757a458f4ff8ee2f91052ed83fcf7
f12aff5b20555a7c47430f0f500a65b3012b6c97
refs/heads/main
2023-07-09T10:24:43.052720
2021-08-08T17:25:41
2021-08-08T17:25:41
388,158,046
0
0
null
null
null
null
UTF-8
Python
false
false
370
py
# Demonstration of basic set operations and a boolean comparison.

# Creating and printing a set
thisset = {"apple", "banana", "cherry"}
print(thisset)

# Iterating over a set's members
thisset = {"apple", "banana", "cherry"}
for fruit in thisset:
    print(fruit)

# Removing an element (discard does not raise if the element is absent)
thisset = {"apple", "banana", "cherry"}
thisset.discard("banana")
print(thisset)

# Boolean comparison between two integers
a = 200
b = 33
if b > a:
    print("b is greater than a")
else:
    print("b is not greater than a")
[ "noreply@github.com" ]
noreply@github.com
9115d8c3b4724daa74540af198ec9ad1e57212b8
c1ae4278dfee5f2a2a9d899d308488095c70df1d
/projects/ex46-AProjectSkeleton/tests/nose_tests.py
a00e2661244e4a2be3e1751d42a64cd84b74e23c
[]
no_license
BeatsBassBoom/LP3THW
be2b0a48a21b5ee0476b59e8f2b55401888e4a7d
37b170817d874de37b22f59c33ddadfd780377cd
refs/heads/master
2020-04-02T08:19:40.383456
2019-11-11T01:51:28
2019-11-11T01:51:28
145,632,501
0
0
null
null
null
null
UTF-8
Python
false
false
307
py
# First test skeleton
from nose.tools import *

import nose_runner


def setup():
    """Run before each test in this module."""
    print("SETUP!")


def teardown():
    """Run after each test in this module."""
    print("TEAR DOWN!")


def test_basic():
    print("I RAN!", end='')


def test_nose_runner_first_try():
    # Call first_try() once and compare its return value directly.
    # The original asserted name.first_try() == 1, i.e. it invoked
    # first_try() a second time on the value already returned by the
    # first call — that attribute does not exist on the result.
    name = nose_runner.nose_runner.first_try()
    assert name == 1
[ "28370491+BeatsBassBoom@users.noreply.github.com" ]
28370491+BeatsBassBoom@users.noreply.github.com
0ee61681b51f1c92da7226b74c1bb7a15fcd4fdb
8cec98d8ac9a55bc4176cd83286ef95c149fa98f
/Cases to Events [Python]/comprehend.py
0f2b2deb91ae81f4c194252abab19d5f4b938083
[]
no_license
MinMat/Hudoc-to-Eventlogs
0c917a7c4cc9919d3f3fb1b45d29aaf8d36a7f47
1f9af65a6cbe51763207fcf872276b43acab43b8
refs/heads/master
2021-05-05T12:21:31.740399
2018-01-09T21:07:54
2018-01-09T21:07:54
null
0
0
null
null
null
null
UTF-8
Python
false
false
5,636
py
import json
import sys
import os

import boto3


# Reads a case file, trims it to the PROCEDURE + circumstances sections,
# splits the text into pieces of under ~5000 bytes, runs each piece
# through AWS Comprehend, and produces a JSON document containing the
# case text plus all detected entities.
class ComprehendCaseEntities():

    def __init__(self, fileName, caseName, awsregion, langCode):
        with open(fileName) as f:
            caseText = f.read()
        self._caseName = caseName
        self._awsregion = awsregion
        self._language = langCode
        # Keep only the procedure and the first part of the facts section
        # ("I. THE CIRCUMSTANCES OF THE CASE"): drop everything before and
        # including the PROCEDURE heading, and everything from "II. " on
        # (with "THE LAW" / "RELEVANT DOMESTIC LAW" as fallback markers).
        procedureIndex = caseText.find('\nPROCEDURE\n')
        sectionTwoIndex = caseText.find('\nII. ', procedureIndex)
        if sectionTwoIndex == -1:
            sectionTwoIndex = caseText.find('\nTHE LAW\n', procedureIndex)
        if sectionTwoIndex == -1:
            sectionTwoIndex = caseText.find('\nRELEVANT DOMESTIC LAW\n', procedureIndex)
        if procedureIndex == -1 or sectionTwoIndex == -1:
            raise Exception('No PROCEDURE or section "II. " in this file')
        caseText = caseText[procedureIndex + 9:sectionTwoIndex]
        self._caseText = caseText
        # The case text broken up into strings of under ~5000 bytes each.
        self._stringArray = self._4800bytesStringArray(caseText)

    def stringArray(self):
        return self._stringArray

    def caseText(self):
        return self._caseText

    def caseName(self):
        return self._caseName

    def caseJson(self):
        # Only available after comprehend() has been run once.
        if hasattr(self, "_caseJson"):
            return self._caseJson
        raise Exception('No caseJson found. Run comprehend first!')

    # Sends every element of stringArray to AWS Comprehend and returns a
    # JSON-style dict representing the whole case text with its entities.
    def comprehend(self):
        if len(self._stringArray) == 0:
            raise Exception('Empty stringArray!')
        comprehend = boto3.client(service_name='comprehend',
                                  region_name=self._awsregion)
        # Start with case name, empty case text and an empty entity list.
        caseJson = self._startCaseJson()
        segmentCounter = 1
        totSegments = len(self._stringArray)
        for segment in self._stringArray:
            if len(segment) == 0:
                continue
            print('Sending segment (' + str(segmentCounter) + '/' + str(totSegments) + ') to aws comprehend.')
            segmentCounter += 1
            compResult = comprehend.detect_entities(Text=segment,
                                                    LanguageCode=self._language)
            self._appendSegment(caseJson, segment, compResult['Entities'])
        # Cache the result so caseJson() can return it later.
        self._caseJson = caseJson
        return caseJson

    # Breaks text into a list of strings, each at most ~4800 bytes, never
    # splitting in the middle of a sentence (each element ends on a "."
    # or on the final character of the text).
    def _4800bytesStringArray(self, text):
        currentSegment = ''
        stringArray = []
        while len(text) != 0:
            # Add the next sentence to the running segment.
            nextSentence = self._getNextSentence(text)
            currentSegment += nextSentence
            if sys.getsizeof(currentSegment) < 4800:
                # Still room: consume the sentence from the remaining text.
                text = text[len(nextSentence):]
            else:
                # Segment is full: drop the last sentence and flush.
                currentSegment = currentSegment[:-len(nextSentence)]
                print("Current segment size:" + str(sys.getsizeof(currentSegment)))
                stringArray.append(currentSegment)
                currentSegment = ''
        # Flush whatever text is left over.
        if len(currentSegment) > 0:
            stringArray.append(currentSegment)
        return stringArray

    # Returns the substring from the start up to and including the next
    # "."; if there is no ".", the whole text is returned.
    def _getNextSentence(self, text):
        colonIndex = text.find('.')
        if colonIndex == -1:
            return text
        return text[:colonIndex + 1]

    # Initializes the output dict with the case name, an empty case text
    # and an empty list of entities.
    def _startCaseJson(self):
        caseJson = dict()
        caseJson['CaseName'] = self._caseName
        caseJson['CaseText'] = ''
        caseJson['Entities'] = []
        return caseJson

    # Appends one text segment and its detected entities to caseJson,
    # shifting each entity's character offsets by the text accumulated
    # so far so they index into the full case text.
    def _appendSegment(self, caseJson, segment, entities):
        for entity in entities:
            caseTextlen = len(caseJson['CaseText'])
            entity['BeginOffset'] += caseTextlen
            entity['EndOffset'] += caseTextlen
            caseJson['Entities'].append(entity)
        caseJson['CaseText'] += segment


caseName = sys.argv[1]
fileName = "Cases/" + caseName + ".txt"
if not os.path.isdir("Cases"):
    os.makedirs("Cases")
if not os.path.isfile(fileName):
    raise Exception(fileName + " does not exist")
if not os.path.isdir("Comprehend Json"):
    os.makedirs("Comprehend Json")

c = ComprehendCaseEntities(fileName, caseName, 'eu-west-1', 'en')
with open("Comprehend Json/comprehend-" + caseName + ".json", 'w+') as f:
    f.seek(0)
    f.write(json.dumps(c.comprehend(), indent=4))
[ "georg.prohaska@gmail.com" ]
georg.prohaska@gmail.com
f7b8dceb9b4cf4acba51c4da0d7fe1a97fb0e10c
089cdec42966ca40ba3cbf4a5ffe166c80f19389
/module2.py
619ef8431015b77f50857ae5f8c4cf1b60f093b5
[]
no_license
feelosophy13/bioinformatics_algorithm_1
0d01035f7e590bfe982edef9ce0424a4f5fde5c1
6514f9035d35f14e3d85d50c9c63e38e57b45238
refs/heads/master
2020-06-03T22:44:48.995592
2015-05-14T23:58:44
2015-05-14T23:58:44
35,568,928
0
0
null
null
null
null
UTF-8
Python
false
false
5,706
py
from module1 import *
import itertools


def create_GC_skews_list(DNA_str):
    """Return the running G-C skew (#G - #C) after each position.

    The list starts with 0 (the skew before any nucleotide), so its
    length is len(DNA_str) + 1.
    """
    skew = 0
    result = [0]
    for nucleotide in DNA_str:
        if nucleotide == 'G':
            skew += 1
        elif nucleotide == 'C':
            skew -= 1
        result.append(skew)
    return result


def get_min_GC_skew_indices(DNA_str):
    """Return every index where the minimum G-C skew occurs."""
    # NOTE(review): the last character is dropped before computing the
    # skew — presumably a trailing newline from file input; confirm.
    DNA_str = DNA_str[:-1]
    skews = create_GC_skews_list(DNA_str)
    min_skew = min(skews)
    return [i for i, skew in enumerate(skews) if skew == min_skew]


def calc_Hamming_dist(DNA_str1, DNA_str2):
    """Return the number of mismatched positions, or None if the strings
    have different lengths."""
    if len(DNA_str1) != len(DNA_str2):
        return None
    return sum(1 for a, b in zip(DNA_str1, DNA_str2) if a != b)


def find_ptrn_start_indices_aprx_match(ptrn, DNA_str, d):
    """Return start indices where ptrn matches a k-mer of DNA_str with
    at most d mismatches."""
    ptrn_len = len(ptrn)
    return [i for i in range(len(DNA_str) - ptrn_len + 1)
            if calc_Hamming_dist(DNA_str[i:i + ptrn_len], ptrn) <= d]


def get_kmer_freq_cnt_aprx_match(DNA_str, ptrn, d):
    """Count the k-mers of DNA_str that match ptrn with at most d
    mismatches."""
    ptrn_len = len(ptrn)
    return sum(1 for i in range(len(DNA_str) - ptrn_len + 1)
               if calc_Hamming_dist(DNA_str[i:i + ptrn_len], ptrn) <= d)


def get_aprx_match_ptrns(ptrn, d):
    """Return every pattern whose Hamming distance to ptrn is <= d.

    The observed pattern itself is included. (A more efficient
    neighborhood-generation method exists; this enumerates all k-mers.)
    """
    perms = get_ordered_kmer_permutations(len(ptrn))
    return [perm for perm in perms if calc_Hamming_dist(perm, ptrn) <= d]


def get_most_freq_kmers_aprx_match(DNA_str, k, d, greedy=False, incl_rev_comp=False):
    """Return the most frequent k-mers in DNA_str allowing mismatches.

    greedy=True evaluates all 4**k k-mers; greedy=False only those
    observed in DNA_str. incl_rev_comp=True also counts each k-mer's
    reverse complement.
    """
    freq_array = get_ordered_kmers_freq_array(DNA_str, k)
    counts_to_kmers = {}
    for i in range(len(freq_array)):
        # Skip unobserved k-mers unless evaluating the full space.
        if not greedy and freq_array[i] == 0:
            continue
        kmer = conv_base10_num_to_DNA_str(i, k)
        neighbors = get_aprx_match_ptrns(kmer, d)  # includes kmer itself
        total = sum(freq_array[conv_DNA_str_to_base10_num(p)]
                    for p in neighbors)
        kmer_list = [kmer]
        if incl_rev_comp:
            kmer_rev_comp = get_rev_complement(kmer)
            rev_neighbors = [get_rev_complement(p) for p in neighbors]
            total += sum(freq_array[conv_DNA_str_to_base10_num(p)]
                         for p in rev_neighbors)
            kmer_list = [kmer, kmer_rev_comp]
        if total in counts_to_kmers:
            counts_to_kmers[total] += kmer_list
        else:
            counts_to_kmers[total] = kmer_list
    return counts_to_kmers[max(counts_to_kmers.keys())]


"""
DNA = 'CATGGGCATCGGCCATACGCC'
print create_GC_skews_list(DNA)

DNA = 'TAAAGACTGCCGAGAGGCCAACACGAGTGCTAGAACGAGGGGCGTAAACGCGGGTCCGAT'
print create_GC_skews_list(DNA)
print get_min_GC_skew_indices(DNA)

DNA1 = 'GGGCCGTTGGT'
DNA2 = 'GGACCGTTGAC'
print calc_Hamming_dist(DNA1, DNA2)

ptrn = 'ATTCTGGA'
DNA = 'CGCCCGAATCCAGAACGCATTCCCATATTTCGGGACCACTGGCCTCCACGGTACGGACGTCAATCAAAT'
d = 3
print find_ptrn_start_indices_aprx_match(ptrn, DNA, d)

DNA = 'TTTAGAGCCTTCAGAGG'
ptrn = 'GAGG'
d = 2
print get_kmer_freq_cnt_aprx_match(DNA, ptrn, d)

DNA = 'ACGTTGCATGTCGCATGATGCATGAGAGCT'
k = 4
d = 1
print get_most_freq_kmers_aprx_match(DNA, k, d)

DNA = 'ACGTTGCATGTCGCATGATGCATGAGAGCT'
k = 4
d = 1
print get_most_freq_kmers_aprx_match(DNA, k, d, greedy=False, incl_rev_comp=False)
print get_most_freq_kmers_aprx_match(DNA, k, d, greedy=False, incl_rev_comp=True)
"""
[ "feelosophy13@gmail.com" ]
feelosophy13@gmail.com
52c042f3f625dd35b3d5f51b79ba0db084da328e
b556ce9f81c91f8e31d44712a37d460b321453a9
/src/truecaller_lookup.py
abe98a96c39acd8652b351c0c3234dd0bd9ce648
[ "MIT" ]
permissive
hvgh88/num-info
2f08adf6198124b39e5f646110f859b03edf9c46
db7cacf1e3271d8cccee710989d10fcc939703e8
refs/heads/main
2023-07-15T17:30:41.627121
2021-09-03T20:40:14
2021-09-03T20:40:14
null
0
0
null
null
null
null
UTF-8
Python
false
false
6,971
py
from selenium import webdriver
from selenium.webdriver.common.keys import Keys
from selenium.common.exceptions import NoSuchElementException
from time import sleep
from getpass import getpass
from termcolor import colored
import auxillary


class TrueCaller:
    """Perform a Truecaller lookup for a phone number via a headless browser.

    Signs in with a Microsoft account (Truecaller's Google sign-in was
    unavailable for automation) and scrapes name, email, carrier, local
    time and location from the result page.
    """

    def __init__(
        self, iso3166_code, phone_no, browser, web_ui, microsoft_details
    ) -> None:
        # Basic lookup parameters and the result-page URL.
        self.iso3166_code = iso3166_code
        self.phone_no = phone_no
        self.url = (
            "https://www.truecaller.com/search/" + iso3166_code + "/" + str(phone_no)
        )
        self.browser = browser
        self.lookup_status = False
        self.web_ui = web_ui
        # Web UI supplies credentials up front; CLI prompts during process().
        if self.web_ui:
            self.your_email_id = microsoft_details[0]
            self.your_password = microsoft_details[1]
        else:
            self.your_email_id = None
            self.your_password = None

    def process(self):
        """Drive the browser through sign-in and scrape the result page.

        Returns 0 on success, -1 when the result page could not be read
        (bad credentials or daily search limit exceeded).
        """
        # Headless browser; "log-level=3" suppresses console warnings.
        if self.browser == "firefox":
            options = webdriver.FirefoxOptions()
            options.set_headless()
            options.add_argument("log-level=3")
            driver = webdriver.Firefox(options=options)
        else:
            options = webdriver.ChromeOptions()
            options.add_argument("headless")
            options.add_argument("log-level=3")
            driver = webdriver.Chrome(options=options)

        driver.get(self.url)
        # NOTE: the sleeps give each page time to load fully.
        sleep(3)

        # Elements are located by their xpaths on the Truecaller page.
        microsoft_sign_in = driver.find_element_by_xpath('//*[@id="app"]/main/div/a[2]')
        microsoft_sign_in.click()

        if not self.web_ui:
            sleep(3)
            print()
            auxillary.line()
            print("Personal details required for enabling the required services\nNOTE: Details aren't stored")
            auxillary.line()
            print()
            # Microsoft sign-in was the only automatable option; Google
            # blocked automated sign-in for security reasons.
            print("Microsoft Details:")
            self.your_email_id = input("Email-ID : ")
            self.your_password = getpass(prompt="Password : ")

        sleep(3)
        your_email_id_input = driver.find_element_by_xpath('//*[@id="i0116"]')
        your_email_id_input.send_keys(self.your_email_id, Keys.RETURN)
        sleep(4)

        # Some accounts show a credential-picker step first.
        security = driver.find_elements_by_xpath('//*[@id="idA_PWD_SwitchToCredPicker"]')
        if len(security) != 0:
            security[0].click()
            sleep(2)
            driver.find_element_by_xpath('//*[@id="credentialList"]/div[3]/div/div/div[2]').click()
            sleep(2)

        your_password_input = driver.find_element_by_xpath('//*[@id="i0118"]')
        your_password_input.send_keys(self.your_password, Keys.RETURN)
        sleep(3)

        # Dismiss the optional "stay signed in?" dialog when present.
        try:
            stay_signed_in_comfirmation = driver.find_element_by_xpath('//*[@id="idBtn_Back"]')
            stay_signed_in_comfirmation.send_keys(Keys.RETURN)
        except NoSuchElementException:
            pass
        sleep(6)

        # If the name element is missing, the lookup failed (Truecaller
        # allows only a few lookups per day per IP) — bail out.
        try:
            self.name = driver.find_element_by_xpath(
                '//*[@id="app"]/main/div/div[1]/div[1]/header/div[2]/h1/span'
            ).text.title()
        except NoSuchElementException:
            print("Issue could be one among the following:")
            print("- Incorrect username\n- Incorrect password")
            print("- Search limit exceeded. Try again in a day or use a different microsoft account.")
            driver.quit()
            return -1

        self.email_id = driver.find_element_by_xpath(
            '//*[@id="app"]/main/div/div[1]/div[2]/a[2]/div'
        ).text.lower()
        self.service_provider = driver.find_element_by_xpath(
            '//*[@id="app"]/main/div/div[1]/div[2]/a[1]/div/div[2]'
        ).text.title()
        self.local_time = driver.find_element_by_xpath(
            '//*[@id="app"]/main/div/div[1]/div[2]/a[3]/div/div[2]'
        ).text.title()
        self.location = driver.find_element_by_xpath(
            '//*[@id="app"]/main/div/div[1]/div[2]/a[3]/div/div[1]'
        ).text.title()

        driver.quit()
        return 0

    def set_lookup_status(self):
        """Record that the lookup has taken place."""
        self.lookup_status = True

    def get_lookup_status(self):
        """Return whether the lookup has taken place."""
        return self.lookup_status

    def set_results(self):
        """Prepare the results for display in the web UI."""
        self.heading = "Truecaller Lookup"
        # NOTE(review): the trailing comma wraps the dict in a 1-tuple —
        # looks accidental, but preserved since the web UI consumes it;
        # confirm against the consumer before changing.
        self.dictionary = (
            {
                "Name": self.name,
                "Email address": self.email_id,
                "Service Provider": self.service_provider,
                "Local time": self.local_time,
                "Location": self.location,
            },
        )

    def get_results(self):
        """Return (heading, results) for the web UI."""
        return (self.heading, self.dictionary)

    def display_results(self, colors):
        """Print the scraped details to the CLI using the given colors."""
        print()
        auxillary.line()
        print(colored("Truecaller Lookup Results:", colors[0]))
        auxillary.line()
        print(
            colored("Name : ", colors[1]),
            colored(self.name, colors[2]),
        )
        print(
            colored("Email-ID : ", colors[1]),
            colored(self.email_id, colors[2]),
        )
        print(
            colored("Service Provider : ", colors[1]),
            colored(self.service_provider, colors[2]),
        )
        print(
            colored("Local Time : ", colors[1]),
            colored(self.local_time, colors[2]),
        )
        print(
            colored("Location : ", colors[1]),
            colored(self.location, colors[2]),
        )
        auxillary.line()
[ "manishgowd09@gmail.com" ]
manishgowd09@gmail.com
6aaf178780774e4f6235b5b93d0111edfc538729
e4807e683e5fcb913f86715f216159e1d9d35ece
/erc_plot_sqlite_Updated_OnlinemapSupport.py
ef974fe3db29d00ba66149995f3735759583a23a
[]
no_license
PeterTFS/ERC
cc2c128a9ce644c951de834428213a0557630a24
99b5dcb1f2254f9afe840544d2ae07c8fb9cecad
refs/heads/master
2023-03-29T06:28:32.914084
2021-03-29T20:55:01
2021-03-29T20:55:01
109,144,472
0
0
null
null
null
null
UTF-8
Python
false
false
12,159
py
#-------------------------------------------------------------------------------
# Name:      erc_plot_sqlite_Updated_OnlinemapSupport.py
# Purpose:   Retrive archived erc from database for climtology plotting
# Author:    pyang
# Created:   21/01/2016
# Updated:   06/14/2016 for creating png as well as pdf files
# Updated?   12/31/2016 for new year!
# Updated:   05/11/2017 to support arcgis online map
# Copyright: (c) pyang 2016
#-------------------------------------------------------------------------------
# NOTE: Python 2 script (uses print statements).
##def main():
##    pass
##if __name__ == '__main__':
##    main()

import os
import csv
import matplotlib
import matplotlib.pyplot as plt
import matplotlib.dates as mdates
import matplotlib.cbook as cbook
import pandas
import datetime
import numpy as np
from sqlalchemy import create_engine, MetaData
import pandas.io.sql as psql
import sqlite3
from matplotlib.backends.backend_pdf import PdfPages #combine several pdfs file into one file!

plt.style.use('ggplot')

#A function for writing a dict to a file (one "key,value" CSV row per entry)
def saveDict(fn,dict_rap):
    f=open(fn, "wb")
    w = csv.writer(f)
    for key, val in dict_rap.items():
        w.writerow([key, val])
    f.close()

# Fuel model per PSA (Predictive Service Area) database code.
FuelModel = {'CENTRLTX':'8G', 'NTXS' : '8G', 'TRANSPEC':'7G', 'SETX':'8G',
             'ROLLPLN':'7G', 'NETX':'8G', 'COASTLPL':'8G', 'HIGHPLAN':'7G',
             'HILLCNTY':'7G', 'LOWCOAST':'8G', 'RIOGRAND':'8G', 'SOUTHPLN':'7G',
             'UPRCOAST':'8G', 'WESTPINE':'8G'}

PSAs = ['P_CentralTX','P_CoastalPlns','P_HighPlns','P_HillCountry','P_LowerCoast','P_NETX','P_NorthTX',
        'P_RioGrandPlns','P_RollingPlns','P_SETX','P_SouthernPlns','P_TransPecos','P_UpperCoast','P_WPineywoods']

# Map GIS layer names to the PSA codes used in the database tables.
PSAinDB = {'P_CentralTX':'CENTRLTX', 'P_CoastalPlns':'COASTLPL', 'P_HighPlns':'HIGHPLAN',
           'P_HillCountry':'HILLCNTY', 'P_LowerCoast':'LOWCOAST', 'P_NETX':'NETX',
           'P_NorthTX':'NTXS', 'P_RioGrandPlns':'RIOGRAND', 'P_RollingPlns':'ROLLPLN',
           'P_SETX':'SETX', 'P_SouthernPlns':'SOUTHPLN', 'P_TransPecos':'TRANSPEC',
           'P_UpperCoast':'UPRCOAST', 'P_WPineywoods':'WESTPINE' }

# Short display names used in the plot subtitle.
PSAwithLongname ={'CENTRLTX':'Central TX', 'NTXS':'North TX', 'TRANSPEC':"Trans Pecos",
                  'ROLLPLN':"Rolling Plains", 'SETX':"SE TX", 'NETX':"NE TX",
                  'COASTLPL':"Coastal Plains", 'HIGHPLAN':"High Plains", 'HILLCNTY':"Hill Country",
                  'LOWCOAST':"Lower Coast", 'RIOGRAND':"Rio Grande Pla", 'SOUTHPLN':"Southern Plain",
                  'UPRCOAST':"Upper Coast", 'WESTPINE':"W Pinewoods"}

# Output file basenames per PSA.
PSAPDF ={'CENTRLTX':'erc_ctx', 'NTXS':'erc_ntx', 'TRANSPEC':"erc_tp", 'ROLLPLN':"erc_rp",
         'SETX':"erc_setx", 'NETX':"erc_netx", 'COASTLPL':"erc_cp", 'HIGHPLAN':"erc_hp",
         'HILLCNTY':"erc_hc", 'LOWCOAST':"erc_lgc", 'RIOGRAND':"erc_rgp", 'SOUTHPLN':"erc_sp",
         'UPRCOAST':"erc_ugc", 'WESTPINE':"erc_wpw"}

#For Seasonal ERC arcgis online updating, a dict created for contain the percentile value for each PSA
PSAUpdate ={'CENTRLTX':'Central Texas', 'NTXS':'North Texas', 'TRANSPEC':"Trans Pecos",
            'ROLLPLN':"Rolling Plains", 'SETX':"Southeast Texas", 'NETX':"Northeast Texas",
            'COASTLPL':"Coastal Plains", 'HIGHPLAN':"High Plains", 'HILLCNTY':"Hill Country",
            'LOWCOAST':"Lower Gulf Coast", 'RIOGRAND':"Rio Grande Plains", 'SOUTHPLN':"Southern Plains",
            'UPRCOAST':"Upper Gulf Coast", 'WESTPINE':"Western Pineywoods"}

#Then A dict for value for each PSA (filled with 1-5 percentile levels below)
PSALevelDict = {}

#For review purpose, create a pdf book to the FTP(or SyncPlicity) for sharing
producetime = datetime.datetime.now()
date_today = producetime.strftime("%Y%m%d")
pdf_ARCHIVE_Path = os.path.join(os.getcwd(),'ARCHIVE')
if not os.path.exists(pdf_ARCHIVE_Path):
    os.makedirs(pdf_ARCHIVE_Path)
multipage_pdf = pdf_ARCHIVE_Path + '\\' + date_today + ".pdf"  # Windows path separator
print multipage_pdf

with PdfPages(multipage_pdf) as pdf:
    for PSA,PSA_longname in PSAwithLongname.items():
        print PSA,PSA_longname
        #List a series of table need to acceess from database
        #Table of erc for all PSAs for this year uptodate (updated every day)
        table_erc = 'PSA_ERC_AVG'
        #Table for the historical analyses (including Max, Avg and last year ERC) for each PSA
        #The historial should be 2016 for 2017
        table_hist =PSA + '_ERC_HIST2016' #Updated 12/31/2016
        #Table for daily historical erc for all the years for each PSA
        table_hist_full = PSA + '_ERC_ALLYEAR'
        #retrive the plotting data from database(PostgreSQL)
##        try:
##            #PostgerSQL through ps** pm windows
##            #engine = create_engine(r'postgresql://postgres:2016@tFs@localhost/ERC')
##            #sqlite through sqlite3 on Windows
##            #engine = create_engine('sqlite:///E:\\ERC\\ercdb_updated.sqlite')
##            engine = create_engine('sqlite:///C:\\DEV\\ERC\\ercdb_updated.sqlite')
##            #Try Retrieving the data form the data
##            df_ERC = pandas.read_sql_table(table_erc,engine)
##            df_HIST = pandas.read_sql_table(table_hist,engine)
##            df_HIST_FULL = pandas.read_sql_table(table_hist_full,engine)
##            engine.dispose()
##        except:
##            print 'there is a problem in connecting to database'
##            exit(0)
        engine = create_engine('sqlite:///C:\\DEV\\ERC\\ercdb_updated.sqlite')
        #Try Retrieving the data form the data
        df_ERC = pandas.read_sql_table(table_erc,engine)
        df_HIST = pandas.read_sql_table(table_hist,engine)
        df_HIST_FULL = pandas.read_sql_table(table_hist_full,engine)
        engine.dispose()

        # Pull the plotting arrays out of the three data frames.
        date_todate = df_ERC.loc[:,'index'].values
        erc = df_ERC.loc[:,PSA].values
        date_2016 = df_HIST.loc[:,'index'].values
        avg_erc = df_HIST.loc[:,'ercAvg'].values
        max_erc = df_HIST.loc[:,'ercMax'].values
        lastyear_erc = df_HIST.loc[:,'lastYear'].values
        #df_HIST_FULL.index = pandas.to_datetime(df_HIST_FULL.loc[:,'DATE'])
        ts_ERC = df_HIST_FULL.loc[:,'ERC'].values
        firstYear = df_HIST_FULL.loc[0,'DATE'].year
        latestYear = '2016'
        latestYear = '2017' #For year 2017 (overrides the line above)
        print firstYear,latestYear
        #ts= pandas.Series(df_HIST_FULL.loc[:,'ERC'].values, index=df_HIST_FULL.loc[:,'DATE'].values)
        # percentile97 = np.percentile(erc_allyear,97)
        # Constant-valued Series (indexed by date) so percentiles plot as
        # horizontal reference lines.
        Percentile97 = pandas.Series(np.nanpercentile(ts_ERC,97), index=date_2016 )
        Percentile90 = pandas.Series(np.nanpercentile(ts_ERC,90), index=date_2016 )
        Percentile75 = pandas.Series(np.nanpercentile(ts_ERC,75), index=date_2016 )
        Percentile50 = pandas.Series(np.nanpercentile(ts_ERC,50), index=date_2016 )
        #Create a number based on current days ERC value regarding to the historical percentile
        '''
        5--- >97%
        4--- 90~97%
        3--- 75~90%
        2--- 50~75%
        1--- <50%
        '''
        PSAname = PSAUpdate[PSA]
        print erc
        print len(erc)
        ercPSA = erc[-1]  # the most recent (today's) ERC value
        print ercPSA,Percentile50[0],Percentile75[0],Percentile90[0],Percentile97[0]
        if ercPSA <= Percentile50[0]:
            ercLevel = 1
        elif ercPSA > Percentile50[0] and ercPSA <= Percentile75[0]:
            ercLevel = 2
        elif ercPSA > Percentile75[0] and ercPSA <= Percentile90[0]:
            ercLevel = 3
        elif ercPSA > Percentile90[0] and ercPSA <= Percentile97[0]:
            ercLevel = 4
        else:
            ercLevel = 5
        PSALevelDict[PSAname] = ercLevel

        #the number of days with observation data
        dayofObs = len(ts_ERC) + len(erc)
        #print dfread
        formatter = mdates.DateFormatter('%b %d')
        matplotlib.rc('xtick', labelsize=9)
        fig = plt.figure()
        ax = fig.add_subplot(111)
        ax.xaxis.set_major_formatter(formatter)
        #Historical Average ERC
        ax.plot_date(date_2016, avg_erc, '-',c='grey',lw=1.5, label='Avg')
        #Historical Maximum ERC
        ax.plot_date(date_2016, max_erc, '-',c='red',lw=1.5, label='Max')
        #Last year ERC
        ax.plot_date(date_2016, lastyear_erc, ':',c='blue',lw=1.3, label='2016')
        #97 and 90 Percentile for all the Previous years
##        ax.plot_date(date_2016, Percentile97, '-',c='purple',lw=1,label='97%')
##        ax.plot_date(date_2016, Percentile90, '-',c='green',lw=1,label='90%')
        ax.plot_date(date_2016, Percentile97, '-',c='purple',lw=1,label='97% ['+ str(int(Percentile97[0])) + ']')
        ax.plot_date(date_2016, Percentile90, '-',c='green',lw=1,label='90% ['+ str(int(Percentile90[0])) + ']')
        #Uptodate ERC
        ax.plot_date(date_todate, erc,'-',c='black',lw=1.5, label='2017')
        #add titles and legends,etc
        plt.xlabel('1 Day Periods',fontsize='xx-large')
        plt.ylabel('Energy Release Component',fontsize='x-large')
        # PSA_longname = PSA_longname[PSA]
        subtitlename = "PSA - " + PSA_longname
        plt.suptitle(subtitlename,fontsize='x-large')
        title_year = str(firstYear) + '-' + str(latestYear)
        plt.title(title_year)
        leg = plt.legend(loc='lower center',ncol=2,fontsize='small')
        # Nudge the legend box right and down by a fixed offset.
        bb = leg.legendPatch.get_bbox().inverse_transformed(ax.transAxes)
        xOffset = 0.305
        yOffset = 0.15
        newX0 = bb.x0 + xOffset
        newX1 = bb.x1 + xOffset
        newY0 = bb.y0 - yOffset
        newY1 = bb.y1 - yOffset
        bb.set_points([[newX0, newY0], [newX1, newY1]])
        leg.set_bbox_to_anchor(bb)
        #Text to show the fuel model used and the date generated
        fuelmodel = 'Fuel Model: 8G' #Either 8G or 7G
        #Need to create a dictionary for the fuel model definition(it has all 8G, use 8G, otherwise will be 7G), it can be based on each PSA
        #fuelmodel = 'Fuel Model: 7G'
        #fuelmodel = FuelModel[PSA]
        #fuelmodel = 'Fuel Model: ' + fuelmodel
        fuelmodel = 'Fuel Model: G'
        ##Meeting on Jan 26 2015, no difference will be made between 7G or 8G for the signature
        plt.figtext(0.9, 0.09, fuelmodel, horizontalalignment='right')
        observationtext = str(dayofObs) + ' Wx Observations'
        plt.figtext(0.9, 0.055, observationtext , horizontalalignment='right')
        producetime = datetime.datetime.now()
        producetext = 'Generated on ' + producetime.strftime("%m/%d/%Y-%H:%M")
        plt.figtext(0.9, 0.02, producetext , horizontalalignment='right')
        fig.autofmt_xdate()
        #plt.show()
        fig = plt.gcf() # get current figure
        date_today = producetime.strftime("%Y%m%d")
        pdf_filePath = os.path.join(os.getcwd(),'PDF',date_today)
        png_filePath = os.path.join(os.getcwd(),'PNG',date_today)
        if not os.path.exists(pdf_filePath):
            os.makedirs(pdf_filePath)
        if not os.path.exists(png_filePath):
            os.makedirs(png_filePath)
        pdf_filename = os.path.join(pdf_filePath, PSAPDF[PSA] +".pdf")
        png_filename = os.path.join(png_filePath, PSAPDF[PSA] +".png")
        # savefig returns None on success, so each branch both writes the
        # standalone file and appends the figure to the multipage PDF.
        if(fig.savefig(pdf_filename)==None):
            pdf.savefig()
            print 'ERC graph: ' + pdf_filename + ' Successfully Produced!'
        if(fig.savefig(png_filename)==None):
            pdf.savefig()
            print 'ERC graph: ' + png_filename + ' Successfully Produced!'

print PSALevelDict
csv_filename = os.path.join(os.getcwd(),"PSALevel.csv")
saveDict(csv_filename,PSALevelDict)
[ "pyang@TFS-24834.tfs.tamu.edu" ]
pyang@TFS-24834.tfs.tamu.edu
ee0624df8353edb64e16e66fcc0fffd8ecd0aa7a
d235eaf56abb8dc8e253d6f705efae407e11f01f
/wheel_maker.py
f2ccd2461b1a92c92cc66072f414a6cf230bd751
[]
no_license
griffgudge/Griffin-Casino
8b708fe3c6d0757b5192f12c16e5da70e4235ace
50c748885bcbb7bb41d33d469b7f1186484d6184
refs/heads/master
2022-11-07T21:42:59.897871
2020-06-22T13:18:27
2020-06-22T13:18:27
273,698,195
0
0
null
null
null
null
UTF-8
Python
false
false
1,078
py
def wheel_maker():
    """Build a European roulette wheel.

    Returns a list of 37 tuples ``(number, parity, color)`` for the
    numbers 0-36, where parity is "0", "Odd" or "Even" and color is
    "Green" (zero only), "Red" or "Black".

    Bug fixed: the original used a second ``if`` (instead of ``elif``)
    for ``i > 28 and i % 2 == 0``, so odd numbers above 28 fell through
    into the elif-chain and appended BOTH "Black" and "Red" — producing
    41 colors for 37 numbers and misaligning every color after 29.
    """
    wheel_nums = list(range(37))

    # Parity labels; zero gets its own "0" category.
    num_type = []
    for i in wheel_nums:
        if i == 0:
            num_type.append("0")
        elif i % 2 != 0:
            num_type.append("Odd")
        else:
            num_type.append("Even")

    # Colors alternate by parity within the standard roulette bands:
    # 1-10 and 19-28: even = Black, odd = Red;
    # 11-18 and 29-36: odd = Black, even = Red; 0 = Green.
    wheel_colors = []
    for i in wheel_nums:
        if i == 0:
            wheel_colors.append("Green")
        elif i > 28:
            wheel_colors.append("Black" if i % 2 != 0 else "Red")
        elif i > 18:
            wheel_colors.append("Black" if i % 2 == 0 else "Red")
        elif i > 10:
            wheel_colors.append("Black" if i % 2 != 0 else "Red")
        else:
            wheel_colors.append("Black" if i % 2 == 0 else "Red")

    return list(zip(wheel_nums, num_type, wheel_colors))
[ "67191383+griffgudge@users.noreply.github.com" ]
67191383+griffgudge@users.noreply.github.com
c78a128037d56a51af8dd35b30b236235e8555eb
2173387d499bc75cf3f21be6f5d7e01009dae148
/Ingestion/Kafka/kafkaC.py
1e0362e94e05e8e5dd13bde90959fe7d709935ef
[]
no_license
prarthanabhattarai/AdReportProject
13c8059f82b25913b61575a6629d79da14a345d3
f1d41135e39c6b1688ea43a690353ba47f1ad404
refs/heads/master
2021-01-23T20:14:28.235866
2015-07-14T17:51:01
2015-07-14T17:51:01
38,027,815
2
2
null
null
null
null
UTF-8
Python
false
false
3,721
py
import time from kafka.client import KafkaClient from kafka.consumer import SimpleConsumer import os class Consumer(object): def __init__(self, addr, group, topic): self.client = KafkaClient(addr) self.consumer = SimpleConsumer(self.client, group, topic, max_buffer_size=1310720000) self.temp_file_path = None self.temp_file = None self.hadoop_path = "/user/AdReport/%s/history" % (topic) self.cached_path = "/user/AdReport/%s/cached" % (topic) self.topic = topic self.group = group self.block_cnt = 0 def consume_topic(self, output_dir): timestamp = time.strftime('%Y%m%d%H%M%S') #open file for writing self.temp_file_path = "%s/kafka_%s_%s_%s.dat" % (output_dir, self.topic, self.group, timestamp) self.temp_file = open(self.temp_file_path,"w") print ( self.temp_file) #one_entry = False while True: try: messages = self.consumer.get_messages(count=100, block=False) #OffsetAndMessage(offset=43, message=Message(magic=0, # attributes=0, key=None, value='some message')) for message in messages: #print (message) #one_entry = True #print (self.temp_file.tell()) self.temp_file.write(message.message.value + "\n") if self.temp_file.tell() > 2000000: self.save_to_hdfs(output_dir) self.consumer.commit() except: self.consumer.seek(0, 2) #if one_entry: #print ("sending to hdfs") #self.save_to_hdfs(output_dir, self.topic) #self.consumer.commit() def save_to_hdfs(self, output_dir): #print ("Saving file to hdfs") self.temp_file.close() #print ("Closed open file") timestamp = time.strftime('%Y%m%d%H%M%S') hadoop_fullpath = "%s/%s_%s_%s.dat" % (self.hadoop_path, self.group, self.topic, timestamp) cached_fullpath = "%s/%s_%s_%s.dat" % (self.cached_path, self.group, self.topic, timestamp) #print ("Block " + str(self.block_cnt) + ": Saving file to HDFS " + hadoop_fullpath) self.block_cnt += 1 # place blocked messages into history and cached folders on hdfs os.system("sudo -u ubuntu /usr/local/hadoop/bin/hdfs dfs -put %s %s" % (self.temp_file_path, hadoop_fullpath)) os.system("sudo -u ubuntu 
/usr/local/hadoop/bin/hdfs dfs -put %s %s" % (self.temp_file_path, cached_fullpath)) os.remove(self.temp_file_path) timestamp = time.strftime('%Y%m%d%H%M%S') self.temp_file_path = "%s/kafka_%s_%s_%s.dat" % (output_dir, self.topic, self.group, timestamp) self.temp_file = open(self.temp_file_path, "w") if __name__ == '__main__': print "\nConsuming messages for bids..." cons1 = Consumer(addr="52.8.165.110", group="hdfs", topic="fb_bids") cons1.consume_topic("/home/ubuntu/user/AdReport/kafka_messages_bids") print "\nConsuming messages for ads..." cons2 = Consumer(addr="52.8.165.110", group="hdfs", topic="fb_ads") cons2.consume_topic("/home/ubuntu/user/AdReport/kafka_messages_ads")
[ "ubuntu@ip-172-31-9-43.us-west-1.compute.internal" ]
ubuntu@ip-172-31-9-43.us-west-1.compute.internal