Dataset columns (name, type, observed range):

| column           | type   | stats              |
|------------------|--------|--------------------|
| id               | int32  | values 0 – 252k    |
| repo             | string | lengths 7 – 55     |
| path             | string | lengths 4 – 127    |
| func_name        | string | lengths 1 – 88     |
| original_string  | string | lengths 75 – 19.8k |
| language         | class  | 1 value            |
| code             | string | lengths 75 – 19.8k |
| code_tokens      | list   |                    |
| docstring        | string | lengths 3 – 17.3k  |
| docstring_tokens | list   |                    |
| sha              | string | lengths 40 – 40    |
| url              | string | lengths 87 – 242   |
id: 21,600
repo: tzutalin/labelImg
path: libs/utils.py
func_name: natural_sort
language: python
sha: 6afd15aa88f89f41254e0004ed219b3965eb2c0d
url: https://github.com/tzutalin/labelImg/blob/6afd15aa88f89f41254e0004ed219b3965eb2c0d/libs/utils.py#L95-L103
docstring: Sort the list into natural alphanumeric order.
code:
```python
def natural_sort(list, key=lambda s: s):
    """
    Sort the list into natural alphanumeric order.
    """
    def get_alphanum_key_func(key):
        convert = lambda text: int(text) if text.isdigit() else text
        return lambda s: [convert(c) for c in re.split('([0-9]+)', key(s))]
    sort_key = get_alphanum_key_func(key)
    list.sort(key=sort_key)
```
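A quick usage sketch (the file names are illustrative; `natural_sort` needs `re` imported, as `libs/utils.py` does): the key splits each string into text and digit runs, so numeric segments compare as integers rather than characters.

```python
import re  # required by natural_sort

files = ['img10.png', 'img2.png', 'img1.png']
natural_sort(files)  # sorts in place, returns None
print(files)  # ['img1.png', 'img2.png', 'img10.png'], not the lexicographic img1/img10/img2
```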
id: 21,601
repo: tzutalin/labelImg
path: libs/canvas.py
func_name: Canvas.selectShapePoint
language: python
sha: 6afd15aa88f89f41254e0004ed219b3965eb2c0d
url: https://github.com/tzutalin/labelImg/blob/6afd15aa88f89f41254e0004ed219b3965eb2c0d/libs/canvas.py#L307-L319
docstring: Select the first shape created which contains this point.
code:
```python
def selectShapePoint(self, point):
    """Select the first shape created which contains this point."""
    self.deSelectShape()
    if self.selectedVertex():  # A vertex is marked for selection.
        index, shape = self.hVertex, self.hShape
        shape.highlightVertex(index, shape.MOVE_VERTEX)
        self.selectShape(shape)
        return
    for shape in reversed(self.shapes):
        if self.isVisible(shape) and shape.containsPoint(point):
            self.selectShape(shape)
            self.calculateOffsets(shape, point)
            return
```
id: 21,602
repo: tzutalin/labelImg
path: labelImg.py
func_name: MainWindow.toggleDrawingSensitive
language: python
sha: 6afd15aa88f89f41254e0004ed219b3965eb2c0d
url: https://github.com/tzutalin/labelImg/blob/6afd15aa88f89f41254e0004ed219b3965eb2c0d/labelImg.py#L621-L629
docstring: In the middle of drawing, toggling between modes should be disabled.
code:
```python
def toggleDrawingSensitive(self, drawing=True):
    """In the middle of drawing, toggling between modes should be disabled."""
    self.actions.editMode.setEnabled(not drawing)
    if not drawing and self.beginner():
        # Cancel creation.
        print('Cancel creation.')
        self.canvas.setEditing(True)
        self.canvas.restoreCursor()
        self.actions.create.setEnabled(True)
```
id: 21,603
repo: tzutalin/labelImg
path: labelImg.py
func_name: MainWindow.btnstate
language: python
sha: 6afd15aa88f89f41254e0004ed219b3965eb2c0d
url: https://github.com/tzutalin/labelImg/blob/6afd15aa88f89f41254e0004ed219b3965eb2c0d/labelImg.py#L685-L709
docstring: Function to handle difficult examples Update on each object
code:
```python
def btnstate(self, item=None):
    """ Function to handle difficult examples
    Update on each object """
    if not self.canvas.editing():
        return

    item = self.currentItem()
    if not item:  # If not selected Item, take the first one
        item = self.labelList.item(self.labelList.count() - 1)

    difficult = self.diffcButton.isChecked()

    try:
        shape = self.itemsToShapes[item]
    except:
        pass
    # Checked and Update
    try:
        if difficult != shape.difficult:
            shape.difficult = difficult
            self.setDirty()
        else:  # User probably changed item visibility
            self.canvas.setShapeVisible(shape, item.checkState() == Qt.Checked)
    except:
        pass
```
id: 21,604
repo: tzutalin/labelImg
path: labelImg.py
func_name: MainWindow.newShape
language: python
sha: 6afd15aa88f89f41254e0004ed219b3965eb2c0d
url: https://github.com/tzutalin/labelImg/blob/6afd15aa88f89f41254e0004ed219b3965eb2c0d/labelImg.py#L839-L876
docstring: Pop-up and give focus to the label editor. position MUST be in global coordinates.
code:
```python
def newShape(self):
    """Pop-up and give focus to the label editor.

    position MUST be in global coordinates.
    """
    if not self.useDefaultLabelCheckbox.isChecked() or not self.defaultLabelTextLine.text():
        if len(self.labelHist) > 0:
            self.labelDialog = LabelDialog(
                parent=self, listItem=self.labelHist)

        # Sync single class mode from PR#106
        if self.singleClassMode.isChecked() and self.lastLabel:
            text = self.lastLabel
        else:
            text = self.labelDialog.popUp(text=self.prevLabelText)
            self.lastLabel = text
    else:
        text = self.defaultLabelTextLine.text()

    # Add Chris
    self.diffcButton.setChecked(False)
    if text is not None:
        self.prevLabelText = text
        generate_color = generateColorByText(text)
        shape = self.canvas.setLastLabel(text, generate_color, generate_color)
        self.addLabel(shape)
        if self.beginner():  # Switch to edit mode.
            self.canvas.setEditing(True)
            self.actions.create.setEnabled(True)
        else:
            self.actions.editMode.setEnabled(True)
        self.setDirty()

        if text not in self.labelHist:
            self.labelHist.append(text)
    else:
        # self.canvas.undoLastLine()
        self.canvas.resetAllLines()
```
id: 21,605
repo: tzutalin/labelImg
path: labelImg.py
func_name: MainWindow.scaleFitWindow
language: python
sha: 6afd15aa88f89f41254e0004ed219b3965eb2c0d
url: https://github.com/tzutalin/labelImg/blob/6afd15aa88f89f41254e0004ed219b3965eb2c0d/labelImg.py#L1069-L1079
docstring: Figure out the size of the pixmap in order to fit the main widget.
code:
```python
def scaleFitWindow(self):
    """Figure out the size of the pixmap in order to fit the main widget."""
    e = 2.0  # So that no scrollbars are generated.
    w1 = self.centralWidget().width() - e
    h1 = self.centralWidget().height() - e
    a1 = w1 / h1
    # Calculate a new scale value based on the pixmap's aspect ratio.
    w2 = self.canvas.pixmap.width() - 0.0
    h2 = self.canvas.pixmap.height() - 0.0
    a2 = w2 / h2
    return w1 / w2 if a2 >= a1 else h1 / h2
```
id: 21,606
repo: tzutalin/labelImg
path: libs/pascal_voc_io.py
func_name: PascalVocWriter.genXML
language: python
sha: 6afd15aa88f89f41254e0004ed219b3965eb2c0d
url: https://github.com/tzutalin/labelImg/blob/6afd15aa88f89f41254e0004ed219b3965eb2c0d/libs/pascal_voc_io.py#L37-L78
docstring: Return XML root
code:
```python
def genXML(self):
    """
        Return XML root
    """
    # Check conditions
    if self.filename is None or \
            self.foldername is None or \
            self.imgSize is None:
        return None

    top = Element('annotation')
    if self.verified:
        top.set('verified', 'yes')

    folder = SubElement(top, 'folder')
    folder.text = self.foldername

    filename = SubElement(top, 'filename')
    filename.text = self.filename

    if self.localImgPath is not None:
        localImgPath = SubElement(top, 'path')
        localImgPath.text = self.localImgPath

    source = SubElement(top, 'source')
    database = SubElement(source, 'database')
    database.text = self.databaseSrc

    size_part = SubElement(top, 'size')
    width = SubElement(size_part, 'width')
    height = SubElement(size_part, 'height')
    depth = SubElement(size_part, 'depth')
    width.text = str(self.imgSize[1])
    height.text = str(self.imgSize[0])
    if len(self.imgSize) == 3:
        depth.text = str(self.imgSize[2])
    else:
        depth.text = '1'

    segmented = SubElement(top, 'segmented')
    segmented.text = '0'
    return top
```
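A sketch of what the resulting tree serializes to, assuming a hypothetical `writer` instance with `imgSize = (480, 640, 3)`; note the (height, width, depth) ordering implied by the indexing above.

```python
from xml.etree.ElementTree import tostring

top = writer.genXML()  # 'writer' is a hypothetical PascalVocWriter instance
print(tostring(top))
# <annotation><folder>...</folder><filename>...</filename>
#   <source><database>...</database></source>
#   <size><width>640</width><height>480</height><depth>3</depth></size>
#   <segmented>0</segmented></annotation>
```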
id: 21,607
repo: ccxt/ccxt
path: python/ccxt/base/exchange.py
func_name: Exchange.fetch2
language: python
sha: 23062efd7a5892c79b370c9d951c03cf8c0ddf23
url: https://github.com/ccxt/ccxt/blob/23062efd7a5892c79b370c9d951c03cf8c0ddf23/python/ccxt/base/exchange.py#L423-L429
docstring: A better wrapper over request for deferred signing
code:
```python
def fetch2(self, path, api='public', method='GET', params={}, headers=None, body=None):
    """A better wrapper over request for deferred signing"""
    if self.enableRateLimit:
        self.throttle()
    self.lastRestRequestTimestamp = self.milliseconds()
    request = self.sign(path, api, method, params, headers, body)
    return self.fetch(request['url'], request['method'], request['headers'], request['body'])
```
id: 21,608
repo: ccxt/ccxt
path: python/ccxt/base/exchange.py
func_name: Exchange.request
language: python
sha: 23062efd7a5892c79b370c9d951c03cf8c0ddf23
url: https://github.com/ccxt/ccxt/blob/23062efd7a5892c79b370c9d951c03cf8c0ddf23/python/ccxt/base/exchange.py#L431-L433
docstring: Exchange.request is the entry point for all generated methods
code:
```python
def request(self, path, api='public', method='GET', params={}, headers=None, body=None):
    """Exchange.request is the entry point for all generated methods"""
    return self.fetch2(path, api, method, params, headers, body)
```
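A minimal sketch of how these two methods chain (the subclass and endpoint are hypothetical; real ccxt exchanges generate their own `sign`): `request` is the public entry point, while `fetch2` throttles, stamps the request time, signs, and finally performs the HTTP fetch.

```python
class MyExchange(Exchange):
    def sign(self, path, api='public', method='GET', params={}, headers=None, body=None):
        # Hypothetical signer: a real exchange would add credentials/HMAC here.
        return {
            'url': 'https://api.example.com/' + path,  # assumed endpoint
            'method': method,
            'headers': headers,
            'body': body,
        }

# MyExchange().request('ticker')  ->  fetch2  ->  sign  ->  fetch(url, method, headers, body)
```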
id: 21,609
repo: ccxt/ccxt
path: python/ccxt/base/exchange.py
func_name: Exchange.find_broadly_matched_key
language: python
sha: 23062efd7a5892c79b370c9d951c03cf8c0ddf23
url: https://github.com/ccxt/ccxt/blob/23062efd7a5892c79b370c9d951c03cf8c0ddf23/python/ccxt/base/exchange.py#L445-L452
docstring: A helper method for matching error strings exactly vs broadly
code:
```python
def find_broadly_matched_key(self, broad, string):
    """A helper method for matching error strings exactly vs broadly"""
    keys = list(broad.keys())
    for i in range(0, len(keys)):
        key = keys[i]
        if string.find(key) >= 0:
            return key
    return None
```
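A usage sketch (the error message and mapping are made up): the method returns the first key of `broad` that occurs anywhere inside `string`, or `None` when nothing matches.

```python
broad = {'Insufficient funds': 'InsufficientFunds', 'Order not found': 'OrderNotFound'}
msg = 'Error 1050: Insufficient funds for this trade'
print(exchange.find_broadly_matched_key(broad, msg))        # 'Insufficient funds'
print(exchange.find_broadly_matched_key(broad, 'all good')) # None
```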
id: 21,610
repo: ccxt/ccxt
path: python/ccxt/base/exchange.py
func_name: Exchange.truncate
language: python
sha: 23062efd7a5892c79b370c9d951c03cf8c0ddf23
url: https://github.com/ccxt/ccxt/blob/23062efd7a5892c79b370c9d951c03cf8c0ddf23/python/ccxt/base/exchange.py#L622-L627
docstring: Deprecated, use decimal_to_precision instead
code:
```python
def truncate(num, precision=0):
    """Deprecated, use decimal_to_precision instead"""
    if precision > 0:
        decimal_precision = math.pow(10, precision)
        return math.trunc(num * decimal_precision) / decimal_precision
    return int(Exchange.truncate_to_string(num, precision))
```
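A behavior sketch: for positive precision this truncates toward zero rather than rounding, which is why the docstring points at `decimal_to_precision` as the replacement.

```python
print(Exchange.truncate(3.14159, 2))   # 3.14  (math.trunc(314.159) / 100.0)
print(Exchange.truncate(-3.14959, 2))  # -3.14 (truncation toward zero, not floor)
```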
id: 21,611
repo: ccxt/ccxt
path: python/ccxt/base/exchange.py
func_name: Exchange.check_address
language: python
sha: 23062efd7a5892c79b370c9d951c03cf8c0ddf23
url: https://github.com/ccxt/ccxt/blob/23062efd7a5892c79b370c9d951c03cf8c0ddf23/python/ccxt/base/exchange.py#L1000-L1006
docstring: Checks an address is not the same character repeated or an empty sequence
code:
```python
def check_address(self, address):
    """Checks an address is not the same character repeated or an empty sequence"""
    if address is None:
        self.raise_error(InvalidAddress, details='address is None')
    if all(letter == address[0] for letter in address) or len(address) < self.minFundingAddressLength or ' ' in address:
        self.raise_error(InvalidAddress, details='address is invalid or has less than ' + str(self.minFundingAddressLength) + ' characters: "' + str(address) + '"')
    return address
```
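A usage sketch (the addresses are made up; whether the length check trips depends on the instance's `minFundingAddressLength`):

```python
addr = exchange.check_address('1A1zP1eP5QGefi2DMPTfTL5SLmv7DivfNa')  # returned unchanged
exchange.check_address('aaaaaaaaaa')       # raises InvalidAddress: one repeated character
exchange.check_address('addr with space') # raises InvalidAddress: contains a space
```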
id: 21,612
repo: mozilla/DeepSpeech
path: util/benchmark.py
func_name: keep_only_digits
language: python
sha: f64aa73e7fbe9dde40d4fcf23b42ab304747d152
url: https://github.com/mozilla/DeepSpeech/blob/f64aa73e7fbe9dde40d4fcf23b42ab304747d152/util/benchmark.py#L6-L15
docstring: r''' local helper to just keep digits
code:
```python
def keep_only_digits(s):
    r'''
    local helper to just keep digits
    '''
    fs = ''
    for c in s:
        if c.isdigit():
            fs += c

    return int(fs)
```
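A usage sketch: every non-digit character is dropped before the `int()` conversion, so the input must contain at least one digit.

```python
print(keep_only_digits('1.2GHz x4'))          # 124
print(keep_only_digits('MemTotal: 16384 kB')) # 16384
# keep_only_digits('no digits here')          # would raise ValueError: int('')
```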
id: 21,613
repo: mozilla/DeepSpeech
path: examples/vad_transcriber/wavSplit.py
func_name: read_wave
language: python
sha: f64aa73e7fbe9dde40d4fcf23b42ab304747d152
url: https://github.com/mozilla/DeepSpeech/blob/f64aa73e7fbe9dde40d4fcf23b42ab304747d152/examples/vad_transcriber/wavSplit.py#L6-L21
docstring: Reads a .wav file. Takes the path, and returns (PCM audio data, sample rate).
code:
```python
def read_wave(path):
    """Reads a .wav file.

    Takes the path, and returns (PCM audio data, sample rate).
    """
    with contextlib.closing(wave.open(path, 'rb')) as wf:
        num_channels = wf.getnchannels()
        assert num_channels == 1
        sample_width = wf.getsampwidth()
        assert sample_width == 2
        sample_rate = wf.getframerate()
        assert sample_rate in (8000, 16000, 32000)
        frames = wf.getnframes()
        pcm_data = wf.readframes(frames)
        duration = frames / sample_rate
        return pcm_data, sample_rate, duration
```
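A usage sketch (the path is illustrative): the helper asserts mono 16-bit audio at 8/16/32 kHz and returns the raw PCM bytes.

```python
pcm_data, sample_rate, duration = read_wave('utterance.wav')
print(sample_rate)    # e.g. 16000
print(len(pcm_data))  # 2 bytes per sample: sample_rate * duration * 2
```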
id: 21,614
repo: mozilla/DeepSpeech
path: examples/vad_transcriber/wavSplit.py
func_name: write_wave
language: python
sha: f64aa73e7fbe9dde40d4fcf23b42ab304747d152
url: https://github.com/mozilla/DeepSpeech/blob/f64aa73e7fbe9dde40d4fcf23b42ab304747d152/examples/vad_transcriber/wavSplit.py#L24-L33
docstring: Writes a .wav file. Takes path, PCM audio data, and sample rate.
code:
```python
def write_wave(path, audio, sample_rate):
    """Writes a .wav file.

    Takes path, PCM audio data, and sample rate.
    """
    with contextlib.closing(wave.open(path, 'wb')) as wf:
        wf.setnchannels(1)
        wf.setsampwidth(2)
        wf.setframerate(sample_rate)
        wf.writeframes(audio)
```
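A round-trip sketch: write one second of 16-bit mono silence, which `read_wave` above would accept back.

```python
sample_rate = 16000
silence = b'\x00\x00' * sample_rate  # 1 s of 16-bit mono silence
write_wave('silence.wav', silence, sample_rate)
```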
id: 21,615
repo: mozilla/DeepSpeech
path: examples/vad_transcriber/wavSplit.py
func_name: frame_generator
language: python
sha: f64aa73e7fbe9dde40d4fcf23b42ab304747d152
url: https://github.com/mozilla/DeepSpeech/blob/f64aa73e7fbe9dde40d4fcf23b42ab304747d152/examples/vad_transcriber/wavSplit.py#L44-L59
docstring: Generates audio frames from PCM audio data. Takes the desired frame duration in milliseconds, the PCM data, and the sample rate. Yields Frames of the requested duration.
code:
```python
def frame_generator(frame_duration_ms, audio, sample_rate):
    """Generates audio frames from PCM audio data.

    Takes the desired frame duration in milliseconds, the PCM data, and
    the sample rate.

    Yields Frames of the requested duration.
    """
    n = int(sample_rate * (frame_duration_ms / 1000.0) * 2)
    offset = 0
    timestamp = 0.0
    duration = (float(n) / sample_rate) / 2.0
    while offset + n < len(audio):
        yield Frame(audio[offset:offset + n], timestamp, duration)
        timestamp += duration
        offset += n
```
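A usage sketch: `Frame` is defined elsewhere in `wavSplit.py`; a namedtuple stand-in with the same three fields is assumed here. With 16-bit samples, `n` works out to `sample_rate * seconds * 2` bytes per frame.

```python
from collections import namedtuple
Frame = namedtuple('Frame', ['bytes', 'timestamp', 'duration'])  # stand-in for wavSplit's Frame

pcm_data, sample_rate, _ = read_wave('utterance.wav')      # path is illustrative
for frame in frame_generator(30, pcm_data, sample_rate):   # 30 ms windows
    print(frame.timestamp, len(frame.bytes))               # steps of 0.03 s, 960 bytes at 16 kHz
```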
id: 21,616
repo: mozilla/DeepSpeech
path: examples/vad_transcriber/audioTranscript_gui.py
func_name: Worker.run
language: python
sha: f64aa73e7fbe9dde40d4fcf23b42ab304747d152
url: https://github.com/mozilla/DeepSpeech/blob/f64aa73e7fbe9dde40d4fcf23b42ab304747d152/examples/vad_transcriber/audioTranscript_gui.py#L71-L88
docstring: Initialise the runner function with the passed args, kwargs
code:
```python
def run(self):
    '''
    Initialise the runner function with the passed args, kwargs
    '''
    # Retrieve args/kwargs here; and fire up the processing using them
    try:
        transcript = self.fn(*self.args, **self.kwargs)
    except:
        traceback.print_exc()
        exctype, value = sys.exc_info()[:2]
        self.signals.error.emit((exctype, value, traceback.format_exc()))
    else:
        # Return the result of the processing
        self.signals.result.emit(transcript)
    finally:
        # Done
        self.signals.finished.emit()
```
id: 21,617
repo: mozilla/DeepSpeech
path: bin/benchmark_nc.py
func_name: get_arch_string
language: python
sha: f64aa73e7fbe9dde40d4fcf23b42ab304747d152
url: https://github.com/mozilla/DeepSpeech/blob/f64aa73e7fbe9dde40d4fcf23b42ab304747d152/bin/benchmark_nc.py#L68-L91
docstring: r''' Check local or remote system arch, to produce TaskCluster proper link.
code:
```python
def get_arch_string():
    r'''
    Check local or remote system arch, to produce TaskCluster proper link.
    '''
    rc, stdout, stderr = exec_command('uname -sm')
    if rc > 0:
        raise AssertionError('Error checking OS')

    stdout = stdout.lower().strip()

    if not 'linux' in stdout:
        raise AssertionError('Unsupported OS')

    if 'armv7l' in stdout:
        return 'arm'

    if 'x86_64' in stdout:
        nv_rc, nv_stdout, nv_stderr = exec_command('nvidia-smi')
        nv_stdout = nv_stdout.lower().strip()
        if 'NVIDIA-SMI' in nv_stdout:
            return 'gpu'
        else:
            return 'cpu'

    raise AssertionError('Unsupported arch:', stdout)
```
id: 21,618
repo: mozilla/DeepSpeech
path: bin/benchmark_nc.py
func_name: extract_native_client_tarball
language: python
sha: f64aa73e7fbe9dde40d4fcf23b42ab304747d152
url: https://github.com/mozilla/DeepSpeech/blob/f64aa73e7fbe9dde40d4fcf23b42ab304747d152/bin/benchmark_nc.py#L97-L110
docstring: r''' Download a native_client.tar.xz file from TaskCluster and extract it to dir.
code:
```python
def extract_native_client_tarball(dir):
    r'''
    Download a native_client.tar.xz file from TaskCluster and extract it to dir.
    '''
    assert_valid_dir(dir)

    target_tarball = os.path.join(dir, 'native_client.tar.xz')
    if os.path.isfile(target_tarball) and os.stat(target_tarball).st_size == 0:
        return

    subprocess.check_call(['pixz', '-d', 'native_client.tar.xz'], cwd=dir)
    subprocess.check_call(['tar', 'xf', 'native_client.tar'], cwd=dir)
    os.unlink(os.path.join(dir, 'native_client.tar'))
    open(target_tarball, 'w').close()
```
id: 21,619
repo: mozilla/DeepSpeech
path: bin/benchmark_nc.py
func_name: maybe_inspect_zip
language: python
sha: f64aa73e7fbe9dde40d4fcf23b42ab304747d152
url: https://github.com/mozilla/DeepSpeech/blob/f64aa73e7fbe9dde40d4fcf23b42ab304747d152/bin/benchmark_nc.py#L121-L137
docstring: r''' Detect if models is a list of protocolbuffer files or a ZIP file. If the latter, then unzip it and return the list of protocolbuffer files that were inside.
code:
```python
def maybe_inspect_zip(models):
    r'''
    Detect if models is a list of protocolbuffer files or a ZIP file.
    If the latter, then unzip it and return the list of protocolbuffer files
    that were inside.
    '''
    if not(is_zip_file(models)):
        return models

    if len(models) > 1:
        return models

    if len(models) < 1:
        raise AssertionError('No models at all')

    return zipfile.ZipFile(models[0]).namelist()
```
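A usage sketch (`is_zip_file` is a sibling helper in the same script; the paths are illustrative):

```python
maybe_inspect_zip(['a.pb', 'b.pb'])   # not a ZIP: returned unchanged
maybe_inspect_zip(['models.zip'])     # single ZIP: returns the member names inside it
```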
id: 21,620
repo: mozilla/DeepSpeech
path: bin/benchmark_nc.py
func_name: teardown_tempdir
language: python
sha: f64aa73e7fbe9dde40d4fcf23b42ab304747d152
url: https://github.com/mozilla/DeepSpeech/blob/f64aa73e7fbe9dde40d4fcf23b42ab304747d152/bin/benchmark_nc.py#L280-L289
docstring: r''' Cleanup temporary directory.
code:
```python
def teardown_tempdir(dir):
    r'''
    Cleanup temporary directory.
    '''
    if ssh_conn:
        delete_tree(dir)

    assert_valid_dir(dir)
    shutil.rmtree(dir)
```
id: 21,621
repo: mozilla/DeepSpeech
path: bin/benchmark_nc.py
func_name: get_sshconfig
language: python
sha: f64aa73e7fbe9dde40d4fcf23b42ab304747d152
url: https://github.com/mozilla/DeepSpeech/blob/f64aa73e7fbe9dde40d4fcf23b42ab304747d152/bin/benchmark_nc.py#L291-L308
docstring: r''' Read user's SSH configuration file
code:
```python
def get_sshconfig():
    r'''
    Read user's SSH configuration file
    '''
    with open(os.path.expanduser('~/.ssh/config')) as f:
        cfg = paramiko.SSHConfig()
        cfg.parse(f)
        ret_dict = {}
        for d in cfg._config:
            _copy = dict(d)
            # Avoid buggy behavior with strange host definitions, we need
            # Hostname and not Host.
            del _copy['host']
            for host in d['host']:
                ret_dict[host] = _copy['config']

    return ret_dict
```
id: 21,622
repo: mozilla/DeepSpeech
path: bin/benchmark_nc.py
func_name: establish_ssh
language: python
sha: f64aa73e7fbe9dde40d4fcf23b42ab304747d152
url: https://github.com/mozilla/DeepSpeech/blob/f64aa73e7fbe9dde40d4fcf23b42ab304747d152/bin/benchmark_nc.py#L310-L375
docstring: r''' Establish a SSH connection to a remote host. It should be able to use SSH's config file Host name declarations. By default, will not automatically add trust for hosts, will use SSH agent and will try to load keys.
code:
```python
def establish_ssh(target=None, auto_trust=False, allow_agent=True, look_keys=True):
    r'''
    Establish a SSH connection to a remote host. It should be able to use
    SSH's config file Host name declarations. By default, will not
    automatically add trust for hosts, will use SSH agent and will try to
    load keys.
    '''
    def password_prompt(username, hostname):
        r'''
        If the Host is relying on password authentication, lets ask it.
        Relying on SSH itself to take care of that would not work when the
        remote authentication is password behind a SSH-key+2FA jumphost.
        '''
        return getpass.getpass('No SSH key for %s@%s, please provide password: ' % (username, hostname))

    ssh_conn = None
    if target is not None:
        ssh_conf = get_sshconfig()
        cfg = {
            'hostname': None,
            'port': 22,
            'allow_agent': allow_agent,
            'look_for_keys': look_keys
        }
        if ssh_conf.has_key(target):
            user_config = ssh_conf.get(target)

            # If ssh_config file's Host defined 'User' instead of 'Username'
            if user_config.has_key('user') and not user_config.has_key('username'):
                user_config['username'] = user_config['user']
                del user_config['user']

            for k in ('username', 'hostname', 'port'):
                if k in user_config:
                    cfg[k] = user_config[k]

            # Assume Password auth. If we don't do that, then when connecting
            # through a jumphost we will run into issues and the user will
            # not be able to input his password to the SSH prompt.
            if 'identityfile' in user_config:
                cfg['key_filename'] = user_config['identityfile']
            else:
                cfg['password'] = password_prompt(cfg['username'], cfg['hostname'] or target)

            # Should be the last one, since ProxyCommand will issue connection to remote host
            if 'proxycommand' in user_config:
                cfg['sock'] = paramiko.ProxyCommand(user_config['proxycommand'])
        else:
            cfg['username'] = target.split('@')[0]
            cfg['hostname'] = target.split('@')[1].split(':')[0]
            cfg['password'] = password_prompt(cfg['username'], cfg['hostname'])
            try:
                cfg['port'] = int(target.split('@')[1].split(':')[1])
            except IndexError:
                # IndexError will happen if no :PORT is there.
                # Default value 22 is defined above in 'cfg'.
                pass

        ssh_conn = paramiko.SSHClient()
        if auto_trust:
            ssh_conn.set_missing_host_key_policy(paramiko.AutoAddPolicy())

        ssh_conn.connect(**cfg)

    return ssh_conn
```
id: 21,623
repo: mozilla/DeepSpeech
path: bin/benchmark_nc.py
func_name: run_benchmarks
language: python
sha: f64aa73e7fbe9dde40d4fcf23b42ab304747d152
url: https://github.com/mozilla/DeepSpeech/blob/f64aa73e7fbe9dde40d4fcf23b42ab304747d152/bin/benchmark_nc.py#L377-L422
docstring: r''' Core of the running of the benchmarks. We will run on all of models, against the WAV file provided as wav, and the provided alphabet.
code:
```python
def run_benchmarks(dir, models, wav, alphabet, lm_binary=None, trie=None, iters=-1):
    r'''
    Core of the running of the benchmarks. We will run on all of models,
    against the WAV file provided as wav, and the provided alphabet.
    '''
    assert_valid_dir(dir)

    inference_times = [ ]

    for model in models:
        model_filename = model

        current_model = {
            'name':   model,
            'iters':  [ ],
            'mean':   numpy.infty,
            'stddev': numpy.infty
        }

        if lm_binary and trie:
            cmdline = './deepspeech --model "%s" --alphabet "%s" --lm "%s" --trie "%s" --audio "%s" -t' % (model_filename, alphabet, lm_binary, trie, wav)
        else:
            cmdline = './deepspeech --model "%s" --alphabet "%s" --audio "%s" -t' % (model_filename, alphabet, wav)

        for it in range(iters):
            sys.stdout.write('\rRunning %s: %d/%d' % (os.path.basename(model), (it+1), iters))
            sys.stdout.flush()

            rc, stdout, stderr = exec_command(cmdline, cwd=dir)
            if rc == 0:
                inference_time = float(stdout.split('\n')[1].split('=')[-1])
                # print("[%d] model=%s inference=%f" % (it, model, inference_time))
                current_model['iters'].append(inference_time)
            else:
                print('exec_command("%s") failed with rc=%d' % (cmdline, rc))
                print('stdout: %s' % stdout)
                print('stderr: %s' % stderr)
                raise AssertionError('Execution failure: rc=%d' % (rc))

        sys.stdout.write('\n')
        sys.stdout.flush()

        current_model['mean']   = numpy.mean(current_model['iters'])
        current_model['stddev'] = numpy.std(current_model['iters'])
        inference_times.append(current_model)

    return inference_times
```
21,624
mozilla/DeepSpeech
bin/benchmark_nc.py
produce_csv
def produce_csv(input, output): r''' Take an input dictionnary and write it to the object-file output. ''' output.write('"model","mean","std"\n') for model_data in input: output.write('"%s",%f,%f\n' % (model_data['name'], model_data['mean'], model_data['stddev'])) output.flush() output.close() print("Wrote as %s" % output.name)
python
def produce_csv(input, output): r''' Take an input dictionnary and write it to the object-file output. ''' output.write('"model","mean","std"\n') for model_data in input: output.write('"%s",%f,%f\n' % (model_data['name'], model_data['mean'], model_data['stddev'])) output.flush() output.close() print("Wrote as %s" % output.name)
[ "def", "produce_csv", "(", "input", ",", "output", ")", ":", "output", ".", "write", "(", "'\"model\",\"mean\",\"std\"\\n'", ")", "for", "model_data", "in", "input", ":", "output", ".", "write", "(", "'\"%s\",%f,%f\\n'", "%", "(", "model_data", "[", "'name'", "]", ",", "model_data", "[", "'mean'", "]", ",", "model_data", "[", "'stddev'", "]", ")", ")", "output", ".", "flush", "(", ")", "output", ".", "close", "(", ")", "print", "(", "\"Wrote as %s\"", "%", "output", ".", "name", ")" ]
r''' Take an input dictionary and write it to the file-like object output.
[ "r", "Take", "an", "input", "dictionnary", "and", "write", "it", "to", "the", "object", "-", "file", "output", "." ]
f64aa73e7fbe9dde40d4fcf23b42ab304747d152
https://github.com/mozilla/DeepSpeech/blob/f64aa73e7fbe9dde40d4fcf23b42ab304747d152/bin/benchmark_nc.py#L424-L433
21,625
mozilla/DeepSpeech
bin/import_voxforge.py
_parallel_downloader
def _parallel_downloader(voxforge_url, archive_dir, total, counter): """Generate a function to download a file based on given parameters This works by currying the above given arguments into a closure in the form of the following function. :param voxforge_url: the base voxforge URL :param archive_dir: the location to store the downloaded file :param total: the total number of files to download :param counter: an atomic counter to keep track of # of downloaded files :return: a function that actually downloads a file given these params """ def download(d): """Binds voxforge_url, archive_dir, total, and counter into this scope Downloads the given file :param d: a tuple consisting of (index, file) where index is the index of the file to download and file is the name of the file to download """ (i, file) = d download_url = voxforge_url + '/' + file c = counter.increment() print('Downloading file {} ({}/{})...'.format(i+1, c, total)) maybe_download(filename_of(download_url), archive_dir, download_url) return download
python
def _parallel_downloader(voxforge_url, archive_dir, total, counter): """Generate a function to download a file based on given parameters This works by currying the above given arguments into a closure in the form of the following function. :param voxforge_url: the base voxforge URL :param archive_dir: the location to store the downloaded file :param total: the total number of files to download :param counter: an atomic counter to keep track of # of downloaded files :return: a function that actually downloads a file given these params """ def download(d): """Binds voxforge_url, archive_dir, total, and counter into this scope Downloads the given file :param d: a tuple consisting of (index, file) where index is the index of the file to download and file is the name of the file to download """ (i, file) = d download_url = voxforge_url + '/' + file c = counter.increment() print('Downloading file {} ({}/{})...'.format(i+1, c, total)) maybe_download(filename_of(download_url), archive_dir, download_url) return download
[ "def", "_parallel_downloader", "(", "voxforge_url", ",", "archive_dir", ",", "total", ",", "counter", ")", ":", "def", "download", "(", "d", ")", ":", "\"\"\"Binds voxforge_url, archive_dir, total, and counter into this scope\n Downloads the given file\n :param d: a tuple consisting of (index, file) where index is the index\n of the file to download and file is the name of the file to download\n \"\"\"", "(", "i", ",", "file", ")", "=", "d", "download_url", "=", "voxforge_url", "+", "'/'", "+", "file", "c", "=", "counter", ".", "increment", "(", ")", "print", "(", "'Downloading file {} ({}/{})...'", ".", "format", "(", "i", "+", "1", ",", "c", ",", "total", ")", ")", "maybe_download", "(", "filename_of", "(", "download_url", ")", ",", "archive_dir", ",", "download_url", ")", "return", "download" ]
Generate a function to download a file based on the given parameters. This works by currying the given arguments into a closure in the form of the following function. :param voxforge_url: the base voxforge URL :param archive_dir: the location to store the downloaded file :param total: the total number of files to download :param counter: an atomic counter to keep track of # of downloaded files :return: a function that actually downloads a file given these params
[ "Generate", "a", "function", "to", "download", "a", "file", "based", "on", "given", "parameters", "This", "works", "by", "currying", "the", "above", "given", "arguments", "into", "a", "closure", "in", "the", "form", "of", "the", "following", "function", "." ]
f64aa73e7fbe9dde40d4fcf23b42ab304747d152
https://github.com/mozilla/DeepSpeech/blob/f64aa73e7fbe9dde40d4fcf23b42ab304747d152/bin/import_voxforge.py#L50-L72
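The closure returned by _parallel_downloader is meant to be mapped over (index, filename) pairs. A small sketch of that pattern, with a minimal thread-safe counter standing in for whatever counter class the importer actually uses (the URL, paths and file names below are placeholders):

import threading
from multiprocessing.dummy import Pool  # thread-backed pool, enough for a sketch

class _Counter(object):
    # Minimal thread-safe counter exposing the increment() used above.
    def __init__(self):
        self._lock = threading.Lock()
        self._value = 0
    def increment(self):
        with self._lock:
            self._value += 1
            return self._value

files = ['sample-a.tgz', 'sample-b.tgz']                        # placeholder archives
download = _parallel_downloader('http://example.org/voxforge',  # placeholder URL
                                '/tmp/archives', len(files), _Counter())
with Pool(2) as pool:
    pool.map(download, enumerate(files))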
21,626
mozilla/DeepSpeech
bin/import_voxforge.py
_parallel_extracter
def _parallel_extracter(data_dir, number_of_test, number_of_dev, total, counter): """Generate a function to extract a tar file based on given parameters This works by currying the above given arguments into a closure in the form of the following function. :param data_dir: the target directory to extract into :param number_of_test: the number of files to keep as the test set :param number_of_dev: the number of files to keep as the dev set :param total: the total number of files to extract :param counter: an atomic counter to keep track of # of extracted files :return: a function that actually extracts a tar file given these params """ def extract(d): """Binds data_dir, number_of_test, number_of_dev, total, and counter into this scope Extracts the given file :param d: a tuple consisting of (index, file) where index is the index of the file to extract and file is the name of the file to extract """ (i, archive) = d if i < number_of_test: dataset_dir = path.join(data_dir, "test") elif i<number_of_test+number_of_dev: dataset_dir = path.join(data_dir, "dev") else: dataset_dir = path.join(data_dir, "train") if not gfile.Exists(path.join(dataset_dir, '.'.join(filename_of(archive).split(".")[:-1]))): c = counter.increment() print('Extracting file {} ({}/{})...'.format(i+1, c, total)) tar = tarfile.open(archive) tar.extractall(dataset_dir) tar.close() return extract
python
def _parallel_extracter(data_dir, number_of_test, number_of_dev, total, counter): """Generate a function to extract a tar file based on given parameters This works by currying the above given arguments into a closure in the form of the following function. :param data_dir: the target directory to extract into :param number_of_test: the number of files to keep as the test set :param number_of_dev: the number of files to keep as the dev set :param total: the total number of files to extract :param counter: an atomic counter to keep track of # of extracted files :return: a function that actually extracts a tar file given these params """ def extract(d): """Binds data_dir, number_of_test, number_of_dev, total, and counter into this scope Extracts the given file :param d: a tuple consisting of (index, file) where index is the index of the file to extract and file is the name of the file to extract """ (i, archive) = d if i < number_of_test: dataset_dir = path.join(data_dir, "test") elif i<number_of_test+number_of_dev: dataset_dir = path.join(data_dir, "dev") else: dataset_dir = path.join(data_dir, "train") if not gfile.Exists(path.join(dataset_dir, '.'.join(filename_of(archive).split(".")[:-1]))): c = counter.increment() print('Extracting file {} ({}/{})...'.format(i+1, c, total)) tar = tarfile.open(archive) tar.extractall(dataset_dir) tar.close() return extract
[ "def", "_parallel_extracter", "(", "data_dir", ",", "number_of_test", ",", "number_of_dev", ",", "total", ",", "counter", ")", ":", "def", "extract", "(", "d", ")", ":", "\"\"\"Binds data_dir, number_of_test, number_of_dev, total, and counter into this scope\n Extracts the given file\n :param d: a tuple consisting of (index, file) where index is the index\n of the file to extract and file is the name of the file to extract\n \"\"\"", "(", "i", ",", "archive", ")", "=", "d", "if", "i", "<", "number_of_test", ":", "dataset_dir", "=", "path", ".", "join", "(", "data_dir", ",", "\"test\"", ")", "elif", "i", "<", "number_of_test", "+", "number_of_dev", ":", "dataset_dir", "=", "path", ".", "join", "(", "data_dir", ",", "\"dev\"", ")", "else", ":", "dataset_dir", "=", "path", ".", "join", "(", "data_dir", ",", "\"train\"", ")", "if", "not", "gfile", ".", "Exists", "(", "path", ".", "join", "(", "dataset_dir", ",", "'.'", ".", "join", "(", "filename_of", "(", "archive", ")", ".", "split", "(", "\".\"", ")", "[", ":", "-", "1", "]", ")", ")", ")", ":", "c", "=", "counter", ".", "increment", "(", ")", "print", "(", "'Extracting file {} ({}/{})...'", ".", "format", "(", "i", "+", "1", ",", "c", ",", "total", ")", ")", "tar", "=", "tarfile", ".", "open", "(", "archive", ")", "tar", ".", "extractall", "(", "dataset_dir", ")", "tar", ".", "close", "(", ")", "return", "extract" ]
Generate a function to extract a tar file based on the given parameters. This works by currying the given arguments into a closure in the form of the following function. :param data_dir: the target directory to extract into :param number_of_test: the number of files to keep as the test set :param number_of_dev: the number of files to keep as the dev set :param total: the total number of files to extract :param counter: an atomic counter to keep track of # of extracted files :return: a function that actually extracts a tar file given these params
[ "Generate", "a", "function", "to", "extract", "a", "tar", "file", "based", "on", "given", "parameters", "This", "works", "by", "currying", "the", "above", "given", "arguments", "into", "a", "closure", "in", "the", "form", "of", "the", "following", "function", "." ]
f64aa73e7fbe9dde40d4fcf23b42ab304747d152
https://github.com/mozilla/DeepSpeech/blob/f64aa73e7fbe9dde40d4fcf23b42ab304747d152/bin/import_voxforge.py#L74-L105
21,627
mozilla/DeepSpeech
util/text.py
text_to_char_array
def text_to_char_array(original, alphabet): r""" Given a Python string ``original``, remove unsupported characters, map characters to integers and return a numpy array representing the processed string. """ return np.asarray([alphabet.label_from_string(c) for c in original])
python
def text_to_char_array(original, alphabet): r""" Given a Python string ``original``, remove unsupported characters, map characters to integers and return a numpy array representing the processed string. """ return np.asarray([alphabet.label_from_string(c) for c in original])
[ "def", "text_to_char_array", "(", "original", ",", "alphabet", ")", ":", "return", "np", ".", "asarray", "(", "[", "alphabet", ".", "label_from_string", "(", "c", ")", "for", "c", "in", "original", "]", ")" ]
r""" Given a Python string ``original``, remove unsupported characters, map characters to integers and return a numpy array representing the processed string.
[ "r", "Given", "a", "Python", "string", "original", "remove", "unsupported", "characters", "map", "characters", "to", "integers", "and", "return", "a", "numpy", "array", "representing", "the", "processed", "string", "." ]
f64aa73e7fbe9dde40d4fcf23b42ab304747d152
https://github.com/mozilla/DeepSpeech/blob/f64aa73e7fbe9dde40d4fcf23b42ab304747d152/util/text.py#L50-L55
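A toy illustration of text_to_char_array with a hypothetical stand-in for the project's Alphabet class (the real one reads its labels from a config file):

import numpy as np

class ToyAlphabet(object):
    # Hypothetical stand-in exposing the label_from_string() call used above.
    def __init__(self, chars):
        self._labels = {c: i for i, c in enumerate(chars)}
    def label_from_string(self, c):
        return self._labels[c]

alphabet = ToyAlphabet(" abcdefghijklmnopqrstuvwxyz'")
print(text_to_char_array('hi', alphabet))   # -> [8 9]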
21,628
mozilla/DeepSpeech
DeepSpeech.py
calculate_mean_edit_distance_and_loss
def calculate_mean_edit_distance_and_loss(iterator, dropout, reuse): r''' This routine beam search decodes a mini-batch and calculates the loss and mean edit distance. Next to total and average loss it returns the mean edit distance, the decoded result and the batch's original Y. ''' # Obtain the next batch of data (batch_x, batch_seq_len), batch_y = iterator.get_next() # Calculate the logits of the batch logits, _ = create_model(batch_x, batch_seq_len, dropout, reuse=reuse) # Compute the CTC loss using TensorFlow's `ctc_loss` total_loss = tf.nn.ctc_loss(labels=batch_y, inputs=logits, sequence_length=batch_seq_len) # Calculate the average loss across the batch avg_loss = tf.reduce_mean(total_loss) # Finally we return the average loss return avg_loss
python
def calculate_mean_edit_distance_and_loss(iterator, dropout, reuse): r''' This routine beam search decodes a mini-batch and calculates the loss and mean edit distance. Next to total and average loss it returns the mean edit distance, the decoded result and the batch's original Y. ''' # Obtain the next batch of data (batch_x, batch_seq_len), batch_y = iterator.get_next() # Calculate the logits of the batch logits, _ = create_model(batch_x, batch_seq_len, dropout, reuse=reuse) # Compute the CTC loss using TensorFlow's `ctc_loss` total_loss = tf.nn.ctc_loss(labels=batch_y, inputs=logits, sequence_length=batch_seq_len) # Calculate the average loss across the batch avg_loss = tf.reduce_mean(total_loss) # Finally we return the average loss return avg_loss
[ "def", "calculate_mean_edit_distance_and_loss", "(", "iterator", ",", "dropout", ",", "reuse", ")", ":", "# Obtain the next batch of data", "(", "batch_x", ",", "batch_seq_len", ")", ",", "batch_y", "=", "iterator", ".", "get_next", "(", ")", "# Calculate the logits of the batch", "logits", ",", "_", "=", "create_model", "(", "batch_x", ",", "batch_seq_len", ",", "dropout", ",", "reuse", "=", "reuse", ")", "# Compute the CTC loss using TensorFlow's `ctc_loss`", "total_loss", "=", "tf", ".", "nn", ".", "ctc_loss", "(", "labels", "=", "batch_y", ",", "inputs", "=", "logits", ",", "sequence_length", "=", "batch_seq_len", ")", "# Calculate the average loss across the batch", "avg_loss", "=", "tf", ".", "reduce_mean", "(", "total_loss", ")", "# Finally we return the average loss", "return", "avg_loss" ]
r''' This routine beam-search decodes a mini-batch and calculates the loss and mean edit distance. In addition to the total and average loss, it returns the mean edit distance, the decoded result and the batch's original Y.
[ "r", "This", "routine", "beam", "search", "decodes", "a", "mini", "-", "batch", "and", "calculates", "the", "loss", "and", "mean", "edit", "distance", ".", "Next", "to", "total", "and", "average", "loss", "it", "returns", "the", "mean", "edit", "distance", "the", "decoded", "result", "and", "the", "batch", "s", "original", "Y", "." ]
f64aa73e7fbe9dde40d4fcf23b42ab304747d152
https://github.com/mozilla/DeepSpeech/blob/f64aa73e7fbe9dde40d4fcf23b42ab304747d152/DeepSpeech.py#L176-L195
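Note that the body shown returns only the average loss, so the docstring promises more than this revision delivers. A self-contained sketch of the central tf.nn.ctc_loss step (TF1-style graph code; the shapes, labels and class count are toy assumptions):

import tensorflow as tf  # TF1.x API, matching this codebase

max_time, batch_size, num_classes = 5, 2, 29          # toy dimensions
logits = tf.random_normal([max_time, batch_size, num_classes])  # time-major
seq_len = tf.constant([max_time] * batch_size, dtype=tf.int32)
# Sparse labels: both toy transcripts are the label sequence [1, 2].
labels = tf.SparseTensor(indices=[[0, 0], [0, 1], [1, 0], [1, 1]],
                         values=tf.constant([1, 2, 1, 2], dtype=tf.int32),
                         dense_shape=[batch_size, 2])
total_loss = tf.nn.ctc_loss(labels=labels, inputs=logits, sequence_length=seq_len)
avg_loss = tf.reduce_mean(total_loss)
with tf.Session() as sess:
    print(sess.run(avg_loss))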
21,629
mozilla/DeepSpeech
DeepSpeech.py
get_tower_results
def get_tower_results(iterator, optimizer, dropout_rates): r''' With this preliminary step out of the way, we can for each GPU introduce a tower for which's batch we calculate and return the optimization gradients and the average loss across towers. ''' # To calculate the mean of the losses tower_avg_losses = [] # Tower gradients to return tower_gradients = [] with tf.variable_scope(tf.get_variable_scope()): # Loop over available_devices for i in range(len(Config.available_devices)): # Execute operations of tower i on device i device = Config.available_devices[i] with tf.device(device): # Create a scope for all operations of tower i with tf.name_scope('tower_%d' % i): # Calculate the avg_loss and mean_edit_distance and retrieve the decoded # batch along with the original batch's labels (Y) of this tower avg_loss = calculate_mean_edit_distance_and_loss(iterator, dropout_rates, reuse=i > 0) # Allow for variables to be re-used by the next tower tf.get_variable_scope().reuse_variables() # Retain tower's avg losses tower_avg_losses.append(avg_loss) # Compute gradients for model parameters using tower's mini-batch gradients = optimizer.compute_gradients(avg_loss) # Retain tower's gradients tower_gradients.append(gradients) avg_loss_across_towers = tf.reduce_mean(tower_avg_losses, 0) tf.summary.scalar(name='step_loss', tensor=avg_loss_across_towers, collections=['step_summaries']) # Return gradients and the average loss return tower_gradients, avg_loss_across_towers
python
def get_tower_results(iterator, optimizer, dropout_rates): r''' With this preliminary step out of the way, we can for each GPU introduce a tower for which's batch we calculate and return the optimization gradients and the average loss across towers. ''' # To calculate the mean of the losses tower_avg_losses = [] # Tower gradients to return tower_gradients = [] with tf.variable_scope(tf.get_variable_scope()): # Loop over available_devices for i in range(len(Config.available_devices)): # Execute operations of tower i on device i device = Config.available_devices[i] with tf.device(device): # Create a scope for all operations of tower i with tf.name_scope('tower_%d' % i): # Calculate the avg_loss and mean_edit_distance and retrieve the decoded # batch along with the original batch's labels (Y) of this tower avg_loss = calculate_mean_edit_distance_and_loss(iterator, dropout_rates, reuse=i > 0) # Allow for variables to be re-used by the next tower tf.get_variable_scope().reuse_variables() # Retain tower's avg losses tower_avg_losses.append(avg_loss) # Compute gradients for model parameters using tower's mini-batch gradients = optimizer.compute_gradients(avg_loss) # Retain tower's gradients tower_gradients.append(gradients) avg_loss_across_towers = tf.reduce_mean(tower_avg_losses, 0) tf.summary.scalar(name='step_loss', tensor=avg_loss_across_towers, collections=['step_summaries']) # Return gradients and the average loss return tower_gradients, avg_loss_across_towers
[ "def", "get_tower_results", "(", "iterator", ",", "optimizer", ",", "dropout_rates", ")", ":", "# To calculate the mean of the losses", "tower_avg_losses", "=", "[", "]", "# Tower gradients to return", "tower_gradients", "=", "[", "]", "with", "tf", ".", "variable_scope", "(", "tf", ".", "get_variable_scope", "(", ")", ")", ":", "# Loop over available_devices", "for", "i", "in", "range", "(", "len", "(", "Config", ".", "available_devices", ")", ")", ":", "# Execute operations of tower i on device i", "device", "=", "Config", ".", "available_devices", "[", "i", "]", "with", "tf", ".", "device", "(", "device", ")", ":", "# Create a scope for all operations of tower i", "with", "tf", ".", "name_scope", "(", "'tower_%d'", "%", "i", ")", ":", "# Calculate the avg_loss and mean_edit_distance and retrieve the decoded", "# batch along with the original batch's labels (Y) of this tower", "avg_loss", "=", "calculate_mean_edit_distance_and_loss", "(", "iterator", ",", "dropout_rates", ",", "reuse", "=", "i", ">", "0", ")", "# Allow for variables to be re-used by the next tower", "tf", ".", "get_variable_scope", "(", ")", ".", "reuse_variables", "(", ")", "# Retain tower's avg losses", "tower_avg_losses", ".", "append", "(", "avg_loss", ")", "# Compute gradients for model parameters using tower's mini-batch", "gradients", "=", "optimizer", ".", "compute_gradients", "(", "avg_loss", ")", "# Retain tower's gradients", "tower_gradients", ".", "append", "(", "gradients", ")", "avg_loss_across_towers", "=", "tf", ".", "reduce_mean", "(", "tower_avg_losses", ",", "0", ")", "tf", ".", "summary", ".", "scalar", "(", "name", "=", "'step_loss'", ",", "tensor", "=", "avg_loss_across_towers", ",", "collections", "=", "[", "'step_summaries'", "]", ")", "# Return gradients and the average loss", "return", "tower_gradients", ",", "avg_loss_across_towers" ]
r''' With this preliminary step out of the way, we can, for each GPU, introduce a tower for whose batch we calculate and return the optimization gradients and the average loss across towers.
[ "r", "With", "this", "preliminary", "step", "out", "of", "the", "way", "we", "can", "for", "each", "GPU", "introduce", "a", "tower", "for", "which", "s", "batch", "we", "calculate", "and", "return", "the", "optimization", "gradients", "and", "the", "average", "loss", "across", "towers", "." ]
f64aa73e7fbe9dde40d4fcf23b42ab304747d152
https://github.com/mozilla/DeepSpeech/blob/f64aa73e7fbe9dde40d4fcf23b42ab304747d152/DeepSpeech.py#L231-L273
21,630
mozilla/DeepSpeech
DeepSpeech.py
average_gradients
def average_gradients(tower_gradients): r''' A routine for computing each variable's average of the gradients obtained from the GPUs. Note also that this code acts as a synchronization point as it requires all GPUs to be finished with their mini-batch before it can run to completion. ''' # List of average gradients to return to the caller average_grads = [] # Run this on cpu_device to conserve GPU memory with tf.device(Config.cpu_device): # Loop over gradient/variable pairs from all towers for grad_and_vars in zip(*tower_gradients): # Introduce grads to store the gradients for the current variable grads = [] # Loop over the gradients for the current variable for g, _ in grad_and_vars: # Add 0 dimension to the gradients to represent the tower. expanded_g = tf.expand_dims(g, 0) # Append on a 'tower' dimension which we will average over below. grads.append(expanded_g) # Average over the 'tower' dimension grad = tf.concat(grads, 0) grad = tf.reduce_mean(grad, 0) # Create a gradient/variable tuple for the current variable with its average gradient grad_and_var = (grad, grad_and_vars[0][1]) # Add the current tuple to average_grads average_grads.append(grad_and_var) # Return result to caller return average_grads
python
def average_gradients(tower_gradients): r''' A routine for computing each variable's average of the gradients obtained from the GPUs. Note also that this code acts as a synchronization point as it requires all GPUs to be finished with their mini-batch before it can run to completion. ''' # List of average gradients to return to the caller average_grads = [] # Run this on cpu_device to conserve GPU memory with tf.device(Config.cpu_device): # Loop over gradient/variable pairs from all towers for grad_and_vars in zip(*tower_gradients): # Introduce grads to store the gradients for the current variable grads = [] # Loop over the gradients for the current variable for g, _ in grad_and_vars: # Add 0 dimension to the gradients to represent the tower. expanded_g = tf.expand_dims(g, 0) # Append on a 'tower' dimension which we will average over below. grads.append(expanded_g) # Average over the 'tower' dimension grad = tf.concat(grads, 0) grad = tf.reduce_mean(grad, 0) # Create a gradient/variable tuple for the current variable with its average gradient grad_and_var = (grad, grad_and_vars[0][1]) # Add the current tuple to average_grads average_grads.append(grad_and_var) # Return result to caller return average_grads
[ "def", "average_gradients", "(", "tower_gradients", ")", ":", "# List of average gradients to return to the caller", "average_grads", "=", "[", "]", "# Run this on cpu_device to conserve GPU memory", "with", "tf", ".", "device", "(", "Config", ".", "cpu_device", ")", ":", "# Loop over gradient/variable pairs from all towers", "for", "grad_and_vars", "in", "zip", "(", "*", "tower_gradients", ")", ":", "# Introduce grads to store the gradients for the current variable", "grads", "=", "[", "]", "# Loop over the gradients for the current variable", "for", "g", ",", "_", "in", "grad_and_vars", ":", "# Add 0 dimension to the gradients to represent the tower.", "expanded_g", "=", "tf", ".", "expand_dims", "(", "g", ",", "0", ")", "# Append on a 'tower' dimension which we will average over below.", "grads", ".", "append", "(", "expanded_g", ")", "# Average over the 'tower' dimension", "grad", "=", "tf", ".", "concat", "(", "grads", ",", "0", ")", "grad", "=", "tf", ".", "reduce_mean", "(", "grad", ",", "0", ")", "# Create a gradient/variable tuple for the current variable with its average gradient", "grad_and_var", "=", "(", "grad", ",", "grad_and_vars", "[", "0", "]", "[", "1", "]", ")", "# Add the current tuple to average_grads", "average_grads", ".", "append", "(", "grad_and_var", ")", "# Return result to caller", "return", "average_grads" ]
r''' A routine for computing each variable's average of the gradients obtained from the GPUs. Note also that this code acts as a synchronization point as it requires all GPUs to be finished with their mini-batch before it can run to completion.
[ "r", "A", "routine", "for", "computing", "each", "variable", "s", "average", "of", "the", "gradients", "obtained", "from", "the", "GPUs", ".", "Note", "also", "that", "this", "code", "acts", "as", "a", "synchronization", "point", "as", "it", "requires", "all", "GPUs", "to", "be", "finished", "with", "their", "mini", "-", "batch", "before", "it", "can", "run", "to", "completion", "." ]
f64aa73e7fbe9dde40d4fcf23b42ab304747d152
https://github.com/mozilla/DeepSpeech/blob/f64aa73e7fbe9dde40d4fcf23b42ab304747d152/DeepSpeech.py#L276-L310
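The heart of average_gradients is a stack-and-mean over the tower axis; the same arithmetic in plain NumPy (toy gradients, no TF graph):

import numpy as np

# Two towers, each producing a gradient for the same 2x2 variable.
tower_grads = [np.array([[1., 2.], [3., 4.]]),
               np.array([[3., 4.], [5., 6.]])]
# Equivalent of tf.expand_dims + tf.concat + tf.reduce_mean along axis 0.
avg = np.mean(np.stack(tower_grads, axis=0), axis=0)
print(avg)   # [[2. 3.] [4. 5.]]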
21,631
mozilla/DeepSpeech
native_client/ctcdecode/__init__.py
ctc_beam_search_decoder
def ctc_beam_search_decoder(probs_seq, alphabet, beam_size, cutoff_prob=1.0, cutoff_top_n=40, scorer=None): """Wrapper for the CTC Beam Search Decoder. :param probs_seq: 2-D list of probability distributions over each time step, with each element being a list of normalized probabilities over alphabet and blank. :type probs_seq: 2-D list :param alphabet: alphabet list. :alphabet: Alphabet :param beam_size: Width for beam search. :type beam_size: int :param cutoff_prob: Cutoff probability in pruning, default 1.0, no pruning. :type cutoff_prob: float :param cutoff_top_n: Cutoff number in pruning, only top cutoff_top_n characters with highest probs in alphabet will be used in beam search, default 40. :type cutoff_top_n: int :param scorer: External scorer for partially decoded sentence, e.g. word count or language model. :type scorer: Scorer :return: List of tuples of log probability and sentence as decoding results, in descending order of the probability. :rtype: list """ beam_results = swigwrapper.ctc_beam_search_decoder( probs_seq, alphabet.config_file(), beam_size, cutoff_prob, cutoff_top_n, scorer) beam_results = [(res.probability, alphabet.decode(res.tokens)) for res in beam_results] return beam_results
python
def ctc_beam_search_decoder(probs_seq, alphabet, beam_size, cutoff_prob=1.0, cutoff_top_n=40, scorer=None): """Wrapper for the CTC Beam Search Decoder. :param probs_seq: 2-D list of probability distributions over each time step, with each element being a list of normalized probabilities over alphabet and blank. :type probs_seq: 2-D list :param alphabet: alphabet list. :alphabet: Alphabet :param beam_size: Width for beam search. :type beam_size: int :param cutoff_prob: Cutoff probability in pruning, default 1.0, no pruning. :type cutoff_prob: float :param cutoff_top_n: Cutoff number in pruning, only top cutoff_top_n characters with highest probs in alphabet will be used in beam search, default 40. :type cutoff_top_n: int :param scorer: External scorer for partially decoded sentence, e.g. word count or language model. :type scorer: Scorer :return: List of tuples of log probability and sentence as decoding results, in descending order of the probability. :rtype: list """ beam_results = swigwrapper.ctc_beam_search_decoder( probs_seq, alphabet.config_file(), beam_size, cutoff_prob, cutoff_top_n, scorer) beam_results = [(res.probability, alphabet.decode(res.tokens)) for res in beam_results] return beam_results
[ "def", "ctc_beam_search_decoder", "(", "probs_seq", ",", "alphabet", ",", "beam_size", ",", "cutoff_prob", "=", "1.0", ",", "cutoff_top_n", "=", "40", ",", "scorer", "=", "None", ")", ":", "beam_results", "=", "swigwrapper", ".", "ctc_beam_search_decoder", "(", "probs_seq", ",", "alphabet", ".", "config_file", "(", ")", ",", "beam_size", ",", "cutoff_prob", ",", "cutoff_top_n", ",", "scorer", ")", "beam_results", "=", "[", "(", "res", ".", "probability", ",", "alphabet", ".", "decode", "(", "res", ".", "tokens", ")", ")", "for", "res", "in", "beam_results", "]", "return", "beam_results" ]
Wrapper for the CTC Beam Search Decoder. :param probs_seq: 2-D list of probability distributions over each time step, with each element being a list of normalized probabilities over alphabet and blank. :type probs_seq: 2-D list :param alphabet: alphabet list. :type alphabet: Alphabet :param beam_size: Width for beam search. :type beam_size: int :param cutoff_prob: Cutoff probability in pruning, default 1.0, no pruning. :type cutoff_prob: float :param cutoff_top_n: Cutoff number in pruning, only top cutoff_top_n characters with highest probs in alphabet will be used in beam search, default 40. :type cutoff_top_n: int :param scorer: External scorer for partially decoded sentence, e.g. word count or language model. :type scorer: Scorer :return: List of tuples of log probability and sentence as decoding results, in descending order of the probability. :rtype: list
[ "Wrapper", "for", "the", "CTC", "Beam", "Search", "Decoder", "." ]
f64aa73e7fbe9dde40d4fcf23b42ab304747d152
https://github.com/mozilla/DeepSpeech/blob/f64aa73e7fbe9dde40d4fcf23b42ab304747d152/native_client/ctcdecode/__init__.py#L25-L59
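A sketch of a call, assuming the compiled ds_ctcdecoder package exports these names at this revision (the alphabet path and frame count are placeholders, and the toy probabilities only make sense for an alphabet of 28 characters plus blank):

from ds_ctcdecoder import Alphabet, ctc_beam_search_decoder  # assumed exports

alphabet = Alphabet('alphabet.txt')                   # placeholder path
probs_seq = [[1.0 / 29] * 29 for _ in range(50)]      # toy uniform frames
results = ctc_beam_search_decoder(probs_seq, alphabet, beam_size=128)
best_prob, best_text = results[0]                     # sorted best-first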
21,632
mozilla/DeepSpeech
native_client/ctcdecode/__init__.py
ctc_beam_search_decoder_batch
def ctc_beam_search_decoder_batch(probs_seq, seq_lengths, alphabet, beam_size, num_processes, cutoff_prob=1.0, cutoff_top_n=40, scorer=None): """Wrapper for the batched CTC beam search decoder. :param probs_seq: 3-D list with each element as an instance of 2-D list of probabilities used by ctc_beam_search_decoder(). :type probs_seq: 3-D list :param alphabet: alphabet list. :alphabet: Alphabet :param beam_size: Width for beam search. :type beam_size: int :param num_processes: Number of parallel processes. :type num_processes: int :param cutoff_prob: Cutoff probability in alphabet pruning, default 1.0, no pruning. :type cutoff_prob: float :param cutoff_top_n: Cutoff number in pruning, only top cutoff_top_n characters with highest probs in alphabet will be used in beam search, default 40. :type cutoff_top_n: int :param num_processes: Number of parallel processes. :type num_processes: int :param scorer: External scorer for partially decoded sentence, e.g. word count or language model. :type scorer: Scorer :return: List of tuples of log probability and sentence as decoding results, in descending order of the probability. :rtype: list """ batch_beam_results = swigwrapper.ctc_beam_search_decoder_batch( probs_seq, seq_lengths, alphabet.config_file(), beam_size, num_processes, cutoff_prob, cutoff_top_n, scorer) batch_beam_results = [ [(res.probability, alphabet.decode(res.tokens)) for res in beam_results] for beam_results in batch_beam_results ] return batch_beam_results
python
def ctc_beam_search_decoder_batch(probs_seq, seq_lengths, alphabet, beam_size, num_processes, cutoff_prob=1.0, cutoff_top_n=40, scorer=None): """Wrapper for the batched CTC beam search decoder. :param probs_seq: 3-D list with each element as an instance of 2-D list of probabilities used by ctc_beam_search_decoder(). :type probs_seq: 3-D list :param alphabet: alphabet list. :alphabet: Alphabet :param beam_size: Width for beam search. :type beam_size: int :param num_processes: Number of parallel processes. :type num_processes: int :param cutoff_prob: Cutoff probability in alphabet pruning, default 1.0, no pruning. :type cutoff_prob: float :param cutoff_top_n: Cutoff number in pruning, only top cutoff_top_n characters with highest probs in alphabet will be used in beam search, default 40. :type cutoff_top_n: int :param num_processes: Number of parallel processes. :type num_processes: int :param scorer: External scorer for partially decoded sentence, e.g. word count or language model. :type scorer: Scorer :return: List of tuples of log probability and sentence as decoding results, in descending order of the probability. :rtype: list """ batch_beam_results = swigwrapper.ctc_beam_search_decoder_batch( probs_seq, seq_lengths, alphabet.config_file(), beam_size, num_processes, cutoff_prob, cutoff_top_n, scorer) batch_beam_results = [ [(res.probability, alphabet.decode(res.tokens)) for res in beam_results] for beam_results in batch_beam_results ] return batch_beam_results
[ "def", "ctc_beam_search_decoder_batch", "(", "probs_seq", ",", "seq_lengths", ",", "alphabet", ",", "beam_size", ",", "num_processes", ",", "cutoff_prob", "=", "1.0", ",", "cutoff_top_n", "=", "40", ",", "scorer", "=", "None", ")", ":", "batch_beam_results", "=", "swigwrapper", ".", "ctc_beam_search_decoder_batch", "(", "probs_seq", ",", "seq_lengths", ",", "alphabet", ".", "config_file", "(", ")", ",", "beam_size", ",", "num_processes", ",", "cutoff_prob", ",", "cutoff_top_n", ",", "scorer", ")", "batch_beam_results", "=", "[", "[", "(", "res", ".", "probability", ",", "alphabet", ".", "decode", "(", "res", ".", "tokens", ")", ")", "for", "res", "in", "beam_results", "]", "for", "beam_results", "in", "batch_beam_results", "]", "return", "batch_beam_results" ]
Wrapper for the batched CTC beam search decoder. :param probs_seq: 3-D list with each element as an instance of 2-D list of probabilities used by ctc_beam_search_decoder(). :type probs_seq: 3-D list :param alphabet: alphabet list. :type alphabet: Alphabet :param beam_size: Width for beam search. :type beam_size: int :param num_processes: Number of parallel processes. :type num_processes: int :param cutoff_prob: Cutoff probability in alphabet pruning, default 1.0, no pruning. :type cutoff_prob: float :param cutoff_top_n: Cutoff number in pruning, only top cutoff_top_n characters with highest probs in alphabet will be used in beam search, default 40. :type cutoff_top_n: int :param scorer: External scorer for partially decoded sentence, e.g. word count or language model. :type scorer: Scorer :return: List of tuples of log probability and sentence as decoding results, in descending order of the probability. :rtype: list
[ "Wrapper", "for", "the", "batched", "CTC", "beam", "search", "decoder", "." ]
f64aa73e7fbe9dde40d4fcf23b42ab304747d152
https://github.com/mozilla/DeepSpeech/blob/f64aa73e7fbe9dde40d4fcf23b42ab304747d152/native_client/ctcdecode/__init__.py#L62-L104
21,633
mozilla/DeepSpeech
examples/mic_vad_streaming/mic_vad_streaming.py
Audio.resample
def resample(self, data, input_rate): """ Microphone may not support our native processing sampling rate, so resample from input_rate to RATE_PROCESS here for webrtcvad and deepspeech Args: data (binary): Input audio stream input_rate (int): Input audio rate to resample from """ data16 = np.fromstring(string=data, dtype=np.int16) resample_size = int(len(data16) / self.input_rate * self.RATE_PROCESS) resample = signal.resample(data16, resample_size) resample16 = np.array(resample, dtype=np.int16) return resample16.tostring()
python
def resample(self, data, input_rate): """ Microphone may not support our native processing sampling rate, so resample from input_rate to RATE_PROCESS here for webrtcvad and deepspeech Args: data (binary): Input audio stream input_rate (int): Input audio rate to resample from """ data16 = np.fromstring(string=data, dtype=np.int16) resample_size = int(len(data16) / self.input_rate * self.RATE_PROCESS) resample = signal.resample(data16, resample_size) resample16 = np.array(resample, dtype=np.int16) return resample16.tostring()
[ "def", "resample", "(", "self", ",", "data", ",", "input_rate", ")", ":", "data16", "=", "np", ".", "fromstring", "(", "string", "=", "data", ",", "dtype", "=", "np", ".", "int16", ")", "resample_size", "=", "int", "(", "len", "(", "data16", ")", "/", "self", ".", "input_rate", "*", "self", ".", "RATE_PROCESS", ")", "resample", "=", "signal", ".", "resample", "(", "data16", ",", "resample_size", ")", "resample16", "=", "np", ".", "array", "(", "resample", ",", "dtype", "=", "np", ".", "int16", ")", "return", "resample16", ".", "tostring", "(", ")" ]
The microphone may not support our native processing sampling rate, so we resample from input_rate to RATE_PROCESS here for webrtcvad and deepspeech. Args: data (binary): Input audio stream input_rate (int): Input audio rate to resample from
[ "Microphone", "may", "not", "support", "our", "native", "processing", "sampling", "rate", "so", "resample", "from", "input_rate", "to", "RATE_PROCESS", "here", "for", "webrtcvad", "and", "deepspeech" ]
f64aa73e7fbe9dde40d4fcf23b42ab304747d152
https://github.com/mozilla/DeepSpeech/blob/f64aa73e7fbe9dde40d4fcf23b42ab304747d152/examples/mic_vad_streaming/mic_vad_streaming.py#L52-L66
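np.fromstring and ndarray.tostring were later deprecated in NumPy; on newer versions an equivalent sketch of the same resampling uses frombuffer/tobytes (the function name here is hypothetical):

import numpy as np
from scipy import signal

def resample_to_16k(data, input_rate, target_rate=16000):
    # Resample a raw int16 byte buffer, mirroring the method above.
    data16 = np.frombuffer(data, dtype=np.int16)
    out_len = int(len(data16) * target_rate / input_rate)
    resampled = signal.resample(data16, out_len)
    return np.asarray(resampled, dtype=np.int16).tobytes()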
21,634
mozilla/DeepSpeech
examples/mic_vad_streaming/mic_vad_streaming.py
Audio.read_resampled
def read_resampled(self): """Return a block of audio data resampled to 16000hz, blocking if necessary.""" return self.resample(data=self.buffer_queue.get(), input_rate=self.input_rate)
python
def read_resampled(self): """Return a block of audio data resampled to 16000hz, blocking if necessary.""" return self.resample(data=self.buffer_queue.get(), input_rate=self.input_rate)
[ "def", "read_resampled", "(", "self", ")", ":", "return", "self", ".", "resample", "(", "data", "=", "self", ".", "buffer_queue", ".", "get", "(", ")", ",", "input_rate", "=", "self", ".", "input_rate", ")" ]
Return a block of audio data resampled to 16000 Hz, blocking if necessary.
[ "Return", "a", "block", "of", "audio", "data", "resampled", "to", "16000hz", "blocking", "if", "necessary", "." ]
f64aa73e7fbe9dde40d4fcf23b42ab304747d152
https://github.com/mozilla/DeepSpeech/blob/f64aa73e7fbe9dde40d4fcf23b42ab304747d152/examples/mic_vad_streaming/mic_vad_streaming.py#L68-L71
21,635
mozilla/DeepSpeech
examples/mic_vad_streaming/mic_vad_streaming.py
VADAudio.frame_generator
def frame_generator(self): """Generator that yields all audio frames from microphone.""" if self.input_rate == self.RATE_PROCESS: while True: yield self.read() else: while True: yield self.read_resampled()
python
def frame_generator(self): """Generator that yields all audio frames from microphone.""" if self.input_rate == self.RATE_PROCESS: while True: yield self.read() else: while True: yield self.read_resampled()
[ "def", "frame_generator", "(", "self", ")", ":", "if", "self", ".", "input_rate", "==", "self", ".", "RATE_PROCESS", ":", "while", "True", ":", "yield", "self", ".", "read", "(", ")", "else", ":", "while", "True", ":", "yield", "self", ".", "read_resampled", "(", ")" ]
Generator that yields all audio frames from microphone.
[ "Generator", "that", "yields", "all", "audio", "frames", "from", "microphone", "." ]
f64aa73e7fbe9dde40d4fcf23b42ab304747d152
https://github.com/mozilla/DeepSpeech/blob/f64aa73e7fbe9dde40d4fcf23b42ab304747d152/examples/mic_vad_streaming/mic_vad_streaming.py#L103-L110
21,636
fxsjy/jieba
jieba/posseg/__init__.py
cut
def cut(sentence, HMM=True): """ Global `cut` function that supports parallel processing. Note that this only works using dt, custom POSTokenizer instances are not supported. """ global dt if jieba.pool is None: for w in dt.cut(sentence, HMM=HMM): yield w else: parts = strdecode(sentence).splitlines(True) if HMM: result = jieba.pool.map(_lcut_internal, parts) else: result = jieba.pool.map(_lcut_internal_no_hmm, parts) for r in result: for w in r: yield w
python
def cut(sentence, HMM=True): """ Global `cut` function that supports parallel processing. Note that this only works using dt, custom POSTokenizer instances are not supported. """ global dt if jieba.pool is None: for w in dt.cut(sentence, HMM=HMM): yield w else: parts = strdecode(sentence).splitlines(True) if HMM: result = jieba.pool.map(_lcut_internal, parts) else: result = jieba.pool.map(_lcut_internal_no_hmm, parts) for r in result: for w in r: yield w
[ "def", "cut", "(", "sentence", ",", "HMM", "=", "True", ")", ":", "global", "dt", "if", "jieba", ".", "pool", "is", "None", ":", "for", "w", "in", "dt", ".", "cut", "(", "sentence", ",", "HMM", "=", "HMM", ")", ":", "yield", "w", "else", ":", "parts", "=", "strdecode", "(", "sentence", ")", ".", "splitlines", "(", "True", ")", "if", "HMM", ":", "result", "=", "jieba", ".", "pool", ".", "map", "(", "_lcut_internal", ",", "parts", ")", "else", ":", "result", "=", "jieba", ".", "pool", ".", "map", "(", "_lcut_internal_no_hmm", ",", "parts", ")", "for", "r", "in", "result", ":", "for", "w", "in", "r", ":", "yield", "w" ]
Global `cut` function that supports parallel processing. Note that this only works with dt; custom POSTokenizer instances are not supported.
[ "Global", "cut", "function", "that", "supports", "parallel", "processing", "." ]
8212b6c5725d08311952a3a08e5509eeaee33eb7
https://github.com/fxsjy/jieba/blob/8212b6c5725d08311952a3a08e5509eeaee33eb7/jieba/posseg/__init__.py#L272-L291
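Typical use goes through the module-level generator; a short example with indicative part-of-speech tags (output as documented in jieba's README):

import jieba.posseg as pseg

for pair in pseg.cut('我爱北京天安门'):
    print('%s %s' % (pair.word, pair.flag))
# 我 r / 爱 v / 北京 ns / 天安门 ns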
21,637
fxsjy/jieba
jieba/__init__.py
enable_parallel
def enable_parallel(processnum=None): """ Change the module's `cut` and `cut_for_search` functions to the parallel version. Note that this only works using dt, custom Tokenizer instances are not supported. """ global pool, dt, cut, cut_for_search from multiprocessing import cpu_count if os.name == 'nt': raise NotImplementedError( "jieba: parallel mode only supports posix system") else: from multiprocessing import Pool dt.check_initialized() if processnum is None: processnum = cpu_count() pool = Pool(processnum) cut = _pcut cut_for_search = _pcut_for_search
python
def enable_parallel(processnum=None): """ Change the module's `cut` and `cut_for_search` functions to the parallel version. Note that this only works using dt, custom Tokenizer instances are not supported. """ global pool, dt, cut, cut_for_search from multiprocessing import cpu_count if os.name == 'nt': raise NotImplementedError( "jieba: parallel mode only supports posix system") else: from multiprocessing import Pool dt.check_initialized() if processnum is None: processnum = cpu_count() pool = Pool(processnum) cut = _pcut cut_for_search = _pcut_for_search
[ "def", "enable_parallel", "(", "processnum", "=", "None", ")", ":", "global", "pool", ",", "dt", ",", "cut", ",", "cut_for_search", "from", "multiprocessing", "import", "cpu_count", "if", "os", ".", "name", "==", "'nt'", ":", "raise", "NotImplementedError", "(", "\"jieba: parallel mode only supports posix system\"", ")", "else", ":", "from", "multiprocessing", "import", "Pool", "dt", ".", "check_initialized", "(", ")", "if", "processnum", "is", "None", ":", "processnum", "=", "cpu_count", "(", ")", "pool", "=", "Pool", "(", "processnum", ")", "cut", "=", "_pcut", "cut_for_search", "=", "_pcut_for_search" ]
Change the module's `cut` and `cut_for_search` functions to their parallel versions. Note that this only works with dt; custom Tokenizer instances are not supported.
[ "Change", "the", "module", "s", "cut", "and", "cut_for_search", "functions", "to", "the", "parallel", "version", "." ]
8212b6c5725d08311952a3a08e5509eeaee33eb7
https://github.com/fxsjy/jieba/blob/8212b6c5725d08311952a3a08e5509eeaee33eb7/jieba/__init__.py#L569-L589
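A short usage sketch; the input file path is a placeholder, and on Windows the call raises NotImplementedError as shown above:

import jieba

jieba.enable_parallel(4)                          # 4 worker processes (POSIX only)
words = jieba.lcut(open('big_text.txt').read())   # placeholder input file
jieba.disable_parallel()                          # restore the serial implementation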
21,638
fxsjy/jieba
jieba/__init__.py
Tokenizer.cut
def cut(self, sentence, cut_all=False, HMM=True): ''' The main function that segments an entire sentence that contains Chinese characters into separated words. Parameter: - sentence: The str(unicode) to be segmented. - cut_all: Model type. True for full pattern, False for accurate pattern. - HMM: Whether to use the Hidden Markov Model. ''' sentence = strdecode(sentence) if cut_all: re_han = re_han_cut_all re_skip = re_skip_cut_all else: re_han = re_han_default re_skip = re_skip_default if cut_all: cut_block = self.__cut_all elif HMM: cut_block = self.__cut_DAG else: cut_block = self.__cut_DAG_NO_HMM blocks = re_han.split(sentence) for blk in blocks: if not blk: continue if re_han.match(blk): for word in cut_block(blk): yield word else: tmp = re_skip.split(blk) for x in tmp: if re_skip.match(x): yield x elif not cut_all: for xx in x: yield xx else: yield x
python
def cut(self, sentence, cut_all=False, HMM=True): ''' The main function that segments an entire sentence that contains Chinese characters into separated words. Parameter: - sentence: The str(unicode) to be segmented. - cut_all: Model type. True for full pattern, False for accurate pattern. - HMM: Whether to use the Hidden Markov Model. ''' sentence = strdecode(sentence) if cut_all: re_han = re_han_cut_all re_skip = re_skip_cut_all else: re_han = re_han_default re_skip = re_skip_default if cut_all: cut_block = self.__cut_all elif HMM: cut_block = self.__cut_DAG else: cut_block = self.__cut_DAG_NO_HMM blocks = re_han.split(sentence) for blk in blocks: if not blk: continue if re_han.match(blk): for word in cut_block(blk): yield word else: tmp = re_skip.split(blk) for x in tmp: if re_skip.match(x): yield x elif not cut_all: for xx in x: yield xx else: yield x
[ "def", "cut", "(", "self", ",", "sentence", ",", "cut_all", "=", "False", ",", "HMM", "=", "True", ")", ":", "sentence", "=", "strdecode", "(", "sentence", ")", "if", "cut_all", ":", "re_han", "=", "re_han_cut_all", "re_skip", "=", "re_skip_cut_all", "else", ":", "re_han", "=", "re_han_default", "re_skip", "=", "re_skip_default", "if", "cut_all", ":", "cut_block", "=", "self", ".", "__cut_all", "elif", "HMM", ":", "cut_block", "=", "self", ".", "__cut_DAG", "else", ":", "cut_block", "=", "self", ".", "__cut_DAG_NO_HMM", "blocks", "=", "re_han", ".", "split", "(", "sentence", ")", "for", "blk", "in", "blocks", ":", "if", "not", "blk", ":", "continue", "if", "re_han", ".", "match", "(", "blk", ")", ":", "for", "word", "in", "cut_block", "(", "blk", ")", ":", "yield", "word", "else", ":", "tmp", "=", "re_skip", ".", "split", "(", "blk", ")", "for", "x", "in", "tmp", ":", "if", "re_skip", ".", "match", "(", "x", ")", ":", "yield", "x", "elif", "not", "cut_all", ":", "for", "xx", "in", "x", ":", "yield", "xx", "else", ":", "yield", "x" ]
The main function that segments an entire sentence containing Chinese characters into separate words. Parameter: - sentence: The str(unicode) to be segmented. - cut_all: Model type. True for full pattern, False for accurate pattern. - HMM: Whether to use the Hidden Markov Model.
[ "The", "main", "function", "that", "segments", "an", "entire", "sentence", "that", "contains", "Chinese", "characters", "into", "separated", "words", "." ]
8212b6c5725d08311952a3a08e5509eeaee33eb7
https://github.com/fxsjy/jieba/blob/8212b6c5725d08311952a3a08e5509eeaee33eb7/jieba/__init__.py#L275-L315
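The two cut modes side by side, using the default tokenizer (outputs as documented in jieba's README):

import jieba

print('/'.join(jieba.cut('我来到北京清华大学', cut_all=True)))
# 我/来到/北京/清华/清华大学/华大/大学
print('/'.join(jieba.cut('我来到北京清华大学', cut_all=False)))
# 我/来到/北京/清华大学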
21,639
fxsjy/jieba
jieba/__init__.py
Tokenizer.cut_for_search
def cut_for_search(self, sentence, HMM=True): """ Finer segmentation for search engines. """ words = self.cut(sentence, HMM=HMM) for w in words: if len(w) > 2: for i in xrange(len(w) - 1): gram2 = w[i:i + 2] if self.FREQ.get(gram2): yield gram2 if len(w) > 3: for i in xrange(len(w) - 2): gram3 = w[i:i + 3] if self.FREQ.get(gram3): yield gram3 yield w
python
def cut_for_search(self, sentence, HMM=True): """ Finer segmentation for search engines. """ words = self.cut(sentence, HMM=HMM) for w in words: if len(w) > 2: for i in xrange(len(w) - 1): gram2 = w[i:i + 2] if self.FREQ.get(gram2): yield gram2 if len(w) > 3: for i in xrange(len(w) - 2): gram3 = w[i:i + 3] if self.FREQ.get(gram3): yield gram3 yield w
[ "def", "cut_for_search", "(", "self", ",", "sentence", ",", "HMM", "=", "True", ")", ":", "words", "=", "self", ".", "cut", "(", "sentence", ",", "HMM", "=", "HMM", ")", "for", "w", "in", "words", ":", "if", "len", "(", "w", ")", ">", "2", ":", "for", "i", "in", "xrange", "(", "len", "(", "w", ")", "-", "1", ")", ":", "gram2", "=", "w", "[", "i", ":", "i", "+", "2", "]", "if", "self", ".", "FREQ", ".", "get", "(", "gram2", ")", ":", "yield", "gram2", "if", "len", "(", "w", ")", ">", "3", ":", "for", "i", "in", "xrange", "(", "len", "(", "w", ")", "-", "2", ")", ":", "gram3", "=", "w", "[", "i", ":", "i", "+", "3", "]", "if", "self", ".", "FREQ", ".", "get", "(", "gram3", ")", ":", "yield", "gram3", "yield", "w" ]
Finer segmentation for search engines.
[ "Finer", "segmentation", "for", "search", "engines", "." ]
8212b6c5725d08311952a3a08e5509eeaee33eb7
https://github.com/fxsjy/jieba/blob/8212b6c5725d08311952a3a08e5509eeaee33eb7/jieba/__init__.py#L317-L333
21,640
fxsjy/jieba
jieba/__init__.py
Tokenizer.load_userdict
def load_userdict(self, f): ''' Load personalized dict to improve detect rate. Parameter: - f : A plain text file contains words and their ocurrences. Can be a file-like object, or the path of the dictionary file, whose encoding must be utf-8. Structure of dict file: word1 freq1 word_type1 word2 freq2 word_type2 ... Word type may be ignored ''' self.check_initialized() if isinstance(f, string_types): f_name = f f = open(f, 'rb') else: f_name = resolve_filename(f) for lineno, ln in enumerate(f, 1): line = ln.strip() if not isinstance(line, text_type): try: line = line.decode('utf-8').lstrip('\ufeff') except UnicodeDecodeError: raise ValueError('dictionary file %s must be utf-8' % f_name) if not line: continue # match won't be None because there's at least one character word, freq, tag = re_userdict.match(line).groups() if freq is not None: freq = freq.strip() if tag is not None: tag = tag.strip() self.add_word(word, freq, tag)
python
def load_userdict(self, f): ''' Load personalized dict to improve detect rate. Parameter: - f : A plain text file contains words and their ocurrences. Can be a file-like object, or the path of the dictionary file, whose encoding must be utf-8. Structure of dict file: word1 freq1 word_type1 word2 freq2 word_type2 ... Word type may be ignored ''' self.check_initialized() if isinstance(f, string_types): f_name = f f = open(f, 'rb') else: f_name = resolve_filename(f) for lineno, ln in enumerate(f, 1): line = ln.strip() if not isinstance(line, text_type): try: line = line.decode('utf-8').lstrip('\ufeff') except UnicodeDecodeError: raise ValueError('dictionary file %s must be utf-8' % f_name) if not line: continue # match won't be None because there's at least one character word, freq, tag = re_userdict.match(line).groups() if freq is not None: freq = freq.strip() if tag is not None: tag = tag.strip() self.add_word(word, freq, tag)
[ "def", "load_userdict", "(", "self", ",", "f", ")", ":", "self", ".", "check_initialized", "(", ")", "if", "isinstance", "(", "f", ",", "string_types", ")", ":", "f_name", "=", "f", "f", "=", "open", "(", "f", ",", "'rb'", ")", "else", ":", "f_name", "=", "resolve_filename", "(", "f", ")", "for", "lineno", ",", "ln", "in", "enumerate", "(", "f", ",", "1", ")", ":", "line", "=", "ln", ".", "strip", "(", ")", "if", "not", "isinstance", "(", "line", ",", "text_type", ")", ":", "try", ":", "line", "=", "line", ".", "decode", "(", "'utf-8'", ")", ".", "lstrip", "(", "'\\ufeff'", ")", "except", "UnicodeDecodeError", ":", "raise", "ValueError", "(", "'dictionary file %s must be utf-8'", "%", "f_name", ")", "if", "not", "line", ":", "continue", "# match won't be None because there's at least one character", "word", ",", "freq", ",", "tag", "=", "re_userdict", ".", "match", "(", "line", ")", ".", "groups", "(", ")", "if", "freq", "is", "not", "None", ":", "freq", "=", "freq", ".", "strip", "(", ")", "if", "tag", "is", "not", "None", ":", "tag", "=", "tag", ".", "strip", "(", ")", "self", ".", "add_word", "(", "word", ",", "freq", ",", "tag", ")" ]
Load a personalized dictionary to improve the detection rate. Parameter: - f : A plain text file containing words and their occurrences. Can be a file-like object, or the path of the dictionary file, whose encoding must be utf-8. Structure of dict file: word1 freq1 word_type1 word2 freq2 word_type2 ... Word type may be ignored
[ "Load", "personalized", "dict", "to", "improve", "detect", "rate", "." ]
8212b6c5725d08311952a3a08e5509eeaee33eb7
https://github.com/fxsjy/jieba/blob/8212b6c5725d08311952a3a08e5509eeaee33eb7/jieba/__init__.py#L359-L395
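The dictionary file format the loader expects, one entry per line with frequency and POS tag both optional (the file name is a placeholder):

# userdict.txt (UTF-8), one word per line:
#   云计算 5
#   凱特琳 nz
#   创新办 3 i
import jieba
jieba.load_userdict('userdict.txt')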
21,641
fxsjy/jieba
jieba/__init__.py
Tokenizer.add_word
def add_word(self, word, freq=None, tag=None): """ Add a word to dictionary. freq and tag can be omitted, freq defaults to be a calculated value that ensures the word can be cut out. """ self.check_initialized() word = strdecode(word) freq = int(freq) if freq is not None else self.suggest_freq(word, False) self.FREQ[word] = freq self.total += freq if tag: self.user_word_tag_tab[word] = tag for ch in xrange(len(word)): wfrag = word[:ch + 1] if wfrag not in self.FREQ: self.FREQ[wfrag] = 0 if freq == 0: finalseg.add_force_split(word)
python
def add_word(self, word, freq=None, tag=None): """ Add a word to dictionary. freq and tag can be omitted, freq defaults to be a calculated value that ensures the word can be cut out. """ self.check_initialized() word = strdecode(word) freq = int(freq) if freq is not None else self.suggest_freq(word, False) self.FREQ[word] = freq self.total += freq if tag: self.user_word_tag_tab[word] = tag for ch in xrange(len(word)): wfrag = word[:ch + 1] if wfrag not in self.FREQ: self.FREQ[wfrag] = 0 if freq == 0: finalseg.add_force_split(word)
[ "def", "add_word", "(", "self", ",", "word", ",", "freq", "=", "None", ",", "tag", "=", "None", ")", ":", "self", ".", "check_initialized", "(", ")", "word", "=", "strdecode", "(", "word", ")", "freq", "=", "int", "(", "freq", ")", "if", "freq", "is", "not", "None", "else", "self", ".", "suggest_freq", "(", "word", ",", "False", ")", "self", ".", "FREQ", "[", "word", "]", "=", "freq", "self", ".", "total", "+=", "freq", "if", "tag", ":", "self", ".", "user_word_tag_tab", "[", "word", "]", "=", "tag", "for", "ch", "in", "xrange", "(", "len", "(", "word", ")", ")", ":", "wfrag", "=", "word", "[", ":", "ch", "+", "1", "]", "if", "wfrag", "not", "in", "self", ".", "FREQ", ":", "self", ".", "FREQ", "[", "wfrag", "]", "=", "0", "if", "freq", "==", "0", ":", "finalseg", ".", "add_force_split", "(", "word", ")" ]
Add a word to the dictionary. freq and tag can be omitted; freq defaults to a calculated value that ensures the word can be cut out.
[ "Add", "a", "word", "to", "dictionary", "." ]
8212b6c5725d08311952a3a08e5509eeaee33eb7
https://github.com/fxsjy/jieba/blob/8212b6c5725d08311952a3a08e5509eeaee33eb7/jieba/__init__.py#L397-L416
21,642
fxsjy/jieba
jieba/__init__.py
Tokenizer.suggest_freq
def suggest_freq(self, segment, tune=False): """ Suggest word frequency to force the characters in a word to be joined or splitted. Parameter: - segment : The segments that the word is expected to be cut into, If the word should be treated as a whole, use a str. - tune : If True, tune the word frequency. Note that HMM may affect the final result. If the result doesn't change, set HMM=False. """ self.check_initialized() ftotal = float(self.total) freq = 1 if isinstance(segment, string_types): word = segment for seg in self.cut(word, HMM=False): freq *= self.FREQ.get(seg, 1) / ftotal freq = max(int(freq * self.total) + 1, self.FREQ.get(word, 1)) else: segment = tuple(map(strdecode, segment)) word = ''.join(segment) for seg in segment: freq *= self.FREQ.get(seg, 1) / ftotal freq = min(int(freq * self.total), self.FREQ.get(word, 0)) if tune: add_word(word, freq) return freq
python
def suggest_freq(self, segment, tune=False): """ Suggest word frequency to force the characters in a word to be joined or splitted. Parameter: - segment : The segments that the word is expected to be cut into, If the word should be treated as a whole, use a str. - tune : If True, tune the word frequency. Note that HMM may affect the final result. If the result doesn't change, set HMM=False. """ self.check_initialized() ftotal = float(self.total) freq = 1 if isinstance(segment, string_types): word = segment for seg in self.cut(word, HMM=False): freq *= self.FREQ.get(seg, 1) / ftotal freq = max(int(freq * self.total) + 1, self.FREQ.get(word, 1)) else: segment = tuple(map(strdecode, segment)) word = ''.join(segment) for seg in segment: freq *= self.FREQ.get(seg, 1) / ftotal freq = min(int(freq * self.total), self.FREQ.get(word, 0)) if tune: add_word(word, freq) return freq
[ "def", "suggest_freq", "(", "self", ",", "segment", ",", "tune", "=", "False", ")", ":", "self", ".", "check_initialized", "(", ")", "ftotal", "=", "float", "(", "self", ".", "total", ")", "freq", "=", "1", "if", "isinstance", "(", "segment", ",", "string_types", ")", ":", "word", "=", "segment", "for", "seg", "in", "self", ".", "cut", "(", "word", ",", "HMM", "=", "False", ")", ":", "freq", "*=", "self", ".", "FREQ", ".", "get", "(", "seg", ",", "1", ")", "/", "ftotal", "freq", "=", "max", "(", "int", "(", "freq", "*", "self", ".", "total", ")", "+", "1", ",", "self", ".", "FREQ", ".", "get", "(", "word", ",", "1", ")", ")", "else", ":", "segment", "=", "tuple", "(", "map", "(", "strdecode", ",", "segment", ")", ")", "word", "=", "''", ".", "join", "(", "segment", ")", "for", "seg", "in", "segment", ":", "freq", "*=", "self", ".", "FREQ", ".", "get", "(", "seg", ",", "1", ")", "/", "ftotal", "freq", "=", "min", "(", "int", "(", "freq", "*", "self", ".", "total", ")", ",", "self", ".", "FREQ", ".", "get", "(", "word", ",", "0", ")", ")", "if", "tune", ":", "add_word", "(", "word", ",", "freq", ")", "return", "freq" ]
Suggest word frequency to force the characters in a word to be joined or split. Parameters: - segment : The segments that the word is expected to be cut into. If the word should be treated as a whole, use a str. - tune : If True, tune the word frequency. Note that HMM may affect the final result. If the result doesn't change, set HMM=False.
[ "Suggest", "word", "frequency", "to", "force", "the", "characters", "in", "a", "word", "to", "be", "joined", "or", "splitted", "." ]
8212b6c5725d08311952a3a08e5509eeaee33eb7
https://github.com/fxsjy/jieba/blob/8212b6c5725d08311952a3a08e5509eeaee33eb7/jieba/__init__.py#L424-L453
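The classic usage example from the jieba README shows both directions of tuning (split a word apart, or glue segments together):

import jieba

print('/'.join(jieba.cut('如果放到post中将出错。', HMM=False)))
# 如果/放到/post/中将/出错/。
jieba.suggest_freq(('中', '将'), True)    # tune so that 中将 is cut into 中 / 将
print('/'.join(jieba.cut('如果放到post中将出错。', HMM=False)))
# 如果/放到/post/中/将/出错/。
jieba.suggest_freq('台中', True)          # tune so that 台中 stays one word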
21,643
tensorflow/tensor2tensor
tensor2tensor/data_generators/allen_brain.py
_get_case_file_paths
def _get_case_file_paths(tmp_dir, case, training_fraction=0.95): """Obtain a list of image paths corresponding to training or eval case. Args: tmp_dir: str, the root path to which raw images were written, at the top level having meta/ and raw/ subdirs. case: bool, whether obtaining file paths for training (true) or eval (false). training_fraction: float, the fraction of the sub-image path list to consider as the basis for training examples. Returns: list: A list of file paths. Raises: ValueError: if images not found in tmp_dir, or if training_fraction would leave no examples for eval. """ paths = tf.gfile.Glob("%s/*.jpg" % tmp_dir) if not paths: raise ValueError("Search of tmp_dir (%s) " "for subimage paths yielded an empty list, " "can't proceed with returning training/eval split." % tmp_dir) split_index = int(math.floor(len(paths)*training_fraction)) if split_index >= len(paths): raise ValueError("For a path list of size %s " "and a training_fraction of %s " "the resulting split_index of the paths list, " "%s, would leave no elements for the eval " "condition." % (len(paths), training_fraction, split_index)) if case: return paths[:split_index] else: return paths[split_index:]
python
def _get_case_file_paths(tmp_dir, case, training_fraction=0.95): """Obtain a list of image paths corresponding to training or eval case. Args: tmp_dir: str, the root path to which raw images were written, at the top level having meta/ and raw/ subdirs. case: bool, whether obtaining file paths for training (true) or eval (false). training_fraction: float, the fraction of the sub-image path list to consider as the basis for training examples. Returns: list: A list of file paths. Raises: ValueError: if images not found in tmp_dir, or if training_fraction would leave no examples for eval. """ paths = tf.gfile.Glob("%s/*.jpg" % tmp_dir) if not paths: raise ValueError("Search of tmp_dir (%s) " "for subimage paths yielded an empty list, " "can't proceed with returning training/eval split." % tmp_dir) split_index = int(math.floor(len(paths)*training_fraction)) if split_index >= len(paths): raise ValueError("For a path list of size %s " "and a training_fraction of %s " "the resulting split_index of the paths list, " "%s, would leave no elements for the eval " "condition." % (len(paths), training_fraction, split_index)) if case: return paths[:split_index] else: return paths[split_index:]
[ "def", "_get_case_file_paths", "(", "tmp_dir", ",", "case", ",", "training_fraction", "=", "0.95", ")", ":", "paths", "=", "tf", ".", "gfile", ".", "Glob", "(", "\"%s/*.jpg\"", "%", "tmp_dir", ")", "if", "not", "paths", ":", "raise", "ValueError", "(", "\"Search of tmp_dir (%s) \"", "%", "tmp_dir", ",", "\"for subimage paths yielded an empty list, \"", ",", "\"can't proceed with returning training/eval split.\"", ")", "split_index", "=", "int", "(", "math", ".", "floor", "(", "len", "(", "paths", ")", "*", "training_fraction", ")", ")", "if", "split_index", ">=", "len", "(", "paths", ")", ":", "raise", "ValueError", "(", "\"For a path list of size %s \"", "\"and a training_fraction of %s \"", "\"the resulting split_index of the paths list, \"", "\"%s, would leave no elements for the eval \"", "\"condition.\"", "%", "(", "len", "(", "paths", ")", ",", "training_fraction", ",", "split_index", ")", ")", "if", "case", ":", "return", "paths", "[", ":", "split_index", "]", "else", ":", "return", "paths", "[", "split_index", ":", "]" ]
Obtain a list of image paths corresponding to training or eval case. Args: tmp_dir: str, the root path to which raw images were written, at the top level having meta/ and raw/ subdirs. case: bool, whether obtaining file paths for training (true) or eval (false). training_fraction: float, the fraction of the sub-image path list to consider as the basis for training examples. Returns: list: A list of file paths. Raises: ValueError: if images not found in tmp_dir, or if training_fraction would leave no examples for eval.
[ "Obtain", "a", "list", "of", "image", "paths", "corresponding", "to", "training", "or", "eval", "case", "." ]
272500b6efe353aeb638d2745ed56e519462ca31
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/data_generators/allen_brain.py#L81-L121
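A self-contained sketch of the same fraction-based split, to make the index arithmetic concrete (file names are made up; note that tf.gfile.Glob does not guarantee a sorted order, so sorting first makes the split deterministic):

import math

def split_paths(paths, training_fraction=0.95):
    """Split a path list into (train, eval) by a leading fraction."""
    paths = sorted(paths)
    split_index = int(math.floor(len(paths) * training_fraction))
    if split_index >= len(paths):
        raise ValueError("training_fraction leaves no examples for eval")
    return paths[:split_index], paths[split_index:]

train, dev = split_paths(["%d.jpg" % i for i in range(100)])
print(len(train), len(dev))  # 95 5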
21,644
tensorflow/tensor2tensor
tensor2tensor/data_generators/allen_brain.py
maybe_download_image_dataset
def maybe_download_image_dataset(image_ids, target_dir): """Download a set of images from api.brain-map.org to `target_dir`. Args: image_ids: list, a list of image ids. target_dir: str, a directory to which to download the images. """ tf.gfile.MakeDirs(target_dir) num_images = len(image_ids) for i, image_id in enumerate(image_ids): destination = os.path.join(target_dir, "%s.jpg" % i) tmp_destination = "%s.temp" % destination source_url = ("http://api.brain-map.org/api/v2/" "section_image_download/%s" % image_id) if tf.gfile.Exists(destination): tf.logging.info("Image with ID already present, " "skipping download (%s of %s)." % ( i+1, num_images )) continue tf.logging.info("Downloading image with id %s (%s of %s)" % ( image_id, i+1, num_images )) response = requests.get(source_url, stream=True) response.raise_for_status() with tf.gfile.Open(tmp_destination, "wb") as f: for block in response.iter_content(1024): f.write(block) tf.gfile.Rename(tmp_destination, destination)
python
def maybe_download_image_dataset(image_ids, target_dir): """Download a set of images from api.brain-map.org to `target_dir`. Args: image_ids: list, a list of image ids. target_dir: str, a directory to which to download the images. """ tf.gfile.MakeDirs(target_dir) num_images = len(image_ids) for i, image_id in enumerate(image_ids): destination = os.path.join(target_dir, "%s.jpg" % i) tmp_destination = "%s.temp" % destination source_url = ("http://api.brain-map.org/api/v2/" "section_image_download/%s" % image_id) if tf.gfile.Exists(destination): tf.logging.info("Image with ID already present, " "skipping download (%s of %s)." % ( i+1, num_images )) continue tf.logging.info("Downloading image with id %s (%s of %s)" % ( image_id, i+1, num_images )) response = requests.get(source_url, stream=True) response.raise_for_status() with tf.gfile.Open(tmp_destination, "wb") as f: for block in response.iter_content(1024): f.write(block) tf.gfile.Rename(tmp_destination, destination)
[ "def", "maybe_download_image_dataset", "(", "image_ids", ",", "target_dir", ")", ":", "tf", ".", "gfile", ".", "MakeDirs", "(", "target_dir", ")", "num_images", "=", "len", "(", "image_ids", ")", "for", "i", ",", "image_id", "in", "enumerate", "(", "image_ids", ")", ":", "destination", "=", "os", ".", "path", ".", "join", "(", "target_dir", ",", "\"%s.jpg\"", "%", "i", ")", "tmp_destination", "=", "\"%s.temp\"", "%", "destination", "source_url", "=", "(", "\"http://api.brain-map.org/api/v2/\"", "\"section_image_download/%s\"", "%", "image_id", ")", "if", "tf", ".", "gfile", ".", "Exists", "(", "destination", ")", ":", "tf", ".", "logging", ".", "info", "(", "\"Image with ID already present, \"", "\"skipping download (%s of %s).\"", "%", "(", "i", "+", "1", ",", "num_images", ")", ")", "continue", "tf", ".", "logging", ".", "info", "(", "\"Downloading image with id %s (%s of %s)\"", "%", "(", "image_id", ",", "i", "+", "1", ",", "num_images", ")", ")", "response", "=", "requests", ".", "get", "(", "source_url", ",", "stream", "=", "True", ")", "response", ".", "raise_for_status", "(", ")", "with", "tf", ".", "gfile", ".", "Open", "(", "tmp_destination", ",", "\"w\"", ")", "as", "f", ":", "for", "block", "in", "response", ".", "iter_content", "(", "1024", ")", ":", "f", ".", "write", "(", "block", ")", "tf", ".", "gfile", ".", "Rename", "(", "tmp_destination", ",", "destination", ")" ]
Download a set of images from api.brain-map.org to `target_dir`. Args: image_ids: list, a list of image ids. target_dir: str, a directory to which to download the images.
[ "Download", "a", "set", "of", "images", "from", "api", ".", "brain", "-", "map", ".", "org", "to", "target_dir", "." ]
272500b6efe353aeb638d2745ed56e519462ca31
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/data_generators/allen_brain.py#L124-L163
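The temp-file-then-rename pattern above is what makes the download restartable: an interrupted write never leaves a truncated file at the final path. A plain-stdlib sketch of the same idea (a hypothetical helper, not part of the source):

import os
import requests

def download_atomic(url, destination, chunk_size=1024):
    """Stream url to destination via a .temp file, renaming only on success."""
    if os.path.exists(destination):
        return  # already downloaded, skip
    tmp = "%s.temp" % destination
    response = requests.get(url, stream=True)
    response.raise_for_status()
    with open(tmp, "wb") as f:
        for block in response.iter_content(chunk_size):
            f.write(block)
    os.rename(tmp, destination)  # atomic on POSIX filesystems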
21,645
tensorflow/tensor2tensor
tensor2tensor/data_generators/allen_brain.py
random_square_mask
def random_square_mask(shape, fraction): """Create a numpy array with specified shape and masked fraction. Args: shape: tuple, shape of the mask to create. fraction: float, fraction of the mask area to zero out. Returns: numpy.array: A numpy array storing the mask. """ mask = np.ones(shape) patch_area = shape[0]*shape[1]*fraction patch_dim = int(math.floor(math.sqrt(patch_area))) if patch_area == 0 or patch_dim == 0: return mask x = np.random.randint(shape[0] - patch_dim) y = np.random.randint(shape[1] - patch_dim) mask[x:(x + patch_dim), y:(y + patch_dim), :] = 0 return mask
python
def random_square_mask(shape, fraction): """Create a numpy array with specified shape and masked fraction. Args: shape: tuple, shape of the mask to create. fraction: float, fraction of the mask area to zero out. Returns: numpy.array: A numpy array storing the mask. """ mask = np.ones(shape) patch_area = shape[0]*shape[1]*fraction patch_dim = int(math.floor(math.sqrt(patch_area))) if patch_area == 0 or patch_dim == 0: return mask x = np.random.randint(shape[0] - patch_dim) y = np.random.randint(shape[1] - patch_dim) mask[x:(x + patch_dim), y:(y + patch_dim), :] = 0 return mask
[ "def", "random_square_mask", "(", "shape", ",", "fraction", ")", ":", "mask", "=", "np", ".", "ones", "(", "shape", ")", "patch_area", "=", "shape", "[", "0", "]", "*", "shape", "[", "1", "]", "*", "fraction", "patch_dim", "=", "np", ".", "int", "(", "math", ".", "floor", "(", "math", ".", "sqrt", "(", "patch_area", ")", ")", ")", "if", "patch_area", "==", "0", "or", "patch_dim", "==", "0", ":", "return", "mask", "x", "=", "np", ".", "random", ".", "randint", "(", "shape", "[", "0", "]", "-", "patch_dim", ")", "y", "=", "np", ".", "random", ".", "randint", "(", "shape", "[", "1", "]", "-", "patch_dim", ")", "mask", "[", "x", ":", "(", "x", "+", "patch_dim", ")", ",", "y", ":", "(", "y", "+", "patch_dim", ")", ",", ":", "]", "=", "0", "return", "mask" ]
Create a numpy array with specified shape and masked fraction. Args: shape: tuple, shape of the mask to create. fraction: float, fraction of the mask area to zero out. Returns: numpy.array: A numpy array storing the mask.
[ "Create", "a", "numpy", "array", "with", "specified", "shape", "and", "masked", "fraction", "." ]
272500b6efe353aeb638d2745ed56e519462ca31
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/data_generators/allen_brain.py#L166-L189
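Illustrative use of the mask, reusing random_square_mask as defined above: multiplying an image by the mask zeroes out one random square covering roughly the requested fraction of the area.

import numpy as np

img = np.random.rand(64, 64, 3).astype(np.float32)
mask = random_square_mask(img.shape, fraction=0.25)
masked = img * mask          # the masked square becomes 0
print(1.0 - mask.mean())     # ~0.25 of the area is masked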
21,646
tensorflow/tensor2tensor
tensor2tensor/data_generators/allen_brain.py
_generator
def _generator(tmp_dir, training, size=_BASE_EXAMPLE_IMAGE_SIZE, training_fraction=0.95): """Base problem example generator for Allen Brain Atlas problems. Args: tmp_dir: str, a directory where raw example input data has been stored. training: bool, whether the mode of operation is training (or, alternatively, evaluation), determining whether examples in tmp_dir prefixed with train or dev will be used. size: int, the image size to add to the example annotation. training_fraction: float, the fraction of the sub-image path list to consider as the basis for training examples. Yields: A dictionary representing the images with the following fields: * image/encoded: The string encoding the image as JPEG. * image/format: The string "jpeg" indicating the image format. * image/height: The integer indicating the image height. * image/width: The integer indicating the image width. """ maybe_download_image_dataset(_IMAGE_IDS, tmp_dir) image_files = _get_case_file_paths(tmp_dir=tmp_dir, case=training, training_fraction=training_fraction) image_obj = PIL_Image() tf.logging.info("Loaded case file paths (n=%s)" % len(image_files)) height = size width = size for input_path in image_files: img = image_obj.open(input_path) img = np.float32(img) shape = np.shape(img) for h_index in range(0, int(math.floor(shape[0]/size))): h_offset = h_index * size h_end = h_offset + size for v_index in range(0, int(math.floor(shape[1]/size))): v_offset = v_index * size v_end = v_offset + size # Extract a sub-image tile. subimage = np.uint8(img[h_offset:h_end, v_offset:v_end]) # pylint: disable=invalid-sequence-index # Filter images that are likely background (not tissue). if np.amax(subimage) < 230: continue subimage = image_obj.fromarray(subimage) buff = BytesIO() subimage.save(buff, format="JPEG") subimage_encoded = buff.getvalue() yield { "image/encoded": [subimage_encoded], "image/format": ["jpeg"], "image/height": [height], "image/width": [width] }
python
def _generator(tmp_dir, training, size=_BASE_EXAMPLE_IMAGE_SIZE, training_fraction=0.95): """Base problem example generator for Allen Brain Atlas problems. Args: tmp_dir: str, a directory where raw example input data has been stored. training: bool, whether the mode of operation is training (or, alternatively, evaluation), determining whether examples in tmp_dir prefixed with train or dev will be used. size: int, the image size to add to the example annotation. training_fraction: float, the fraction of the sub-image path list to consider as the basis for training examples. Yields: A dictionary representing the images with the following fields: * image/encoded: The string encoding the image as JPEG. * image/format: The string "jpeg" indicating the image format. * image/height: The integer indicating the image height. * image/width: The integer indicating the image width. """ maybe_download_image_dataset(_IMAGE_IDS, tmp_dir) image_files = _get_case_file_paths(tmp_dir=tmp_dir, case=training, training_fraction=training_fraction) image_obj = PIL_Image() tf.logging.info("Loaded case file paths (n=%s)" % len(image_files)) height = size width = size for input_path in image_files: img = image_obj.open(input_path) img = np.float32(img) shape = np.shape(img) for h_index in range(0, int(math.floor(shape[0]/size))): h_offset = h_index * size h_end = h_offset + size for v_index in range(0, int(math.floor(shape[1]/size))): v_offset = v_index * size v_end = v_offset + size # Extract a sub-image tile. subimage = np.uint8(img[h_offset:h_end, v_offset:v_end]) # pylint: disable=invalid-sequence-index # Filter images that are likely background (not tissue). if np.amax(subimage) < 230: continue subimage = image_obj.fromarray(subimage) buff = BytesIO() subimage.save(buff, format="JPEG") subimage_encoded = buff.getvalue() yield { "image/encoded": [subimage_encoded], "image/format": ["jpeg"], "image/height": [height], "image/width": [width] }
[ "def", "_generator", "(", "tmp_dir", ",", "training", ",", "size", "=", "_BASE_EXAMPLE_IMAGE_SIZE", ",", "training_fraction", "=", "0.95", ")", ":", "maybe_download_image_dataset", "(", "_IMAGE_IDS", ",", "tmp_dir", ")", "image_files", "=", "_get_case_file_paths", "(", "tmp_dir", "=", "tmp_dir", ",", "case", "=", "training", ",", "training_fraction", "=", "training_fraction", ")", "image_obj", "=", "PIL_Image", "(", ")", "tf", ".", "logging", ".", "info", "(", "\"Loaded case file paths (n=%s)\"", "%", "len", "(", "image_files", ")", ")", "height", "=", "size", "width", "=", "size", "for", "input_path", "in", "image_files", ":", "img", "=", "image_obj", ".", "open", "(", "input_path", ")", "img", "=", "np", ".", "float32", "(", "img", ")", "shape", "=", "np", ".", "shape", "(", "img", ")", "for", "h_index", "in", "range", "(", "0", ",", "int", "(", "math", ".", "floor", "(", "shape", "[", "0", "]", "/", "size", ")", ")", ")", ":", "h_offset", "=", "h_index", "*", "size", "h_end", "=", "h_offset", "+", "size", "-", "1", "for", "v_index", "in", "range", "(", "0", ",", "int", "(", "math", ".", "floor", "(", "shape", "[", "1", "]", "/", "size", ")", ")", ")", ":", "v_offset", "=", "v_index", "*", "size", "v_end", "=", "v_offset", "+", "size", "-", "1", "# Extract a sub-image tile.", "subimage", "=", "np", ".", "uint8", "(", "img", "[", "h_offset", ":", "h_end", ",", "v_offset", ":", "v_end", "]", ")", "# pylint: disable=invalid-sequence-index", "# Filter images that are likely background (not tissue).", "if", "np", ".", "amax", "(", "subimage", ")", "<", "230", ":", "continue", "subimage", "=", "image_obj", ".", "fromarray", "(", "subimage", ")", "buff", "=", "BytesIO", "(", ")", "subimage", ".", "save", "(", "buff", ",", "format", "=", "\"JPEG\"", ")", "subimage_encoded", "=", "buff", ".", "getvalue", "(", ")", "yield", "{", "\"image/encoded\"", ":", "[", "subimage_encoded", "]", ",", "\"image/format\"", ":", "[", "\"jpeg\"", "]", ",", "\"image/height\"", ":", "[", "height", "]", ",", "\"image/width\"", ":", "[", "width", "]", "}" ]
Base problem example generator for Allen Brain Atlas problems. Args: tmp_dir: str, a directory where raw example input data has been stored. training: bool, whether the mode of operation is training (or, alternatively, evaluation), determining whether examples in tmp_dir prefixed with train or dev will be used. size: int, the image size to add to the example annotation. training_fraction: float, the fraction of the sub-image path list to consider as the basis for training examples. Yields: A dictionary representing the images with the following fields: * image/encoded: The string encoding the image as JPEG. * image/format: The string "jpeg" indicating the image format. * image/height: The integer indicating the image height. * image/width: The integer indicating the image width.
[ "Base", "problem", "example", "generator", "for", "Allen", "Brain", "Atlas", "problems", "." ]
272500b6efe353aeb638d2745ed56e519462ca31
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/data_generators/allen_brain.py#L192-L260
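The tiling arithmetic above is worth spelling out: an H x W image yields floor(H/size) * floor(W/size) non-overlapping size x size tiles, each offset by a multiple of size. A tiny sketch:

import math

def num_tiles(height, width, size):
    """Number of non-overlapping size x size tiles in a height x width image."""
    return int(math.floor(height / size)) * int(math.floor(width / size))

print(num_tiles(1000, 700, 128))  # 7 * 5 = 35 candidate tiles (before the
                                  # brightness filter drops background tiles)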
21,647
tensorflow/tensor2tensor
tensor2tensor/models/research/transformer_moe.py
transformer_moe_2k
def transformer_moe_2k(): """Base transformers model with moe. Will have the following architecture: * Encoder architecture: * Layer 0: a - sep (self-attention - unmasked separable convolutions) * Layer 1: a - sep * Layer 2: a - sep * Layer 3: a - sep * Layer 4: a - sep * Decoder architecture: * Layer 0: a - a - sepm (self-attention - enco/deco-attention - masked sep) * Layer 1: a - a - sepm * Layer 2: a - a - moe (mixture of expert layers in the middle) * Layer 3: a - a - sepm * Layer 4: a - a - sepm Returns: hparams """ hparams = transformer_moe_8k() hparams.batch_size = 2048 hparams.default_ff = "sep" # hparams.layer_types contains the network architecture: encoder_archi = "a/a/a/a/a" decoder_archi = "a-sepm/a-sepm/a-moe/a-sepm/a-sepm" hparams.layer_types = "{}#{}".format(encoder_archi, decoder_archi) return hparams
python
def transformer_moe_2k(): """Base transformers model with moe. Will have the following architecture: * Encoder architecture: * Layer 0: a - sep (self-attention - unmasked separable convolutions) * Layer 1: a - sep * Layer 2: a - sep * Layer 3: a - sep * Layer 4: a - sep * Decoder architecture: * Layer 0: a - a - sepm (self-attention - enco/deco-attention - masked sep) * Layer 1: a - a - sepm * Layer 2: a - a - moe (mixture of expert layers in the middle) * Layer 3: a - a - sepm * Layer 4: a - a - sepm Returns: hparams """ hparams = transformer_moe_8k() hparams.batch_size = 2048 hparams.default_ff = "sep" # hparams.layer_types contains the network architecture: encoder_archi = "a/a/a/a/a" decoder_archi = "a-sepm/a-sepm/a-moe/a-sepm/a-sepm" hparams.layer_types = "{}#{}".format(encoder_archi, decoder_archi) return hparams
[ "def", "transformer_moe_2k", "(", ")", ":", "hparams", "=", "transformer_moe_8k", "(", ")", "hparams", ".", "batch_size", "=", "2048", "hparams", ".", "default_ff", "=", "\"sep\"", "# hparams.layer_types contains the network architecture:", "encoder_archi", "=", "\"a/a/a/a/a\"", "decoder_archi", "=", "\"a-sepm/a-sepm/a-moe/a-sepm/a-sepm\"", "hparams", ".", "layer_types", "=", "\"{}#{}\"", ".", "format", "(", "encoder_archi", ",", "decoder_archi", ")", "return", "hparams" ]
Base transformers model with moe. Will have the following architecture: * Encoder architecture: * Layer 0: a - sep (self-attention - unmasked separable convolutions) * Layer 1: a - sep * Layer 2: a - sep * Layer 3: a - sep * Layer 4: a - sep * Decoder architecture: * Layer 0: a - a - sepm (self-attention - enco/deco-attention - masked sep) * Layer 1: a - a - sepm * Layer 2: a - a - moe (mixture of expert layers in the middle) * Layer 3: a - a - sepm * Layer 4: a - a - sepm Returns: hparams
[ "Base", "transformers", "model", "with", "moe", "." ]
272500b6efe353aeb638d2745ed56e519462ca31
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/models/research/transformer_moe.py#L365-L395
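The layer_types string encodes the whole architecture. Roughly (this parser is an illustrative sketch, not the actual transformer_moe implementation): '#' separates encoder from decoder, '/' separates layers, '-' separates sub-layers within a layer, and a layer that names no feed-forward type gets hparams.default_ff appended.

def parse_layer_types(layer_types, default_ff="sep",
                      ff_types=("sep", "sepm", "moe", "fc")):
    """Sketch of the layer_types grammar used by the hparams above."""
    encoder_str, decoder_str = layer_types.split("#")

    def parse_side(side_str):
        layers = []
        for layer in side_str.split("/"):
            parts = layer.split("-")
            if parts[-1] not in ff_types:  # no ff named: append the default
                parts.append(default_ff)
            layers.append(parts)
        return layers

    return parse_side(encoder_str), parse_side(decoder_str)

enc, dec = parse_layer_types("a/a/a/a/a#a-sepm/a-sepm/a-moe/a-sepm/a-sepm")
print(enc[0])  # ['a', 'sep']  -> self-attention + separable-conv ff
print(dec[2])  # ['a', 'moe']  -> self-attention + mixture-of-experts ff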
21,648
tensorflow/tensor2tensor
tensor2tensor/models/research/transformer_moe.py
transformer_moe_prepend_8k
def transformer_moe_prepend_8k(): """Model which formulates a seq2seq problem as language modeling.""" hparams = transformer_moe_8k() hparams.prepend_mode = "prepend_inputs_masked_attention" hparams.eval_drop_long_sequences = False hparams.max_input_seq_length = 7500 hparams.default_ff = "sepm" hparams.layer_types = "locm/redm/locm-moe/redm/locm" hparams.moe_num_experts = 256 return hparams
python
def transformer_moe_prepend_8k(): """Model which formulates a seq2seq problem as language modeling.""" hparams = transformer_moe_8k() hparams.prepend_mode = "prepend_inputs_masked_attention" hparams.eval_drop_long_sequences = False hparams.max_input_seq_length = 7500 hparams.default_ff = "sepm" hparams.layer_types = "locm/redm/locm-moe/redm/locm" hparams.moe_num_experts = 256 return hparams
[ "def", "transformer_moe_prepend_8k", "(", ")", ":", "hparams", "=", "transformer_moe_8k", "(", ")", "hparams", ".", "prepend_mode", "=", "\"prepend_inputs_masked_attention\"", "hparams", ".", "eval_drop_long_sequences", "=", "False", "hparams", ".", "max_input_seq_length", "=", "7500", "hparams", ".", "default_ff", "=", "\"sepm\"", "hparams", ".", "layer_types", "=", "\"locm/redm/locm-moe/redm/locm\"", "hparams", ".", "moe_num_experts", "=", "256", "return", "hparams" ]
Model which formulates a seq2seq problem as language modeling.
[ "Model", "which", "formulate", "a", "seq2seq", "problem", "as", "language", "modeling", "." ]
272500b6efe353aeb638d2745ed56e519462ca31
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/models/research/transformer_moe.py#L409-L418
21,649
tensorflow/tensor2tensor
tensor2tensor/models/revnet.py
f
def f(x, depth1, depth2, dim='2d', first_batch_norm=True, stride=1, training=True, bottleneck=True, padding='SAME'): """Applies residual function for RevNet. Args: x: input tensor depth1: Number of output channels for the first and second conv layers. depth2: Number of output channels for the third conv layer. dim: '2d' if 2-dimensional, '3d' if 3-dimensional. first_batch_norm: Whether to keep the first batch norm layer or not. Typically used in the first RevNet block. stride: Stride for the first conv filter. Note that this particular RevNet architecture only varies the stride for the first conv filter. The stride for the second conv filter is always set to 1. training: True for train phase, False for eval phase. bottleneck: If true, apply bottleneck 1x1 down/up sampling. padding: Padding for each conv layer. Returns: Output tensor after applying residual function for RevNet. """ conv = CONFIG[dim]['conv'] with tf.variable_scope('f', reuse=tf.AUTO_REUSE): if first_batch_norm: net = tf.layers.batch_normalization(x, training=training) net = tf.nn.relu(net) else: net = x if bottleneck: net = conv(net, depth1, 1, strides=stride, padding=padding, activation=None) net = tf.layers.batch_normalization(net, training=training) net = tf.nn.relu(net) net = conv(net, depth1, 3, strides=1, padding=padding, activation=None) net = tf.layers.batch_normalization(net, training=training) net = tf.nn.relu(net) net = conv(net, depth2, 1, strides=1, padding=padding, activation=None) else: net = conv(net, depth2, 3, strides=stride, padding=padding, activation=None) net = tf.layers.batch_normalization(net, training=training) net = tf.nn.relu(net) net = conv(net, depth2, 3, strides=1, padding=padding, activation=None) return net
python
def f(x, depth1, depth2, dim='2d', first_batch_norm=True, stride=1, training=True, bottleneck=True, padding='SAME'): """Applies residual function for RevNet. Args: x: input tensor depth1: Number of output channels for the first and second conv layers. depth2: Number of output channels for the third conv layer. dim: '2d' if 2-dimensional, '3d' if 3-dimensional. first_batch_norm: Whether to keep the first batch norm layer or not. Typically used in the first RevNet block. stride: Stride for the first conv filter. Note that this particular RevNet architecture only varies the stride for the first conv filter. The stride for the second conv filter is always set to 1. training: True for train phase, False for eval phase. bottleneck: If true, apply bottleneck 1x1 down/up sampling. padding: Padding for each conv layer. Returns: Output tensor after applying residual function for RevNet. """ conv = CONFIG[dim]['conv'] with tf.variable_scope('f', reuse=tf.AUTO_REUSE): if first_batch_norm: net = tf.layers.batch_normalization(x, training=training) net = tf.nn.relu(net) else: net = x if bottleneck: net = conv(net, depth1, 1, strides=stride, padding=padding, activation=None) net = tf.layers.batch_normalization(net, training=training) net = tf.nn.relu(net) net = conv(net, depth1, 3, strides=1, padding=padding, activation=None) net = tf.layers.batch_normalization(net, training=training) net = tf.nn.relu(net) net = conv(net, depth2, 1, strides=1, padding=padding, activation=None) else: net = conv(net, depth2, 3, strides=stride, padding=padding, activation=None) net = tf.layers.batch_normalization(net, training=training) net = tf.nn.relu(net) net = conv(net, depth2, 3, strides=1, padding=padding, activation=None) return net
[ "def", "f", "(", "x", ",", "depth1", ",", "depth2", ",", "dim", "=", "'2d'", ",", "first_batch_norm", "=", "True", ",", "stride", "=", "1", ",", "training", "=", "True", ",", "bottleneck", "=", "True", ",", "padding", "=", "'SAME'", ")", ":", "conv", "=", "CONFIG", "[", "dim", "]", "[", "'conv'", "]", "with", "tf", ".", "variable_scope", "(", "'f'", ",", "reuse", "=", "tf", ".", "AUTO_REUSE", ")", ":", "if", "first_batch_norm", ":", "net", "=", "tf", ".", "layers", ".", "batch_normalization", "(", "x", ",", "training", "=", "training", ")", "net", "=", "tf", ".", "nn", ".", "relu", "(", "net", ")", "else", ":", "net", "=", "x", "if", "bottleneck", ":", "net", "=", "conv", "(", "net", ",", "depth1", ",", "1", ",", "strides", "=", "stride", ",", "padding", "=", "padding", ",", "activation", "=", "None", ")", "net", "=", "tf", ".", "layers", ".", "batch_normalization", "(", "net", ",", "training", "=", "training", ")", "net", "=", "tf", ".", "nn", ".", "relu", "(", "net", ")", "net", "=", "conv", "(", "net", ",", "depth1", ",", "3", ",", "strides", "=", "1", ",", "padding", "=", "padding", ",", "activation", "=", "None", ")", "net", "=", "tf", ".", "layers", ".", "batch_normalization", "(", "net", ",", "training", "=", "training", ")", "net", "=", "tf", ".", "nn", ".", "relu", "(", "net", ")", "net", "=", "conv", "(", "net", ",", "depth2", ",", "1", ",", "strides", "=", "1", ",", "padding", "=", "padding", ",", "activation", "=", "None", ")", "else", ":", "net", "=", "conv", "(", "net", ",", "depth2", ",", "3", ",", "strides", "=", "stride", ",", "padding", "=", "padding", ",", "activation", "=", "None", ")", "net", "=", "tf", ".", "layers", ".", "batch_normalization", "(", "x", ",", "training", "=", "training", ")", "net", "=", "tf", ".", "nn", ".", "relu", "(", "net", ")", "net", "=", "conv", "(", "net", ",", "depth2", ",", "3", ",", "strides", "=", "stride", ",", "padding", "=", "padding", ",", "activation", "=", "None", ")", "return", "net" ]
Applies residual function for RevNet. Args: x: input tensor depth1: Number of output channels for the first and second conv layers. depth2: Number of output channels for the third conv layer. dim: '2d' if 2-dimensional, '3d' if 3-dimensional. first_batch_norm: Whether to keep the first batch norm layer or not. Typically used in the first RevNet block. stride: Stride for the first conv filter. Note that this particular RevNet architecture only varies the stride for the first conv filter. The stride for the second conv filter is always set to 1. training: True for train phase, False for eval phase. bottleneck: If true, apply bottleneck 1x1 down/up sampling. padding: Padding for each conv layer. Returns: Output tensor after applying residual function for RevNet.
[ "Applies", "residual", "function", "for", "RevNet", "." ]
272500b6efe353aeb638d2745ed56e519462ca31
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/models/revnet.py#L72-L122
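A quick shape trace through the bottleneck branch of f above (illustrative; assumes 'SAME' padding and height/width divisible by the stride): the 1x1/3x3/1x1 stack applies the stride exactly once, in the first conv, and expands channels only in the last.

def f_shapes(n, h, w, depth1, depth2, stride):
    """Illustrative shape trace through the bottleneck branch of f."""
    s1 = (n, h // stride, w // stride, depth1)  # 1x1 conv, strides=stride
    s2 = (n, h // stride, w // stride, depth1)  # 3x3 conv, strides=1
    s3 = (n, h // stride, w // stride, depth2)  # 1x1 conv, strides=1
    return [s1, s2, s3]

for s in f_shapes(8, 32, 32, depth1=64, depth2=256, stride=2):
    print(s)  # (8, 16, 16, 64) twice, then (8, 16, 16, 256)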
21,650
tensorflow/tensor2tensor
tensor2tensor/models/revnet.py
downsample_bottleneck
def downsample_bottleneck(x, output_channels, dim='2d', stride=1, scope='h'): """Downsamples 'x' by `stride` using a 1x1 convolution filter. Args: x: input tensor of size [N, H, W, C] output_channels: Desired number of output channels. dim: '2d' if 2-dimensional, '3d' if 3-dimensional. stride: What stride to use. Usually 1 or 2. scope: Optional variable scope. Returns: A downsampled tensor of size [N, H/2, W/2, output_channels] if stride is 2, else returns a tensor of size [N, H, W, output_channels] if stride is 1. """ conv = CONFIG[dim]['conv'] with tf.variable_scope(scope): x = conv(x, output_channels, 1, strides=stride, padding='SAME', activation=None) return x
python
def downsample_bottleneck(x, output_channels, dim='2d', stride=1, scope='h'): """Downsamples 'x' by `stride` using a 1x1 convolution filter. Args: x: input tensor of size [N, H, W, C] output_channels: Desired number of output channels. dim: '2d' if 2-dimensional, '3d' if 3-dimensional. stride: What stride to use. Usually 1 or 2. scope: Optional variable scope. Returns: A downsampled tensor of size [N, H/2, W/2, output_channels] if stride is 2, else returns a tensor of size [N, H, W, output_channels] if stride is 1. """ conv = CONFIG[dim]['conv'] with tf.variable_scope(scope): x = conv(x, output_channels, 1, strides=stride, padding='SAME', activation=None) return x
[ "def", "downsample_bottleneck", "(", "x", ",", "output_channels", ",", "dim", "=", "'2d'", ",", "stride", "=", "1", ",", "scope", "=", "'h'", ")", ":", "conv", "=", "CONFIG", "[", "dim", "]", "[", "'conv'", "]", "with", "tf", ".", "variable_scope", "(", "scope", ")", ":", "x", "=", "conv", "(", "x", ",", "output_channels", ",", "1", ",", "strides", "=", "stride", ",", "padding", "=", "'SAME'", ",", "activation", "=", "None", ")", "return", "x" ]
Downsamples 'x' by `stride` using a 1x1 convolution filter. Args: x: input tensor of size [N, H, W, C] output_channels: Desired number of output channels. dim: '2d' if 2-dimensional, '3d' if 3-dimensional. stride: What stride to use. Usually 1 or 2. scope: Optional variable scope. Returns: A downsampled tensor of size [N, H/2, W/2, output_channels] if stride is 2, else returns a tensor of size [N, H, W, output_channels] if stride is 1.
[ "Downsamples", "x", "by", "stride", "using", "a", "1x1", "convolution", "filter", "." ]
272500b6efe353aeb638d2745ed56e519462ca31
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/models/revnet.py#L125-L144
21,651
tensorflow/tensor2tensor
tensor2tensor/models/revnet.py
downsample_residual
def downsample_residual(x, output_channels, dim='2d', stride=1, scope='h'): """Downsamples 'x' by `stride` using average pooling. Args: x: input tensor of size [N, H, W, C] output_channels: Desired number of output channels. dim: '2d' if 2-dimensional, '3d' if 3-dimensional. stride: What stride to use. Usually 1 or 2. scope: Optional variable scope. Returns: A downsampled tensor of size [N, H/2, W/2, output_channels] if stride is 2, else returns a tensor of size [N, H, W, output_channels] if stride is 1. """ with tf.variable_scope(scope): if stride > 1: avg_pool = CONFIG[dim]['avg_pool'] x = avg_pool(x, pool_size=(stride, stride), strides=(stride, stride), padding='VALID') input_channels = tf.shape(x)[3] diff = output_channels - input_channels x = tf.pad( x, [[0, 0], [0, 0], [0, 0], [diff // 2, diff // 2]]) return x
python
def downsample_residual(x, output_channels, dim='2d', stride=1, scope='h'): """Downsamples 'x' by `stride` using average pooling. Args: x: input tensor of size [N, H, W, C] output_channels: Desired number of output channels. dim: '2d' if 2-dimensional, '3d' if 3-dimensional. stride: What stride to use. Usually 1 or 2. scope: Optional variable scope. Returns: A downsampled tensor of size [N, H/2, W/2, output_channels] if stride is 2, else returns a tensor of size [N, H, W, output_channels] if stride is 1. """ with tf.variable_scope(scope): if stride > 1: avg_pool = CONFIG[dim]['avg_pool'] x = avg_pool(x, pool_size=(stride, stride), strides=(stride, stride), padding='VALID') input_channels = tf.shape(x)[3] diff = output_channels - input_channels x = tf.pad( x, [[0, 0], [0, 0], [0, 0], [diff // 2, diff // 2]]) return x
[ "def", "downsample_residual", "(", "x", ",", "output_channels", ",", "dim", "=", "'2d'", ",", "stride", "=", "1", ",", "scope", "=", "'h'", ")", ":", "with", "tf", ".", "variable_scope", "(", "scope", ")", ":", "if", "stride", ">", "1", ":", "avg_pool", "=", "CONFIG", "[", "dim", "]", "[", "'avg_pool'", "]", "x", "=", "avg_pool", "(", "x", ",", "pool_size", "=", "(", "stride", ",", "stride", ")", ",", "strides", "=", "(", "stride", ",", "stride", ")", ",", "padding", "=", "'VALID'", ")", "input_channels", "=", "tf", ".", "shape", "(", "x", ")", "[", "3", "]", "diff", "=", "output_channels", "-", "input_channels", "x", "=", "tf", ".", "pad", "(", "x", ",", "[", "[", "0", ",", "0", "]", ",", "[", "0", ",", "0", "]", ",", "[", "0", ",", "0", "]", ",", "[", "diff", "//", "2", ",", "diff", "//", "2", "]", "]", ")", "return", "x" ]
Downsamples 'x' by `stride` using average pooling. Args: x: input tensor of size [N, H, W, C] output_channels: Desired number of output channels. dim: '2d' if 2-dimensional, '3d' if 3-dimensional. stride: What stride to use. Usually 1 or 2. scope: Optional variable scope. Returns: A downsampled tensor of size [N, H/2, W/2, output_channels] if stride is 2, else returns a tensor of size [N, H, W, output_channels] if stride is 1.
[ "Downsamples", "x", "by", "stride", "using", "average", "pooling", "." ]
272500b6efe353aeb638d2745ed56e519462ca31
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/models/revnet.py#L147-L175
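The zero-padding trick above matches channel counts without introducing new parameters (the option-A shortcut from the ResNet paper). A NumPy analogue, assuming NHWC input, a stride that divides H and W, and an even channel difference:

import numpy as np

def downsample_residual_np(x, output_channels, stride=2):
    """Average-pool by stride, then zero-pad channels up to output_channels."""
    n, h, w, c = x.shape
    x = x.reshape(n, h // stride, stride, w // stride, stride, c).mean(axis=(2, 4))
    diff = output_channels - c
    return np.pad(x, [(0, 0), (0, 0), (0, 0), (diff // 2, diff // 2)])

y = downsample_residual_np(np.ones((1, 8, 8, 16)), output_channels=32)
print(y.shape)  # (1, 4, 4, 32)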
21,652
tensorflow/tensor2tensor
tensor2tensor/models/revnet.py
init
def init(images, num_channels, dim='2d', stride=2, kernel_size=7, maxpool=True, training=True, scope='init'): """Standard ResNet initial block used as first RevNet block. Args: images: [N, H, W, 3] tensor of input images to the model. num_channels: Output depth of convolutional layer in initial block. dim: '2d' if 2-dimensional, '3d' if 3-dimensional. stride: stride for the convolution and pool layer. kernel_size: Size of the initial convolution filter maxpool: If true, apply a maxpool after the convolution training: True for train phase, False for eval phase. scope: Optional scope for the init block. Returns: Two [N, H, W, C] output activations from input images. """ conv = CONFIG[dim]['conv'] pool = CONFIG[dim]['max_pool'] with tf.variable_scope(scope): net = conv(images, num_channels, kernel_size, strides=stride, padding='SAME', activation=None) net = tf.layers.batch_normalization(net, training=training) net = tf.nn.relu(net) if maxpool: net = pool(net, pool_size=3, strides=stride) x1, x2 = tf.split(net, 2, axis=CONFIG[dim]['split_axis']) return x1, x2
python
def init(images, num_channels, dim='2d', stride=2, kernel_size=7, maxpool=True, training=True, scope='init'): """Standard ResNet initial block used as first RevNet block. Args: images: [N, H, W, 3] tensor of input images to the model. num_channels: Output depth of convolutional layer in initial block. dim: '2d' if 2-dimensional, '3d' if 3-dimensional. stride: stride for the convolution and pool layer. kernel_size: Size of the initial convolution filter maxpool: If true, apply a maxpool after the convolution training: True for train phase, False for eval phase. scope: Optional scope for the init block. Returns: Two [N, H, W, C] output activations from input images. """ conv = CONFIG[dim]['conv'] pool = CONFIG[dim]['max_pool'] with tf.variable_scope(scope): net = conv(images, num_channels, kernel_size, strides=stride, padding='SAME', activation=None) net = tf.layers.batch_normalization(net, training=training) net = tf.nn.relu(net) if maxpool: net = pool(net, pool_size=3, strides=stride) x1, x2 = tf.split(net, 2, axis=CONFIG[dim]['split_axis']) return x1, x2
[ "def", "init", "(", "images", ",", "num_channels", ",", "dim", "=", "'2d'", ",", "stride", "=", "2", ",", "kernel_size", "=", "7", ",", "maxpool", "=", "True", ",", "training", "=", "True", ",", "scope", "=", "'init'", ")", ":", "conv", "=", "CONFIG", "[", "dim", "]", "[", "'conv'", "]", "pool", "=", "CONFIG", "[", "dim", "]", "[", "'max_pool'", "]", "with", "tf", ".", "variable_scope", "(", "scope", ")", ":", "net", "=", "conv", "(", "images", ",", "num_channels", ",", "kernel_size", ",", "strides", "=", "stride", ",", "padding", "=", "'SAME'", ",", "activation", "=", "None", ")", "net", "=", "tf", ".", "layers", ".", "batch_normalization", "(", "net", ",", "training", "=", "training", ")", "net", "=", "tf", ".", "nn", ".", "relu", "(", "net", ")", "if", "maxpool", ":", "net", "=", "pool", "(", "net", ",", "pool_size", "=", "3", ",", "strides", "=", "stride", ")", "x1", ",", "x2", "=", "tf", ".", "split", "(", "net", ",", "2", ",", "axis", "=", "CONFIG", "[", "dim", "]", "[", "'split_axis'", "]", ")", "return", "x1", ",", "x2" ]
Standard ResNet initial block used as first RevNet block. Args: images: [N, H, W, 3] tensor of input images to the model. num_channels: Output depth of convolutional layer in initial block. dim: '2d' if 2-dimensional, '3d' if 3-dimensional. stride: stride for the convolution and pool layer. kernel_size: Size of the initial convolution filter maxpool: If true, apply a maxpool after the convolution training: True for train phase, False for eval phase. scope: Optional scope for the init block. Returns: Two [N, H, W, C] output activations from input images.
[ "Standard", "ResNet", "initial", "block", "used", "as", "first", "RevNet", "block", "." ]
272500b6efe353aeb638d2745ed56e519462ca31
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/models/revnet.py#L178-L205
21,653
tensorflow/tensor2tensor
tensor2tensor/models/revnet.py
unit
def unit(x1, x2, block_num, depth, num_layers, dim='2d', bottleneck=True, first_batch_norm=True, stride=1, training=True): """Implements bottleneck RevNet unit from authors' RevNet architecture. Args: x1: [N, H, W, C] tensor of network activations. x2: [N, H, W, C] tensor of network activations. block_num: integer ID of block depth: First depth in bottleneck residual unit. num_layers: Number of layers in the RevNet block. dim: '2d' if 2-dimensional, '3d' if 3-dimensional. bottleneck: Should a bottleneck layer be used. first_batch_norm: Whether to keep the first batch norm layer or not. Typically used in the first RevNet block. stride: Stride for the residual function. training: True for train phase, False for eval phase. Returns: Two [N, H, W, C] output activation tensors. """ scope_name = 'unit_%d' % block_num if bottleneck: depth1 = depth depth2 = depth * 4 else: depth1 = depth2 = depth residual = wrapped_partial(f, depth1=depth1, depth2=depth2, dim=dim, training=training, bottleneck=bottleneck) with tf.variable_scope(scope_name): downsample = downsample_bottleneck if bottleneck else downsample_residual # Manual implementation of downsampling with tf.variable_scope('downsampling'): with tf.variable_scope('x1'): hx1 = downsample(x1, depth2, dim=dim, stride=stride) fx2 = residual(x2, stride=stride, first_batch_norm=first_batch_norm) x1 = hx1 + fx2 with tf.variable_scope('x2'): hx2 = downsample(x2, depth2, dim=dim, stride=stride) fx1 = residual(x1) x2 = hx2 + fx1 # Full block using memory-efficient rev_block implementation. with tf.variable_scope('full_block'): x1, x2 = tf.contrib.layers.rev_block(x1, x2, residual, residual, num_layers=num_layers) return x1, x2
python
def unit(x1, x2, block_num, depth, num_layers, dim='2d', bottleneck=True, first_batch_norm=True, stride=1, training=True): """Implements bottleneck RevNet unit from authors' RevNet architecture. Args: x1: [N, H, W, C] tensor of network activations. x2: [N, H, W, C] tensor of network activations. block_num: integer ID of block depth: First depth in bottleneck residual unit. num_layers: Number of layers in the RevNet block. dim: '2d' if 2-dimensional, '3d' if 3-dimensional. bottleneck: Should a bottleneck layer be used. first_batch_norm: Whether to keep the first batch norm layer or not. Typically used in the first RevNet block. stride: Stride for the residual function. training: True for train phase, False for eval phase. Returns: Two [N, H, W, C] output activation tensors. """ scope_name = 'unit_%d' % block_num if bottleneck: depth1 = depth depth2 = depth * 4 else: depth1 = depth2 = depth residual = wrapped_partial(f, depth1=depth1, depth2=depth2, dim=dim, training=training, bottleneck=bottleneck) with tf.variable_scope(scope_name): downsample = downsample_bottleneck if bottleneck else downsample_residual # Manual implementation of downsampling with tf.variable_scope('downsampling'): with tf.variable_scope('x1'): hx1 = downsample(x1, depth2, dim=dim, stride=stride) fx2 = residual(x2, stride=stride, first_batch_norm=first_batch_norm) x1 = hx1 + fx2 with tf.variable_scope('x2'): hx2 = downsample(x2, depth2, dim=dim, stride=stride) fx1 = residual(x1) x2 = hx2 + fx1 # Full block using memory-efficient rev_block implementation. with tf.variable_scope('full_block'): x1, x2 = tf.contrib.layers.rev_block(x1, x2, residual, residual, num_layers=num_layers) return x1, x2
[ "def", "unit", "(", "x1", ",", "x2", ",", "block_num", ",", "depth", ",", "num_layers", ",", "dim", "=", "'2d'", ",", "bottleneck", "=", "True", ",", "first_batch_norm", "=", "True", ",", "stride", "=", "1", ",", "training", "=", "True", ")", ":", "scope_name", "=", "'unit_%d'", "%", "block_num", "if", "bottleneck", ":", "depth1", "=", "depth", "depth2", "=", "depth", "*", "4", "else", ":", "depth1", "=", "depth2", "=", "depth", "residual", "=", "wrapped_partial", "(", "f", ",", "depth1", "=", "depth1", ",", "depth2", "=", "depth2", ",", "dim", "=", "dim", ",", "training", "=", "training", ",", "bottleneck", "=", "bottleneck", ")", "with", "tf", ".", "variable_scope", "(", "scope_name", ")", ":", "downsample", "=", "downsample_bottleneck", "if", "bottleneck", "else", "downsample_residual", "# Manual implementation of downsampling", "with", "tf", ".", "variable_scope", "(", "'downsampling'", ")", ":", "with", "tf", ".", "variable_scope", "(", "'x1'", ")", ":", "hx1", "=", "downsample", "(", "x1", ",", "depth2", ",", "dim", "=", "dim", ",", "stride", "=", "stride", ")", "fx2", "=", "residual", "(", "x2", ",", "stride", "=", "stride", ",", "first_batch_norm", "=", "first_batch_norm", ")", "x1", "=", "hx1", "+", "fx2", "with", "tf", ".", "variable_scope", "(", "'x2'", ")", ":", "hx2", "=", "downsample", "(", "x2", ",", "depth2", ",", "dim", "=", "dim", ",", "stride", "=", "stride", ")", "fx1", "=", "residual", "(", "x1", ")", "x2", "=", "hx2", "+", "fx1", "# Full block using memory-efficient rev_block implementation.", "with", "tf", ".", "variable_scope", "(", "'full_block'", ")", ":", "x1", ",", "x2", "=", "tf", ".", "contrib", ".", "layers", ".", "rev_block", "(", "x1", ",", "x2", ",", "residual", ",", "residual", ",", "num_layers", "=", "num_layers", ")", "return", "x1", ",", "x2" ]
Implements bottleneck RevNet unit from authors' RevNet architecture. Args: x1: [N, H, W, C] tensor of network activations. x2: [N, H, W, C] tensor of network activations. block_num: integer ID of block depth: First depth in bottleneck residual unit. num_layers: Number of layers in the RevNet block. dim: '2d' if 2-dimensional, '3d' if 3-dimensional. bottleneck: Should a bottleneck layer be used. first_batch_norm: Whether to keep the first batch norm layer or not. Typically used in the first RevNet block. stride: Stride for the residual function. training: True for train phase, False for eval phase. Returns: Two [N, H, W, C] output activation tensors.
[ "Implements", "bottleneck", "RevNet", "unit", "from", "authors", "RevNet", "architecture", "." ]
272500b6efe353aeb638d2745ed56e519462ca31
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/models/revnet.py#L208-L258
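The residual function is specialized via wrapped_partial before being handed to tf.contrib.layers.rev_block. A plausible shape of that helper (a functools.partial that also preserves the wrapped function's name, which scope-naming code tends to rely on; this is a sketch and may differ from the t2t source):

import functools

def wrapped_partial(func, *args, **kwargs):
    """functools.partial that keeps func's __name__ and __doc__."""
    partial_func = functools.partial(func, *args, **kwargs)
    functools.update_wrapper(partial_func, func)
    return partial_func

def f(x, depth1, depth2, stride=1):  # stand-in for the revnet residual f
    return x

residual = wrapped_partial(f, depth1=64, depth2=256)
print(residual.__name__)        # 'f'
print(residual(1.0, stride=2))  # remaining arguments are still accepted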
21,654
tensorflow/tensor2tensor
tensor2tensor/models/revnet.py
final_block
def final_block(x1, x2, dim='2d', training=True, scope='final_block'): """Converts activations from last RevNet block to pre-logits. Args: x1: [NxHxWxC] tensor of network activations. x2: [NxHxWxC] tensor of network activations. dim: '2d' if 2-dimensional, '3d' if 3-dimensional. training: True for train phase, False for eval phase. scope: Optional variable scope for the final block. Returns: [N, hidden_dim] pre-logits tensor from activations x1 and x2. """ # Final batch norm and relu with tf.variable_scope(scope): y = tf.concat([x1, x2], axis=CONFIG[dim]['split_axis']) y = tf.layers.batch_normalization(y, training=training) y = tf.nn.relu(y) # Global average pooling net = tf.reduce_mean(y, CONFIG[dim]['reduction_dimensions'], name='final_pool', keep_dims=True) return net
python
def final_block(x1, x2, dim='2d', training=True, scope='final_block'): """Converts activations from last RevNet block to pre-logits. Args: x1: [NxHxWxC] tensor of network activations. x2: [NxHxWxC] tensor of network activations. dim: '2d' if 2-dimensional, '3d' if 3-dimensional. training: True for train phase, False for eval phase. scope: Optional variable scope for the final block. Returns: [N, hidden_dim] pre-logits tensor from activations x1 and x2. """ # Final batch norm and relu with tf.variable_scope(scope): y = tf.concat([x1, x2], axis=CONFIG[dim]['split_axis']) y = tf.layers.batch_normalization(y, training=training) y = tf.nn.relu(y) # Global average pooling net = tf.reduce_mean(y, CONFIG[dim]['reduction_dimensions'], name='final_pool', keep_dims=True) return net
[ "def", "final_block", "(", "x1", ",", "x2", ",", "dim", "=", "'2d'", ",", "training", "=", "True", ",", "scope", "=", "'final_block'", ")", ":", "# Final batch norm and relu", "with", "tf", ".", "variable_scope", "(", "scope", ")", ":", "y", "=", "tf", ".", "concat", "(", "[", "x1", ",", "x2", "]", ",", "axis", "=", "CONFIG", "[", "dim", "]", "[", "'split_axis'", "]", ")", "y", "=", "tf", ".", "layers", ".", "batch_normalization", "(", "y", ",", "training", "=", "training", ")", "y", "=", "tf", ".", "nn", ".", "relu", "(", "y", ")", "# Global average pooling", "net", "=", "tf", ".", "reduce_mean", "(", "y", ",", "CONFIG", "[", "dim", "]", "[", "'reduction_dimensions'", "]", ",", "name", "=", "'final_pool'", ",", "keep_dims", "=", "True", ")", "return", "net" ]
Converts activations from last RevNet block to pre-logits. Args: x1: [NxHxWxC] tensor of network activations. x2: [NxHxWxC] tensor of network activations. dim: '2d' if 2-dimensional, '3d' if 3-dimensional. training: True for train phase, False for eval phase. scope: Optional variable scope for the final block. Returns: [N, hidden_dim] pre-logits tensor from activations x1 and x2.
[ "Converts", "activations", "from", "last", "RevNet", "block", "to", "pre", "-", "logits", "." ]
272500b6efe353aeb638d2745ed56e519462ca31
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/models/revnet.py#L261-L285
21,655
tensorflow/tensor2tensor
tensor2tensor/models/revnet.py
revnet
def revnet(inputs, hparams, reuse=None): """Uses Tensor2Tensor memory optimized RevNet block to build a RevNet. Args: inputs: [NxHxWx3] tensor of input images to the model. hparams: HParams object that contains the following parameters, in addition to the parameters contained in the basic_params1() object in the common_hparams module: num_channels - A Python list where each element represents the base depth of the residual unit for a given block (the bottleneck variant expands this to four times the depth in its final conv). num_layers_per_block - A Python list containing the number of RevNet layers for each block. first_batch_norm - A Python list containing booleans representing the presence of a batch norm layer at the beginning of a given block. strides - A Python list containing integers representing the stride of the residual function for each block. num_channels_init_block - An integer representing the number of channels for the convolutional layer in the initial block. dim - A string (either "2d" or "3d") that decides if the RevNet is 2-dimensional or 3-dimensional. reuse: Whether to reuse the default variable scope. Returns: [batch_size, hidden_dim] pre-logits tensor from the bottleneck RevNet. """ training = hparams.mode == tf.estimator.ModeKeys.TRAIN with tf.variable_scope('RevNet', reuse=reuse): x1, x2 = init(inputs, num_channels=hparams.num_channels_init_block, dim=hparams.dim, kernel_size=hparams.init_kernel_size, maxpool=hparams.init_maxpool, stride=hparams.init_stride, training=training) for block_num in range(len(hparams.num_layers_per_block)): block = {'depth': hparams.num_channels[block_num], 'num_layers': hparams.num_layers_per_block[block_num], 'first_batch_norm': hparams.first_batch_norm[block_num], 'stride': hparams.strides[block_num], 'bottleneck': hparams.bottleneck} x1, x2 = unit(x1, x2, block_num, dim=hparams.dim, training=training, **block) pre_logits = final_block(x1, x2, dim=hparams.dim, training=training) return pre_logits
python
def revnet(inputs, hparams, reuse=None): """Uses Tensor2Tensor memory optimized RevNet block to build a RevNet. Args: inputs: [NxHxWx3] tensor of input images to the model. hparams: HParams object that contains the following parameters, in addition to the parameters contained in the basic_params1() object in the common_hparams module: num_channels - A Python list where each element represents the base depth of the residual unit for a given block (the bottleneck variant expands this to four times the depth in its final conv). num_layers_per_block - A Python list containing the number of RevNet layers for each block. first_batch_norm - A Python list containing booleans representing the presence of a batch norm layer at the beginning of a given block. strides - A Python list containing integers representing the stride of the residual function for each block. num_channels_init_block - An integer representing the number of channels for the convolutional layer in the initial block. dim - A string (either "2d" or "3d") that decides if the RevNet is 2-dimensional or 3-dimensional. reuse: Whether to reuse the default variable scope. Returns: [batch_size, hidden_dim] pre-logits tensor from the bottleneck RevNet. """ training = hparams.mode == tf.estimator.ModeKeys.TRAIN with tf.variable_scope('RevNet', reuse=reuse): x1, x2 = init(inputs, num_channels=hparams.num_channels_init_block, dim=hparams.dim, kernel_size=hparams.init_kernel_size, maxpool=hparams.init_maxpool, stride=hparams.init_stride, training=training) for block_num in range(len(hparams.num_layers_per_block)): block = {'depth': hparams.num_channels[block_num], 'num_layers': hparams.num_layers_per_block[block_num], 'first_batch_norm': hparams.first_batch_norm[block_num], 'stride': hparams.strides[block_num], 'bottleneck': hparams.bottleneck} x1, x2 = unit(x1, x2, block_num, dim=hparams.dim, training=training, **block) pre_logits = final_block(x1, x2, dim=hparams.dim, training=training) return pre_logits
[ "def", "revnet", "(", "inputs", ",", "hparams", ",", "reuse", "=", "None", ")", ":", "training", "=", "hparams", ".", "mode", "==", "tf", ".", "estimator", ".", "ModeKeys", ".", "TRAIN", "with", "tf", ".", "variable_scope", "(", "'RevNet'", ",", "reuse", "=", "reuse", ")", ":", "x1", ",", "x2", "=", "init", "(", "inputs", ",", "num_channels", "=", "hparams", ".", "num_channels_init_block", ",", "dim", "=", "hparams", ".", "dim", ",", "kernel_size", "=", "hparams", ".", "init_kernel_size", ",", "maxpool", "=", "hparams", ".", "init_maxpool", ",", "stride", "=", "hparams", ".", "init_stride", ",", "training", "=", "training", ")", "for", "block_num", "in", "range", "(", "len", "(", "hparams", ".", "num_layers_per_block", ")", ")", ":", "block", "=", "{", "'depth'", ":", "hparams", ".", "num_channels", "[", "block_num", "]", ",", "'num_layers'", ":", "hparams", ".", "num_layers_per_block", "[", "block_num", "]", ",", "'first_batch_norm'", ":", "hparams", ".", "first_batch_norm", "[", "block_num", "]", ",", "'stride'", ":", "hparams", ".", "strides", "[", "block_num", "]", ",", "'bottleneck'", ":", "hparams", ".", "bottleneck", "}", "x1", ",", "x2", "=", "unit", "(", "x1", ",", "x2", ",", "block_num", ",", "dim", "=", "hparams", ".", "dim", ",", "training", "=", "training", ",", "*", "*", "block", ")", "pre_logits", "=", "final_block", "(", "x1", ",", "x2", ",", "dim", "=", "hparams", ".", "dim", ",", "training", "=", "training", ")", "return", "pre_logits" ]
Uses Tensor2Tensor memory optimized RevNet block to build a RevNet. Args: inputs: [NxHxWx3] tensor of input images to the model. hparams: HParams object that contains the following parameters, in addition to the parameters contained in the basic_params1() object in the common_hparams module: num_channels - A Python list where each element represents the base depth of the residual unit for a given block (the bottleneck variant expands this to four times the depth in its final conv). num_layers_per_block - A Python list containing the number of RevNet layers for each block. first_batch_norm - A Python list containing booleans representing the presence of a batch norm layer at the beginning of a given block. strides - A Python list containing integers representing the stride of the residual function for each block. num_channels_init_block - An integer representing the number of channels for the convolutional layer in the initial block. dim - A string (either "2d" or "3d") that decides if the RevNet is 2-dimensional or 3-dimensional. reuse: Whether to reuse the default variable scope. Returns: [batch_size, hidden_dim] pre-logits tensor from the bottleneck RevNet.
[ "Uses", "Tensor2Tensor", "memory", "optimized", "RevNet", "block", "to", "build", "a", "RevNet", "." ]
272500b6efe353aeb638d2745ed56e519462ca31
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/models/revnet.py#L288-L335
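What makes this architecture memory-efficient is the invertible coupling inside rev_block: y1 = x1 + F(x2), y2 = x2 + G(y1). Activations need not be stored for backprop because the inputs can be recomputed exactly: x2 = y2 - G(y1), then x1 = y1 - F(x2). A toy NumPy check with arbitrary "residual" functions:

import numpy as np

rng = np.random.RandomState(0)
W_f, W_g = rng.randn(4, 4), rng.randn(4, 4)
F = lambda v: np.tanh(v @ W_f)
G = lambda v: np.tanh(v @ W_g)

x1, x2 = rng.randn(2, 4), rng.randn(2, 4)
y1 = x1 + F(x2)          # forward coupling
y2 = x2 + G(y1)

x2_rec = y2 - G(y1)      # exact inversion, no stored activations
x1_rec = y1 - F(x2_rec)
print(np.allclose(x1, x1_rec), np.allclose(x2, x2_rec))  # True True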
21,656
tensorflow/tensor2tensor
tensor2tensor/models/revnet.py
revnet_base
def revnet_base(): """Default hparams for Revnet.""" hparams = common_hparams.basic_params1() hparams.add_hparam('num_channels', [64, 128, 256, 416]) hparams.add_hparam('num_layers_per_block', [1, 1, 10, 1]) hparams.add_hparam('bottleneck', True) hparams.add_hparam('first_batch_norm', [False, True, True, True]) hparams.add_hparam('init_stride', 2) hparams.add_hparam('init_kernel_size', 7) hparams.add_hparam('init_maxpool', True) hparams.add_hparam('strides', [1, 2, 2, 2]) hparams.add_hparam('num_channels_init_block', 64) hparams.add_hparam('dim', '2d') # Variable init hparams.initializer = 'normal_unit_scaling' hparams.initializer_gain = 2. # Optimization hparams.optimizer = 'Momentum' hparams.optimizer_momentum_momentum = 0.9 hparams.optimizer_momentum_nesterov = True hparams.weight_decay = 1e-4 hparams.clip_grad_norm = 0.0 # (base_lr=0.1) * (batch_size=128*8 (on TPU, or 8 GPUs)=1024) / (256.) hparams.learning_rate = 0.4 hparams.learning_rate_decay_scheme = 'cosine' # For image_imagenet224, 120k training steps, which effectively makes this a # cosine decay (i.e. no cycles). hparams.learning_rate_cosine_cycle_steps = 120000 # Can run with a batch size of 128 with Problem ImageImagenet224 hparams.batch_size = 128 return hparams
python
def revnet_base(): """Default hparams for Revnet.""" hparams = common_hparams.basic_params1() hparams.add_hparam('num_channels', [64, 128, 256, 416]) hparams.add_hparam('num_layers_per_block', [1, 1, 10, 1]) hparams.add_hparam('bottleneck', True) hparams.add_hparam('first_batch_norm', [False, True, True, True]) hparams.add_hparam('init_stride', 2) hparams.add_hparam('init_kernel_size', 7) hparams.add_hparam('init_maxpool', True) hparams.add_hparam('strides', [1, 2, 2, 2]) hparams.add_hparam('num_channels_init_block', 64) hparams.add_hparam('dim', '2d') # Variable init hparams.initializer = 'normal_unit_scaling' hparams.initializer_gain = 2. # Optimization hparams.optimizer = 'Momentum' hparams.optimizer_momentum_momentum = 0.9 hparams.optimizer_momentum_nesterov = True hparams.weight_decay = 1e-4 hparams.clip_grad_norm = 0.0 # (base_lr=0.1) * (batch_size=128*8 (on TPU, or 8 GPUs)=1024) / (256.) hparams.learning_rate = 0.4 hparams.learning_rate_decay_scheme = 'cosine' # For image_imagenet224, 120k training steps, which effectively makes this a # cosine decay (i.e. no cycles). hparams.learning_rate_cosine_cycle_steps = 120000 # Can run with a batch size of 128 with Problem ImageImagenet224 hparams.batch_size = 128 return hparams
[ "def", "revnet_base", "(", ")", ":", "hparams", "=", "common_hparams", ".", "basic_params1", "(", ")", "hparams", ".", "add_hparam", "(", "'num_channels'", ",", "[", "64", ",", "128", ",", "256", ",", "416", "]", ")", "hparams", ".", "add_hparam", "(", "'num_layers_per_block'", ",", "[", "1", ",", "1", ",", "10", ",", "1", "]", ")", "hparams", ".", "add_hparam", "(", "'bottleneck'", ",", "True", ")", "hparams", ".", "add_hparam", "(", "'first_batch_norm'", ",", "[", "False", ",", "True", ",", "True", ",", "True", "]", ")", "hparams", ".", "add_hparam", "(", "'init_stride'", ",", "2", ")", "hparams", ".", "add_hparam", "(", "'init_kernel_size'", ",", "7", ")", "hparams", ".", "add_hparam", "(", "'init_maxpool'", ",", "True", ")", "hparams", ".", "add_hparam", "(", "'strides'", ",", "[", "1", ",", "2", ",", "2", ",", "2", "]", ")", "hparams", ".", "add_hparam", "(", "'num_channels_init_block'", ",", "64", ")", "hparams", ".", "add_hparam", "(", "'dim'", ",", "'2d'", ")", "# Variable init", "hparams", ".", "initializer", "=", "'normal_unit_scaling'", "hparams", ".", "initializer_gain", "=", "2.", "# Optimization", "hparams", ".", "optimizer", "=", "'Momentum'", "hparams", ".", "optimizer_momentum_momentum", "=", "0.9", "hparams", ".", "optimizer_momentum_nesterov", "=", "True", "hparams", ".", "weight_decay", "=", "1e-4", "hparams", ".", "clip_grad_norm", "=", "0.0", "# (base_lr=0.1) * (batch_size=128*8 (on TPU, or 8 GPUs)=1024) / (256.)", "hparams", ".", "learning_rate", "=", "0.4", "hparams", ".", "learning_rate_decay_scheme", "=", "'cosine'", "# For image_imagenet224, 120k training steps, which effectively makes this a", "# cosine decay (i.e. no cycles).", "hparams", ".", "learning_rate_cosine_cycle_steps", "=", "120000", "# Can run with a batch size of 128 with Problem ImageImagenet224", "hparams", ".", "batch_size", "=", "128", "return", "hparams" ]
Default hparams for Revnet.
[ "Default", "hparams", "for", "Revnet", "." ]
272500b6efe353aeb638d2745ed56e519462ca31
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/models/revnet.py#L345-L378
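The four per-block lists in revnet_base line up by index: inside revnet, block_num selects one element each from num_channels, num_layers_per_block, first_batch_norm, and strides. A minimal dependency-free sketch of that pairing; SimpleNamespace stands in for the real HParams object, which is not part of this dump:

from types import SimpleNamespace

# Stand-in for the object returned by revnet_base(); values copied from above.
hparams = SimpleNamespace(
    num_channels=[64, 128, 256, 416],
    num_layers_per_block=[1, 1, 10, 1],
    first_batch_norm=[False, True, True, True],
    strides=[1, 2, 2, 2],
    bottleneck=True)

# Mirrors the loop in revnet(): one block config per index, read in parallel.
for block_num in range(len(hparams.num_layers_per_block)):
    block = {'depth': hparams.num_channels[block_num],
             'num_layers': hparams.num_layers_per_block[block_num],
             'first_batch_norm': hparams.first_batch_norm[block_num],
             'stride': hparams.strides[block_num],
             'bottleneck': hparams.bottleneck}
    print(block_num, block)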
21,657
tensorflow/tensor2tensor
tensor2tensor/models/revnet.py
revnet_range
def revnet_range(rhp): """Hyperparameters for tuning revnet.""" rhp.set_float('learning_rate', 0.05, 0.2, scale=rhp.LOG_SCALE) rhp.set_float('weight_decay', 1e-5, 1e-3, scale=rhp.LOG_SCALE) rhp.set_discrete('num_channels_init_block', [64, 128]) return rhp
python
def revnet_range(rhp): """Hyperparameters for tuning revnet.""" rhp.set_float('learning_rate', 0.05, 0.2, scale=rhp.LOG_SCALE) rhp.set_float('weight_decay', 1e-5, 1e-3, scale=rhp.LOG_SCALE) rhp.set_discrete('num_channels_init_block', [64, 128]) return rhp
[ "def", "revnet_range", "(", "rhp", ")", ":", "rhp", ".", "set_float", "(", "'learning_rate'", ",", "0.05", ",", "0.2", ",", "scale", "=", "rhp", ".", "LOG_SCALE", ")", "rhp", ".", "set_float", "(", "'weight_decay'", ",", "1e-5", ",", "1e-3", ",", "scale", "=", "rhp", ".", "LOG_SCALE", ")", "rhp", ".", "set_discrete", "(", "'num_channels_init_block'", ",", "[", "64", ",", "128", "]", ")", "return", "rhp" ]
Hyperparameters for tuning revnet.
[ "Hyperparameters", "for", "tuning", "revnet", "." ]
272500b6efe353aeb638d2745ed56e519462ca31
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/models/revnet.py#L435-L440
21,658
tensorflow/tensor2tensor
tensor2tensor/models/video/basic_deterministic_params.py
next_frame_basic_deterministic
def next_frame_basic_deterministic(): """Basic 2-frame conv model.""" hparams = base.next_frame_base() hparams.video_num_input_frames = 4 hparams.video_num_target_frames = 1 hparams.hidden_size = 64 hparams.batch_size = 4 hparams.num_hidden_layers = 2 hparams.optimizer = "Adafactor" hparams.learning_rate_constant = 1.5 hparams.learning_rate_warmup_steps = 8000 hparams.learning_rate_schedule = "linear_warmup * constant * rsqrt_decay" hparams.label_smoothing = 0.0 hparams.initializer = "uniform_unit_scaling" hparams.initializer_gain = 1.3 hparams.weight_decay = 0.0 hparams.clip_grad_norm = 1.0 hparams.dropout = 0.1 hparams.add_hparam("residual_dropout", 0.5) hparams.add_hparam("num_compress_steps", 6) hparams.add_hparam("filter_double_steps", 2) hparams.add_hparam("pixel_sampling_temperature", 0.0) hparams.add_hparam("concat_internal_states", False) hparams.add_hparam("do_autoregressive_rnn", False) hparams.add_hparam("autoregressive_rnn_lookback", 8) hparams.add_hparam("autoregressive_rnn_warmup_steps", 8000) hparams.add_hparam("activation_fn", "relu") hparams.bottom["inputs"] = modalities.video_identity_bottom hparams.bottom["targets"] = modalities.video_identity_bottom return hparams
python
def next_frame_basic_deterministic(): """Basic 2-frame conv model.""" hparams = base.next_frame_base() hparams.video_num_input_frames = 4 hparams.video_num_target_frames = 1 hparams.hidden_size = 64 hparams.batch_size = 4 hparams.num_hidden_layers = 2 hparams.optimizer = "Adafactor" hparams.learning_rate_constant = 1.5 hparams.learning_rate_warmup_steps = 8000 hparams.learning_rate_schedule = "linear_warmup * constant * rsqrt_decay" hparams.label_smoothing = 0.0 hparams.initializer = "uniform_unit_scaling" hparams.initializer_gain = 1.3 hparams.weight_decay = 0.0 hparams.clip_grad_norm = 1.0 hparams.dropout = 0.1 hparams.add_hparam("residual_dropout", 0.5) hparams.add_hparam("num_compress_steps", 6) hparams.add_hparam("filter_double_steps", 2) hparams.add_hparam("pixel_sampling_temperature", 0.0) hparams.add_hparam("concat_internal_states", False) hparams.add_hparam("do_autoregressive_rnn", False) hparams.add_hparam("autoregressive_rnn_lookback", 8) hparams.add_hparam("autoregressive_rnn_warmup_steps", 8000) hparams.add_hparam("activation_fn", "relu") hparams.bottom["inputs"] = modalities.video_identity_bottom hparams.bottom["targets"] = modalities.video_identity_bottom return hparams
[ "def", "next_frame_basic_deterministic", "(", ")", ":", "hparams", "=", "base", ".", "next_frame_base", "(", ")", "hparams", ".", "video_num_input_frames", "=", "4", "hparams", ".", "video_num_target_frames", "=", "1", "hparams", ".", "hidden_size", "=", "64", "hparams", ".", "batch_size", "=", "4", "hparams", ".", "num_hidden_layers", "=", "2", "hparams", ".", "optimizer", "=", "\"Adafactor\"", "hparams", ".", "learning_rate_constant", "=", "1.5", "hparams", ".", "learning_rate_warmup_steps", "=", "8000", "hparams", ".", "learning_rate_schedule", "=", "\"linear_warmup * constant * rsqrt_decay\"", "hparams", ".", "label_smoothing", "=", "0.0", "hparams", ".", "initializer", "=", "\"uniform_unit_scaling\"", "hparams", ".", "initializer_gain", "=", "1.3", "hparams", ".", "weight_decay", "=", "0.0", "hparams", ".", "clip_grad_norm", "=", "1.0", "hparams", ".", "dropout", "=", "0.1", "hparams", ".", "add_hparam", "(", "\"residual_dropout\"", ",", "0.5", ")", "hparams", ".", "add_hparam", "(", "\"num_compress_steps\"", ",", "6", ")", "hparams", ".", "add_hparam", "(", "\"filter_double_steps\"", ",", "2", ")", "hparams", ".", "add_hparam", "(", "\"pixel_sampling_temperature\"", ",", "0.0", ")", "hparams", ".", "add_hparam", "(", "\"concat_internal_states\"", ",", "False", ")", "hparams", ".", "add_hparam", "(", "\"do_autoregressive_rnn\"", ",", "False", ")", "hparams", ".", "add_hparam", "(", "\"autoregressive_rnn_lookback\"", ",", "8", ")", "hparams", ".", "add_hparam", "(", "\"autoregressive_rnn_warmup_steps\"", ",", "8000", ")", "hparams", ".", "add_hparam", "(", "\"activation_fn\"", ",", "\"relu\"", ")", "hparams", ".", "bottom", "[", "\"inputs\"", "]", "=", "modalities", ".", "video_identity_bottom", "hparams", ".", "bottom", "[", "\"targets\"", "]", "=", "modalities", ".", "video_identity_bottom", "return", "hparams" ]
Basic 2-frame conv model.
[ "Basic", "2", "-", "frame", "conv", "model", "." ]
272500b6efe353aeb638d2745ed56e519462ca31
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/models/video/basic_deterministic_params.py#L27-L56
21,659
tensorflow/tensor2tensor
tensor2tensor/models/video/basic_deterministic_params.py
next_frame_pixel_noise
def next_frame_pixel_noise(): """Basic 2-frame conv model with pixel noise.""" hparams = next_frame_basic_deterministic() hparams.add_hparam("video_modality_input_noise", 0.05) hparams.bottom["inputs"] = modalities.video_pixel_noise_bottom hparams.top["inputs"] = modalities.video_top return hparams
python
def next_frame_pixel_noise(): """Basic 2-frame conv model with pixel noise.""" hparams = next_frame_basic_deterministic() hparams.add_hparam("video_modality_input_noise", 0.05) hparams.bottom["inputs"] = modalities.video_pixel_noise_bottom hparams.top["inputs"] = modalities.video_top return hparams
[ "def", "next_frame_pixel_noise", "(", ")", ":", "hparams", "=", "next_frame_basic_deterministic", "(", ")", "hparams", ".", "add_hparam", "(", "\"video_modality_input_noise\"", ",", "0.05", ")", "hparams", ".", "bottom", "[", "\"inputs\"", "]", "=", "modalities", ".", "video_pixel_noise_bottom", "hparams", ".", "top", "[", "\"inputs\"", "]", "=", "modalities", ".", "video_top", "return", "hparams" ]
Basic 2-frame conv model with pixel noise.
[ "Basic", "2", "-", "frame", "conv", "model", "with", "pixel", "noise", "." ]
272500b6efe353aeb638d2745ed56e519462ca31
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/models/video/basic_deterministic_params.py#L60-L66
21,660
tensorflow/tensor2tensor
tensor2tensor/models/video/basic_deterministic_params.py
next_frame_sampling
def next_frame_sampling(): """Basic conv model with scheduled sampling.""" hparams = next_frame_basic_deterministic() hparams.scheduled_sampling_mode = "prob_inverse_exp" hparams.scheduled_sampling_max_prob = 1.0 hparams.scheduled_sampling_decay_steps = 10000 return hparams
python
def next_frame_sampling(): """Basic conv model with scheduled sampling.""" hparams = next_frame_basic_deterministic() hparams.scheduled_sampling_mode = "prob_inverse_exp" hparams.scheduled_sampling_max_prob = 1.0 hparams.scheduled_sampling_decay_steps = 10000 return hparams
[ "def", "next_frame_sampling", "(", ")", ":", "hparams", "=", "next_frame_basic_deterministic", "(", ")", "hparams", ".", "scheduled_sampling_mode", "=", "\"prob_inverse_exp\"", "hparams", ".", "scheduled_sampling_max_prob", "=", "1.0", "hparams", ".", "scheduled_sampling_decay_steps", "=", "10000", "return", "hparams" ]
Basic conv model with scheduled sampling.
[ "Basic", "conv", "model", "with", "scheduled", "sampling", "." ]
272500b6efe353aeb638d2745ed56e519462ca31
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/models/video/basic_deterministic_params.py#L79-L85
21,661
tensorflow/tensor2tensor
tensor2tensor/models/video/basic_deterministic_params.py
next_frame_ae
def next_frame_ae(): """Conv autoencoder.""" hparams = next_frame_basic_deterministic() hparams.bottom["inputs"] = modalities.video_bitwise_bottom hparams.top["inputs"] = modalities.video_top hparams.hidden_size = 256 hparams.batch_size = 8 hparams.num_hidden_layers = 4 hparams.num_compress_steps = 4 hparams.dropout = 0.4 return hparams
python
def next_frame_ae(): """Conv autoencoder.""" hparams = next_frame_basic_deterministic() hparams.bottom["inputs"] = modalities.video_bitwise_bottom hparams.top["inputs"] = modalities.video_top hparams.hidden_size = 256 hparams.batch_size = 8 hparams.num_hidden_layers = 4 hparams.num_compress_steps = 4 hparams.dropout = 0.4 return hparams
[ "def", "next_frame_ae", "(", ")", ":", "hparams", "=", "next_frame_basic_deterministic", "(", ")", "hparams", ".", "bottom", "[", "\"inputs\"", "]", "=", "modalities", ".", "video_bitwise_bottom", "hparams", ".", "top", "[", "\"inputs\"", "]", "=", "modalities", ".", "video_top", "hparams", ".", "hidden_size", "=", "256", "hparams", ".", "batch_size", "=", "8", "hparams", ".", "num_hidden_layers", "=", "4", "hparams", ".", "num_compress_steps", "=", "4", "hparams", ".", "dropout", "=", "0.4", "return", "hparams" ]
Conv autoencoder.
[ "Conv", "autoencoder", "." ]
272500b6efe353aeb638d2745ed56e519462ca31
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/models/video/basic_deterministic_params.py#L96-L106
21,662
tensorflow/tensor2tensor
tensor2tensor/models/video/basic_deterministic_params.py
next_frame_ae_tiny
def next_frame_ae_tiny(): """Conv autoencoder, tiny set for testing.""" hparams = next_frame_tiny() hparams.bottom["inputs"] = modalities.video_bitwise_bottom hparams.top["inputs"] = modalities.video_top hparams.batch_size = 8 hparams.dropout = 0.4 return hparams
python
def next_frame_ae_tiny(): """Conv autoencoder, tiny set for testing.""" hparams = next_frame_tiny() hparams.bottom["inputs"] = modalities.video_bitwise_bottom hparams.top["inputs"] = modalities.video_top hparams.batch_size = 8 hparams.dropout = 0.4 return hparams
[ "def", "next_frame_ae_tiny", "(", ")", ":", "hparams", "=", "next_frame_tiny", "(", ")", "hparams", ".", "bottom", "[", "\"inputs\"", "]", "=", "modalities", ".", "video_bitwise_bottom", "hparams", ".", "top", "[", "\"inputs\"", "]", "=", "modalities", ".", "video_top", "hparams", ".", "batch_size", "=", "8", "hparams", ".", "dropout", "=", "0.4", "return", "hparams" ]
Conv autoencoder, tiny set for testing.
[ "Conv", "autoencoder", "tiny", "set", "for", "testing", "." ]
272500b6efe353aeb638d2745ed56e519462ca31
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/models/video/basic_deterministic_params.py#L110-L117
21,663
tensorflow/tensor2tensor
tensor2tensor/models/video/basic_deterministic_params.py
next_frame_tiny
def next_frame_tiny(): """Tiny for testing.""" hparams = next_frame_basic_deterministic() hparams.hidden_size = 32 hparams.num_hidden_layers = 1 hparams.num_compress_steps = 2 hparams.filter_double_steps = 1 return hparams
python
def next_frame_tiny(): """Tiny for testing.""" hparams = next_frame_basic_deterministic() hparams.hidden_size = 32 hparams.num_hidden_layers = 1 hparams.num_compress_steps = 2 hparams.filter_double_steps = 1 return hparams
[ "def", "next_frame_tiny", "(", ")", ":", "hparams", "=", "next_frame_basic_deterministic", "(", ")", "hparams", ".", "hidden_size", "=", "32", "hparams", ".", "num_hidden_layers", "=", "1", "hparams", ".", "num_compress_steps", "=", "2", "hparams", ".", "filter_double_steps", "=", "1", "return", "hparams" ]
Tiny for testing.
[ "Tiny", "for", "testing", "." ]
272500b6efe353aeb638d2745ed56e519462ca31
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/models/video/basic_deterministic_params.py#L129-L136
21,664
tensorflow/tensor2tensor
tensor2tensor/models/video/basic_deterministic_params.py
next_frame_l1
def next_frame_l1(): """Basic conv model with L1 modality.""" hparams = next_frame_basic_deterministic() hparams.loss["targets"] = modalities.video_l1_loss hparams.top["targets"] = modalities.video_l1_top hparams.video_modality_loss_cutoff = 2.4 return hparams
python
def next_frame_l1(): """Basic conv model with L1 modality.""" hparams = next_frame_basic_deterministic() hparams.loss["targets"] = modalities.video_l1_loss hparams.top["targets"] = modalities.video_l1_top hparams.video_modality_loss_cutoff = 2.4 return hparams
[ "def", "next_frame_l1", "(", ")", ":", "hparams", "=", "next_frame_basic_deterministic", "(", ")", "hparams", ".", "loss", "[", "\"targets\"", "]", "=", "modalities", ".", "video_l1_loss", "hparams", ".", "top", "[", "\"targets\"", "]", "=", "modalities", ".", "video_l1_top", "hparams", ".", "video_modality_loss_cutoff", "=", "2.4", "return", "hparams" ]
Basic conv model with L1 modality.
[ "Basic", "conv", "model", "with", "L1", "modality", "." ]
272500b6efe353aeb638d2745ed56e519462ca31
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/models/video/basic_deterministic_params.py#L140-L146
21,665
tensorflow/tensor2tensor
tensor2tensor/models/video/basic_deterministic_params.py
next_frame_l2
def next_frame_l2(): """Basic conv model with L2 modality.""" hparams = next_frame_basic_deterministic() hparams.loss["targets"] = modalities.video_l2_loss hparams.top["targets"] = modalities.video_l1_top hparams.video_modality_loss_cutoff = 2.4 return hparams
python
def next_frame_l2(): """Basic conv model with L2 modality.""" hparams = next_frame_basic_deterministic() hparams.loss["targets"] = modalities.video_l2_loss hparams.top["targets"] = modalities.video_l1_top hparams.video_modality_loss_cutoff = 2.4 return hparams
[ "def", "next_frame_l2", "(", ")", ":", "hparams", "=", "next_frame_basic_deterministic", "(", ")", "hparams", ".", "loss", "[", "\"targets\"", "]", "=", "modalities", ".", "video_l2_loss", "hparams", ".", "top", "[", "\"targets\"", "]", "=", "modalities", ".", "video_l1_top", "hparams", ".", "video_modality_loss_cutoff", "=", "2.4", "return", "hparams" ]
Basic conv model with L2 modality.
[ "Basic", "conv", "model", "with", "L2", "modality", "." ]
272500b6efe353aeb638d2745ed56e519462ca31
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/models/video/basic_deterministic_params.py#L150-L156
21,666
tensorflow/tensor2tensor
tensor2tensor/models/video/basic_deterministic_params.py
next_frame_base_range
def next_frame_base_range(rhp): """Basic tuning grid.""" rhp.set_float("dropout", 0.2, 0.6) rhp.set_discrete("hidden_size", [64, 128, 256]) rhp.set_int("num_compress_steps", 5, 8) rhp.set_discrete("batch_size", [4, 8, 16, 32]) rhp.set_int("num_hidden_layers", 1, 3) rhp.set_int("filter_double_steps", 1, 6) rhp.set_float("learning_rate_constant", 1., 4.) rhp.set_int("learning_rate_warmup_steps", 500, 3000) rhp.set_float("initializer_gain", 0.8, 1.8)
python
def next_frame_base_range(rhp): """Basic tuning grid.""" rhp.set_float("dropout", 0.2, 0.6) rhp.set_discrete("hidden_size", [64, 128, 256]) rhp.set_int("num_compress_steps", 5, 8) rhp.set_discrete("batch_size", [4, 8, 16, 32]) rhp.set_int("num_hidden_layers", 1, 3) rhp.set_int("filter_double_steps", 1, 6) rhp.set_float("learning_rate_constant", 1., 4.) rhp.set_int("learning_rate_warmup_steps", 500, 3000) rhp.set_float("initializer_gain", 0.8, 1.8)
[ "def", "next_frame_base_range", "(", "rhp", ")", ":", "rhp", ".", "set_float", "(", "\"dropout\"", ",", "0.2", ",", "0.6", ")", "rhp", ".", "set_discrete", "(", "\"hidden_size\"", ",", "[", "64", ",", "128", ",", "256", "]", ")", "rhp", ".", "set_int", "(", "\"num_compress_steps\"", ",", "5", ",", "8", ")", "rhp", ".", "set_discrete", "(", "\"batch_size\"", ",", "[", "4", ",", "8", ",", "16", ",", "32", "]", ")", "rhp", ".", "set_int", "(", "\"num_hidden_layers\"", ",", "1", ",", "3", ")", "rhp", ".", "set_int", "(", "\"filter_double_steps\"", ",", "1", ",", "6", ")", "rhp", ".", "set_float", "(", "\"learning_rate_constant\"", ",", "1.", ",", "4.", ")", "rhp", ".", "set_int", "(", "\"learning_rate_warmup_steps\"", ",", "500", ",", "3000", ")", "rhp", ".", "set_float", "(", "\"initializer_gain\"", ",", "0.8", ",", "1.8", ")" ]
Basic tuning grid.
[ "Basic", "tuning", "grid", "." ]
272500b6efe353aeb638d2745ed56e519462ca31
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/models/video/basic_deterministic_params.py#L160-L170
21,667
tensorflow/tensor2tensor
tensor2tensor/models/video/basic_deterministic_params.py
next_frame_ae_range
def next_frame_ae_range(rhp): """Autoencoder world model tuning grid.""" rhp.set_float("dropout", 0.3, 0.5) rhp.set_int("num_compress_steps", 1, 3) rhp.set_int("num_hidden_layers", 2, 6) rhp.set_float("learning_rate_constant", 1., 2.) rhp.set_float("initializer_gain", 0.8, 1.5) rhp.set_int("filter_double_steps", 2, 3)
python
def next_frame_ae_range(rhp): """Autoencoder world model tuning grid.""" rhp.set_float("dropout", 0.3, 0.5) rhp.set_int("num_compress_steps", 1, 3) rhp.set_int("num_hidden_layers", 2, 6) rhp.set_float("learning_rate_constant", 1., 2.) rhp.set_float("initializer_gain", 0.8, 1.5) rhp.set_int("filter_double_steps", 2, 3)
[ "def", "next_frame_ae_range", "(", "rhp", ")", ":", "rhp", ".", "set_float", "(", "\"dropout\"", ",", "0.3", ",", "0.5", ")", "rhp", ".", "set_int", "(", "\"num_compress_steps\"", ",", "1", ",", "3", ")", "rhp", ".", "set_int", "(", "\"num_hidden_layers\"", ",", "2", ",", "6", ")", "rhp", ".", "set_float", "(", "\"learning_rate_constant\"", ",", "1.", ",", "2.", ")", "rhp", ".", "set_float", "(", "\"initializer_gain\"", ",", "0.8", ",", "1.5", ")", "rhp", ".", "set_int", "(", "\"filter_double_steps\"", ",", "2", ",", "3", ")" ]
Autoencoder world model tuning grid.
[ "Autoencoder", "world", "model", "tuning", "grid", "." ]
272500b6efe353aeb638d2745ed56e519462ca31
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/models/video/basic_deterministic_params.py#L194-L201
21,668
tensorflow/tensor2tensor
tensor2tensor/rl/trainer_model_free.py
initialize_env_specs
def initialize_env_specs(hparams, env_problem_name): """Initializes env_specs using the appropriate env.""" if env_problem_name: env = registry.env_problem(env_problem_name, batch_size=hparams.batch_size) else: env = rl_utils.setup_env(hparams, hparams.batch_size, hparams.eval_max_num_noops, hparams.rl_env_max_episode_steps, env_name=hparams.rl_env_name) env.start_new_epoch(0) return rl.make_real_env_fn(env)
python
def initialize_env_specs(hparams, env_problem_name): """Initializes env_specs using the appropriate env.""" if env_problem_name: env = registry.env_problem(env_problem_name, batch_size=hparams.batch_size) else: env = rl_utils.setup_env(hparams, hparams.batch_size, hparams.eval_max_num_noops, hparams.rl_env_max_episode_steps, env_name=hparams.rl_env_name) env.start_new_epoch(0) return rl.make_real_env_fn(env)
[ "def", "initialize_env_specs", "(", "hparams", ",", "env_problem_name", ")", ":", "if", "env_problem_name", ":", "env", "=", "registry", ".", "env_problem", "(", "env_problem_name", ",", "batch_size", "=", "hparams", ".", "batch_size", ")", "else", ":", "env", "=", "rl_utils", ".", "setup_env", "(", "hparams", ",", "hparams", ".", "batch_size", ",", "hparams", ".", "eval_max_num_noops", ",", "hparams", ".", "rl_env_max_episode_steps", ",", "env_name", "=", "hparams", ".", "rl_env_name", ")", "env", ".", "start_new_epoch", "(", "0", ")", "return", "rl", ".", "make_real_env_fn", "(", "env", ")" ]
Initializes env_specs using the appropriate env.
[ "Initializes", "env_specs", "using", "the", "appropriate", "env", "." ]
272500b6efe353aeb638d2745ed56e519462ca31
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/rl/trainer_model_free.py#L68-L79
21,669
tensorflow/tensor2tensor
tensor2tensor/utils/learning_rate.py
learning_rate_factor
def learning_rate_factor(name, step_num, hparams): """Compute the designated learning rate factor from hparams.""" if name == "constant": tf.logging.info("Base learning rate: %f", hparams.learning_rate_constant) return hparams.learning_rate_constant elif name == "linear_warmup": return tf.minimum(1.0, step_num / hparams.learning_rate_warmup_steps) elif name == "linear_decay": ret = (hparams.train_steps - step_num) / hparams.learning_rate_decay_steps return tf.minimum(1.0, tf.maximum(0.0, ret)) elif name == "cosdecay": # openai gpt in_warmup = tf.cast(step_num <= hparams.learning_rate_warmup_steps, dtype=tf.float32) ret = 0.5 * (1 + tf.cos( np.pi * step_num / hparams.learning_rate_decay_steps)) # if in warmup stage return 1 else return the decayed value return in_warmup * 1 + (1 - in_warmup) * ret elif name == "single_cycle_cos_decay": # Cosine decay to zero with a single cycle. This is different from # "cosdecay" because it starts at 1 when the warmup steps end. x = tf.maximum(step_num, hparams.learning_rate_warmup_steps) step = x - hparams.learning_rate_warmup_steps return tf.math.cos( step * np.pi / hparams.learning_rate_decay_steps) / 2.0 + 0.5 elif name == "rsqrt_decay": return tf.rsqrt(tf.maximum(step_num, hparams.learning_rate_warmup_steps)) elif name == "rsqrt_normalized_decay": scale = tf.sqrt(tf.to_float(hparams.learning_rate_warmup_steps)) return scale * tf.rsqrt(tf.maximum( step_num, hparams.learning_rate_warmup_steps)) elif name == "exp_decay": decay_steps = hparams.learning_rate_decay_steps warmup_steps = hparams.learning_rate_warmup_steps p = (step_num - warmup_steps) / decay_steps p = tf.maximum(p, 0.) if hparams.learning_rate_decay_staircase: p = tf.floor(p) return tf.pow(hparams.learning_rate_decay_rate, p) elif name == "rsqrt_hidden_size": return hparams.hidden_size ** -0.5 elif name == "legacy": return legacy_learning_rate_schedule(hparams) else: raise ValueError("unknown learning rate factor %s" % name)
python
def learning_rate_factor(name, step_num, hparams): """Compute the designated learning rate factor from hparams.""" if name == "constant": tf.logging.info("Base learning rate: %f", hparams.learning_rate_constant) return hparams.learning_rate_constant elif name == "linear_warmup": return tf.minimum(1.0, step_num / hparams.learning_rate_warmup_steps) elif name == "linear_decay": ret = (hparams.train_steps - step_num) / hparams.learning_rate_decay_steps return tf.minimum(1.0, tf.maximum(0.0, ret)) elif name == "cosdecay": # openai gpt in_warmup = tf.cast(step_num <= hparams.learning_rate_warmup_steps, dtype=tf.float32) ret = 0.5 * (1 + tf.cos( np.pi * step_num / hparams.learning_rate_decay_steps)) # if in warmup stage return 1 else return the decayed value return in_warmup * 1 + (1 - in_warmup) * ret elif name == "single_cycle_cos_decay": # Cosine decay to zero with a single cycle. This is different from # "cosdecay" because it starts at 1 when the warmup steps end. x = tf.maximum(step_num, hparams.learning_rate_warmup_steps) step = x - hparams.learning_rate_warmup_steps return tf.math.cos( step * np.pi / hparams.learning_rate_decay_steps) / 2.0 + 0.5 elif name == "rsqrt_decay": return tf.rsqrt(tf.maximum(step_num, hparams.learning_rate_warmup_steps)) elif name == "rsqrt_normalized_decay": scale = tf.sqrt(tf.to_float(hparams.learning_rate_warmup_steps)) return scale * tf.rsqrt(tf.maximum( step_num, hparams.learning_rate_warmup_steps)) elif name == "exp_decay": decay_steps = hparams.learning_rate_decay_steps warmup_steps = hparams.learning_rate_warmup_steps p = (step_num - warmup_steps) / decay_steps p = tf.maximum(p, 0.) if hparams.learning_rate_decay_staircase: p = tf.floor(p) return tf.pow(hparams.learning_rate_decay_rate, p) elif name == "rsqrt_hidden_size": return hparams.hidden_size ** -0.5 elif name == "legacy": return legacy_learning_rate_schedule(hparams) else: raise ValueError("unknown learning rate factor %s" % name)
[ "def", "learning_rate_factor", "(", "name", ",", "step_num", ",", "hparams", ")", ":", "if", "name", "==", "\"constant\"", ":", "tf", ".", "logging", ".", "info", "(", "\"Base learning rate: %f\"", ",", "hparams", ".", "learning_rate_constant", ")", "return", "hparams", ".", "learning_rate_constant", "elif", "name", "==", "\"linear_warmup\"", ":", "return", "tf", ".", "minimum", "(", "1.0", ",", "step_num", "/", "hparams", ".", "learning_rate_warmup_steps", ")", "elif", "name", "==", "\"linear_decay\"", ":", "ret", "=", "(", "hparams", ".", "train_steps", "-", "step_num", ")", "/", "hparams", ".", "learning_rate_decay_steps", "return", "tf", ".", "minimum", "(", "1.0", ",", "tf", ".", "maximum", "(", "0.0", ",", "ret", ")", ")", "elif", "name", "==", "\"cosdecay\"", ":", "# openai gpt", "in_warmup", "=", "tf", ".", "cast", "(", "step_num", "<=", "hparams", ".", "learning_rate_warmup_steps", ",", "dtype", "=", "tf", ".", "float32", ")", "ret", "=", "0.5", "*", "(", "1", "+", "tf", ".", "cos", "(", "np", ".", "pi", "*", "step_num", "/", "hparams", ".", "learning_rate_decay_steps", ")", ")", "# if in warmup stage return 1 else return the decayed value", "return", "in_warmup", "*", "1", "+", "(", "1", "-", "in_warmup", ")", "*", "ret", "elif", "name", "==", "\"single_cycle_cos_decay\"", ":", "# Cosine decay to zero with a single cycle. This is different from", "# \"cosdecay\" because it starts at 1 when the warmup steps end.", "x", "=", "tf", ".", "maximum", "(", "step_num", ",", "hparams", ".", "learning_rate_warmup_steps", ")", "step", "=", "x", "-", "hparams", ".", "learning_rate_warmup_steps", "return", "tf", ".", "math", ".", "cos", "(", "step", "*", "np", ".", "pi", "/", "hparams", ".", "learning_rate_decay_steps", ")", "/", "2.0", "+", "0.5", "elif", "name", "==", "\"rsqrt_decay\"", ":", "return", "tf", ".", "rsqrt", "(", "tf", ".", "maximum", "(", "step_num", ",", "hparams", ".", "learning_rate_warmup_steps", ")", ")", "elif", "name", "==", "\"rsqrt_normalized_decay\"", ":", "scale", "=", "tf", ".", "sqrt", "(", "tf", ".", "to_float", "(", "hparams", ".", "learning_rate_warmup_steps", ")", ")", "return", "scale", "*", "tf", ".", "rsqrt", "(", "tf", ".", "maximum", "(", "step_num", ",", "hparams", ".", "learning_rate_warmup_steps", ")", ")", "elif", "name", "==", "\"exp_decay\"", ":", "decay_steps", "=", "hparams", ".", "learning_rate_decay_steps", "warmup_steps", "=", "hparams", ".", "learning_rate_warmup_steps", "p", "=", "(", "step_num", "-", "warmup_steps", ")", "/", "decay_steps", "p", "=", "tf", ".", "maximum", "(", "p", ",", "0.", ")", "if", "hparams", ".", "learning_rate_decay_staircase", ":", "p", "=", "tf", ".", "floor", "(", "p", ")", "return", "tf", ".", "pow", "(", "hparams", ".", "learning_rate_decay_rate", ",", "p", ")", "elif", "name", "==", "\"rsqrt_hidden_size\"", ":", "return", "hparams", ".", "hidden_size", "**", "-", "0.5", "elif", "name", "==", "\"legacy\"", ":", "return", "legacy_learning_rate_schedule", "(", "hparams", ")", "else", ":", "raise", "ValueError", "(", "\"unknown learning rate factor %s\"", "%", "name", ")" ]
Compute the designated learning rate factor from hparams.
[ "Compute", "the", "designated", "learning", "rate", "factor", "from", "hparams", "." ]
272500b6efe353aeb638d2745ed56e519462ca31
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/utils/learning_rate.py#L26-L69
21,670
tensorflow/tensor2tensor
tensor2tensor/utils/learning_rate.py
learning_rate_schedule
def learning_rate_schedule(hparams): """Learning rate schedule based on hparams.""" mlperf_log.transformer_print(key=mlperf_log.OPT_LR, deferred=True) mlperf_log.transformer_print( key=mlperf_log.OPT_LR_WARMUP_STEPS, value=hparams.learning_rate_warmup_steps) step_num = _global_step(hparams) schedule_string = hparams.learning_rate_schedule names = schedule_string.split("*") names = [name.strip() for name in names if name.strip()] ret = tf.constant(1.0) for name in names: ret *= learning_rate_factor(name, step_num, hparams) return ret
python
def learning_rate_schedule(hparams): """Learning rate schedule based on hparams.""" mlperf_log.transformer_print(key=mlperf_log.OPT_LR, deferred=True) mlperf_log.transformer_print( key=mlperf_log.OPT_LR_WARMUP_STEPS, value=hparams.learning_rate_warmup_steps) step_num = _global_step(hparams) schedule_string = hparams.learning_rate_schedule names = schedule_string.split("*") names = [name.strip() for name in names if name.strip()] ret = tf.constant(1.0) for name in names: ret *= learning_rate_factor(name, step_num, hparams) return ret
[ "def", "learning_rate_schedule", "(", "hparams", ")", ":", "mlperf_log", ".", "transformer_print", "(", "key", "=", "mlperf_log", ".", "OPT_LR", ",", "deferred", "=", "True", ")", "mlperf_log", ".", "transformer_print", "(", "key", "=", "mlperf_log", ".", "OPT_LR_WARMUP_STEPS", ",", "value", "=", "hparams", ".", "learning_rate_warmup_steps", ")", "step_num", "=", "_global_step", "(", "hparams", ")", "schedule_string", "=", "hparams", ".", "learning_rate_schedule", "names", "=", "schedule_string", ".", "split", "(", "\"*\"", ")", "names", "=", "[", "name", ".", "strip", "(", ")", "for", "name", "in", "names", "if", "name", ".", "strip", "(", ")", "]", "ret", "=", "tf", ".", "constant", "(", "1.0", ")", "for", "name", "in", "names", ":", "ret", "*=", "learning_rate_factor", "(", "name", ",", "step_num", ",", "hparams", ")", "return", "ret" ]
Learning rate schedule based on hparams.
[ "Learning", "rate", "schedule", "based", "on", "hparams", "." ]
272500b6efe353aeb638d2745ed56e519462ca31
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/utils/learning_rate.py#L72-L85
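learning_rate_schedule treats hparams.learning_rate_schedule as a product of named factors, e.g. "linear_warmup * constant * rsqrt_decay" from the video hparams above: split on '*', strip, multiply. A dependency-free sketch with plain-float versions of three branches of learning_rate_factor (the warmup_steps and constant values here are illustrative):

import math

def factor(name, step, warmup_steps=8000.0, constant=1.5):
    # Plain-float analogues of three branches of learning_rate_factor().
    if name == 'constant':
        return constant
    if name == 'linear_warmup':
        return min(1.0, step / warmup_steps)
    if name == 'rsqrt_decay':
        return 1.0 / math.sqrt(max(step, warmup_steps))
    raise ValueError('unknown learning rate factor %s' % name)

def schedule(schedule_string, step):
    # Same parsing as learning_rate_schedule(): split, strip, multiply.
    names = [n.strip() for n in schedule_string.split('*') if n.strip()]
    ret = 1.0
    for name in names:
        ret *= factor(name, step)
    return ret

for step in (100.0, 8000.0, 128000.0):
    print(step, schedule('linear_warmup * constant * rsqrt_decay', step))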
21,671
tensorflow/tensor2tensor
tensor2tensor/utils/learning_rate.py
legacy_learning_rate_schedule
def legacy_learning_rate_schedule(hparams): """Backwards-compatible learning-rate schedule.""" step_num = _global_step(hparams) warmup_steps = tf.to_float(hparams.learning_rate_warmup_steps) if hparams.learning_rate_decay_scheme == "noam": ret = 5000.0 * hparams.hidden_size**-0.5 * tf.minimum( (step_num + 1) * warmup_steps**-1.5, (step_num + 1)**-0.5) else: warmup_steps = hparams.learning_rate_warmup_steps warmup = _learning_rate_warmup(warmup_steps, hparams=hparams) decay = _learning_rate_decay(hparams, warmup_steps) ret = tf.where(step_num < warmup_steps, warmup, decay) optimizer_correction = 0.002 if "adam" in hparams.optimizer else 1.0 tf.logging.info("Base learning rate: %f", hparams.learning_rate) return ret * optimizer_correction * hparams.learning_rate
python
def legacy_learning_rate_schedule(hparams): """Backwards-compatible learning-rate schedule.""" step_num = _global_step(hparams) warmup_steps = tf.to_float(hparams.learning_rate_warmup_steps) if hparams.learning_rate_decay_scheme == "noam": ret = 5000.0 * hparams.hidden_size**-0.5 * tf.minimum( (step_num + 1) * warmup_steps**-1.5, (step_num + 1)**-0.5) else: warmup_steps = hparams.learning_rate_warmup_steps warmup = _learning_rate_warmup(warmup_steps, hparams=hparams) decay = _learning_rate_decay(hparams, warmup_steps) ret = tf.where(step_num < warmup_steps, warmup, decay) optimizer_correction = 0.002 if "adam" in hparams.optimizer else 1.0 tf.logging.info("Base learning rate: %f", hparams.learning_rate) return ret * optimizer_correction * hparams.learning_rate
[ "def", "legacy_learning_rate_schedule", "(", "hparams", ")", ":", "step_num", "=", "_global_step", "(", "hparams", ")", "warmup_steps", "=", "tf", ".", "to_float", "(", "hparams", ".", "learning_rate_warmup_steps", ")", "if", "hparams", ".", "learning_rate_decay_scheme", "==", "\"noam\"", ":", "ret", "=", "5000.0", "*", "hparams", ".", "hidden_size", "**", "-", "0.5", "*", "tf", ".", "minimum", "(", "(", "step_num", "+", "1", ")", "*", "warmup_steps", "**", "-", "1.5", ",", "(", "step_num", "+", "1", ")", "**", "-", "0.5", ")", "else", ":", "warmup_steps", "=", "hparams", ".", "learning_rate_warmup_steps", "warmup", "=", "_learning_rate_warmup", "(", "warmup_steps", ",", "hparams", "=", "hparams", ")", "decay", "=", "_learning_rate_decay", "(", "hparams", ",", "warmup_steps", ")", "ret", "=", "tf", ".", "where", "(", "step_num", "<", "warmup_steps", ",", "warmup", ",", "decay", ")", "optimizer_correction", "=", "0.002", "if", "\"adam\"", "in", "hparams", ".", "optimizer", "else", "1.0", "tf", ".", "logging", ".", "info", "(", "\"Base learning rate: %f\"", ",", "hparams", ".", "learning_rate", ")", "return", "ret", "*", "optimizer_correction", "*", "hparams", ".", "learning_rate" ]
Backwards-compatible learning-rate schedule.
[ "Backwards", "-", "compatible", "learning", "-", "rate", "schedule", "." ]
272500b6efe353aeb638d2745ed56e519462ca31
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/utils/learning_rate.py#L88-L102
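The "noam" branch above is the original Transformer schedule: a linear ramp over warmup_steps followed by inverse-square-root decay, scaled by hidden_size**-0.5. A plain-float version (hidden_size=512 and warmup_steps=4000 are illustrative defaults, not values taken from this file):

import math

def noam(step, hidden_size=512, warmup_steps=4000.0):
    # min() switches from the linear ramp to rsqrt decay at step == warmup_steps.
    return 5000.0 * hidden_size ** -0.5 * min(
        (step + 1) * warmup_steps ** -1.5, (step + 1) ** -0.5)

for step in (0, 4000, 100000):
    print(step, noam(step))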
21,672
tensorflow/tensor2tensor
tensor2tensor/utils/learning_rate.py
_global_step
def _global_step(hparams): """Adjust global step if a multi-step optimizer is used.""" step = tf.to_float(tf.train.get_or_create_global_step()) multiplier = hparams.optimizer_multistep_accumulate_steps if not multiplier: return step tf.logging.info("Dividing global step by %d for multi-step optimizer." % multiplier) return step / tf.to_float(multiplier)
python
def _global_step(hparams): """Adjust global step if a multi-step optimizer is used.""" step = tf.to_float(tf.train.get_or_create_global_step()) multiplier = hparams.optimizer_multistep_accumulate_steps if not multiplier: return step tf.logging.info("Dividing global step by %d for multi-step optimizer." % multiplier) return step / tf.to_float(multiplier)
[ "def", "_global_step", "(", "hparams", ")", ":", "step", "=", "tf", ".", "to_float", "(", "tf", ".", "train", ".", "get_or_create_global_step", "(", ")", ")", "multiplier", "=", "hparams", ".", "optimizer_multistep_accumulate_steps", "if", "not", "multiplier", ":", "return", "step", "tf", ".", "logging", ".", "info", "(", "\"Dividing global step by %d for multi-step optimizer.\"", "%", "multiplier", ")", "return", "step", "/", "tf", ".", "to_float", "(", "multiplier", ")" ]
Adjust global step if a multi-step optimizer is used.
[ "Adjust", "global", "step", "if", "a", "multi", "-", "step", "optimizer", "is", "used", "." ]
272500b6efe353aeb638d2745ed56e519462ca31
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/utils/learning_rate.py#L105-L114
21,673
tensorflow/tensor2tensor
tensor2tensor/utils/learning_rate.py
_piecewise_learning_rate
def _piecewise_learning_rate(step, boundaries, values): """Scale learning rate according to the given schedule. Multipliers are not cumulative. Args: step: global step boundaries: List of steps to transition on. values: Multiplier to apply at each boundary transition. Returns: Scaled value for the learning rate. """ values = [1.0] + values boundaries = [float(x) for x in boundaries] return tf.train.piecewise_constant( step, boundaries, values, name="piecewise_lr")
python
def _piecewise_learning_rate(step, boundaries, values): """Scale learning rate according to the given schedule. Multipliers are not cumulative. Args: step: global step boundaries: List of steps to transition on. values: Multiplier to apply at each boundary transition. Returns: Scaled value for the learning rate. """ values = [1.0] + values boundaries = [float(x) for x in boundaries] return tf.train.piecewise_constant( step, boundaries, values, name="piecewise_lr")
[ "def", "_piecewise_learning_rate", "(", "step", ",", "boundaries", ",", "values", ")", ":", "values", "=", "[", "1.0", "]", "+", "values", "boundaries", "=", "[", "float", "(", "x", ")", "for", "x", "in", "boundaries", "]", "return", "tf", ".", "train", ".", "piecewise_constant", "(", "step", ",", "boundaries", ",", "values", ",", "name", "=", "\"piecewise_lr\"", ")" ]
Scale learning rate according to the given schedule. Multipliers are not cumulative. Args: step: global step boundaries: List of steps to transition on. values: Multiplier to apply at each boundary transition. Returns: Scaled value for the learning rate.
[ "Scale", "learning", "rate", "according", "to", "the", "given", "schedule", "." ]
272500b6efe353aeb638d2745ed56e519462ca31
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/utils/learning_rate.py#L122-L138
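"Multipliers are not cumulative" means each boundary replaces the multiplier rather than scaling the previous one, and prepending 1.0 fixes the value before the first boundary. A plain-Python equivalent of the lookup that tf.train.piecewise_constant performs as a graph op (bisect_left reproduces its convention that a step equal to a boundary still belongs to the lower interval, assuming sorted boundaries):

import bisect

def piecewise(step, boundaries, values):
    # 1.0 until the first boundary is crossed, then each value in turn.
    values = [1.0] + values
    return values[bisect.bisect_left(boundaries, step)]

boundaries, values = [1000, 2000], [0.1, 0.01]
for step in (500, 1000, 1500, 2500):
    print(step, piecewise(step, boundaries, values))  # 1.0, 1.0, 0.1, 0.01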
21,674
tensorflow/tensor2tensor
tensor2tensor/utils/learning_rate.py
_learning_rate_decay
def _learning_rate_decay(hparams, warmup_steps=0): """Learning rate decay multiplier.""" scheme = hparams.learning_rate_decay_scheme warmup_steps = tf.to_float(warmup_steps) global_step = _global_step(hparams) if not scheme or scheme == "none": return tf.constant(1.) tf.logging.info("Applying learning rate decay: %s.", scheme) if scheme == "exp": decay_steps = hparams.learning_rate_decay_steps p = (global_step - warmup_steps) / decay_steps if hparams.learning_rate_decay_staircase: p = tf.floor(p) return tf.pow(hparams.learning_rate_decay_rate, p) if scheme == "piecewise": return _piecewise_learning_rate(global_step, hparams.learning_rate_boundaries, hparams.learning_rate_multiples) if scheme == "cosine": cycle_steps = hparams.learning_rate_cosine_cycle_steps cycle_position = global_step % (2 * cycle_steps) cycle_position = cycle_steps - tf.abs(cycle_steps - cycle_position) return 0.5 * (1 + tf.cos(np.pi * cycle_position / cycle_steps)) if scheme == "cyclelinear10x": # Cycle the rate linearly by 10x every warmup_steps, up and down. cycle_steps = warmup_steps cycle_position = global_step % (2 * cycle_steps) cycle_position = tf.to_float( # Normalize to the interval [-1, 1]. cycle_position - cycle_steps) / float(cycle_steps) cycle_position = 1.0 - tf.abs(cycle_position) # 0 to 1 and back to 0. return (cycle_position + 0.1) * 3.0 # 10x difference each cycle (0.3-3). if scheme == "sqrt": return _legacy_sqrt_decay(global_step - warmup_steps) raise ValueError("Unrecognized learning rate decay scheme: %s" % hparams.learning_rate_decay_scheme)
python
def _learning_rate_decay(hparams, warmup_steps=0): """Learning rate decay multiplier.""" scheme = hparams.learning_rate_decay_scheme warmup_steps = tf.to_float(warmup_steps) global_step = _global_step(hparams) if not scheme or scheme == "none": return tf.constant(1.) tf.logging.info("Applying learning rate decay: %s.", scheme) if scheme == "exp": decay_steps = hparams.learning_rate_decay_steps p = (global_step - warmup_steps) / decay_steps if hparams.learning_rate_decay_staircase: p = tf.floor(p) return tf.pow(hparams.learning_rate_decay_rate, p) if scheme == "piecewise": return _piecewise_learning_rate(global_step, hparams.learning_rate_boundaries, hparams.learning_rate_multiples) if scheme == "cosine": cycle_steps = hparams.learning_rate_cosine_cycle_steps cycle_position = global_step % (2 * cycle_steps) cycle_position = cycle_steps - tf.abs(cycle_steps - cycle_position) return 0.5 * (1 + tf.cos(np.pi * cycle_position / cycle_steps)) if scheme == "cyclelinear10x": # Cycle the rate linearly by 10x every warmup_steps, up and down. cycle_steps = warmup_steps cycle_position = global_step % (2 * cycle_steps) cycle_position = tf.to_float( # Normalize to the interval [-1, 1]. cycle_position - cycle_steps) / float(cycle_steps) cycle_position = 1.0 - tf.abs(cycle_position) # 0 to 1 and back to 0. return (cycle_position + 0.1) * 3.0 # 10x difference each cycle (0.3-3). if scheme == "sqrt": return _legacy_sqrt_decay(global_step - warmup_steps) raise ValueError("Unrecognized learning rate decay scheme: %s" % hparams.learning_rate_decay_scheme)
[ "def", "_learning_rate_decay", "(", "hparams", ",", "warmup_steps", "=", "0", ")", ":", "scheme", "=", "hparams", ".", "learning_rate_decay_scheme", "warmup_steps", "=", "tf", ".", "to_float", "(", "warmup_steps", ")", "global_step", "=", "_global_step", "(", "hparams", ")", "if", "not", "scheme", "or", "scheme", "==", "\"none\"", ":", "return", "tf", ".", "constant", "(", "1.", ")", "tf", ".", "logging", ".", "info", "(", "\"Applying learning rate decay: %s.\"", ",", "scheme", ")", "if", "scheme", "==", "\"exp\"", ":", "decay_steps", "=", "hparams", ".", "learning_rate_decay_steps", "p", "=", "(", "global_step", "-", "warmup_steps", ")", "/", "decay_steps", "if", "hparams", ".", "learning_rate_decay_staircase", ":", "p", "=", "tf", ".", "floor", "(", "p", ")", "return", "tf", ".", "pow", "(", "hparams", ".", "learning_rate_decay_rate", ",", "p", ")", "if", "scheme", "==", "\"piecewise\"", ":", "return", "_piecewise_learning_rate", "(", "global_step", ",", "hparams", ".", "learning_rate_boundaries", ",", "hparams", ".", "learning_rate_multiples", ")", "if", "scheme", "==", "\"cosine\"", ":", "cycle_steps", "=", "hparams", ".", "learning_rate_cosine_cycle_steps", "cycle_position", "=", "global_step", "%", "(", "2", "*", "cycle_steps", ")", "cycle_position", "=", "cycle_steps", "-", "tf", ".", "abs", "(", "cycle_steps", "-", "cycle_position", ")", "return", "0.5", "*", "(", "1", "+", "tf", ".", "cos", "(", "np", ".", "pi", "*", "cycle_position", "/", "cycle_steps", ")", ")", "if", "scheme", "==", "\"cyclelinear10x\"", ":", "# Cycle the rate linearly by 10x every warmup_steps, up and down.", "cycle_steps", "=", "warmup_steps", "cycle_position", "=", "global_step", "%", "(", "2", "*", "cycle_steps", ")", "cycle_position", "=", "tf", ".", "to_float", "(", "# Normalize to the interval [-1, 1].", "cycle_position", "-", "cycle_steps", ")", "/", "float", "(", "cycle_steps", ")", "cycle_position", "=", "1.0", "-", "tf", ".", "abs", "(", "cycle_position", ")", "# 0 to 1 and back to 0.", "return", "(", "cycle_position", "+", "0.1", ")", "*", "3.0", "# 10x difference each cycle (0.3-3).", "if", "scheme", "==", "\"sqrt\"", ":", "return", "_legacy_sqrt_decay", "(", "global_step", "-", "warmup_steps", ")", "raise", "ValueError", "(", "\"Unrecognized learning rate decay scheme: %s\"", "%", "hparams", ".", "learning_rate_decay_scheme", ")" ]
Learning rate decay multiplier.
[ "Learning", "rate", "decay", "multiplier", "." ]
272500b6efe353aeb638d2745ed56e519462ca31
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/utils/learning_rate.py#L141-L183
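The "cosine" scheme folds the step counter into a triangle wave (0 up to cycle_steps and back) so the multiplier decays from 1 to 0 and recovers over every 2 * cycle_steps window; when training runs for exactly cycle_steps, as revnet_base notes, only the decaying half is ever used. A plain-float sketch of just that branch:

import math

def cosine_cycle(step, cycle_steps):
    # Triangle wave: cycle_position runs 0 -> cycle_steps -> 0.
    cycle_position = step % (2 * cycle_steps)
    cycle_position = cycle_steps - abs(cycle_steps - cycle_position)
    return 0.5 * (1 + math.cos(math.pi * cycle_position / cycle_steps))

for step in (0, 60000, 120000, 180000):
    print(step, cosine_cycle(step, 120000))  # 1.0, 0.5, ~0.0, 0.5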
21,675
tensorflow/tensor2tensor
tensor2tensor/utils/learning_rate.py
_learning_rate_warmup
def _learning_rate_warmup(warmup_steps, warmup_schedule="exp", hparams=None): """Learning rate warmup multiplier.""" if not warmup_steps: return tf.constant(1.) tf.logging.info("Applying %s learning rate warmup for %d steps", warmup_schedule, warmup_steps) warmup_steps = tf.to_float(warmup_steps) global_step = _global_step(hparams) if warmup_schedule == "exp": return tf.exp(tf.log(0.01) / warmup_steps)**(warmup_steps - global_step) else: assert warmup_schedule == "linear" start = tf.constant(0.35) return ((tf.constant(1.) - start) / warmup_steps) * global_step + start
python
def _learning_rate_warmup(warmup_steps, warmup_schedule="exp", hparams=None): """Learning rate warmup multiplier.""" if not warmup_steps: return tf.constant(1.) tf.logging.info("Applying %s learning rate warmup for %d steps", warmup_schedule, warmup_steps) warmup_steps = tf.to_float(warmup_steps) global_step = _global_step(hparams) if warmup_schedule == "exp": return tf.exp(tf.log(0.01) / warmup_steps)**(warmup_steps - global_step) else: assert warmup_schedule == "linear" start = tf.constant(0.35) return ((tf.constant(1.) - start) / warmup_steps) * global_step + start
[ "def", "_learning_rate_warmup", "(", "warmup_steps", ",", "warmup_schedule", "=", "\"exp\"", ",", "hparams", "=", "None", ")", ":", "if", "not", "warmup_steps", ":", "return", "tf", ".", "constant", "(", "1.", ")", "tf", ".", "logging", ".", "info", "(", "\"Applying %s learning rate warmup for %d steps\"", ",", "warmup_schedule", ",", "warmup_steps", ")", "warmup_steps", "=", "tf", ".", "to_float", "(", "warmup_steps", ")", "global_step", "=", "_global_step", "(", "hparams", ")", "if", "warmup_schedule", "==", "\"exp\"", ":", "return", "tf", ".", "exp", "(", "tf", ".", "log", "(", "0.01", ")", "/", "warmup_steps", ")", "**", "(", "warmup_steps", "-", "global_step", ")", "else", ":", "assert", "warmup_schedule", "==", "\"linear\"", "start", "=", "tf", ".", "constant", "(", "0.35", ")", "return", "(", "(", "tf", ".", "constant", "(", "1.", ")", "-", "start", ")", "/", "warmup_steps", ")", "*", "global_step", "+", "start" ]
Learning rate warmup multiplier.
[ "Learning", "rate", "warmup", "multiplier", "." ]
272500b6efe353aeb638d2745ed56e519462ca31
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/utils/learning_rate.py#L186-L202
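The "exp" warmup ramps the multiplier geometrically from 0.01 at step 0 to 1.0 at warmup_steps (exp(log(0.01) / warmup_steps) raised to the remaining steps), while the "linear" branch interpolates from 0.35 to 1.0. A plain-float check of both branches:

import math

def warmup(step, warmup_steps, schedule='exp'):
    if schedule == 'exp':
        # Equivalent to 0.01 ** ((warmup_steps - step) / warmup_steps).
        return math.exp(math.log(0.01) / warmup_steps) ** (warmup_steps - step)
    assert schedule == 'linear'
    start = 0.35
    return (1.0 - start) / warmup_steps * step + start

for step in (0, 4000, 8000):
    print(step, warmup(step, 8000.0, 'exp'), warmup(step, 8000.0, 'linear'))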
21,676
tensorflow/tensor2tensor
tensor2tensor/data_generators/algorithmic_math.py
is_in_expr
def is_in_expr(expr, find): """Returns True if `find` is a subtree of `expr`.""" return expr == find or (isinstance(expr, ExprNode) and expr.is_in(find))
python
def is_in_expr(expr, find): """Returns True if `find` is a subtree of `expr`.""" return expr == find or (isinstance(expr, ExprNode) and expr.is_in(find))
[ "def", "is_in_expr", "(", "expr", ",", "find", ")", ":", "return", "expr", "==", "find", "or", "(", "isinstance", "(", "expr", ",", "ExprNode", ")", "and", "expr", ".", "is_in", "(", "find", ")", ")" ]
Returns True if `find` is a subtree of `expr`.
[ "Returns", "True", "if", "find", "is", "a", "subtree", "of", "expr", "." ]
272500b6efe353aeb638d2745ed56e519462ca31
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/data_generators/algorithmic_math.py#L90-L92
21,677
tensorflow/tensor2tensor
tensor2tensor/data_generators/algorithmic_math.py
random_expr_with_required_var
def random_expr_with_required_var(depth, required_var, optional_list, ops): """Generate a random expression tree with a required variable. The required variable appears exactly once in the expression. Args: depth: At least one leaf will be this many levels down from the top. required_var: A char. This char is guaranteed to be placed exactly once at a leaf somewhere in the tree. This is the var to solve for. optional_list: A list of chars. These chars are randomly selected as leaf values. These are constant vars. ops: A list of ExprOp instances. Returns: An ExprNode instance which is the root of the generated expression tree. """ if not depth: if required_var: return required_var return str(optional_list[random.randrange(len(optional_list))]) max_depth_side = random.randrange(2) other_side_depth = random.randrange(depth) required_var_side = random.randrange(2) left = random_expr_with_required_var( depth - 1 if max_depth_side else other_side_depth, required_var if required_var_side else None, optional_list, ops) right = random_expr_with_required_var( depth - 1 if not max_depth_side else other_side_depth, required_var if not required_var_side else None, optional_list, ops) op = ops[random.randrange(len(ops))] return ExprNode(left, right, op)
python
def random_expr_with_required_var(depth, required_var, optional_list, ops): """Generate a random expression tree with a required variable. The required variable appears exactly once in the expression. Args: depth: At least one leaf will be this many levels down from the top. required_var: A char. This char is guaranteed to be placed exactly once at a leaf somewhere in the tree. This is the var to solve for. optional_list: A list of chars. These chars are randomly selected as leaf values. These are constant vars. ops: A list of ExprOp instances. Returns: An ExprNode instance which is the root of the generated expression tree. """ if not depth: if required_var: return required_var return str(optional_list[random.randrange(len(optional_list))]) max_depth_side = random.randrange(2) other_side_depth = random.randrange(depth) required_var_side = random.randrange(2) left = random_expr_with_required_var( depth - 1 if max_depth_side else other_side_depth, required_var if required_var_side else None, optional_list, ops) right = random_expr_with_required_var( depth - 1 if not max_depth_side else other_side_depth, required_var if not required_var_side else None, optional_list, ops) op = ops[random.randrange(len(ops))] return ExprNode(left, right, op)
[ "def", "random_expr_with_required_var", "(", "depth", ",", "required_var", ",", "optional_list", ",", "ops", ")", ":", "if", "not", "depth", ":", "if", "required_var", ":", "return", "required_var", "return", "str", "(", "optional_list", "[", "random", ".", "randrange", "(", "len", "(", "optional_list", ")", ")", "]", ")", "max_depth_side", "=", "random", ".", "randrange", "(", "2", ")", "other_side_depth", "=", "random", ".", "randrange", "(", "depth", ")", "required_var_side", "=", "random", ".", "randrange", "(", "2", ")", "left", "=", "random_expr_with_required_var", "(", "depth", "-", "1", "if", "max_depth_side", "else", "other_side_depth", ",", "required_var", "if", "required_var_side", "else", "None", ",", "optional_list", ",", "ops", ")", "right", "=", "random_expr_with_required_var", "(", "depth", "-", "1", "if", "not", "max_depth_side", "else", "other_side_depth", ",", "required_var", "if", "not", "required_var_side", "else", "None", ",", "optional_list", ",", "ops", ")", "op", "=", "ops", "[", "random", ".", "randrange", "(", "len", "(", "ops", ")", ")", "]", "return", "ExprNode", "(", "left", ",", "right", ",", "op", ")" ]
Generate a random expression tree with a required variable. The required variable appears exactly once in the expression. Args: depth: At least one leaf will be this many levels down from the top. required_var: A char. This char is guaranteed to be placed exactly once at a leaf somewhere in the tree. This is the var to solve for. optional_list: A list of chars. These chars are randomly selected as leaf values. These are constant vars. ops: A list of ExprOp instances. Returns: An ExprNode instance which is the root of the generated expression tree.
[ "Generate", "a", "random", "expression", "tree", "with", "a", "required", "variable", "." ]
272500b6efe353aeb638d2745ed56e519462ca31
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/data_generators/algorithmic_math.py#L95-L129
21,678
tensorflow/tensor2tensor
tensor2tensor/data_generators/algorithmic_math.py
random_expr
def random_expr(depth, vlist, ops): """Generate a random expression tree. Args: depth: At least one leaf will be this many levels down from the top. vlist: A list of chars. These chars are randomly selected as leaf values. ops: A list of ExprOp instances. Returns: An ExprNode instance which is the root of the generated expression tree. """ if not depth: return str(vlist[random.randrange(len(vlist))]) max_depth_side = random.randrange(2) other_side_depth = random.randrange(depth) left = random_expr(depth - 1 if max_depth_side else other_side_depth, vlist, ops) right = random_expr(depth - 1 if not max_depth_side else other_side_depth, vlist, ops) op = ops[random.randrange(len(ops))] return ExprNode(left, right, op)
python
def random_expr(depth, vlist, ops): """Generate a random expression tree. Args: depth: At least one leaf will be this many levels down from the top. vlist: A list of chars. These chars are randomly selected as leaf values. ops: A list of ExprOp instances. Returns: An ExprNode instance which is the root of the generated expression tree. """ if not depth: return str(vlist[random.randrange(len(vlist))]) max_depth_side = random.randrange(2) other_side_depth = random.randrange(depth) left = random_expr(depth - 1 if max_depth_side else other_side_depth, vlist, ops) right = random_expr(depth - 1 if not max_depth_side else other_side_depth, vlist, ops) op = ops[random.randrange(len(ops))] return ExprNode(left, right, op)
[ "def", "random_expr", "(", "depth", ",", "vlist", ",", "ops", ")", ":", "if", "not", "depth", ":", "return", "str", "(", "vlist", "[", "random", ".", "randrange", "(", "len", "(", "vlist", ")", ")", "]", ")", "max_depth_side", "=", "random", ".", "randrange", "(", "2", ")", "other_side_depth", "=", "random", ".", "randrange", "(", "depth", ")", "left", "=", "random_expr", "(", "depth", "-", "1", "if", "max_depth_side", "else", "other_side_depth", ",", "vlist", ",", "ops", ")", "right", "=", "random_expr", "(", "depth", "-", "1", "if", "not", "max_depth_side", "else", "other_side_depth", ",", "vlist", ",", "ops", ")", "op", "=", "ops", "[", "random", ".", "randrange", "(", "len", "(", "ops", ")", ")", "]", "return", "ExprNode", "(", "left", ",", "right", ",", "op", ")" ]
Generate a random expression tree. Args: depth: At least one leaf will be this many levels down from the top. vlist: A list of chars. These chars are randomly selected as leaf values. ops: A list of ExprOp instances. Returns: An ExprNode instance which is the root of the generated expression tree.
[ "Generate", "a", "random", "expression", "tree", "." ]
272500b6efe353aeb638d2745ed56e519462ca31
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/data_generators/algorithmic_math.py#L132-L155
21,679
tensorflow/tensor2tensor
tensor2tensor/data_generators/algorithmic_math.py
algebra_inverse_solve
def algebra_inverse_solve(left, right, var, solve_ops): """Solves for the value of the given var in an expression. Args: left: The root of the ExprNode tree on the left side of the equals sign. right: The root of the ExprNode tree on the right side of the equals sign. var: A char. The variable to solve for. solve_ops: A dictionary with the following properties. * For each operator in the expression, there is a rule that determines how to cancel out a value either to the left or the right of that operator. * For each rule, there is an entry in the dictionary. The key is two chars- the op char, and either 'l' or 'r' meaning rule for canceling out the left or right sides. For example, '+l', '+r', '-l', '-r'. * The value of each entry is a function with the following signature: (left, right, to_tree) -> (new_from_tree, new_to_tree) left- Expression on left side of the op. right- Expression on the right side of the op. to_tree- The tree on the other side of the equal sign. The canceled out expression will be moved here. new_from_tree- The resulting from_tree after the algebraic manipulation. new_to_tree- The resulting to_tree after the algebraic manipulation. Returns: The root of an ExprNode tree which holds the value of `var` after solving. Raises: ValueError: If `var` does not appear exactly once in the equation (which includes the left and right sides). """ is_in_left = is_in_expr(left, var) is_in_right = is_in_expr(right, var) if is_in_left == is_in_right: if is_in_left: raise ValueError("Solve-variable '%s' is on both sides of the equation. " "Only equations where the solve-variable appears once " "are supported by this solver. Left: '%s', right: '%s'" % (var, str(left), str(right))) else: raise ValueError("Solve-variable '%s' is not present in the equation. It " "must appear once. Left: '%s', right: '%s'" % (var, str(left), str(right))) from_tree = left if is_in_left else right to_tree = left if not is_in_left else right while from_tree != var: is_in_left = is_in_expr(from_tree.left, var) is_in_right = is_in_expr(from_tree.right, var) from_tree, to_tree = (solve_ops[str(from_tree.op) + ("l" if is_in_left else "r")]( from_tree.left, from_tree.right, to_tree)) return to_tree
python
def algebra_inverse_solve(left, right, var, solve_ops): """Solves for the value of the given var in an expression. Args: left: The root of the ExprNode tree on the left side of the equals sign. right: The root of the ExprNode tree on the right side of the equals sign. var: A char. The variable to solve for. solve_ops: A dictionary with the following properties. * For each operator in the expression, there is a rule that determines how to cancel out a value either to the left or the right of that operator. * For each rule, there is an entry in the dictionary. The key is two chars- the op char, and either 'l' or 'r' meaning rule for canceling out the left or right sides. For example, '+l', '+r', '-l', '-r'. * The value of each entry is a function with the following signature: (left, right, to_tree) -> (new_from_tree, new_to_tree) left- Expression on left side of the op. right- Expression on the right side of the op. to_tree- The tree on the other side of the equal sign. The canceled out expression will be moved here. new_from_tree- The resulting from_tree after the algebraic manipulation. new_to_tree- The resulting to_tree after the algebraic manipulation. Returns: The root of an ExprNode tree which holds the value of `var` after solving. Raises: ValueError: If `var` does not appear exactly once in the equation (which includes the left and right sides). """ is_in_left = is_in_expr(left, var) is_in_right = is_in_expr(right, var) if is_in_left == is_in_right: if is_in_left: raise ValueError("Solve-variable '%s' is on both sides of the equation. " "Only equations where the solve-variable appears once " "are supported by this solver. Left: '%s', right: '%s'" % (var, str(left), str(right))) else: raise ValueError("Solve-variable '%s' is not present in the equation. It " "must appear once. Left: '%s', right: '%s'" % (var, str(left), str(right))) from_tree = left if is_in_left else right to_tree = left if not is_in_left else right while from_tree != var: is_in_left = is_in_expr(from_tree.left, var) is_in_right = is_in_expr(from_tree.right, var) from_tree, to_tree = (solve_ops[str(from_tree.op) + ("l" if is_in_left else "r")]( from_tree.left, from_tree.right, to_tree)) return to_tree
[ "def", "algebra_inverse_solve", "(", "left", ",", "right", ",", "var", ",", "solve_ops", ")", ":", "is_in_left", "=", "is_in_expr", "(", "left", ",", "var", ")", "is_in_right", "=", "is_in_expr", "(", "right", ",", "var", ")", "if", "is_in_left", "==", "is_in_right", ":", "if", "is_in_left", ":", "raise", "ValueError", "(", "\"Solve-variable '%s' is on both sides of the equation. \"", "\"Only equations where the solve variable-appears once \"", "\"are supported by this solver. Left: '%s', right: '%s'\"", "%", "(", "var", ",", "str", "(", "left", ")", ",", "str", "(", "right", ")", ")", ")", "else", ":", "raise", "ValueError", "(", "\"Solve-variable '%s' is not present in the equation. It \"", "\"must appear once. Left: '%s', right: '%s'\"", "%", "(", "var", ",", "str", "(", "left", ")", ",", "str", "(", "right", ")", ")", ")", "from_tree", "=", "left", "if", "is_in_left", "else", "right", "to_tree", "=", "left", "if", "not", "is_in_left", "else", "right", "while", "from_tree", "!=", "var", ":", "is_in_left", "=", "is_in_expr", "(", "from_tree", ".", "left", ",", "var", ")", "is_in_right", "=", "is_in_expr", "(", "from_tree", ".", "right", ",", "var", ")", "from_tree", ",", "to_tree", "=", "(", "solve_ops", "[", "str", "(", "from_tree", ".", "op", ")", "+", "(", "\"l\"", "if", "is_in_left", "else", "\"r\"", ")", "]", "(", "from_tree", ".", "left", ",", "from_tree", ".", "right", ",", "to_tree", ")", ")", "return", "to_tree" ]
Solves for the value of the given var in an expression. Args: left: The root of the ExprNode tree on the left side of the equals sign. right: The root of the ExprNode tree on the right side of the equals sign. var: A char. The variable to solve for. solve_ops: A dictionary with the following properties. * For each operator in the expression, there is a rule that determines how to cancel out a value either to the left or the right of that operator. * For each rule, there is an entry in the dictionary. The key is two chars- the op char, and either 'l' or 'r' meaning rule for canceling out the left or right sides. For example, '+l', '+r', '-l', '-r'. * The value of each entry is a function with the following signature: (left, right, to_tree) -> (new_from_tree, new_to_tree) left- Expression on left side of the op. right- Expression on the right side of the op. to_tree- The tree on the other side of the equal sign. The canceled out expression will be moved here. new_from_tree- The resulting from_tree after the algebraic manipulation. new_to_tree- The resulting to_tree after the algebraic manipulation. Returns: The root of an ExprNode tree which holds the value of `var` after solving. Raises: ValueError: If `var` does not appear exactly once in the equation (which includes the left and right sides).
[ "Solves", "for", "the", "value", "of", "the", "given", "var", "in", "an", "expression", "." ]
272500b6efe353aeb638d2745ed56e519462ca31
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/data_generators/algorithmic_math.py#L158-L211
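The solver above composes with the two expression generators; a hedged sketch under the same installation assumption, building one side that holds the variable and one side that does not:

import random

from tensor2tensor.data_generators import algorithmic_math as am

random.seed(1)
cfg = am.math_dataset_init(6)
ops = list(cfg.ops.values())
var, consts = cfg.vlist[0], cfg.vlist[1:]
left = am.random_expr_with_required_var(2, var, consts, ops)  # side holding `var`
right = am.random_expr(1, consts, ops)                        # side without `var`
solved = am.algebra_inverse_solve(left, right, var, cfg.solve_ops)
print("%s = %s" % (var, str(solved)))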
21,680
tensorflow/tensor2tensor
tensor2tensor/data_generators/algorithmic_math.py
format_sympy_expr
def format_sympy_expr(sympy_expr, functions=None): """Convert sympy expression into a string which can be encoded. Args: sympy_expr: Any sympy expression tree or string. functions: Defines special functions. A dict mapping human readable string names, like "log", "exp", "sin", "cos", etc., to single chars. Each function gets a unique token, like "L" for "log". Returns: A string representation of the expression suitable for encoding as a sequence input. """ if functions is None: functions = {} str_expr = str(sympy_expr) result = str_expr.replace(" ", "") for fn_name, char in six.iteritems(functions): result = result.replace(fn_name, char) return result
python
def format_sympy_expr(sympy_expr, functions=None): """Convert sympy expression into a string which can be encoded. Args: sympy_expr: Any sympy expression tree or string. functions: Defines special functions. A dict mapping human readable string names, like "log", "exp", "sin", "cos", etc., to single chars. Each function gets a unique token, like "L" for "log". Returns: A string representation of the expression suitable for encoding as a sequence input. """ if functions is None: functions = {} str_expr = str(sympy_expr) result = str_expr.replace(" ", "") for fn_name, char in six.iteritems(functions): result = result.replace(fn_name, char) return result
[ "def", "format_sympy_expr", "(", "sympy_expr", ",", "functions", "=", "None", ")", ":", "if", "functions", "is", "None", ":", "functions", "=", "{", "}", "str_expr", "=", "str", "(", "sympy_expr", ")", "result", "=", "str_expr", ".", "replace", "(", "\" \"", ",", "\"\"", ")", "for", "fn_name", ",", "char", "in", "six", ".", "iteritems", "(", "functions", ")", ":", "result", "=", "result", ".", "replace", "(", "fn_name", ",", "char", ")", "return", "result" ]
Convert sympy expression into a string which can be encoded. Args: sympy_expr: Any sympy expression tree or string. functions: Defines special functions. A dict mapping human readable string names, like "log", "exp", "sin", "cos", etc., to single chars. Each function gets a unique token, like "L" for "log". Returns: A string representation of the expression suitable for encoding as a sequence input.
[ "Convert", "sympy", "expression", "into", "a", "string", "which", "can", "be", "encoded", "." ]
272500b6efe353aeb638d2745ed56e519462ca31
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/data_generators/algorithmic_math.py#L214-L233
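Both behaviors of format_sympy_expr -- stripping spaces and mapping function names to single chars -- follow directly from the code above; a small sketch (assumes sympy is installed alongside tensor2tensor):

import sympy

from tensor2tensor.data_generators import algorithmic_math as am

print(am.format_sympy_expr(sympy.simplify("x + x + y")))           # "2*x+y": spaces removed
print(am.format_sympy_expr("log(x) + x", functions={"log": "L"}))  # "L(x)+x": name mapped to char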
21,681
tensorflow/tensor2tensor
tensor2tensor/data_generators/algorithmic_math.py
generate_algebra_inverse_sample
def generate_algebra_inverse_sample(vlist, ops, solve_ops, min_depth, max_depth): """Randomly generate an algebra inverse dataset sample. Given an input equation and variable, produce the expression equal to the variable. Args: vlist: Variable list. List of chars that can be used in the expression. ops: List of ExprOp instances. The allowed operators for the expression. solve_ops: See `solve_ops` documentation in `algebra_inverse_solve`. min_depth: Expression trees will not have a smaller depth than this. 0 means there is just a variable. 1 means there is one operation. max_depth: Expression trees will not have a larger depth than this. To make all trees have the same depth, set this equal to `min_depth`. Returns: sample: String representation of the input. Will be of the form 'solve_var:left_side=right_side'. target: String representation of the solution. """ side = random.randrange(2) left_depth = random.randrange(min_depth if side else 0, max_depth + 1) right_depth = random.randrange(min_depth if not side else 0, max_depth + 1) var_index = random.randrange(len(vlist)) var = vlist[var_index] consts = vlist[:var_index] + vlist[var_index + 1:] left = random_expr_with_required_var(left_depth, var if side else None, consts, ops) right = random_expr_with_required_var(right_depth, var if not side else None, consts, ops) left_str = str(left) right_str = str(right) target = str(algebra_inverse_solve(left, right, var, solve_ops)) sample = "%s:%s=%s" % (var, left_str, right_str) return sample, target
python
def generate_algebra_inverse_sample(vlist, ops, solve_ops, min_depth, max_depth): """Randomly generate an algebra inverse dataset sample. Given an input equation and variable, produce the expression equal to the variable. Args: vlist: Variable list. List of chars that can be used in the expression. ops: List of ExprOp instances. The allowed operators for the expression. solve_ops: See `solve_ops` documentation in `algebra_inverse_solve`. min_depth: Expression trees will not have a smaller depth than this. 0 means there is just a variable. 1 means there is one operation. max_depth: Expression trees will not have a larger depth than this. To make all trees have the same depth, set this equal to `min_depth`. Returns: sample: String representation of the input. Will be of the form 'solve_var:left_side=right_side'. target: String representation of the solution. """ side = random.randrange(2) left_depth = random.randrange(min_depth if side else 0, max_depth + 1) right_depth = random.randrange(min_depth if not side else 0, max_depth + 1) var_index = random.randrange(len(vlist)) var = vlist[var_index] consts = vlist[:var_index] + vlist[var_index + 1:] left = random_expr_with_required_var(left_depth, var if side else None, consts, ops) right = random_expr_with_required_var(right_depth, var if not side else None, consts, ops) left_str = str(left) right_str = str(right) target = str(algebra_inverse_solve(left, right, var, solve_ops)) sample = "%s:%s=%s" % (var, left_str, right_str) return sample, target
[ "def", "generate_algebra_inverse_sample", "(", "vlist", ",", "ops", ",", "solve_ops", ",", "min_depth", ",", "max_depth", ")", ":", "side", "=", "random", ".", "randrange", "(", "2", ")", "left_depth", "=", "random", ".", "randrange", "(", "min_depth", "if", "side", "else", "0", ",", "max_depth", "+", "1", ")", "right_depth", "=", "random", ".", "randrange", "(", "min_depth", "if", "not", "side", "else", "0", ",", "max_depth", "+", "1", ")", "var_index", "=", "random", ".", "randrange", "(", "len", "(", "vlist", ")", ")", "var", "=", "vlist", "[", "var_index", "]", "consts", "=", "vlist", "[", ":", "var_index", "]", "+", "vlist", "[", "var_index", "+", "1", ":", "]", "left", "=", "random_expr_with_required_var", "(", "left_depth", ",", "var", "if", "side", "else", "None", ",", "consts", ",", "ops", ")", "right", "=", "random_expr_with_required_var", "(", "right_depth", ",", "var", "if", "not", "side", "else", "None", ",", "consts", ",", "ops", ")", "left_str", "=", "str", "(", "left", ")", "right_str", "=", "str", "(", "right", ")", "target", "=", "str", "(", "algebra_inverse_solve", "(", "left", ",", "right", ",", "var", ",", "solve_ops", ")", ")", "sample", "=", "\"%s:%s=%s\"", "%", "(", "var", ",", "left_str", ",", "right_str", ")", "return", "sample", ",", "target" ]
Randomly generate an algebra inverse dataset sample. Given an input equation and variable, produce the expression equal to the variable. Args: vlist: Variable list. List of chars that can be used in the expression. ops: List of ExprOp instances. The allowed operators for the expression. solve_ops: See `solve_ops` documentation in `algebra_inverse_solve`. min_depth: Expression trees will not have a smaller depth than this. 0 means there is just a variable. 1 means there is one operation. max_depth: Expression trees will not have a larger depth than this. To make all trees have the same depth, set this equal to `min_depth`. Returns: sample: String representation of the input. Will be of the form 'solve_var:left_side=right_side'. target: String representation of the solution.
[ "Randomly", "generate", "an", "algebra", "inverse", "dataset", "sample", "." ]
272500b6efe353aeb638d2745ed56e519462ca31
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/data_generators/algorithmic_math.py#L236-L274
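A hedged sketch of a single draw (the commented outputs show the shape of the result, not guaranteed values for this seed):

import random

from tensor2tensor.data_generators import algorithmic_math as am

random.seed(2)
cfg = am.math_dataset_init(6)
sample, target = am.generate_algebra_inverse_sample(
    cfg.vlist, list(cfg.ops.values()), cfg.solve_ops, 1, 2)
print(sample)  # "solve_var:left_side=right_side", e.g. "a:b+a=c"
print(target)  # the expression equal to the solve variable, e.g. "c-b"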
21,682
tensorflow/tensor2tensor
tensor2tensor/data_generators/algorithmic_math.py
generate_algebra_simplify_sample
def generate_algebra_simplify_sample(vlist, ops, min_depth, max_depth): """Randomly generate an algebra simplify dataset sample. Given an input expression, produce the simplified expression. Args: vlist: Variable list. List of chars that can be used in the expression. ops: List of ExprOp instances. The allowed operators for the expression. min_depth: Expression trees will not have a smaller depth than this. 0 means there is just a variable. 1 means there is one operation. max_depth: Expression trees will not have a larger depth than this. To make all trees have the same depth, set this equal to `min_depth`. Returns: sample: String representation of the input. target: String representation of the solution. """ depth = random.randrange(min_depth, max_depth + 1) expr = random_expr(depth, vlist, ops) sample = str(expr) target = format_sympy_expr(sympy.simplify(sample)) return sample, target
python
def generate_algebra_simplify_sample(vlist, ops, min_depth, max_depth): """Randomly generate an algebra simplify dataset sample. Given an input expression, produce the simplified expression. Args: vlist: Variable list. List of chars that can be used in the expression. ops: List of ExprOp instances. The allowed operators for the expression. min_depth: Expression trees will not have a smaller depth than this. 0 means there is just a variable. 1 means there is one operation. max_depth: Expression trees will not have a larger depth than this. To make all trees have the same depth, set this equal to `min_depth`. Returns: sample: String representation of the input. target: String representation of the solution. """ depth = random.randrange(min_depth, max_depth + 1) expr = random_expr(depth, vlist, ops) sample = str(expr) target = format_sympy_expr(sympy.simplify(sample)) return sample, target
[ "def", "generate_algebra_simplify_sample", "(", "vlist", ",", "ops", ",", "min_depth", ",", "max_depth", ")", ":", "depth", "=", "random", ".", "randrange", "(", "min_depth", ",", "max_depth", "+", "1", ")", "expr", "=", "random_expr", "(", "depth", ",", "vlist", ",", "ops", ")", "sample", "=", "str", "(", "expr", ")", "target", "=", "format_sympy_expr", "(", "sympy", ".", "simplify", "(", "sample", ")", ")", "return", "sample", ",", "target" ]
Randomly generate an algebra simplify dataset sample. Given an input expression, produce the simplified expression. Args: vlist: Variable list. List of chars that can be used in the expression. ops: List of ExprOp instances. The allowed operators for the expression. min_depth: Expression trees will not have a smaller depth than this. 0 means there is just a variable. 1 means there is one operation. max_depth: Expression trees will not have a larger depth than this. To make all trees have the same depth, set this equal to `min_depth`. Returns: sample: String representation of the input. target: String representation of the solution.
[ "Randomly", "generate", "an", "algebra", "simplify", "dataset", "sample", "." ]
272500b6efe353aeb638d2745ed56e519462ca31
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/data_generators/algorithmic_math.py#L277-L299
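The simplify variant is driven the same way; a hedged sketch:

import random

from tensor2tensor.data_generators import algorithmic_math as am

random.seed(3)
cfg = am.math_dataset_init(6, digits=5)  # digits matter once sympy emits numeric coefficients
sample, target = am.generate_algebra_simplify_sample(
    cfg.vlist, list(cfg.ops.values()), 1, 3)
print(sample, "->", target)  # e.g. an input like "b+b" simplifying to "2*b"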
21,683
tensorflow/tensor2tensor
tensor2tensor/data_generators/algorithmic_math.py
generate_calculus_integrate_sample
def generate_calculus_integrate_sample(vlist, ops, min_depth, max_depth, functions): """Randomly generate a symbolic integral dataset sample. Given an input expression, produce the indefinite integral. Args: vlist: Variable list. List of chars that can be used in the expression. ops: List of ExprOp instances. The allowed operators for the expression. min_depth: Expression trees will not have a smaller depth than this. 0 means there is just a variable. 1 means there is one operation. max_depth: Expression trees will not have a larger depth than this. To make all trees have the same depth, set this equal to `min_depth`. functions: Defines special functions. A dict mapping human readable string names, like "log", "exp", "sin", "cos", etc., to single chars. Each function gets a unique token, like "L" for "log". Returns: sample: String representation of the input. Will be of the form 'var:expression'. target: String representation of the solution. """ var_index = random.randrange(len(vlist)) var = vlist[var_index] consts = vlist[:var_index] + vlist[var_index + 1:] depth = random.randrange(min_depth, max_depth + 1) expr = random_expr_with_required_var(depth, var, consts, ops) expr_str = str(expr) sample = var + ":" + expr_str target = format_sympy_expr( sympy.integrate(expr_str, sympy.Symbol(var)), functions=functions) return sample, target
python
def generate_calculus_integrate_sample(vlist, ops, min_depth, max_depth, functions): """Randomly generate a symbolic integral dataset sample. Given an input expression, produce the indefinite integral. Args: vlist: Variable list. List of chars that can be used in the expression. ops: List of ExprOp instances. The allowed operators for the expression. min_depth: Expression trees will not have a smaller depth than this. 0 means there is just a variable. 1 means there is one operation. max_depth: Expression trees will not have a larger depth than this. To make all trees have the same depth, set this equal to `min_depth`. functions: Defines special functions. A dict mapping human readable string names, like "log", "exp", "sin", "cos", etc., to single chars. Each function gets a unique token, like "L" for "log". Returns: sample: String representation of the input. Will be of the form 'var:expression'. target: String representation of the solution. """ var_index = random.randrange(len(vlist)) var = vlist[var_index] consts = vlist[:var_index] + vlist[var_index + 1:] depth = random.randrange(min_depth, max_depth + 1) expr = random_expr_with_required_var(depth, var, consts, ops) expr_str = str(expr) sample = var + ":" + expr_str target = format_sympy_expr( sympy.integrate(expr_str, sympy.Symbol(var)), functions=functions) return sample, target
[ "def", "generate_calculus_integrate_sample", "(", "vlist", ",", "ops", ",", "min_depth", ",", "max_depth", ",", "functions", ")", ":", "var_index", "=", "random", ".", "randrange", "(", "len", "(", "vlist", ")", ")", "var", "=", "vlist", "[", "var_index", "]", "consts", "=", "vlist", "[", ":", "var_index", "]", "+", "vlist", "[", "var_index", "+", "1", ":", "]", "depth", "=", "random", ".", "randrange", "(", "min_depth", ",", "max_depth", "+", "1", ")", "expr", "=", "random_expr_with_required_var", "(", "depth", ",", "var", ",", "consts", ",", "ops", ")", "expr_str", "=", "str", "(", "expr", ")", "sample", "=", "var", "+", "\":\"", "+", "expr_str", "target", "=", "format_sympy_expr", "(", "sympy", ".", "integrate", "(", "expr_str", ",", "sympy", ".", "Symbol", "(", "var", ")", ")", ",", "functions", "=", "functions", ")", "return", "sample", ",", "target" ]
Randomly generate a symbolic integral dataset sample. Given an input expression, produce the indefinite integral. Args: vlist: Variable list. List of chars that can be used in the expression. ops: List of ExprOp instances. The allowed operators for the expression. min_depth: Expression trees will not have a smaller depth than this. 0 means there is just a variable. 1 means there is one operation. max_depth: Expression trees will not have a larger depth than this. To make all trees have the same depth, set this equal to `min_depth`. functions: Defines special functions. A dict mapping human readable string names, like "log", "exp", "sin", "cos", etc., to single chars. Each function gets a unique token, like "L" for "log". Returns: sample: String representation of the input. Will be of the form 'var:expression'. target: String representation of the solution.
[ "Randomly", "generate", "a", "symbolic", "integral", "dataset", "sample", "." ]
272500b6efe353aeb638d2745ed56e519462ca31
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/data_generators/algorithmic_math.py#L302-L335
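A hedged sketch of one integration draw. Note that sympy.integrate can fail on some random expressions, which is why the calculus_integrate generator further down wraps this call in a try/except and retries:

import random

from tensor2tensor.data_generators import algorithmic_math as am

random.seed(4)
cfg = am.math_dataset_init(6, digits=5, functions={"log": "L"})
sample, target = am.generate_calculus_integrate_sample(
    cfg.vlist, list(cfg.ops.values()), 0, 2, cfg.functions)
print(sample, "->", target)  # "var:expression" -> integral, with "log" rendered as "L"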
21,684
tensorflow/tensor2tensor
tensor2tensor/data_generators/algorithmic_math.py
algebra_inverse
def algebra_inverse(alphabet_size=26, min_depth=0, max_depth=2, nbr_cases=10000): """Generate the algebra inverse dataset. Each sample is a symbolic math equation involving unknown variables. The task is to solve for the given variable. The target is the resulting expression. Args: alphabet_size: How many possible variables there are. Max 52. min_depth: Minimum depth of the expression trees on both sides of the equals sign in the equation. max_depth: Maximum depth of the expression trees on both sides of the equals sign in the equation. nbr_cases: The number of cases to generate. Yields: A dictionary {"inputs": input-list, "targets": target-list} where input-list is the list of tokens encoding the variable to solve for and the math equation, and target-list is a list of tokens encoding the resulting math expression after solving for the variable. Raises: ValueError: If `max_depth` < `min_depth`. """ if max_depth < min_depth: raise ValueError("max_depth must be greater than or equal to min_depth. " "Got max_depth=%s, min_depth=%s" % (max_depth, min_depth)) alg_cfg = math_dataset_init(alphabet_size) for _ in range(nbr_cases): sample, target = generate_algebra_inverse_sample( alg_cfg.vlist, list(alg_cfg.ops.values()), alg_cfg.solve_ops, min_depth, max_depth) yield { "inputs": alg_cfg.int_encoder(sample), "targets": alg_cfg.int_encoder(target) }
python
def algebra_inverse(alphabet_size=26, min_depth=0, max_depth=2, nbr_cases=10000): """Generate the algebra inverse dataset. Each sample is a symbolic math equation involving unknown variables. The task is to solve for the given variable. The target is the resulting expression. Args: alphabet_size: How many possible variables there are. Max 52. min_depth: Minimum depth of the expression trees on both sides of the equals sign in the equation. max_depth: Maximum depth of the expression trees on both sides of the equals sign in the equation. nbr_cases: The number of cases to generate. Yields: A dictionary {"inputs": input-list, "targets": target-list} where input-list is the list of tokens encoding the variable to solve for and the math equation, and target-list is a list of tokens encoding the resulting math expression after solving for the variable. Raises: ValueError: If `max_depth` < `min_depth`. """ if max_depth < min_depth: raise ValueError("max_depth must be greater than or equal to min_depth. " "Got max_depth=%s, min_depth=%s" % (max_depth, min_depth)) alg_cfg = math_dataset_init(alphabet_size) for _ in range(nbr_cases): sample, target = generate_algebra_inverse_sample( alg_cfg.vlist, list(alg_cfg.ops.values()), alg_cfg.solve_ops, min_depth, max_depth) yield { "inputs": alg_cfg.int_encoder(sample), "targets": alg_cfg.int_encoder(target) }
[ "def", "algebra_inverse", "(", "alphabet_size", "=", "26", ",", "min_depth", "=", "0", ",", "max_depth", "=", "2", ",", "nbr_cases", "=", "10000", ")", ":", "if", "max_depth", "<", "min_depth", ":", "raise", "ValueError", "(", "\"max_depth must be greater than or equal to min_depth. \"", "\"Got max_depth=%s, min_depth=%s\"", "%", "(", "max_depth", ",", "min_depth", ")", ")", "alg_cfg", "=", "math_dataset_init", "(", "alphabet_size", ")", "for", "_", "in", "range", "(", "nbr_cases", ")", ":", "sample", ",", "target", "=", "generate_algebra_inverse_sample", "(", "alg_cfg", ".", "vlist", ",", "list", "(", "alg_cfg", ".", "ops", ".", "values", "(", ")", ")", ",", "alg_cfg", ".", "solve_ops", ",", "min_depth", ",", "max_depth", ")", "yield", "{", "\"inputs\"", ":", "alg_cfg", ".", "int_encoder", "(", "sample", ")", ",", "\"targets\"", ":", "alg_cfg", ".", "int_encoder", "(", "target", ")", "}" ]
Generate the algebra inverse dataset. Each sample is a symbolic math equation involving unknown variables. The task is to solve for the given variable. The target is the resulting expression. Args: alphabet_size: How many possible variables there are. Max 52. min_depth: Minimum depth of the expression trees on both sides of the equals sign in the equation. max_depth: Maximum depth of the expression trees on both sides of the equals sign in the equation. nbr_cases: The number of cases to generate. Yields: A dictionary {"inputs": input-list, "targets": target-list} where input-list is the list of tokens encoding the variable to solve for and the math equation, and target-list is a list of tokens encoding the resulting math expression after solving for the variable. Raises: ValueError: If `max_depth` < `min_depth`.
[ "Generate", "the", "algebra", "inverse", "dataset", "." ]
272500b6efe353aeb638d2745ed56e519462ca31
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/data_generators/algorithmic_math.py#L439-L477
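Because algebra_inverse is a generator yielding encoded cases lazily, it can be sampled without materializing the full dataset; a sketch:

import itertools

from tensor2tensor.data_generators import algorithmic_math as am

# inputs/targets are lists of ints produced by alg_cfg.int_encoder.
for case in itertools.islice(am.algebra_inverse(alphabet_size=26, nbr_cases=10), 3):
    print(case["inputs"], case["targets"])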
21,685
tensorflow/tensor2tensor
tensor2tensor/data_generators/algorithmic_math.py
algebra_simplify
def algebra_simplify(alphabet_size=26, min_depth=0, max_depth=2, nbr_cases=10000): """Generate the algebra simplify dataset. Each sample is a symbolic math expression involving unknown variables. The task is to simplify the expression. The target is the resulting expression. Args: alphabet_size: How many possible variables there are. Max 52. min_depth: Minimum depth of the expression tree. max_depth: Maximum depth of the expression tree. nbr_cases: The number of cases to generate. Yields: A dictionary {"inputs": input-list, "targets": target-list} where input-list is the list of tokens encoding the expression to simplify, and target-list is a list of tokens encoding the resulting math expression after simplifying. Raises: ValueError: If `max_depth` < `min_depth`. """ if max_depth < min_depth: raise ValueError("max_depth must be greater than or equal to min_depth. " "Got max_depth=%s, min_depth=%s" % (max_depth, min_depth)) alg_cfg = math_dataset_init(alphabet_size, digits=5) for _ in range(nbr_cases): sample, target = generate_algebra_simplify_sample( alg_cfg.vlist, list(alg_cfg.ops.values()), min_depth, max_depth) yield { "inputs": alg_cfg.int_encoder(sample), "targets": alg_cfg.int_encoder(target) }
python
def algebra_simplify(alphabet_size=26, min_depth=0, max_depth=2, nbr_cases=10000): """Generate the algebra simplify dataset. Each sample is a symbolic math expression involving unknown variables. The task is to simplify the expression. The target is the resulting expression. Args: alphabet_size: How many possible variables there are. Max 52. min_depth: Minimum depth of the expression tree. max_depth: Maximum depth of the expression tree. nbr_cases: The number of cases to generate. Yields: A dictionary {"inputs": input-list, "targets": target-list} where input-list is the list of tokens encoding the expression to simplify, and target-list is a list of tokens encoding the resulting math expression after simplifying. Raises: ValueError: If `max_depth` < `min_depth`. """ if max_depth < min_depth: raise ValueError("max_depth must be greater than or equal to min_depth. " "Got max_depth=%s, min_depth=%s" % (max_depth, min_depth)) alg_cfg = math_dataset_init(alphabet_size, digits=5) for _ in range(nbr_cases): sample, target = generate_algebra_simplify_sample( alg_cfg.vlist, list(alg_cfg.ops.values()), min_depth, max_depth) yield { "inputs": alg_cfg.int_encoder(sample), "targets": alg_cfg.int_encoder(target) }
[ "def", "algebra_simplify", "(", "alphabet_size", "=", "26", ",", "min_depth", "=", "0", ",", "max_depth", "=", "2", ",", "nbr_cases", "=", "10000", ")", ":", "if", "max_depth", "<", "min_depth", ":", "raise", "ValueError", "(", "\"max_depth must be greater than or equal to min_depth. \"", "\"Got max_depth=%s, min_depth=%s\"", "%", "(", "max_depth", ",", "min_depth", ")", ")", "alg_cfg", "=", "math_dataset_init", "(", "alphabet_size", ",", "digits", "=", "5", ")", "for", "_", "in", "range", "(", "nbr_cases", ")", ":", "sample", ",", "target", "=", "generate_algebra_simplify_sample", "(", "alg_cfg", ".", "vlist", ",", "list", "(", "alg_cfg", ".", "ops", ".", "values", "(", ")", ")", ",", "min_depth", ",", "max_depth", ")", "yield", "{", "\"inputs\"", ":", "alg_cfg", ".", "int_encoder", "(", "sample", ")", ",", "\"targets\"", ":", "alg_cfg", ".", "int_encoder", "(", "target", ")", "}" ]
Generate the algebra simplify dataset. Each sample is a symbolic math expression involving unknown variables. The task is to simplify the expression. The target is the resulting expression. Args: alphabet_size: How many possible variables there are. Max 52. min_depth: Minimum depth of the expression tree. max_depth: Maximum depth of the expression tree. nbr_cases: The number of cases to generate. Yields: A dictionary {"inputs": input-list, "targets": target-list} where input-list is the list of tokens encoding the expression to simplify, and target-list is a list of tokens encoding the resulting math expression after simplifying. Raises: ValueError: If `max_depth` < `min_depth`.
[ "Generate", "the", "algebra", "simplify", "dataset", "." ]
272500b6efe353aeb638d2745ed56e519462ca31
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/data_generators/algorithmic_math.py#L480-L517
21,686
tensorflow/tensor2tensor
tensor2tensor/data_generators/algorithmic_math.py
calculus_integrate
def calculus_integrate(alphabet_size=26, min_depth=0, max_depth=2, nbr_cases=10000): """Generate the calculus integrate dataset. Each sample is a symbolic math expression involving unknown variables. The task is to take the indefinite integral of the expression. The target is the resulting expression. Args: alphabet_size: How many possible variables there are. Max 26. min_depth: Minimum depth of the expression tree. max_depth: Maximum depth of the expression tree. nbr_cases: The number of cases to generate. Yields: A dictionary {"inputs": input-list, "targets": target-list} where input-list is the list of tokens encoding the variable to integrate with respect to and the expression to integrate, and target-list is a list of tokens encoding the resulting math expression after integrating. Raises: ValueError: If `max_depth` < `min_depth`, or if alphabet_size > 26. """ if max_depth < min_depth: raise ValueError("max_depth must be greater than or equal to min_depth. " "Got max_depth=%s, min_depth=%s" % (max_depth, min_depth)) # Don't allow alphabet to use capital letters. Those are reserved for function # names. if alphabet_size > 26: raise ValueError( "alphabet_size must not be greater than 26. Got %s." % alphabet_size) functions = {"log": "L"} alg_cfg = math_dataset_init(alphabet_size, digits=5, functions=functions) nbr_case = 0 while nbr_case < nbr_cases: try: sample, target = generate_calculus_integrate_sample( alg_cfg.vlist, list(alg_cfg.ops.values()), min_depth, max_depth, alg_cfg.functions) yield { "inputs": alg_cfg.int_encoder(sample), "targets": alg_cfg.int_encoder(target) } except: # pylint:disable=bare-except continue if nbr_case % 10000 == 0: print(" calculus_integrate: generating case %d." % nbr_case) nbr_case += 1
python
def calculus_integrate(alphabet_size=26, min_depth=0, max_depth=2, nbr_cases=10000): """Generate the calculus integrate dataset. Each sample is a symbolic math expression involving unknown variables. The task is to take the indefinite integral of the expression. The target is the resulting expression. Args: alphabet_size: How many possible variables there are. Max 26. min_depth: Minimum depth of the expression tree. max_depth: Maximum depth of the expression tree. nbr_cases: The number of cases to generate. Yields: A dictionary {"inputs": input-list, "targets": target-list} where input-list is the list of tokens encoding the variable to integrate with respect to and the expression to integrate, and target-list is a list of tokens encoding the resulting math expression after integrating. Raises: ValueError: If `max_depth` < `min_depth`, or if alphabet_size > 26. """ if max_depth < min_depth: raise ValueError("max_depth must be greater than or equal to min_depth. " "Got max_depth=%s, min_depth=%s" % (max_depth, min_depth)) # Don't allow alphabet to use capital letters. Those are reserved for function # names. if alphabet_size > 26: raise ValueError( "alphabet_size must not be greater than 26. Got %s." % alphabet_size) functions = {"log": "L"} alg_cfg = math_dataset_init(alphabet_size, digits=5, functions=functions) nbr_case = 0 while nbr_case < nbr_cases: try: sample, target = generate_calculus_integrate_sample( alg_cfg.vlist, list(alg_cfg.ops.values()), min_depth, max_depth, alg_cfg.functions) yield { "inputs": alg_cfg.int_encoder(sample), "targets": alg_cfg.int_encoder(target) } except: # pylint:disable=bare-except continue if nbr_case % 10000 == 0: print(" calculus_integrate: generating case %d." % nbr_case) nbr_case += 1
[ "def", "calculus_integrate", "(", "alphabet_size", "=", "26", ",", "min_depth", "=", "0", ",", "max_depth", "=", "2", ",", "nbr_cases", "=", "10000", ")", ":", "if", "max_depth", "<", "min_depth", ":", "raise", "ValueError", "(", "\"max_depth must be greater than or equal to min_depth. \"", "\"Got max_depth=%s, min_depth=%s\"", "%", "(", "max_depth", ",", "min_depth", ")", ")", "# Don't allow alphabet to use capital letters. Those are reserved for function", "# names.", "if", "alphabet_size", ">", "26", ":", "raise", "ValueError", "(", "\"alphabet_size must not be greater than 26. Got %s.\"", "%", "alphabet_size", ")", "functions", "=", "{", "\"log\"", ":", "\"L\"", "}", "alg_cfg", "=", "math_dataset_init", "(", "alphabet_size", ",", "digits", "=", "5", ",", "functions", "=", "functions", ")", "nbr_case", "=", "0", "while", "nbr_case", "<", "nbr_cases", ":", "try", ":", "sample", ",", "target", "=", "generate_calculus_integrate_sample", "(", "alg_cfg", ".", "vlist", ",", "list", "(", "alg_cfg", ".", "ops", ".", "values", "(", ")", ")", ",", "min_depth", ",", "max_depth", ",", "alg_cfg", ".", "functions", ")", "yield", "{", "\"inputs\"", ":", "alg_cfg", ".", "int_encoder", "(", "sample", ")", ",", "\"targets\"", ":", "alg_cfg", ".", "int_encoder", "(", "target", ")", "}", "except", ":", "# pylint:disable=bare-except", "continue", "if", "nbr_case", "%", "10000", "==", "0", ":", "print", "(", "\" calculus_integrate: generating case %d.\"", "%", "nbr_case", ")", "nbr_case", "+=", "1" ]
Generate the calculus integrate dataset. Each sample is a symbolic math expression involving unknown variables. The task is to take the indefinite integral of the expression. The target is the resulting expression. Args: alphabet_size: How many possible variables there are. Max 26. min_depth: Minimum depth of the expression tree. max_depth: Maximum depth of the expression tree. nbr_cases: The number of cases to generate. Yields: A dictionary {"inputs": input-list, "targets": target-list} where input-list is the list of tokens encoding the variable to integrate with respect to and the expression to integrate, and target-list is a list of tokens encoding the resulting math expression after integrating. Raises: ValueError: If `max_depth` < `min_depth`, or if alphabet_size > 26.
[ "Generate", "the", "calculus", "integrate", "dataset", "." ]
272500b6efe353aeb638d2745ed56e519462ca31
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/data_generators/algorithmic_math.py#L520-L573
21,687
tensorflow/tensor2tensor
tensor2tensor/data_generators/algorithmic_math.py
ExprNode.is_in
def is_in(self, expr): """Returns True if `expr` is a subtree.""" if expr == self: return True is_in_left = is_in_expr(self.left, expr) is_in_right = is_in_expr(self.right, expr) return is_in_left or is_in_right
python
def is_in(self, expr): """Returns True if `expr` is a subtree.""" if expr == self: return True is_in_left = is_in_expr(self.left, expr) is_in_right = is_in_expr(self.right, expr) return is_in_left or is_in_right
[ "def", "is_in", "(", "self", ",", "expr", ")", ":", "if", "expr", "==", "self", ":", "return", "True", "is_in_left", "=", "is_in_expr", "(", "self", ".", "left", ",", "expr", ")", "is_in_right", "=", "is_in_expr", "(", "self", ".", "right", ",", "expr", ")", "return", "is_in_left", "or", "is_in_right" ]
Returns True if `expr` is a subtree.
[ "Returns", "True", "if", "expr", "is", "a", "subtree", "." ]
272500b6efe353aeb638d2745ed56e519462ca31
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/data_generators/algorithmic_math.py#L81-L87
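ExprNode.is_in delegates to the module-level is_in_expr for its left and right children, so leaves (plain chars) and subtrees are handled uniformly; a sketch on a hand-built tree (hedged: it assumes ExprNode's equality against strings behaves as the solver's `while from_tree != var` loop implies):

from tensor2tensor.data_generators import algorithmic_math as am

cfg = am.math_dataset_init(4)
op = list(cfg.ops.values())[0]  # any ExprOp instance from the config will do
tree = am.ExprNode("a", am.ExprNode("b", "c", op), op)
print(am.is_in_expr(tree, "c"))  # True: "c" is a leaf of the right subtree
print(am.is_in_expr(tree, "z"))  # False: "z" appears nowhere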
21,688
tensorflow/tensor2tensor
tensor2tensor/data_generators/problem.py
preprocess_example_common
def preprocess_example_common(example, mode, hparams): """Preprocessing steps common to all models.""" if "inputs" in example and hparams.max_input_seq_length > 0: example["inputs"] = example["inputs"][:hparams.max_input_seq_length] if hparams.prepend_mode != "none": if mode == tf.estimator.ModeKeys.PREDICT: example["partial_targets"] = tf.concat([example["inputs"], [0]], 0) else: example["targets"] = tf.concat( [example["inputs"], [0], example["targets"]], 0) if "targets" in example and hparams.max_target_seq_length > 0: example["targets"] = example["targets"][:hparams.max_target_seq_length] if hparams.split_to_length: new_example = {} for k, v in six.iteritems(example): if k == "targets" or k == "inputs": new_example[k] = tf.reshape(v, [-1, hparams.split_to_length, 1, 1]) else: tf.logging.warning("Dropping feature %s" % k) return tf.data.Dataset.from_tensor_slices(new_example) return example
python
def preprocess_example_common(example, mode, hparams): """Preprocessing steps common to all models.""" if "inputs" in example and hparams.max_input_seq_length > 0: example["inputs"] = example["inputs"][:hparams.max_input_seq_length] if hparams.prepend_mode != "none": if mode == tf.estimator.ModeKeys.PREDICT: example["partial_targets"] = tf.concat([example["inputs"], [0]], 0) else: example["targets"] = tf.concat( [example["inputs"], [0], example["targets"]], 0) if "targets" in example and hparams.max_target_seq_length > 0: example["targets"] = example["targets"][:hparams.max_target_seq_length] if hparams.split_to_length: new_example = {} for k, v in six.iteritems(example): if k == "targets" or k == "inputs": new_example[k] = tf.reshape(v, [-1, hparams.split_to_length, 1, 1]) else: tf.logging.warning("Dropping feature %s" % k) return tf.data.Dataset.from_tensor_slices(new_example) return example
[ "def", "preprocess_example_common", "(", "example", ",", "mode", ",", "hparams", ")", ":", "if", "\"inputs\"", "in", "example", "and", "hparams", ".", "max_input_seq_length", ">", "0", ":", "example", "[", "\"inputs\"", "]", "=", "example", "[", "\"inputs\"", "]", "[", ":", "hparams", ".", "max_input_seq_length", "]", "if", "hparams", ".", "prepend_mode", "!=", "\"none\"", ":", "if", "mode", "==", "tf", ".", "estimator", ".", "ModeKeys", ".", "PREDICT", ":", "example", "[", "\"partial_targets\"", "]", "=", "tf", ".", "concat", "(", "[", "example", "[", "\"inputs\"", "]", ",", "[", "0", "]", "]", ",", "0", ")", "else", ":", "example", "[", "\"targets\"", "]", "=", "tf", ".", "concat", "(", "[", "example", "[", "\"inputs\"", "]", ",", "[", "0", "]", ",", "example", "[", "\"targets\"", "]", "]", ",", "0", ")", "if", "\"targets\"", "in", "example", "and", "hparams", ".", "max_target_seq_length", ">", "0", ":", "example", "[", "\"targets\"", "]", "=", "example", "[", "\"targets\"", "]", "[", ":", "hparams", ".", "max_target_seq_length", "]", "if", "hparams", ".", "split_to_length", ":", "new_example", "=", "{", "}", "for", "k", ",", "v", "in", "six", ".", "iteritems", "(", "example", ")", ":", "if", "k", "==", "\"targets\"", "or", "k", "==", "\"inputs\"", ":", "new_example", "[", "k", "]", "=", "tf", ".", "reshape", "(", "v", ",", "[", "-", "1", ",", "hparams", ".", "split_to_length", ",", "1", ",", "1", "]", ")", "else", ":", "tf", ".", "logging", ".", "warning", "(", "\"Dropping feature %s\"", "%", "k", ")", "return", "tf", ".", "data", ".", "Dataset", ".", "from_tensor_slices", "(", "new_example", ")", "return", "example" ]
Preprocessing steps common to all models.
[ "Preprocessing", "steps", "common", "to", "all", "models", "." ]
272500b6efe353aeb638d2745ed56e519462ca31
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/data_generators/problem.py#L142-L162
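A hedged sketch of the input-trimming path of preprocess_example_common. The SimpleNamespace is a hypothetical stand-in carrying only the hparams fields the function reads; real callers pass a full HParams object:

import types

import tensorflow as tf  # TF1-style API, as used throughout these records
from tensor2tensor.data_generators import problem as problem_lib

hp = types.SimpleNamespace(max_input_seq_length=3, max_target_seq_length=0,
                           prepend_mode="none", split_to_length=0)
ex = {"inputs": tf.constant([4, 8, 15, 16, 23]), "targets": tf.constant([42, 1])}
out = problem_lib.preprocess_example_common(ex, tf.estimator.ModeKeys.TRAIN, hp)
# out["inputs"] is now a length-3 tensor; all other features pass through unchanged.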
21,689
tensorflow/tensor2tensor
tensor2tensor/data_generators/problem.py
_copy_problem_hparams
def _copy_problem_hparams(p_hparams): """Use input modality, vocab, and space id for target.""" p = p_hparams # Duplicate input modality. p.modality["targets"] = p.modality["inputs"] # Duplicate input vocab size. p.vocab_size["targets"] = p.vocab_size["inputs"] # Duplicate input vocabulary. p.vocabulary["targets"] = p.vocabulary["inputs"] # Duplicate input space ids. p.target_space_id = p.input_space_id # Mark that p was copied. p.was_copy = True
python
def _copy_problem_hparams(p_hparams): """Use input modality, vocab, and space id for target.""" p = p_hparams # Duplicate input modality. p.modality["targets"] = p.modality["inputs"] # Duplicate input vocab size. p.vocab_size["targets"] = p.vocab_size["inputs"] # Duplicate input vocabulary. p.vocabulary["targets"] = p.vocabulary["inputs"] # Duplicate input space ids. p.target_space_id = p.input_space_id # Mark that p was copied. p.was_copy = True
[ "def", "_copy_problem_hparams", "(", "p_hparams", ")", ":", "p", "=", "p_hparams", "# Duplicate input modality.", "p", ".", "modality", "[", "\"targets\"", "]", "=", "p", ".", "modality", "[", "\"inputs\"", "]", "# Duplicate input vocab size.", "p", ".", "vocab_size", "[", "\"targets\"", "]", "=", "p", ".", "vocab_size", "[", "\"inputs\"", "]", "# Duplicate input vocabulary.", "p", ".", "vocabulary", "[", "\"targets\"", "]", "=", "p", ".", "vocabulary", "[", "\"inputs\"", "]", "# Duplicate input space ids.", "p", ".", "target_space_id", "=", "p", ".", "input_space_id", "# Mark that p was reversed.", "p", ".", "was_copy", "=", "True" ]
Use input modality, vocab, and space id for target.
[ "Use", "input", "modality", "vocab", "and", "space", "id", "for", "target", "." ]
272500b6efe353aeb638d2745ed56e519462ca31
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/data_generators/problem.py#L947-L959
21,690
tensorflow/tensor2tensor
tensor2tensor/data_generators/problem.py
_default_hparams
def _default_hparams(): """A set of basic model hyperparameters.""" return hparam.HParams( # Use this parameter to get comparable perplexity numbers with different # tokenizations. This value should be set to the ratio of the number of # tokens in the test set according to the tokenization used to the number # of tokens in the test set in the "official" tokenization. For # example, if we are using a word-piece based model and we want to # compute per-word perplexity, then we set loss_multiplier to the number # of wordpieces per word in the test set. loss_multiplier=1.0, # Use this parameter to allow for larger sequences in the batch. Without # the use of this parameter, the size of the inner two dimensions will # be used to judge the sequence length. batch_size_multiplier=1, # During inference for autoregressive problems, if the batch_size is 1, # the inference will stop when the model predicts a text_encoder.EOS_ID # token. stop_at_eos=False, # Modalities used to map from features to a space compatible with # chosen model architecture. It comprises key-value pairs of a feature # name (str) and its modality type. modality={}, vocab_size={}, # Identifiers used to tell the model which input/target space will be # expected. For example, it can tell that we expect French as characters # as output, or Spanish as sound. Spaces defined as constants in SpaceID # class. input_space_id=SpaceID.GENERIC, target_space_id=SpaceID.GENERIC)
python
def _default_hparams(): """A set of basic model hyperparameters.""" return hparam.HParams( # Use this parameter to get comparable perplexity numbers with different # tokenizations. This value should be set to the ratio of the number of # tokens in the test set according to the tokenization used to the number # of tokens in the test set in the "official" tokenization. For # example, if we are using a word-piece based model and we want to # compute per-word perplexity, then we set loss_multiplier to the number # of wordpieces per word in the test set. loss_multiplier=1.0, # Use this parameter to allow for larger sequences in the batch. Without # the use of this parameter, the size of the inner two dimensions will # be used to judge the sequence length. batch_size_multiplier=1, # During inference for autoregressive problems, if the batch_size is 1, # the inference will stop when the model predicts a text_encoder.EOS_ID # token. stop_at_eos=False, # Modalities used to map from features to a space compatible with # chosen model architecture. It comprises key-value pairs of a feature # name (str) and its modality type. modality={}, vocab_size={}, # Identifiers used to tell the model which input/target space will be # expected. For example, it can tell that we expect French as characters # as output, or Spanish as sound. Spaces defined as constants in SpaceID # class. input_space_id=SpaceID.GENERIC, target_space_id=SpaceID.GENERIC)
[ "def", "_default_hparams", "(", ")", ":", "return", "hparam", ".", "HParams", "(", "# Use this parameter to get comparable perplexity numbers with different", "# tokenizations. This value should be set to the ratio of the number of", "# tokens in the test set according to the tokenization used to the number", "# of tokens in the test set in the \"official\" tokenization. For", "# example, if we are using a word-piece based model and we want to", "# compute per-word perplexity, then we set loss_multiplier to the number", "# of wordpieces per word in the test set.", "loss_multiplier", "=", "1.0", ",", "# Use this parameter to allow for larger sequences in the batch. Without", "# the use of this parameter, the size of the inner two dimensions will", "# be used to judge the sequence length.", "batch_size_multiplier", "=", "1", ",", "# During inference for autoregressive problems, if the batch_size is 1,", "# the inference will stop when the model predict a text_encoder.EOS_ID", "# token.", "stop_at_eos", "=", "False", ",", "# Modalities used to map from features to a space compatible with", "# chosen model architecture. It comprises key-value pairs of a feature", "# name (str) and its modality type.", "modality", "=", "{", "}", ",", "vocab_size", "=", "{", "}", ",", "# Identifiers used to tell the model which input/target space will be", "# expected. For example, it can tell that we expect French as characters", "# as output, or Spanish as sound. Spaces defined as constants in SpaceID", "# class.", "input_space_id", "=", "SpaceID", ".", "GENERIC", ",", "target_space_id", "=", "SpaceID", ".", "GENERIC", ")" ]
A set of basic model hyperparameters.
[ "A", "set", "of", "basic", "model", "hyperparameters", "." ]
272500b6efe353aeb638d2745ed56e519462ca31
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/data_generators/problem.py#L1017-L1050
21,691
tensorflow/tensor2tensor
tensor2tensor/data_generators/problem.py
Problem.tpu_batch_size_per_shard
def tpu_batch_size_per_shard(self, model_hparams): """Batch size in examples per TPU core. Args: model_hparams: model hyperparameters Returns: an integer """ if self.batch_size_means_tokens and not model_hparams.use_fixed_batch_size: return model_hparams.batch_size // self.max_length(model_hparams) else: return model_hparams.batch_size
python
def tpu_batch_size_per_shard(self, model_hparams): """Batch size in examples per TPU core. Args: model_hparams: model hyperparameters Returns: an integer """ if self.batch_size_means_tokens and not model_hparams.use_fixed_batch_size: return model_hparams.batch_size // self.max_length(model_hparams) else: return model_hparams.batch_size
[ "def", "tpu_batch_size_per_shard", "(", "self", ",", "model_hparams", ")", ":", "if", "self", ".", "batch_size_means_tokens", "and", "not", "model_hparams", ".", "use_fixed_batch_size", ":", "return", "model_hparams", ".", "batch_size", "//", "self", ".", "max_length", "(", "model_hparams", ")", "else", ":", "return", "model_hparams", ".", "batch_size" ]
Batch size in examples per TPU core. Args: model_hparams: model hyperparameters Returns: an integer
[ "Batch", "size", "in", "examples", "per", "TPU", "core", "." ]
272500b6efe353aeb638d2745ed56e519462ca31
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/data_generators/problem.py#L272-L283
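The token-to-example conversion above is plain integer division; a worked example:

# With batch_size_means_tokens=True and use_fixed_batch_size off, a budget of
# 4096 tokens per core at max_length 256 yields whole examples:
batch_size, max_length = 4096, 256
print(batch_size // max_length)  # 16 examples per TPU core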
21,692
tensorflow/tensor2tensor
tensor2tensor/data_generators/problem.py
Problem.preprocess
def preprocess(self, dataset, mode, hparams, interleave=True): """Runtime preprocessing on the whole dataset. Return a tf.data.Dataset -- the preprocessed version of the given one. By default this function calls preprocess_example. Args: dataset: the Dataset of already decoded but not yet preprocessed features. mode: tf.estimator.ModeKeys hparams: HParams, model hyperparameters interleave: bool, whether to use parallel_interleave, which is faster but will alter the order of samples non-deterministically, or flat_map, which is slower but will preserve the sample order. Returns: a Dataset """ def _preprocess(example): examples = self.preprocess_example(example, mode, hparams) if not isinstance(examples, tf.data.Dataset): examples = tf.data.Dataset.from_tensors(examples) return examples if interleave: dataset = dataset.apply( tf.data.experimental.parallel_interleave( _preprocess, sloppy=True, cycle_length=8)) else: dataset = dataset.flat_map(_preprocess) return dataset
python
def preprocess(self, dataset, mode, hparams, interleave=True): """Runtime preprocessing on the whole dataset. Return a tf.data.Dataset -- the preprocessed version of the given one. By default this function calls preprocess_example. Args: dataset: the Dataset of already decoded but not yet preprocessed features. mode: tf.estimator.ModeKeys hparams: HParams, model hyperparameters interleave: bool, whether to use parallel_interleave, which is faster but will alter the order of samples non-deterministically, or flat_map, which is slower but will preserve the sample order. Returns: a Dataset """ def _preprocess(example): examples = self.preprocess_example(example, mode, hparams) if not isinstance(examples, tf.data.Dataset): examples = tf.data.Dataset.from_tensors(examples) return examples if interleave: dataset = dataset.apply( tf.data.experimental.parallel_interleave( _preprocess, sloppy=True, cycle_length=8)) else: dataset = dataset.flat_map(_preprocess) return dataset
[ "def", "preprocess", "(", "self", ",", "dataset", ",", "mode", ",", "hparams", ",", "interleave", "=", "True", ")", ":", "def", "_preprocess", "(", "example", ")", ":", "examples", "=", "self", ".", "preprocess_example", "(", "example", ",", "mode", ",", "hparams", ")", "if", "not", "isinstance", "(", "examples", ",", "tf", ".", "data", ".", "Dataset", ")", ":", "examples", "=", "tf", ".", "data", ".", "Dataset", ".", "from_tensors", "(", "examples", ")", "return", "examples", "if", "interleave", ":", "dataset", "=", "dataset", ".", "apply", "(", "tf", ".", "data", ".", "experimental", ".", "parallel_interleave", "(", "_preprocess", ",", "sloppy", "=", "True", ",", "cycle_length", "=", "8", ")", ")", "else", ":", "dataset", "=", "dataset", ".", "flat_map", "(", "_preprocess", ")", "return", "dataset" ]
Runtime preprocessing on the whole dataset. Return a tf.data.Dataset -- the preprocessed version of the given one. By default this function calls preprocess_example. Args: dataset: the Dataset of already decoded but not yet preprocessed features. mode: tf.estimator.ModeKeys hparams: HParams, model hyperparameters interleave: bool, whether to use parallel_interleave, which is faster but will alter the order of samples non-deterministically, or flat_map, which is slower but will preserve the sample order. Returns: a Dataset
[ "Runtime", "preprocessing", "on", "the", "whole", "dataset", "." ]
272500b6efe353aeb638d2745ed56e519462ca31
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/data_generators/problem.py#L395-L425
21,693
tensorflow/tensor2tensor
tensor2tensor/data_generators/problem.py
Problem.filepattern
def filepattern(self, data_dir, mode, shard=None):
  """Get filepattern for data files for mode.

  Matches mode to a suffix.
  * DatasetSplit.TRAIN: train
  * DatasetSplit.EVAL: dev
  * DatasetSplit.TEST: test
  * tf.estimator.ModeKeys.PREDICT: dev

  Args:
    data_dir: str, data directory.
    mode: DatasetSplit
    shard: int, if provided, will only read data from the specified shard.

  Returns:
    filepattern str
  """
  path = os.path.join(data_dir, self.dataset_filename())
  shard_str = "-%05d" % shard if shard is not None else ""
  if mode == DatasetSplit.TRAIN:
    suffix = "train"
  elif mode in [DatasetSplit.EVAL, tf.estimator.ModeKeys.PREDICT]:
    suffix = "dev"
  else:
    assert mode == DatasetSplit.TEST
    suffix = "test"

  return "%s-%s%s*" % (path, suffix, shard_str)
python
def filepattern(self, data_dir, mode, shard=None):
  """Get filepattern for data files for mode.

  Matches mode to a suffix.
  * DatasetSplit.TRAIN: train
  * DatasetSplit.EVAL: dev
  * DatasetSplit.TEST: test
  * tf.estimator.ModeKeys.PREDICT: dev

  Args:
    data_dir: str, data directory.
    mode: DatasetSplit
    shard: int, if provided, will only read data from the specified shard.

  Returns:
    filepattern str
  """
  path = os.path.join(data_dir, self.dataset_filename())
  shard_str = "-%05d" % shard if shard is not None else ""
  if mode == DatasetSplit.TRAIN:
    suffix = "train"
  elif mode in [DatasetSplit.EVAL, tf.estimator.ModeKeys.PREDICT]:
    suffix = "dev"
  else:
    assert mode == DatasetSplit.TEST
    suffix = "test"

  return "%s-%s%s*" % (path, suffix, shard_str)
[ "def", "filepattern", "(", "self", ",", "data_dir", ",", "mode", ",", "shard", "=", "None", ")", ":", "path", "=", "os", ".", "path", ".", "join", "(", "data_dir", ",", "self", ".", "dataset_filename", "(", ")", ")", "shard_str", "=", "\"-%05d\"", "%", "shard", "if", "shard", "is", "not", "None", "else", "\"\"", "if", "mode", "==", "DatasetSplit", ".", "TRAIN", ":", "suffix", "=", "\"train\"", "elif", "mode", "in", "[", "DatasetSplit", ".", "EVAL", ",", "tf", ".", "estimator", ".", "ModeKeys", ".", "PREDICT", "]", ":", "suffix", "=", "\"dev\"", "else", ":", "assert", "mode", "==", "DatasetSplit", ".", "TEST", "suffix", "=", "\"test\"", "return", "\"%s-%s%s*\"", "%", "(", "path", ",", "suffix", ",", "shard_str", ")" ]
Get filepattern for data files for mode.

Matches mode to a suffix.
* DatasetSplit.TRAIN: train
* DatasetSplit.EVAL: dev
* DatasetSplit.TEST: test
* tf.estimator.ModeKeys.PREDICT: dev

Args:
  data_dir: str, data directory.
  mode: DatasetSplit
  shard: int, if provided, will only read data from the specified shard.

Returns:
  filepattern str
[ "Get", "filepattern", "for", "data", "files", "for", "mode", "." ]
272500b6efe353aeb638d2745ed56e519462ca31
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/data_generators/problem.py#L458-L485
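A quick sketch of the patterns this method yields, assuming dataset_filename() returns "my_problem" (an illustrative name):

# problem.filepattern("/tmp/t2t_data", DatasetSplit.TRAIN)
#   -> "/tmp/t2t_data/my_problem-train*"
# problem.filepattern("/tmp/t2t_data", DatasetSplit.EVAL, shard=3)
#   -> "/tmp/t2t_data/my_problem-dev-00003*"   # shard formatted as "-%05d"
# problem.filepattern("/tmp/t2t_data", DatasetSplit.TEST)
#   -> "/tmp/t2t_data/my_problem-test*"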
21,694
tensorflow/tensor2tensor
tensor2tensor/data_generators/problem.py
Problem.maybe_reverse_features
def maybe_reverse_features(self, feature_map):
  """Reverse features between inputs and targets if the problem is '_rev'."""
  if not self._was_reversed:
    return
  inputs = feature_map.pop("inputs", None)
  targets = feature_map.pop("targets", None)
  inputs_seg = feature_map.pop("inputs_segmentation", None)
  targets_seg = feature_map.pop("targets_segmentation", None)
  inputs_pos = feature_map.pop("inputs_position", None)
  targets_pos = feature_map.pop("targets_position", None)
  if inputs is not None:
    feature_map["targets"] = inputs
  if targets is not None:
    feature_map["inputs"] = targets
  if inputs_seg is not None:
    feature_map["targets_segmentation"] = inputs_seg
  if targets_seg is not None:
    feature_map["inputs_segmentation"] = targets_seg
  if inputs_pos is not None:
    feature_map["targets_position"] = inputs_pos
  if targets_pos is not None:
    feature_map["inputs_position"] = targets_pos
python
def maybe_reverse_features(self, feature_map):
  """Reverse features between inputs and targets if the problem is '_rev'."""
  if not self._was_reversed:
    return
  inputs = feature_map.pop("inputs", None)
  targets = feature_map.pop("targets", None)
  inputs_seg = feature_map.pop("inputs_segmentation", None)
  targets_seg = feature_map.pop("targets_segmentation", None)
  inputs_pos = feature_map.pop("inputs_position", None)
  targets_pos = feature_map.pop("targets_position", None)
  if inputs is not None:
    feature_map["targets"] = inputs
  if targets is not None:
    feature_map["inputs"] = targets
  if inputs_seg is not None:
    feature_map["targets_segmentation"] = inputs_seg
  if targets_seg is not None:
    feature_map["inputs_segmentation"] = targets_seg
  if inputs_pos is not None:
    feature_map["targets_position"] = inputs_pos
  if targets_pos is not None:
    feature_map["inputs_position"] = targets_pos
[ "def", "maybe_reverse_features", "(", "self", ",", "feature_map", ")", ":", "if", "not", "self", ".", "_was_reversed", ":", "return", "inputs", "=", "feature_map", ".", "pop", "(", "\"inputs\"", ",", "None", ")", "targets", "=", "feature_map", ".", "pop", "(", "\"targets\"", ",", "None", ")", "inputs_seg", "=", "feature_map", ".", "pop", "(", "\"inputs_segmentation\"", ",", "None", ")", "targets_seg", "=", "feature_map", ".", "pop", "(", "\"targets_segmentation\"", ",", "None", ")", "inputs_pos", "=", "feature_map", ".", "pop", "(", "\"inputs_position\"", ",", "None", ")", "targets_pos", "=", "feature_map", ".", "pop", "(", "\"targets_position\"", ",", "None", ")", "if", "inputs", "is", "not", "None", ":", "feature_map", "[", "\"targets\"", "]", "=", "inputs", "if", "targets", "is", "not", "None", ":", "feature_map", "[", "\"inputs\"", "]", "=", "targets", "if", "inputs_seg", "is", "not", "None", ":", "feature_map", "[", "\"targets_segmentation\"", "]", "=", "inputs_seg", "if", "targets_seg", "is", "not", "None", ":", "feature_map", "[", "\"inputs_segmentation\"", "]", "=", "targets_seg", "if", "inputs_pos", "is", "not", "None", ":", "feature_map", "[", "\"targets_position\"", "]", "=", "inputs_pos", "if", "targets_pos", "is", "not", "None", ":", "feature_map", "[", "\"inputs_position\"", "]", "=", "targets_pos" ]
Reverse features between inputs and targets if the problem is '_rev'.
[ "Reverse", "features", "between", "inputs", "and", "targets", "if", "the", "problem", "is", "_rev", "." ]
272500b6efe353aeb638d2745ed56e519462ca31
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/data_generators/problem.py#L544-L565
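A behavior sketch on a plain dict; real feature maps hold Tensors, but the key-swapping logic is identical. Assumes the problem was built with the '_rev' suffix, so self._was_reversed is True:

features = {"inputs": [1, 2, 3], "targets": [4, 5]}
problem.maybe_reverse_features(features)
# features == {"targets": [1, 2, 3], "inputs": [4, 5]}
# Segmentation/position keys, when present, are swapped the same way.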
21,695
tensorflow/tensor2tensor
tensor2tensor/data_generators/problem.py
Problem.dataset
def dataset(self,
            mode,
            data_dir=None,
            num_threads=None,
            output_buffer_size=None,
            shuffle_files=None,
            hparams=None,
            preprocess=True,
            dataset_split=None,
            shard=None,
            partition_id=0,
            num_partitions=1,
            shuffle_buffer_size=1024,
            max_records=-1):
  """Build a Dataset for this problem.

  Args:
    mode: tf.estimator.ModeKeys; determines which files to read from.
    data_dir: directory that contains data files.
    num_threads: int, number of threads to use for decode and preprocess
      Dataset.map calls.
    output_buffer_size: int, how many elements to prefetch at end of pipeline.
    shuffle_files: whether to shuffle input files. Default behavior (i.e. when
      shuffle_files=None) is to shuffle if mode == TRAIN.
    hparams: HParams; hparams to be passed to Problem.preprocess_example and
      Problem.hparams. If None, will use a default set that is a no-op.
    preprocess: bool, whether to map the Dataset through
      Problem.preprocess_example.
    dataset_split: DatasetSplit, which split to read data from
      (TRAIN:"-train", EVAL:"-dev", TEST:"-test"). Defaults to mode.
    shard: int, if provided, will only read data from the specified shard.
    partition_id: integer - which partition of the dataset to read from
    num_partitions: how many partitions in the dataset
    shuffle_buffer_size: if shuffle_files is True, this is the buffer size
      used to shuffle records.
    max_records: int, number of records to truncate to.

  Returns:
    Dataset containing dict<feature name, Tensor>.

  Raises:
    ValueError: if num_partitions is greater than the number of data files.
  """
  is_training = mode == tf.estimator.ModeKeys.TRAIN
  shuffle_files = shuffle_files or shuffle_files is None and is_training

  dataset_split = dataset_split or mode
  assert data_dir

  if hparams is None:
    hparams = default_model_hparams()

  if not hasattr(hparams, "data_dir"):
    hparams.add_hparam("data_dir", data_dir)
  if not hparams.data_dir:
    hparams.data_dir = data_dir
  # Construct the Problem's hparams so that items within it are accessible
  _ = self.get_hparams(hparams)

  data_filepattern = self.filepattern(data_dir, dataset_split, shard=shard)
  tf.logging.info("Reading data files from %s", data_filepattern)
  data_files = sorted(tf.contrib.slim.parallel_reader.get_data_files(
      data_filepattern))

  # Functions used in dataset transforms below. `filenames` can be either a
  # `tf.string` tensor or `tf.data.Dataset` containing one or more filenames.
  def _load_records_and_preprocess(filenames):
    """Reads files from a string tensor or a dataset of filenames."""
    # Load records from file(s) with an 8MiB read buffer.
    dataset = tf.data.TFRecordDataset(filenames, buffer_size=8 * 1024 * 1024)
    # Decode.
    dataset = dataset.map(self.decode_example, num_parallel_calls=num_threads)
    # Preprocess if requested.
    # Note that preprocessing should happen per-file as order may matter.
    if preprocess:
      dataset = self.preprocess(dataset, mode, hparams,
                                interleave=shuffle_files)
    return dataset

  if len(data_files) < num_partitions:
    raise ValueError(
        "number of data files (%d) must be at least the number of hosts (%d)"
        % (len(data_files), num_partitions))
  data_files = [f for (i, f) in enumerate(data_files)
                if i % num_partitions == partition_id]
  tf.logging.info(
      "partition: %d num_data_files: %d" % (partition_id, len(data_files)))
  if shuffle_files:
    mlperf_log.transformer_print(key=mlperf_log.INPUT_ORDER)
    random.shuffle(data_files)

  dataset = tf.data.Dataset.from_tensor_slices(tf.constant(data_files))
  # Create data-set from files by parsing, pre-processing and interleaving.
  if shuffle_files:
    dataset = dataset.apply(
        tf.data.experimental.parallel_interleave(
            _load_records_and_preprocess, sloppy=True, cycle_length=8))
  else:
    dataset = _load_records_and_preprocess(dataset)

  dataset = dataset.map(
      self.maybe_reverse_and_copy, num_parallel_calls=num_threads)
  dataset = dataset.take(max_records)

  ## Shuffle records only for training examples.
  if shuffle_files and is_training:
    dataset = dataset.shuffle(shuffle_buffer_size)

  if hparams.get("pack_dataset", False):
    dataset = generator_utils.pack_dataset(
        dataset, hparams.max_length, keys=["inputs", "targets"],
        use_custom_ops=hparams.get("use_custom_ops", False))
  if output_buffer_size:
    dataset = dataset.prefetch(output_buffer_size)

  return dataset
python
def dataset(self,
            mode,
            data_dir=None,
            num_threads=None,
            output_buffer_size=None,
            shuffle_files=None,
            hparams=None,
            preprocess=True,
            dataset_split=None,
            shard=None,
            partition_id=0,
            num_partitions=1,
            shuffle_buffer_size=1024,
            max_records=-1):
  """Build a Dataset for this problem.

  Args:
    mode: tf.estimator.ModeKeys; determines which files to read from.
    data_dir: directory that contains data files.
    num_threads: int, number of threads to use for decode and preprocess
      Dataset.map calls.
    output_buffer_size: int, how many elements to prefetch at end of pipeline.
    shuffle_files: whether to shuffle input files. Default behavior (i.e. when
      shuffle_files=None) is to shuffle if mode == TRAIN.
    hparams: HParams; hparams to be passed to Problem.preprocess_example and
      Problem.hparams. If None, will use a default set that is a no-op.
    preprocess: bool, whether to map the Dataset through
      Problem.preprocess_example.
    dataset_split: DatasetSplit, which split to read data from
      (TRAIN:"-train", EVAL:"-dev", TEST:"-test"). Defaults to mode.
    shard: int, if provided, will only read data from the specified shard.
    partition_id: integer - which partition of the dataset to read from
    num_partitions: how many partitions in the dataset
    shuffle_buffer_size: if shuffle_files is True, this is the buffer size
      used to shuffle records.
    max_records: int, number of records to truncate to.

  Returns:
    Dataset containing dict<feature name, Tensor>.

  Raises:
    ValueError: if num_partitions is greater than the number of data files.
  """
  is_training = mode == tf.estimator.ModeKeys.TRAIN
  shuffle_files = shuffle_files or shuffle_files is None and is_training

  dataset_split = dataset_split or mode
  assert data_dir

  if hparams is None:
    hparams = default_model_hparams()

  if not hasattr(hparams, "data_dir"):
    hparams.add_hparam("data_dir", data_dir)
  if not hparams.data_dir:
    hparams.data_dir = data_dir
  # Construct the Problem's hparams so that items within it are accessible
  _ = self.get_hparams(hparams)

  data_filepattern = self.filepattern(data_dir, dataset_split, shard=shard)
  tf.logging.info("Reading data files from %s", data_filepattern)
  data_files = sorted(tf.contrib.slim.parallel_reader.get_data_files(
      data_filepattern))

  # Functions used in dataset transforms below. `filenames` can be either a
  # `tf.string` tensor or `tf.data.Dataset` containing one or more filenames.
  def _load_records_and_preprocess(filenames):
    """Reads files from a string tensor or a dataset of filenames."""
    # Load records from file(s) with an 8MiB read buffer.
    dataset = tf.data.TFRecordDataset(filenames, buffer_size=8 * 1024 * 1024)
    # Decode.
    dataset = dataset.map(self.decode_example, num_parallel_calls=num_threads)
    # Preprocess if requested.
    # Note that preprocessing should happen per-file as order may matter.
    if preprocess:
      dataset = self.preprocess(dataset, mode, hparams,
                                interleave=shuffle_files)
    return dataset

  if len(data_files) < num_partitions:
    raise ValueError(
        "number of data files (%d) must be at least the number of hosts (%d)"
        % (len(data_files), num_partitions))
  data_files = [f for (i, f) in enumerate(data_files)
                if i % num_partitions == partition_id]
  tf.logging.info(
      "partition: %d num_data_files: %d" % (partition_id, len(data_files)))
  if shuffle_files:
    mlperf_log.transformer_print(key=mlperf_log.INPUT_ORDER)
    random.shuffle(data_files)

  dataset = tf.data.Dataset.from_tensor_slices(tf.constant(data_files))
  # Create data-set from files by parsing, pre-processing and interleaving.
  if shuffle_files:
    dataset = dataset.apply(
        tf.data.experimental.parallel_interleave(
            _load_records_and_preprocess, sloppy=True, cycle_length=8))
  else:
    dataset = _load_records_and_preprocess(dataset)

  dataset = dataset.map(
      self.maybe_reverse_and_copy, num_parallel_calls=num_threads)
  dataset = dataset.take(max_records)

  ## Shuffle records only for training examples.
  if shuffle_files and is_training:
    dataset = dataset.shuffle(shuffle_buffer_size)

  if hparams.get("pack_dataset", False):
    dataset = generator_utils.pack_dataset(
        dataset, hparams.max_length, keys=["inputs", "targets"],
        use_custom_ops=hparams.get("use_custom_ops", False))
  if output_buffer_size:
    dataset = dataset.prefetch(output_buffer_size)

  return dataset
[ "def", "dataset", "(", "self", ",", "mode", ",", "data_dir", "=", "None", ",", "num_threads", "=", "None", ",", "output_buffer_size", "=", "None", ",", "shuffle_files", "=", "None", ",", "hparams", "=", "None", ",", "preprocess", "=", "True", ",", "dataset_split", "=", "None", ",", "shard", "=", "None", ",", "partition_id", "=", "0", ",", "num_partitions", "=", "1", ",", "shuffle_buffer_size", "=", "1024", ",", "max_records", "=", "-", "1", ")", ":", "is_training", "=", "mode", "==", "tf", ".", "estimator", ".", "ModeKeys", ".", "TRAIN", "shuffle_files", "=", "shuffle_files", "or", "shuffle_files", "is", "None", "and", "is_training", "dataset_split", "=", "dataset_split", "or", "mode", "assert", "data_dir", "if", "hparams", "is", "None", ":", "hparams", "=", "default_model_hparams", "(", ")", "if", "not", "hasattr", "(", "hparams", ",", "\"data_dir\"", ")", ":", "hparams", ".", "add_hparam", "(", "\"data_dir\"", ",", "data_dir", ")", "if", "not", "hparams", ".", "data_dir", ":", "hparams", ".", "data_dir", "=", "data_dir", "# Construct the Problem's hparams so that items within it are accessible", "_", "=", "self", ".", "get_hparams", "(", "hparams", ")", "data_filepattern", "=", "self", ".", "filepattern", "(", "data_dir", ",", "dataset_split", ",", "shard", "=", "shard", ")", "tf", ".", "logging", ".", "info", "(", "\"Reading data files from %s\"", ",", "data_filepattern", ")", "data_files", "=", "sorted", "(", "tf", ".", "contrib", ".", "slim", ".", "parallel_reader", ".", "get_data_files", "(", "data_filepattern", ")", ")", "# Functions used in dataset transforms below. `filenames` can be either a", "# `tf.string` tensor or `tf.data.Dataset` containing one or more filenames.", "def", "_load_records_and_preprocess", "(", "filenames", ")", ":", "\"\"\"Reads files from a string tensor or a dataset of filenames.\"\"\"", "# Load records from file(s) with an 8MiB read buffer.", "dataset", "=", "tf", ".", "data", ".", "TFRecordDataset", "(", "filenames", ",", "buffer_size", "=", "8", "*", "1024", "*", "1024", ")", "# Decode.", "dataset", "=", "dataset", ".", "map", "(", "self", ".", "decode_example", ",", "num_parallel_calls", "=", "num_threads", ")", "# Preprocess if requested.", "# Note that preprocessing should happen per-file as order may matter.", "if", "preprocess", ":", "dataset", "=", "self", ".", "preprocess", "(", "dataset", ",", "mode", ",", "hparams", ",", "interleave", "=", "shuffle_files", ")", "return", "dataset", "if", "len", "(", "data_files", ")", "<", "num_partitions", ":", "raise", "ValueError", "(", "\"number of data files (%d) must be at least the number of hosts (%d)\"", "%", "(", "len", "(", "data_files", ")", ",", "num_partitions", ")", ")", "data_files", "=", "[", "f", "for", "(", "i", ",", "f", ")", "in", "enumerate", "(", "data_files", ")", "if", "i", "%", "num_partitions", "==", "partition_id", "]", "tf", ".", "logging", ".", "info", "(", "\"partition: %d num_data_files: %d\"", "%", "(", "partition_id", ",", "len", "(", "data_files", ")", ")", ")", "if", "shuffle_files", ":", "mlperf_log", ".", "transformer_print", "(", "key", "=", "mlperf_log", ".", "INPUT_ORDER", ")", "random", ".", "shuffle", "(", "data_files", ")", "dataset", "=", "tf", ".", "data", ".", "Dataset", ".", "from_tensor_slices", "(", "tf", ".", "constant", "(", "data_files", ")", ")", "# Create data-set from files by parsing, pre-processing and interleaving.", "if", "shuffle_files", ":", "dataset", "=", "dataset", ".", "apply", "(", "tf", ".", "data", ".", "experimental", ".", 
"parallel_interleave", "(", "_load_records_and_preprocess", ",", "sloppy", "=", "True", ",", "cycle_length", "=", "8", ")", ")", "else", ":", "dataset", "=", "_load_records_and_preprocess", "(", "dataset", ")", "dataset", "=", "dataset", ".", "map", "(", "self", ".", "maybe_reverse_and_copy", ",", "num_parallel_calls", "=", "num_threads", ")", "dataset", "=", "dataset", ".", "take", "(", "max_records", ")", "## Shuffle records only for training examples.", "if", "shuffle_files", "and", "is_training", ":", "dataset", "=", "dataset", ".", "shuffle", "(", "shuffle_buffer_size", ")", "if", "hparams", ".", "get", "(", "\"pack_dataset\"", ",", "False", ")", ":", "dataset", "=", "generator_utils", ".", "pack_dataset", "(", "dataset", ",", "hparams", ".", "max_length", ",", "keys", "=", "[", "\"inputs\"", ",", "\"targets\"", "]", ",", "use_custom_ops", "=", "hparams", ".", "get", "(", "\"use_custom_ops\"", ",", "False", ")", ")", "if", "output_buffer_size", ":", "dataset", "=", "dataset", ".", "prefetch", "(", "output_buffer_size", ")", "return", "dataset" ]
Build a Dataset for this problem.

Args:
  mode: tf.estimator.ModeKeys; determines which files to read from.
  data_dir: directory that contains data files.
  num_threads: int, number of threads to use for decode and preprocess
    Dataset.map calls.
  output_buffer_size: int, how many elements to prefetch at end of pipeline.
  shuffle_files: whether to shuffle input files. Default behavior (i.e. when
    shuffle_files=None) is to shuffle if mode == TRAIN.
  hparams: HParams; hparams to be passed to Problem.preprocess_example and
    Problem.hparams. If None, will use a default set that is a no-op.
  preprocess: bool, whether to map the Dataset through
    Problem.preprocess_example.
  dataset_split: DatasetSplit, which split to read data from
    (TRAIN:"-train", EVAL:"-dev", TEST:"-test"). Defaults to mode.
  shard: int, if provided, will only read data from the specified shard.
  partition_id: integer - which partition of the dataset to read from
  num_partitions: how many partitions in the dataset
  shuffle_buffer_size: if shuffle_files is True, this is the buffer size
    used to shuffle records.
  max_records: int, number of records to truncate to.

Returns:
  Dataset containing dict<feature name, Tensor>.

Raises:
  ValueError: if num_partitions is greater than the number of data files.
[ "Build", "a", "Dataset", "for", "this", "problem", "." ]
272500b6efe353aeb638d2745ed56e519462ca31
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/data_generators/problem.py#L583-L698
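A hedged sketch of a typical training call; the path and buffer size are placeholders, and iterating directly over the result assumes TF eager execution (under TF1 graph mode you would go through make_one_shot_iterator() and a Session instead):

ds = problem.dataset(
    mode=tf.estimator.ModeKeys.TRAIN,
    data_dir="/tmp/t2t_data",
    shuffle_buffer_size=2048,  # shuffle_files defaults to True for TRAIN
    max_records=-1)            # -1 makes take() keep every record
for features in ds.take(1):
  print(features.keys())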
21,696
tensorflow/tensor2tensor
tensor2tensor/data_generators/problem.py
Problem.decode_example
def decode_example(self, serialized_example):
  """Return a dict of Tensors from a serialized tensorflow.Example."""
  data_fields, data_items_to_decoders = self.example_reading_spec()
  # Necessary to rejoin examples in the correct order with the Cloud ML Engine
  # batch prediction API.
  data_fields["batch_prediction_key"] = tf.FixedLenFeature([1], tf.int64, 0)
  if data_items_to_decoders is None:
    data_items_to_decoders = {
        field: tf.contrib.slim.tfexample_decoder.Tensor(field)
        for field in data_fields
    }

  decoder = tf.contrib.slim.tfexample_decoder.TFExampleDecoder(
      data_fields, data_items_to_decoders)

  decode_items = list(sorted(data_items_to_decoders))
  decoded = decoder.decode(serialized_example, items=decode_items)
  return dict(zip(decode_items, decoded))
python
def decode_example(self, serialized_example):
  """Return a dict of Tensors from a serialized tensorflow.Example."""
  data_fields, data_items_to_decoders = self.example_reading_spec()
  # Necessary to rejoin examples in the correct order with the Cloud ML Engine
  # batch prediction API.
  data_fields["batch_prediction_key"] = tf.FixedLenFeature([1], tf.int64, 0)
  if data_items_to_decoders is None:
    data_items_to_decoders = {
        field: tf.contrib.slim.tfexample_decoder.Tensor(field)
        for field in data_fields
    }

  decoder = tf.contrib.slim.tfexample_decoder.TFExampleDecoder(
      data_fields, data_items_to_decoders)

  decode_items = list(sorted(data_items_to_decoders))
  decoded = decoder.decode(serialized_example, items=decode_items)
  return dict(zip(decode_items, decoded))
[ "def", "decode_example", "(", "self", ",", "serialized_example", ")", ":", "data_fields", ",", "data_items_to_decoders", "=", "self", ".", "example_reading_spec", "(", ")", "# Necessary to rejoin examples in the correct order with the Cloud ML Engine", "# batch prediction API.", "data_fields", "[", "\"batch_prediction_key\"", "]", "=", "tf", ".", "FixedLenFeature", "(", "[", "1", "]", ",", "tf", ".", "int64", ",", "0", ")", "if", "data_items_to_decoders", "is", "None", ":", "data_items_to_decoders", "=", "{", "field", ":", "tf", ".", "contrib", ".", "slim", ".", "tfexample_decoder", ".", "Tensor", "(", "field", ")", "for", "field", "in", "data_fields", "}", "decoder", "=", "tf", ".", "contrib", ".", "slim", ".", "tfexample_decoder", ".", "TFExampleDecoder", "(", "data_fields", ",", "data_items_to_decoders", ")", "decode_items", "=", "list", "(", "sorted", "(", "data_items_to_decoders", ")", ")", "decoded", "=", "decoder", ".", "decode", "(", "serialized_example", ",", "items", "=", "decode_items", ")", "return", "dict", "(", "zip", "(", "decode_items", ",", "decoded", ")", ")" ]
Return a dict of Tensors from a serialized tensorflow.Example.
[ "Return", "a", "dict", "of", "Tensors", "from", "a", "serialized", "tensorflow", ".", "Example", "." ]
272500b6efe353aeb638d2745ed56e519462ca31
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/data_generators/problem.py#L700-L717
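A sketch of decoding one serialized record back into a feature dict; the file path is a placeholder and the reader assumes TF1's tf.python_io API:

record_path = "/tmp/t2t_data/my_problem-train-00000-of-00001"  # placeholder
serialized = next(tf.python_io.tf_record_iterator(record_path))  # raw tf.Example bytes
features = problem.decode_example(serialized)  # dict<feature name, Tensor>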
21,697
tensorflow/tensor2tensor
tensor2tensor/data_generators/problem.py
Problem.make_estimator_input_fn
def make_estimator_input_fn(self,
                            mode,
                            hparams,
                            data_dir=None,
                            force_repeat=False,
                            prevent_repeat=False,
                            dataset_kwargs=None):
  """Return input_fn wrapped for Estimator."""

  def estimator_input_fn(params, config):
    return self.input_fn(
        mode,
        hparams,
        data_dir=data_dir,
        params=params,
        config=config,
        force_repeat=force_repeat,
        prevent_repeat=prevent_repeat,
        dataset_kwargs=dataset_kwargs)

  return estimator_input_fn
python
def make_estimator_input_fn(self,
                            mode,
                            hparams,
                            data_dir=None,
                            force_repeat=False,
                            prevent_repeat=False,
                            dataset_kwargs=None):
  """Return input_fn wrapped for Estimator."""

  def estimator_input_fn(params, config):
    return self.input_fn(
        mode,
        hparams,
        data_dir=data_dir,
        params=params,
        config=config,
        force_repeat=force_repeat,
        prevent_repeat=prevent_repeat,
        dataset_kwargs=dataset_kwargs)

  return estimator_input_fn
[ "def", "make_estimator_input_fn", "(", "self", ",", "mode", ",", "hparams", ",", "data_dir", "=", "None", ",", "force_repeat", "=", "False", ",", "prevent_repeat", "=", "False", ",", "dataset_kwargs", "=", "None", ")", ":", "def", "estimator_input_fn", "(", "params", ",", "config", ")", ":", "return", "self", ".", "input_fn", "(", "mode", ",", "hparams", ",", "data_dir", "=", "data_dir", ",", "params", "=", "params", ",", "config", "=", "config", ",", "force_repeat", "=", "force_repeat", ",", "prevent_repeat", "=", "prevent_repeat", ",", "dataset_kwargs", "=", "dataset_kwargs", ")", "return", "estimator_input_fn" ]
Return input_fn wrapped for Estimator.
[ "Return", "input_fn", "wrapped", "for", "Estimator", "." ]
272500b6efe353aeb638d2745ed56e519462ca31
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/data_generators/problem.py#L771-L791
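A sketch of plugging the wrapped input_fn into an Estimator; model_fn and run_config are assumed to be defined elsewhere:

train_input_fn = problem.make_estimator_input_fn(
    tf.estimator.ModeKeys.TRAIN, hparams, data_dir="/tmp/t2t_data")
estimator = tf.estimator.Estimator(model_fn=model_fn, config=run_config)
estimator.train(train_input_fn, max_steps=1000)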
21,698
tensorflow/tensor2tensor
tensor2tensor/data_generators/problem.py
Problem._dataset_partition
def _dataset_partition(self, mode, config, params):
  """Which part of the training data to read.

  If there are multiple parallel calls to input_fn (multiple TPU hosts),
  then we want each one to read from a separate partition of the training
  data.

  Args:
    mode: tf.estimator.ModeKeys
    config: RunConfig
    params: A dict that contains parameters.
  Returns:
    partition_id: an integer
    num_partitions: an integer
  """
  if mode != tf.estimator.ModeKeys.TRAIN or not hasattr(config, "tpu_config"):
    # Reset in the case when using TPU but alternating TRAIN and EVAL.
    self._next_partition_id = 0
    return 0, 1
  phift = config.tpu_config.per_host_input_for_training
  # This is the mesh-tensorflow case.
  if (hasattr(tpu_config.InputPipelineConfig, "BROADCAST") and
      phift == tpu_config.InputPipelineConfig.BROADCAST):
    return 0, 1
  if phift:
    num_hosts = (params["context"].num_hosts if "context" in params
                 else config.tpu_config.num_shards // 8)
    num_partitions = max(num_hosts, 1)
  else:
    num_partitions = config.tpu_config.num_shards
  partition_id = getattr(self, "_next_partition_id", 0)
  self._next_partition_id = partition_id + 1
  tf.logging.info("num_partitions = %d partition_id = %d" %
                  (num_partitions, partition_id))
  assert partition_id < num_partitions
  return partition_id, num_partitions
python
def _dataset_partition(self, mode, config, params):
  """Which part of the training data to read.

  If there are multiple parallel calls to input_fn (multiple TPU hosts),
  then we want each one to read from a separate partition of the training
  data.

  Args:
    mode: tf.estimator.ModeKeys
    config: RunConfig
    params: A dict that contains parameters.
  Returns:
    partition_id: an integer
    num_partitions: an integer
  """
  if mode != tf.estimator.ModeKeys.TRAIN or not hasattr(config, "tpu_config"):
    # Reset in the case when using TPU but alternating TRAIN and EVAL.
    self._next_partition_id = 0
    return 0, 1
  phift = config.tpu_config.per_host_input_for_training
  # This is the mesh-tensorflow case.
  if (hasattr(tpu_config.InputPipelineConfig, "BROADCAST") and
      phift == tpu_config.InputPipelineConfig.BROADCAST):
    return 0, 1
  if phift:
    num_hosts = (params["context"].num_hosts if "context" in params
                 else config.tpu_config.num_shards // 8)
    num_partitions = max(num_hosts, 1)
  else:
    num_partitions = config.tpu_config.num_shards
  partition_id = getattr(self, "_next_partition_id", 0)
  self._next_partition_id = partition_id + 1
  tf.logging.info("num_partitions = %d partition_id = %d" %
                  (num_partitions, partition_id))
  assert partition_id < num_partitions
  return partition_id, num_partitions
[ "def", "_dataset_partition", "(", "self", ",", "mode", ",", "config", ",", "params", ")", ":", "if", "mode", "!=", "tf", ".", "estimator", ".", "ModeKeys", ".", "TRAIN", "or", "not", "hasattr", "(", "config", ",", "\"tpu_config\"", ")", ":", "# Reset in the case when using TPU but alternating TRAIN and EVAL.", "self", ".", "_next_partition_id", "=", "0", "return", "0", ",", "1", "phift", "=", "config", ".", "tpu_config", ".", "per_host_input_for_training", "# This is the mesh-tensorflow case.", "if", "(", "hasattr", "(", "tpu_config", ".", "InputPipelineConfig", ",", "\"BROADCAST\"", ")", "and", "phift", "==", "tpu_config", ".", "InputPipelineConfig", ".", "BROADCAST", ")", ":", "return", "0", ",", "1", "if", "phift", ":", "num_hosts", "=", "(", "params", "[", "\"context\"", "]", ".", "num_hosts", "if", "\"context\"", "in", "params", "else", "config", ".", "tpu_config", ".", "num_shards", "//", "8", ")", "num_partitions", "=", "max", "(", "num_hosts", ",", "1", ")", "else", ":", "num_partitions", "=", "config", ".", "tpu_config", ".", "num_shards", "partition_id", "=", "getattr", "(", "self", ",", "\"_next_partition_id\"", ",", "0", ")", "self", ".", "_next_partition_id", "=", "partition_id", "+", "1", "tf", ".", "logging", ".", "info", "(", "\"num_partitions = %d partition_id = %d\"", "%", "(", "num_partitions", ",", "partition_id", ")", ")", "assert", "partition_id", "<", "num_partitions", "return", "partition_id", ",", "num_partitions" ]
Which part of the training data to read.

If there are multiple parallel calls to input_fn (multiple TPU hosts),
then we want each one to read from a separate partition of the training
data.

Args:
  mode: tf.estimator.ModeKeys
  config: RunConfig
  params: A dict that contains parameters.
Returns:
  partition_id: an integer
  num_partitions: an integer
[ "Which", "part", "of", "the", "training", "data", "to", "read", "." ]
272500b6efe353aeb638d2745ed56e519462ca31
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/data_generators/problem.py#L793-L828
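A sketch of the stateful partition counter, calling the (private) method directly for illustration; run_config is a placeholder RunConfig without a tpu_config attribute:

pid, n = problem._dataset_partition(tf.estimator.ModeKeys.EVAL, run_config, {})
assert (pid, n) == (0, 1)  # non-TRAIN (or non-TPU) resets _next_partition_id
# During per-host TPU training, successive calls walk through partition ids 0, 1, 2, ...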
21,699
tensorflow/tensor2tensor
tensor2tensor/data_generators/problem.py
Problem.serving_input_fn
def serving_input_fn(self, hparams, decode_hparams=None, use_tpu=False):
  """Input fn for serving export, starting from serialized example."""
  mode = tf.estimator.ModeKeys.PREDICT
  serialized_example = tf.placeholder(
      dtype=tf.string, shape=[None], name="serialized_example")
  dataset = tf.data.Dataset.from_tensor_slices(serialized_example)
  dataset = dataset.map(self.decode_example)
  dataset = dataset.map(lambda ex: self.preprocess_example(ex, mode, hparams))
  dataset = dataset.map(data_reader.cast_ints_to_int32)

  if use_tpu:
    padded_shapes = data_reader.pad_for_tpu(dataset.output_shapes, hparams,
                                            hparams.max_length)
    batch_size = 1 if not decode_hparams else getattr(decode_hparams,
                                                      "batch_size", 1)
    dataset = dataset.padded_batch(
        batch_size, padded_shapes, drop_remainder=False)
    dataset = dataset.map(
        functools.partial(data_reader.pad_batch, batch_multiple=batch_size))
  else:
    dataset = dataset.padded_batch(
        tf.shape(serialized_example, out_type=tf.int64)[0],
        dataset.output_shapes)

  dataset = dataset.map(data_reader.standardize_shapes)
  features = tf.data.experimental.get_single_element(dataset)

  if self.has_inputs:
    features.pop("targets", None)

  return tf.estimator.export.ServingInputReceiver(
      features=features, receiver_tensors=serialized_example)
python
def serving_input_fn(self, hparams, decode_hparams=None, use_tpu=False):
  """Input fn for serving export, starting from serialized example."""
  mode = tf.estimator.ModeKeys.PREDICT
  serialized_example = tf.placeholder(
      dtype=tf.string, shape=[None], name="serialized_example")
  dataset = tf.data.Dataset.from_tensor_slices(serialized_example)
  dataset = dataset.map(self.decode_example)
  dataset = dataset.map(lambda ex: self.preprocess_example(ex, mode, hparams))
  dataset = dataset.map(data_reader.cast_ints_to_int32)

  if use_tpu:
    padded_shapes = data_reader.pad_for_tpu(dataset.output_shapes, hparams,
                                            hparams.max_length)
    batch_size = 1 if not decode_hparams else getattr(decode_hparams,
                                                      "batch_size", 1)
    dataset = dataset.padded_batch(
        batch_size, padded_shapes, drop_remainder=False)
    dataset = dataset.map(
        functools.partial(data_reader.pad_batch, batch_multiple=batch_size))
  else:
    dataset = dataset.padded_batch(
        tf.shape(serialized_example, out_type=tf.int64)[0],
        dataset.output_shapes)

  dataset = dataset.map(data_reader.standardize_shapes)
  features = tf.data.experimental.get_single_element(dataset)

  if self.has_inputs:
    features.pop("targets", None)

  return tf.estimator.export.ServingInputReceiver(
      features=features, receiver_tensors=serialized_example)
[ "def", "serving_input_fn", "(", "self", ",", "hparams", ",", "decode_hparams", "=", "None", ",", "use_tpu", "=", "False", ")", ":", "mode", "=", "tf", ".", "estimator", ".", "ModeKeys", ".", "PREDICT", "serialized_example", "=", "tf", ".", "placeholder", "(", "dtype", "=", "tf", ".", "string", ",", "shape", "=", "[", "None", "]", ",", "name", "=", "\"serialized_example\"", ")", "dataset", "=", "tf", ".", "data", ".", "Dataset", ".", "from_tensor_slices", "(", "serialized_example", ")", "dataset", "=", "dataset", ".", "map", "(", "self", ".", "decode_example", ")", "dataset", "=", "dataset", ".", "map", "(", "lambda", "ex", ":", "self", ".", "preprocess_example", "(", "ex", ",", "mode", ",", "hparams", ")", ")", "dataset", "=", "dataset", ".", "map", "(", "data_reader", ".", "cast_ints_to_int32", ")", "if", "use_tpu", ":", "padded_shapes", "=", "data_reader", ".", "pad_for_tpu", "(", "dataset", ".", "output_shapes", ",", "hparams", ",", "hparams", ".", "max_length", ")", "batch_size", "=", "1", "if", "not", "decode_hparams", "else", "getattr", "(", "decode_hparams", ",", "\"batch_size\"", ",", "1", ")", "dataset", "=", "dataset", ".", "padded_batch", "(", "batch_size", ",", "padded_shapes", ",", "drop_remainder", "=", "False", ")", "dataset", "=", "dataset", ".", "map", "(", "functools", ".", "partial", "(", "data_reader", ".", "pad_batch", ",", "batch_multiple", "=", "batch_size", ")", ")", "else", ":", "dataset", "=", "dataset", ".", "padded_batch", "(", "tf", ".", "shape", "(", "serialized_example", ",", "out_type", "=", "tf", ".", "int64", ")", "[", "0", "]", ",", "dataset", ".", "output_shapes", ")", "dataset", "=", "dataset", ".", "map", "(", "data_reader", ".", "standardize_shapes", ")", "features", "=", "tf", ".", "data", ".", "experimental", ".", "get_single_element", "(", "dataset", ")", "if", "self", ".", "has_inputs", ":", "features", ".", "pop", "(", "\"targets\"", ",", "None", ")", "return", "tf", ".", "estimator", ".", "export", ".", "ServingInputReceiver", "(", "features", "=", "features", ",", "receiver_tensors", "=", "serialized_example", ")" ]
Input fn for serving export, starting from serialized example.
[ "Input", "fn", "for", "serving", "export", "starting", "from", "serialized", "example", "." ]
272500b6efe353aeb638d2745ed56e519462ca31
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/data_generators/problem.py#L899-L930
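A sketch of using the receiver for a SavedModel export; estimator and the export directory are placeholders:

estimator.export_saved_model(
    "/tmp/export",
    lambda: problem.serving_input_fn(hparams, decode_hparams=None,
                                     use_tpu=False))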