id
int32
0
252k
repo
stringlengths
7
55
path
stringlengths
4
127
func_name
stringlengths
1
88
original_string
stringlengths
75
19.8k
language
stringclasses
1 value
code
stringlengths
75
19.8k
code_tokens
list
docstring
stringlengths
3
17.3k
docstring_tokens
list
sha
stringlengths
40
40
url
stringlengths
87
242
28,700
apple/turicreate
src/unity/python/turicreate/toolkits/_feature_engineering/_transformer_chain.py
TransformerChain.fit_transform
def fit_transform(self, data): """ First fit a transformer using the SFrame `data` and then return a transformed version of `data`. Parameters ---------- data : SFrame The data used to fit the transformer. The same data is then also transformed. Returns ------- Transformed SFrame. See Also -------- transform, fit_transform Notes ----- - The default implementation calls fit() and then calls transform(). You may override this function with a more efficient implementation." Examples -------- .. sourcecode:: python >> transformed_sf = chain.fit_transform(sf) """ if not self._transformers: return self._preprocess(data) transformed_data = self._preprocess(data) final_step = self._transformers[-1] return final_step[1].fit_transform(transformed_data)
python
def fit_transform(self, data): """ First fit a transformer using the SFrame `data` and then return a transformed version of `data`. Parameters ---------- data : SFrame The data used to fit the transformer. The same data is then also transformed. Returns ------- Transformed SFrame. See Also -------- transform, fit_transform Notes ----- - The default implementation calls fit() and then calls transform(). You may override this function with a more efficient implementation." Examples -------- .. sourcecode:: python >> transformed_sf = chain.fit_transform(sf) """ if not self._transformers: return self._preprocess(data) transformed_data = self._preprocess(data) final_step = self._transformers[-1] return final_step[1].fit_transform(transformed_data)
[ "def", "fit_transform", "(", "self", ",", "data", ")", ":", "if", "not", "self", ".", "_transformers", ":", "return", "self", ".", "_preprocess", "(", "data", ")", "transformed_data", "=", "self", ".", "_preprocess", "(", "data", ")", "final_step", "=", "self", ".", "_transformers", "[", "-", "1", "]", "return", "final_step", "[", "1", "]", ".", "fit_transform", "(", "transformed_data", ")" ]
First fit a transformer using the SFrame `data` and then return a transformed version of `data`. Parameters ---------- data : SFrame The data used to fit the transformer. The same data is then also transformed. Returns ------- Transformed SFrame. See Also -------- transform, fit_transform Notes ----- - The default implementation calls fit() and then calls transform(). You may override this function with a more efficient implementation." Examples -------- .. sourcecode:: python >> transformed_sf = chain.fit_transform(sf)
[ "First", "fit", "a", "transformer", "using", "the", "SFrame", "data", "and", "then", "return", "a", "transformed", "version", "of", "data", "." ]
74514c3f99e25b46f22c6e02977fe3da69221c2e
https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/unity/python/turicreate/toolkits/_feature_engineering/_transformer_chain.py#L235-L271
28,701
apple/turicreate
src/unity/python/turicreate/toolkits/_feature_engineering/_transformer_chain.py
TransformerChain._load_version
def _load_version(cls, unpickler, version): """ An function to load an object with a specific version of the class. Parameters ---------- pickler : file A GLUnpickler file handle. version : int A version number as maintained by the class writer. """ obj = unpickler.load() return TransformerChain(obj._state["steps"])
python
def _load_version(cls, unpickler, version): """ An function to load an object with a specific version of the class. Parameters ---------- pickler : file A GLUnpickler file handle. version : int A version number as maintained by the class writer. """ obj = unpickler.load() return TransformerChain(obj._state["steps"])
[ "def", "_load_version", "(", "cls", ",", "unpickler", ",", "version", ")", ":", "obj", "=", "unpickler", ".", "load", "(", ")", "return", "TransformerChain", "(", "obj", ".", "_state", "[", "\"steps\"", "]", ")" ]
An function to load an object with a specific version of the class. Parameters ---------- pickler : file A GLUnpickler file handle. version : int A version number as maintained by the class writer.
[ "An", "function", "to", "load", "an", "object", "with", "a", "specific", "version", "of", "the", "class", "." ]
74514c3f99e25b46f22c6e02977fe3da69221c2e
https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/unity/python/turicreate/toolkits/_feature_engineering/_transformer_chain.py#L348-L361
28,702
apple/turicreate
src/unity/python/turicreate/toolkits/graph_analytics/pagerank.py
create
def create(graph, reset_probability=0.15, threshold=1e-2, max_iterations=20, _single_precision=False, _distributed='auto', verbose=True): """ Compute the PageRank for each vertex in the graph. Return a model object with total PageRank as well as the PageRank value for each vertex in the graph. Parameters ---------- graph : SGraph The graph on which to compute the pagerank value. reset_probability : float, optional Probability that a random surfer jumps to an arbitrary page. threshold : float, optional Threshold for convergence, measured in the L1 norm (the sum of absolute value) of the delta of each vertex's pagerank value. max_iterations : int, optional The maximum number of iterations to run. _single_precision : bool, optional If true, running pagerank in single precision. The resulting pagerank values may not be accurate for large graph, but should run faster and use less memory. _distributed : distributed environment, internal verbose : bool, optional If True, print progress updates. Returns ------- out : PagerankModel References ---------- - `Wikipedia - PageRank <http://en.wikipedia.org/wiki/PageRank>`_ - Page, L., et al. (1998) `The PageRank Citation Ranking: Bringing Order to the Web <http://ilpubs.stanford.edu:8090/422/1/1999-66.pdf>`_. Examples -------- If given an :class:`~turicreate.SGraph` ``g``, we can create a :class:`~turicreate.pagerank.PageRankModel` as follows: >>> g = turicreate.load_sgraph('http://snap.stanford.edu/data/email-Enron.txt.gz', format='snap') >>> pr = turicreate.pagerank.create(g) We can obtain the page rank corresponding to each vertex in the graph ``g`` using: >>> pr_out = pr['pagerank'] # SFrame We can add the new pagerank field to the original graph g using: >>> g.vertices['pagerank'] = pr['graph'].vertices['pagerank'] Note that the task above does not require a join because the vertex ordering is preserved through ``create()``. 
See Also -------- PagerankModel """ from turicreate._cython.cy_server import QuietProgress if not isinstance(graph, _SGraph): raise TypeError('graph input must be a SGraph object.') opts = {'threshold': threshold, 'reset_probability': reset_probability, 'max_iterations': max_iterations, 'single_precision': _single_precision, 'graph': graph.__proxy__} with QuietProgress(verbose): params = _tc.extensions._toolkits.graph.pagerank.create(opts) model = params['model'] return PagerankModel(model)
python
def create(graph, reset_probability=0.15, threshold=1e-2, max_iterations=20, _single_precision=False, _distributed='auto', verbose=True): """ Compute the PageRank for each vertex in the graph. Return a model object with total PageRank as well as the PageRank value for each vertex in the graph. Parameters ---------- graph : SGraph The graph on which to compute the pagerank value. reset_probability : float, optional Probability that a random surfer jumps to an arbitrary page. threshold : float, optional Threshold for convergence, measured in the L1 norm (the sum of absolute value) of the delta of each vertex's pagerank value. max_iterations : int, optional The maximum number of iterations to run. _single_precision : bool, optional If true, running pagerank in single precision. The resulting pagerank values may not be accurate for large graph, but should run faster and use less memory. _distributed : distributed environment, internal verbose : bool, optional If True, print progress updates. Returns ------- out : PagerankModel References ---------- - `Wikipedia - PageRank <http://en.wikipedia.org/wiki/PageRank>`_ - Page, L., et al. (1998) `The PageRank Citation Ranking: Bringing Order to the Web <http://ilpubs.stanford.edu:8090/422/1/1999-66.pdf>`_. Examples -------- If given an :class:`~turicreate.SGraph` ``g``, we can create a :class:`~turicreate.pagerank.PageRankModel` as follows: >>> g = turicreate.load_sgraph('http://snap.stanford.edu/data/email-Enron.txt.gz', format='snap') >>> pr = turicreate.pagerank.create(g) We can obtain the page rank corresponding to each vertex in the graph ``g`` using: >>> pr_out = pr['pagerank'] # SFrame We can add the new pagerank field to the original graph g using: >>> g.vertices['pagerank'] = pr['graph'].vertices['pagerank'] Note that the task above does not require a join because the vertex ordering is preserved through ``create()``. 
See Also -------- PagerankModel """ from turicreate._cython.cy_server import QuietProgress if not isinstance(graph, _SGraph): raise TypeError('graph input must be a SGraph object.') opts = {'threshold': threshold, 'reset_probability': reset_probability, 'max_iterations': max_iterations, 'single_precision': _single_precision, 'graph': graph.__proxy__} with QuietProgress(verbose): params = _tc.extensions._toolkits.graph.pagerank.create(opts) model = params['model'] return PagerankModel(model)
[ "def", "create", "(", "graph", ",", "reset_probability", "=", "0.15", ",", "threshold", "=", "1e-2", ",", "max_iterations", "=", "20", ",", "_single_precision", "=", "False", ",", "_distributed", "=", "'auto'", ",", "verbose", "=", "True", ")", ":", "from", "turicreate", ".", "_cython", ".", "cy_server", "import", "QuietProgress", "if", "not", "isinstance", "(", "graph", ",", "_SGraph", ")", ":", "raise", "TypeError", "(", "'graph input must be a SGraph object.'", ")", "opts", "=", "{", "'threshold'", ":", "threshold", ",", "'reset_probability'", ":", "reset_probability", ",", "'max_iterations'", ":", "max_iterations", ",", "'single_precision'", ":", "_single_precision", ",", "'graph'", ":", "graph", ".", "__proxy__", "}", "with", "QuietProgress", "(", "verbose", ")", ":", "params", "=", "_tc", ".", "extensions", ".", "_toolkits", ".", "graph", ".", "pagerank", ".", "create", "(", "opts", ")", "model", "=", "params", "[", "'model'", "]", "return", "PagerankModel", "(", "model", ")" ]
Compute the PageRank for each vertex in the graph. Return a model object with total PageRank as well as the PageRank value for each vertex in the graph. Parameters ---------- graph : SGraph The graph on which to compute the pagerank value. reset_probability : float, optional Probability that a random surfer jumps to an arbitrary page. threshold : float, optional Threshold for convergence, measured in the L1 norm (the sum of absolute value) of the delta of each vertex's pagerank value. max_iterations : int, optional The maximum number of iterations to run. _single_precision : bool, optional If true, running pagerank in single precision. The resulting pagerank values may not be accurate for large graph, but should run faster and use less memory. _distributed : distributed environment, internal verbose : bool, optional If True, print progress updates. Returns ------- out : PagerankModel References ---------- - `Wikipedia - PageRank <http://en.wikipedia.org/wiki/PageRank>`_ - Page, L., et al. (1998) `The PageRank Citation Ranking: Bringing Order to the Web <http://ilpubs.stanford.edu:8090/422/1/1999-66.pdf>`_. Examples -------- If given an :class:`~turicreate.SGraph` ``g``, we can create a :class:`~turicreate.pagerank.PageRankModel` as follows: >>> g = turicreate.load_sgraph('http://snap.stanford.edu/data/email-Enron.txt.gz', format='snap') >>> pr = turicreate.pagerank.create(g) We can obtain the page rank corresponding to each vertex in the graph ``g`` using: >>> pr_out = pr['pagerank'] # SFrame We can add the new pagerank field to the original graph g using: >>> g.vertices['pagerank'] = pr['graph'].vertices['pagerank'] Note that the task above does not require a join because the vertex ordering is preserved through ``create()``. See Also -------- PagerankModel
[ "Compute", "the", "PageRank", "for", "each", "vertex", "in", "the", "graph", ".", "Return", "a", "model", "object", "with", "total", "PageRank", "as", "well", "as", "the", "PageRank", "value", "for", "each", "vertex", "in", "the", "graph", "." ]
74514c3f99e25b46f22c6e02977fe3da69221c2e
https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/unity/python/turicreate/toolkits/graph_analytics/pagerank.py#L105-L191
28,703
apple/turicreate
deps/src/boost_1_68_0/tools/build/src/tools/gcc.py
init_link_flags
def init_link_flags(toolset, linker, condition): """ Now, the vendor specific flags. The parameter linker can be either gnu, darwin, osf, hpux or sun. """ toolset_link = toolset + '.link' if linker == 'gnu': # Strip the binary when no debugging is needed. We use --strip-all flag # as opposed to -s since icc (intel's compiler) is generally # option-compatible with and inherits from the gcc toolset, but does not # support -s. # FIXME: what does unchecked translate to? flags(toolset_link, 'OPTIONS', map(lambda x: x + '/<debug-symbols>off', condition), ['-Wl,--strip-all']) # : unchecked ; flags(toolset_link, 'RPATH', condition, ['<dll-path>']) # : unchecked ; flags(toolset_link, 'RPATH_LINK', condition, ['<xdll-path>']) # : unchecked ; flags(toolset_link, 'START-GROUP', condition, ['-Wl,--start-group'])# : unchecked ; flags(toolset_link, 'END-GROUP', condition, ['-Wl,--end-group']) # : unchecked ; # gnu ld has the ability to change the search behaviour for libraries # referenced by -l switch. These modifiers are -Bstatic and -Bdynamic # and change search for -l switches that follow them. The following list # shows the tried variants. # The search stops at the first variant that has a match. # *nix: -Bstatic -lxxx # libxxx.a # # *nix: -Bdynamic -lxxx # libxxx.so # libxxx.a # # windows (mingw,cygwin) -Bstatic -lxxx # libxxx.a # xxx.lib # # windows (mingw,cygwin) -Bdynamic -lxxx # libxxx.dll.a # xxx.dll.a # libxxx.a # xxx.lib # cygxxx.dll (*) # libxxx.dll # xxx.dll # libxxx.a # # (*) This is for cygwin # Please note that -Bstatic and -Bdynamic are not a guarantee that a # static or dynamic lib indeed gets linked in. The switches only change # search patterns! # On *nix mixing shared libs with static runtime is not a good idea. 
flags(toolset_link, 'FINDLIBS-ST-PFX', map(lambda x: x + '/<runtime-link>shared', condition), ['-Wl,-Bstatic']) # : unchecked ; flags(toolset_link, 'FINDLIBS-SA-PFX', map(lambda x: x + '/<runtime-link>shared', condition), ['-Wl,-Bdynamic']) # : unchecked ; # On windows allow mixing of static and dynamic libs with static # runtime. flags(toolset_link, 'FINDLIBS-ST-PFX', map(lambda x: x + '/<runtime-link>static/<target-os>windows', condition), ['-Wl,-Bstatic']) # : unchecked ; flags(toolset_link, 'FINDLIBS-SA-PFX', map(lambda x: x + '/<runtime-link>static/<target-os>windows', condition), ['-Wl,-Bdynamic']) # : unchecked ; flags(toolset_link, 'OPTIONS', map(lambda x: x + '/<runtime-link>static/<target-os>windows', condition), ['-Wl,-Bstatic']) # : unchecked ; elif linker == 'darwin': # On Darwin, the -s option to ld does not work unless we pass -static, # and passing -static unconditionally is a bad idea. So, don't pass -s. # at all, darwin.jam will use separate 'strip' invocation. flags(toolset_link, 'RPATH', condition, ['<dll-path>']) # : unchecked ; flags(toolset_link, 'RPATH_LINK', condition, ['<xdll-path>']) # : unchecked ; elif linker == 'osf': # No --strip-all, just -s. flags(toolset_link, 'OPTIONS', map(lambda x: x + '/<debug-symbols>off', condition), ['-Wl,-s']) # : unchecked ; flags(toolset_link, 'RPATH', condition, ['<dll-path>']) # : unchecked ; # This does not supports -R. flags(toolset_link, 'RPATH_OPTION', condition, ['-rpath']) # : unchecked ; # -rpath-link is not supported at all. elif linker == 'sun': flags(toolset_link, 'OPTIONS', map(lambda x: x + '/<debug-symbols>off', condition), ['-Wl,-s']) # : unchecked ; flags(toolset_link, 'RPATH', condition, ['<dll-path>']) # : unchecked ; # Solaris linker does not have a separate -rpath-link, but allows to use # -L for the same purpose. flags(toolset_link, 'LINKPATH', condition, ['<xdll-path>']) # : unchecked ; # This permits shared libraries with non-PIC code on Solaris. 
# VP, 2004/09/07: Now that we have -fPIC hardcode in link.dll, the # following is not needed. Whether -fPIC should be hardcoded, is a # separate question. # AH, 2004/10/16: it is still necessary because some tests link against # static libraries that were compiled without PIC. flags(toolset_link, 'OPTIONS', map(lambda x: x + '/<link>shared', condition), ['-mimpure-text']) # : unchecked ; elif linker == 'hpux': flags(toolset_link, 'OPTIONS', map(lambda x: x + '/<debug-symbols>off', condition), ['-Wl,-s']) # : unchecked ; flags(toolset_link, 'OPTIONS', map(lambda x: x + '/<link>shared', condition), ['-fPIC']) # : unchecked ; else: # FIXME: errors.user_error( "$(toolset) initialization: invalid linker '$(linker)' " + "The value '$(linker)' specified for <linker> is not recognized. " + "Possible values are 'gnu', 'darwin', 'osf', 'hpux' or 'sun'")
python
def init_link_flags(toolset, linker, condition): """ Now, the vendor specific flags. The parameter linker can be either gnu, darwin, osf, hpux or sun. """ toolset_link = toolset + '.link' if linker == 'gnu': # Strip the binary when no debugging is needed. We use --strip-all flag # as opposed to -s since icc (intel's compiler) is generally # option-compatible with and inherits from the gcc toolset, but does not # support -s. # FIXME: what does unchecked translate to? flags(toolset_link, 'OPTIONS', map(lambda x: x + '/<debug-symbols>off', condition), ['-Wl,--strip-all']) # : unchecked ; flags(toolset_link, 'RPATH', condition, ['<dll-path>']) # : unchecked ; flags(toolset_link, 'RPATH_LINK', condition, ['<xdll-path>']) # : unchecked ; flags(toolset_link, 'START-GROUP', condition, ['-Wl,--start-group'])# : unchecked ; flags(toolset_link, 'END-GROUP', condition, ['-Wl,--end-group']) # : unchecked ; # gnu ld has the ability to change the search behaviour for libraries # referenced by -l switch. These modifiers are -Bstatic and -Bdynamic # and change search for -l switches that follow them. The following list # shows the tried variants. # The search stops at the first variant that has a match. # *nix: -Bstatic -lxxx # libxxx.a # # *nix: -Bdynamic -lxxx # libxxx.so # libxxx.a # # windows (mingw,cygwin) -Bstatic -lxxx # libxxx.a # xxx.lib # # windows (mingw,cygwin) -Bdynamic -lxxx # libxxx.dll.a # xxx.dll.a # libxxx.a # xxx.lib # cygxxx.dll (*) # libxxx.dll # xxx.dll # libxxx.a # # (*) This is for cygwin # Please note that -Bstatic and -Bdynamic are not a guarantee that a # static or dynamic lib indeed gets linked in. The switches only change # search patterns! # On *nix mixing shared libs with static runtime is not a good idea. 
flags(toolset_link, 'FINDLIBS-ST-PFX', map(lambda x: x + '/<runtime-link>shared', condition), ['-Wl,-Bstatic']) # : unchecked ; flags(toolset_link, 'FINDLIBS-SA-PFX', map(lambda x: x + '/<runtime-link>shared', condition), ['-Wl,-Bdynamic']) # : unchecked ; # On windows allow mixing of static and dynamic libs with static # runtime. flags(toolset_link, 'FINDLIBS-ST-PFX', map(lambda x: x + '/<runtime-link>static/<target-os>windows', condition), ['-Wl,-Bstatic']) # : unchecked ; flags(toolset_link, 'FINDLIBS-SA-PFX', map(lambda x: x + '/<runtime-link>static/<target-os>windows', condition), ['-Wl,-Bdynamic']) # : unchecked ; flags(toolset_link, 'OPTIONS', map(lambda x: x + '/<runtime-link>static/<target-os>windows', condition), ['-Wl,-Bstatic']) # : unchecked ; elif linker == 'darwin': # On Darwin, the -s option to ld does not work unless we pass -static, # and passing -static unconditionally is a bad idea. So, don't pass -s. # at all, darwin.jam will use separate 'strip' invocation. flags(toolset_link, 'RPATH', condition, ['<dll-path>']) # : unchecked ; flags(toolset_link, 'RPATH_LINK', condition, ['<xdll-path>']) # : unchecked ; elif linker == 'osf': # No --strip-all, just -s. flags(toolset_link, 'OPTIONS', map(lambda x: x + '/<debug-symbols>off', condition), ['-Wl,-s']) # : unchecked ; flags(toolset_link, 'RPATH', condition, ['<dll-path>']) # : unchecked ; # This does not supports -R. flags(toolset_link, 'RPATH_OPTION', condition, ['-rpath']) # : unchecked ; # -rpath-link is not supported at all. elif linker == 'sun': flags(toolset_link, 'OPTIONS', map(lambda x: x + '/<debug-symbols>off', condition), ['-Wl,-s']) # : unchecked ; flags(toolset_link, 'RPATH', condition, ['<dll-path>']) # : unchecked ; # Solaris linker does not have a separate -rpath-link, but allows to use # -L for the same purpose. flags(toolset_link, 'LINKPATH', condition, ['<xdll-path>']) # : unchecked ; # This permits shared libraries with non-PIC code on Solaris. 
# VP, 2004/09/07: Now that we have -fPIC hardcode in link.dll, the # following is not needed. Whether -fPIC should be hardcoded, is a # separate question. # AH, 2004/10/16: it is still necessary because some tests link against # static libraries that were compiled without PIC. flags(toolset_link, 'OPTIONS', map(lambda x: x + '/<link>shared', condition), ['-mimpure-text']) # : unchecked ; elif linker == 'hpux': flags(toolset_link, 'OPTIONS', map(lambda x: x + '/<debug-symbols>off', condition), ['-Wl,-s']) # : unchecked ; flags(toolset_link, 'OPTIONS', map(lambda x: x + '/<link>shared', condition), ['-fPIC']) # : unchecked ; else: # FIXME: errors.user_error( "$(toolset) initialization: invalid linker '$(linker)' " + "The value '$(linker)' specified for <linker> is not recognized. " + "Possible values are 'gnu', 'darwin', 'osf', 'hpux' or 'sun'")
[ "def", "init_link_flags", "(", "toolset", ",", "linker", ",", "condition", ")", ":", "toolset_link", "=", "toolset", "+", "'.link'", "if", "linker", "==", "'gnu'", ":", "# Strip the binary when no debugging is needed. We use --strip-all flag", "# as opposed to -s since icc (intel's compiler) is generally", "# option-compatible with and inherits from the gcc toolset, but does not", "# support -s.", "# FIXME: what does unchecked translate to?", "flags", "(", "toolset_link", ",", "'OPTIONS'", ",", "map", "(", "lambda", "x", ":", "x", "+", "'/<debug-symbols>off'", ",", "condition", ")", ",", "[", "'-Wl,--strip-all'", "]", ")", "# : unchecked ;", "flags", "(", "toolset_link", ",", "'RPATH'", ",", "condition", ",", "[", "'<dll-path>'", "]", ")", "# : unchecked ;", "flags", "(", "toolset_link", ",", "'RPATH_LINK'", ",", "condition", ",", "[", "'<xdll-path>'", "]", ")", "# : unchecked ;", "flags", "(", "toolset_link", ",", "'START-GROUP'", ",", "condition", ",", "[", "'-Wl,--start-group'", "]", ")", "# : unchecked ;", "flags", "(", "toolset_link", ",", "'END-GROUP'", ",", "condition", ",", "[", "'-Wl,--end-group'", "]", ")", "# : unchecked ;", "# gnu ld has the ability to change the search behaviour for libraries", "# referenced by -l switch. These modifiers are -Bstatic and -Bdynamic", "# and change search for -l switches that follow them. The following list", "# shows the tried variants.", "# The search stops at the first variant that has a match.", "# *nix: -Bstatic -lxxx", "# libxxx.a", "#", "# *nix: -Bdynamic -lxxx", "# libxxx.so", "# libxxx.a", "#", "# windows (mingw,cygwin) -Bstatic -lxxx", "# libxxx.a", "# xxx.lib", "#", "# windows (mingw,cygwin) -Bdynamic -lxxx", "# libxxx.dll.a", "# xxx.dll.a", "# libxxx.a", "# xxx.lib", "# cygxxx.dll (*)", "# libxxx.dll", "# xxx.dll", "# libxxx.a", "#", "# (*) This is for cygwin", "# Please note that -Bstatic and -Bdynamic are not a guarantee that a", "# static or dynamic lib indeed gets linked in. 
The switches only change", "# search patterns!", "# On *nix mixing shared libs with static runtime is not a good idea.", "flags", "(", "toolset_link", ",", "'FINDLIBS-ST-PFX'", ",", "map", "(", "lambda", "x", ":", "x", "+", "'/<runtime-link>shared'", ",", "condition", ")", ",", "[", "'-Wl,-Bstatic'", "]", ")", "# : unchecked ;", "flags", "(", "toolset_link", ",", "'FINDLIBS-SA-PFX'", ",", "map", "(", "lambda", "x", ":", "x", "+", "'/<runtime-link>shared'", ",", "condition", ")", ",", "[", "'-Wl,-Bdynamic'", "]", ")", "# : unchecked ;", "# On windows allow mixing of static and dynamic libs with static", "# runtime.", "flags", "(", "toolset_link", ",", "'FINDLIBS-ST-PFX'", ",", "map", "(", "lambda", "x", ":", "x", "+", "'/<runtime-link>static/<target-os>windows'", ",", "condition", ")", ",", "[", "'-Wl,-Bstatic'", "]", ")", "# : unchecked ;", "flags", "(", "toolset_link", ",", "'FINDLIBS-SA-PFX'", ",", "map", "(", "lambda", "x", ":", "x", "+", "'/<runtime-link>static/<target-os>windows'", ",", "condition", ")", ",", "[", "'-Wl,-Bdynamic'", "]", ")", "# : unchecked ;", "flags", "(", "toolset_link", ",", "'OPTIONS'", ",", "map", "(", "lambda", "x", ":", "x", "+", "'/<runtime-link>static/<target-os>windows'", ",", "condition", ")", ",", "[", "'-Wl,-Bstatic'", "]", ")", "# : unchecked ;", "elif", "linker", "==", "'darwin'", ":", "# On Darwin, the -s option to ld does not work unless we pass -static,", "# and passing -static unconditionally is a bad idea. 
So, don't pass -s.", "# at all, darwin.jam will use separate 'strip' invocation.", "flags", "(", "toolset_link", ",", "'RPATH'", ",", "condition", ",", "[", "'<dll-path>'", "]", ")", "# : unchecked ;", "flags", "(", "toolset_link", ",", "'RPATH_LINK'", ",", "condition", ",", "[", "'<xdll-path>'", "]", ")", "# : unchecked ;", "elif", "linker", "==", "'osf'", ":", "# No --strip-all, just -s.", "flags", "(", "toolset_link", ",", "'OPTIONS'", ",", "map", "(", "lambda", "x", ":", "x", "+", "'/<debug-symbols>off'", ",", "condition", ")", ",", "[", "'-Wl,-s'", "]", ")", "# : unchecked ;", "flags", "(", "toolset_link", ",", "'RPATH'", ",", "condition", ",", "[", "'<dll-path>'", "]", ")", "# : unchecked ;", "# This does not supports -R.", "flags", "(", "toolset_link", ",", "'RPATH_OPTION'", ",", "condition", ",", "[", "'-rpath'", "]", ")", "# : unchecked ;", "# -rpath-link is not supported at all.", "elif", "linker", "==", "'sun'", ":", "flags", "(", "toolset_link", ",", "'OPTIONS'", ",", "map", "(", "lambda", "x", ":", "x", "+", "'/<debug-symbols>off'", ",", "condition", ")", ",", "[", "'-Wl,-s'", "]", ")", "# : unchecked ;", "flags", "(", "toolset_link", ",", "'RPATH'", ",", "condition", ",", "[", "'<dll-path>'", "]", ")", "# : unchecked ;", "# Solaris linker does not have a separate -rpath-link, but allows to use", "# -L for the same purpose.", "flags", "(", "toolset_link", ",", "'LINKPATH'", ",", "condition", ",", "[", "'<xdll-path>'", "]", ")", "# : unchecked ;", "# This permits shared libraries with non-PIC code on Solaris.", "# VP, 2004/09/07: Now that we have -fPIC hardcode in link.dll, the", "# following is not needed. 
Whether -fPIC should be hardcoded, is a", "# separate question.", "# AH, 2004/10/16: it is still necessary because some tests link against", "# static libraries that were compiled without PIC.", "flags", "(", "toolset_link", ",", "'OPTIONS'", ",", "map", "(", "lambda", "x", ":", "x", "+", "'/<link>shared'", ",", "condition", ")", ",", "[", "'-mimpure-text'", "]", ")", "# : unchecked ;", "elif", "linker", "==", "'hpux'", ":", "flags", "(", "toolset_link", ",", "'OPTIONS'", ",", "map", "(", "lambda", "x", ":", "x", "+", "'/<debug-symbols>off'", ",", "condition", ")", ",", "[", "'-Wl,-s'", "]", ")", "# : unchecked ;", "flags", "(", "toolset_link", ",", "'OPTIONS'", ",", "map", "(", "lambda", "x", ":", "x", "+", "'/<link>shared'", ",", "condition", ")", ",", "[", "'-fPIC'", "]", ")", "# : unchecked ;", "else", ":", "# FIXME:", "errors", ".", "user_error", "(", "\"$(toolset) initialization: invalid linker '$(linker)' \"", "+", "\"The value '$(linker)' specified for <linker> is not recognized. \"", "+", "\"Possible values are 'gnu', 'darwin', 'osf', 'hpux' or 'sun'\"", ")" ]
Now, the vendor specific flags. The parameter linker can be either gnu, darwin, osf, hpux or sun.
[ "Now", "the", "vendor", "specific", "flags", ".", "The", "parameter", "linker", "can", "be", "either", "gnu", "darwin", "osf", "hpux", "or", "sun", "." ]
74514c3f99e25b46f22c6e02977fe3da69221c2e
https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/deps/src/boost_1_68_0/tools/build/src/tools/gcc.py#L494-L608
28,704
apple/turicreate
deps/src/boost_1_68_0/tools/build/src/build/engine.py
Engine.add_dependency
def add_dependency (self, targets, sources): """Adds a dependency from 'targets' to 'sources' Both 'targets' and 'sources' can be either list of target names, or a single target name. """ if isinstance (targets, str): targets = [targets] if isinstance (sources, str): sources = [sources] assert is_iterable(targets) assert is_iterable(sources) for target in targets: for source in sources: self.do_add_dependency (target, source)
python
def add_dependency (self, targets, sources): """Adds a dependency from 'targets' to 'sources' Both 'targets' and 'sources' can be either list of target names, or a single target name. """ if isinstance (targets, str): targets = [targets] if isinstance (sources, str): sources = [sources] assert is_iterable(targets) assert is_iterable(sources) for target in targets: for source in sources: self.do_add_dependency (target, source)
[ "def", "add_dependency", "(", "self", ",", "targets", ",", "sources", ")", ":", "if", "isinstance", "(", "targets", ",", "str", ")", ":", "targets", "=", "[", "targets", "]", "if", "isinstance", "(", "sources", ",", "str", ")", ":", "sources", "=", "[", "sources", "]", "assert", "is_iterable", "(", "targets", ")", "assert", "is_iterable", "(", "sources", ")", "for", "target", "in", "targets", ":", "for", "source", "in", "sources", ":", "self", ".", "do_add_dependency", "(", "target", ",", "source", ")" ]
Adds a dependency from 'targets' to 'sources' Both 'targets' and 'sources' can be either list of target names, or a single target name.
[ "Adds", "a", "dependency", "from", "targets", "to", "sources" ]
74514c3f99e25b46f22c6e02977fe3da69221c2e
https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/deps/src/boost_1_68_0/tools/build/src/build/engine.py#L76-L91
28,705
apple/turicreate
deps/src/boost_1_68_0/tools/build/src/build/engine.py
Engine.get_target_variable
def get_target_variable(self, targets, variable): """Gets the value of `variable` on set on the first target in `targets`. Args: targets (str or list): one or more targets to get the variable from. variable (str): the name of the variable Returns: the value of `variable` set on `targets` (list) Example: >>> ENGINE = get_manager().engine() >>> ENGINE.set_target_variable(targets, 'MY-VAR', 'Hello World') >>> ENGINE.get_target_variable(targets, 'MY-VAR') ['Hello World'] Equivalent Jam code: MY-VAR on $(targets) = "Hello World" ; echo [ on $(targets) return $(MY-VAR) ] ; "Hello World" """ if isinstance(targets, str): targets = [targets] assert is_iterable(targets) assert isinstance(variable, basestring) return bjam_interface.call('get-target-variable', targets, variable)
python
def get_target_variable(self, targets, variable): """Gets the value of `variable` on set on the first target in `targets`. Args: targets (str or list): one or more targets to get the variable from. variable (str): the name of the variable Returns: the value of `variable` set on `targets` (list) Example: >>> ENGINE = get_manager().engine() >>> ENGINE.set_target_variable(targets, 'MY-VAR', 'Hello World') >>> ENGINE.get_target_variable(targets, 'MY-VAR') ['Hello World'] Equivalent Jam code: MY-VAR on $(targets) = "Hello World" ; echo [ on $(targets) return $(MY-VAR) ] ; "Hello World" """ if isinstance(targets, str): targets = [targets] assert is_iterable(targets) assert isinstance(variable, basestring) return bjam_interface.call('get-target-variable', targets, variable)
[ "def", "get_target_variable", "(", "self", ",", "targets", ",", "variable", ")", ":", "if", "isinstance", "(", "targets", ",", "str", ")", ":", "targets", "=", "[", "targets", "]", "assert", "is_iterable", "(", "targets", ")", "assert", "isinstance", "(", "variable", ",", "basestring", ")", "return", "bjam_interface", ".", "call", "(", "'get-target-variable'", ",", "targets", ",", "variable", ")" ]
Gets the value of `variable` on set on the first target in `targets`. Args: targets (str or list): one or more targets to get the variable from. variable (str): the name of the variable Returns: the value of `variable` set on `targets` (list) Example: >>> ENGINE = get_manager().engine() >>> ENGINE.set_target_variable(targets, 'MY-VAR', 'Hello World') >>> ENGINE.get_target_variable(targets, 'MY-VAR') ['Hello World'] Equivalent Jam code: MY-VAR on $(targets) = "Hello World" ; echo [ on $(targets) return $(MY-VAR) ] ; "Hello World"
[ "Gets", "the", "value", "of", "variable", "on", "set", "on", "the", "first", "target", "in", "targets", "." ]
74514c3f99e25b46f22c6e02977fe3da69221c2e
https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/deps/src/boost_1_68_0/tools/build/src/build/engine.py#L93-L121
28,706
apple/turicreate
deps/src/boost_1_68_0/tools/build/src/build/engine.py
Engine.set_target_variable
def set_target_variable (self, targets, variable, value, append=0): """ Sets a target variable. The 'variable' will be available to bjam when it decides where to generate targets, and will also be available to updating rule for that 'taret'. """ if isinstance (targets, str): targets = [targets] if isinstance(value, str): value = [value] assert is_iterable(targets) assert isinstance(variable, basestring) assert is_iterable(value) if targets: if append: bjam_interface.call("set-target-variable", targets, variable, value, "true") else: bjam_interface.call("set-target-variable", targets, variable, value)
python
def set_target_variable (self, targets, variable, value, append=0): """ Sets a target variable. The 'variable' will be available to bjam when it decides where to generate targets, and will also be available to updating rule for that 'taret'. """ if isinstance (targets, str): targets = [targets] if isinstance(value, str): value = [value] assert is_iterable(targets) assert isinstance(variable, basestring) assert is_iterable(value) if targets: if append: bjam_interface.call("set-target-variable", targets, variable, value, "true") else: bjam_interface.call("set-target-variable", targets, variable, value)
[ "def", "set_target_variable", "(", "self", ",", "targets", ",", "variable", ",", "value", ",", "append", "=", "0", ")", ":", "if", "isinstance", "(", "targets", ",", "str", ")", ":", "targets", "=", "[", "targets", "]", "if", "isinstance", "(", "value", ",", "str", ")", ":", "value", "=", "[", "value", "]", "assert", "is_iterable", "(", "targets", ")", "assert", "isinstance", "(", "variable", ",", "basestring", ")", "assert", "is_iterable", "(", "value", ")", "if", "targets", ":", "if", "append", ":", "bjam_interface", ".", "call", "(", "\"set-target-variable\"", ",", "targets", ",", "variable", ",", "value", ",", "\"true\"", ")", "else", ":", "bjam_interface", ".", "call", "(", "\"set-target-variable\"", ",", "targets", ",", "variable", ",", "value", ")" ]
Sets a target variable. The 'variable' will be available to bjam when it decides where to generate targets, and will also be available to updating rule for that 'taret'.
[ "Sets", "a", "target", "variable", "." ]
74514c3f99e25b46f22c6e02977fe3da69221c2e
https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/deps/src/boost_1_68_0/tools/build/src/build/engine.py#L123-L143
28,707
apple/turicreate
deps/src/boost_1_68_0/tools/build/src/build/engine.py
Engine.set_update_action
def set_update_action (self, action_name, targets, sources, properties=None): """ Binds a target to the corresponding update action. If target needs to be updated, the action registered with action_name will be used. The 'action_name' must be previously registered by either 'register_action' or 'register_bjam_action' method. """ if isinstance(targets, str): targets = [targets] if isinstance(sources, str): sources = [sources] if properties is None: properties = property_set.empty() assert isinstance(action_name, basestring) assert is_iterable(targets) assert is_iterable(sources) assert(isinstance(properties, property_set.PropertySet)) self.do_set_update_action (action_name, targets, sources, properties)
python
def set_update_action (self, action_name, targets, sources, properties=None): """ Binds a target to the corresponding update action. If target needs to be updated, the action registered with action_name will be used. The 'action_name' must be previously registered by either 'register_action' or 'register_bjam_action' method. """ if isinstance(targets, str): targets = [targets] if isinstance(sources, str): sources = [sources] if properties is None: properties = property_set.empty() assert isinstance(action_name, basestring) assert is_iterable(targets) assert is_iterable(sources) assert(isinstance(properties, property_set.PropertySet)) self.do_set_update_action (action_name, targets, sources, properties)
[ "def", "set_update_action", "(", "self", ",", "action_name", ",", "targets", ",", "sources", ",", "properties", "=", "None", ")", ":", "if", "isinstance", "(", "targets", ",", "str", ")", ":", "targets", "=", "[", "targets", "]", "if", "isinstance", "(", "sources", ",", "str", ")", ":", "sources", "=", "[", "sources", "]", "if", "properties", "is", "None", ":", "properties", "=", "property_set", ".", "empty", "(", ")", "assert", "isinstance", "(", "action_name", ",", "basestring", ")", "assert", "is_iterable", "(", "targets", ")", "assert", "is_iterable", "(", "sources", ")", "assert", "(", "isinstance", "(", "properties", ",", "property_set", ".", "PropertySet", ")", ")", "self", ".", "do_set_update_action", "(", "action_name", ",", "targets", ",", "sources", ",", "properties", ")" ]
Binds a target to the corresponding update action. If target needs to be updated, the action registered with action_name will be used. The 'action_name' must be previously registered by either 'register_action' or 'register_bjam_action' method.
[ "Binds", "a", "target", "to", "the", "corresponding", "update", "action", ".", "If", "target", "needs", "to", "be", "updated", "the", "action", "registered", "with", "action_name", "will", "be", "used", ".", "The", "action_name", "must", "be", "previously", "registered", "by", "either", "register_action", "or", "register_bjam_action", "method", "." ]
74514c3f99e25b46f22c6e02977fe3da69221c2e
https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/deps/src/boost_1_68_0/tools/build/src/build/engine.py#L145-L164
28,708
apple/turicreate
deps/src/boost_1_68_0/tools/build/src/build/engine.py
Engine.register_action
def register_action (self, action_name, command='', bound_list = [], flags = [], function = None): """Creates a new build engine action. Creates on bjam side an action named 'action_name', with 'command' as the command to be executed, 'bound_variables' naming the list of variables bound when the command is executed and specified flag. If 'function' is not None, it should be a callable taking three parameters: - targets - sources - instance of the property_set class This function will be called by set_update_action, and can set additional target variables. """ assert isinstance(action_name, basestring) assert isinstance(command, basestring) assert is_iterable(bound_list) assert is_iterable(flags) assert function is None or callable(function) bjam_flags = reduce(operator.or_, (action_modifiers[flag] for flag in flags), 0) # We allow command to be empty so that we can define 'action' as pure # python function that would do some conditional logic and then relay # to other actions. assert command or function if command: bjam_interface.define_action(action_name, command, bound_list, bjam_flags) self.actions[action_name] = BjamAction( action_name, function, has_command=bool(command))
python
def register_action (self, action_name, command='', bound_list = [], flags = [], function = None): """Creates a new build engine action. Creates on bjam side an action named 'action_name', with 'command' as the command to be executed, 'bound_variables' naming the list of variables bound when the command is executed and specified flag. If 'function' is not None, it should be a callable taking three parameters: - targets - sources - instance of the property_set class This function will be called by set_update_action, and can set additional target variables. """ assert isinstance(action_name, basestring) assert isinstance(command, basestring) assert is_iterable(bound_list) assert is_iterable(flags) assert function is None or callable(function) bjam_flags = reduce(operator.or_, (action_modifiers[flag] for flag in flags), 0) # We allow command to be empty so that we can define 'action' as pure # python function that would do some conditional logic and then relay # to other actions. assert command or function if command: bjam_interface.define_action(action_name, command, bound_list, bjam_flags) self.actions[action_name] = BjamAction( action_name, function, has_command=bool(command))
[ "def", "register_action", "(", "self", ",", "action_name", ",", "command", "=", "''", ",", "bound_list", "=", "[", "]", ",", "flags", "=", "[", "]", ",", "function", "=", "None", ")", ":", "assert", "isinstance", "(", "action_name", ",", "basestring", ")", "assert", "isinstance", "(", "command", ",", "basestring", ")", "assert", "is_iterable", "(", "bound_list", ")", "assert", "is_iterable", "(", "flags", ")", "assert", "function", "is", "None", "or", "callable", "(", "function", ")", "bjam_flags", "=", "reduce", "(", "operator", ".", "or_", ",", "(", "action_modifiers", "[", "flag", "]", "for", "flag", "in", "flags", ")", ",", "0", ")", "# We allow command to be empty so that we can define 'action' as pure", "# python function that would do some conditional logic and then relay", "# to other actions.", "assert", "command", "or", "function", "if", "command", ":", "bjam_interface", ".", "define_action", "(", "action_name", ",", "command", ",", "bound_list", ",", "bjam_flags", ")", "self", ".", "actions", "[", "action_name", "]", "=", "BjamAction", "(", "action_name", ",", "function", ",", "has_command", "=", "bool", "(", "command", ")", ")" ]
Creates a new build engine action. Creates on bjam side an action named 'action_name', with 'command' as the command to be executed, 'bound_variables' naming the list of variables bound when the command is executed and specified flag. If 'function' is not None, it should be a callable taking three parameters: - targets - sources - instance of the property_set class This function will be called by set_update_action, and can set additional target variables.
[ "Creates", "a", "new", "build", "engine", "action", "." ]
74514c3f99e25b46f22c6e02977fe3da69221c2e
https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/deps/src/boost_1_68_0/tools/build/src/build/engine.py#L166-L199
28,709
apple/turicreate
deps/src/boost_1_68_0/tools/build/src/build/engine.py
Engine.register_bjam_action
def register_bjam_action (self, action_name, function=None): """Informs self that 'action_name' is declared in bjam. From this point, 'action_name' is a valid argument to the set_update_action method. The action_name should be callable in the global module of bjam. """ # We allow duplicate calls to this rule for the same # action name. This way, jamfile rules that take action names # can just register them without specially checking if # action is already registered. assert isinstance(action_name, basestring) assert function is None or callable(function) if action_name not in self.actions: self.actions[action_name] = BjamNativeAction(action_name, function)
python
def register_bjam_action (self, action_name, function=None): """Informs self that 'action_name' is declared in bjam. From this point, 'action_name' is a valid argument to the set_update_action method. The action_name should be callable in the global module of bjam. """ # We allow duplicate calls to this rule for the same # action name. This way, jamfile rules that take action names # can just register them without specially checking if # action is already registered. assert isinstance(action_name, basestring) assert function is None or callable(function) if action_name not in self.actions: self.actions[action_name] = BjamNativeAction(action_name, function)
[ "def", "register_bjam_action", "(", "self", ",", "action_name", ",", "function", "=", "None", ")", ":", "# We allow duplicate calls to this rule for the same", "# action name. This way, jamfile rules that take action names", "# can just register them without specially checking if", "# action is already registered.", "assert", "isinstance", "(", "action_name", ",", "basestring", ")", "assert", "function", "is", "None", "or", "callable", "(", "function", ")", "if", "action_name", "not", "in", "self", ".", "actions", ":", "self", ".", "actions", "[", "action_name", "]", "=", "BjamNativeAction", "(", "action_name", ",", "function", ")" ]
Informs self that 'action_name' is declared in bjam. From this point, 'action_name' is a valid argument to the set_update_action method. The action_name should be callable in the global module of bjam.
[ "Informs", "self", "that", "action_name", "is", "declared", "in", "bjam", "." ]
74514c3f99e25b46f22c6e02977fe3da69221c2e
https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/deps/src/boost_1_68_0/tools/build/src/build/engine.py#L201-L216
28,710
apple/turicreate
src/unity/python/turicreate/data_structures/image.py
Image.pixel_data
def pixel_data(self): """ Returns the pixel data stored in the Image object. Returns ------- out : numpy.array The pixel data of the Image object. It returns a multi-dimensional numpy array, where the shape of the array represents the shape of the image (height, weight, channels). See Also -------- width, channels, height Examples -------- >>> img = turicreate.Image('https://static.turi.com/datasets/images/sample.jpg') >>> image_array = img.pixel_data """ from .. import extensions as _extensions data = _np.zeros((self.height, self.width, self.channels), dtype=_np.uint8) _extensions.image_load_to_numpy(self, data.ctypes.data, data.strides) if self.channels == 1: data = data.squeeze(2) return data
python
def pixel_data(self): """ Returns the pixel data stored in the Image object. Returns ------- out : numpy.array The pixel data of the Image object. It returns a multi-dimensional numpy array, where the shape of the array represents the shape of the image (height, weight, channels). See Also -------- width, channels, height Examples -------- >>> img = turicreate.Image('https://static.turi.com/datasets/images/sample.jpg') >>> image_array = img.pixel_data """ from .. import extensions as _extensions data = _np.zeros((self.height, self.width, self.channels), dtype=_np.uint8) _extensions.image_load_to_numpy(self, data.ctypes.data, data.strides) if self.channels == 1: data = data.squeeze(2) return data
[ "def", "pixel_data", "(", "self", ")", ":", "from", ".", ".", "import", "extensions", "as", "_extensions", "data", "=", "_np", ".", "zeros", "(", "(", "self", ".", "height", ",", "self", ".", "width", ",", "self", ".", "channels", ")", ",", "dtype", "=", "_np", ".", "uint8", ")", "_extensions", ".", "image_load_to_numpy", "(", "self", ",", "data", ".", "ctypes", ".", "data", ",", "data", ".", "strides", ")", "if", "self", ".", "channels", "==", "1", ":", "data", "=", "data", ".", "squeeze", "(", "2", ")", "return", "data" ]
Returns the pixel data stored in the Image object. Returns ------- out : numpy.array The pixel data of the Image object. It returns a multi-dimensional numpy array, where the shape of the array represents the shape of the image (height, weight, channels). See Also -------- width, channels, height Examples -------- >>> img = turicreate.Image('https://static.turi.com/datasets/images/sample.jpg') >>> image_array = img.pixel_data
[ "Returns", "the", "pixel", "data", "stored", "in", "the", "Image", "object", "." ]
74514c3f99e25b46f22c6e02977fe3da69221c2e
https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/unity/python/turicreate/data_structures/image.py#L141-L167
28,711
apple/turicreate
src/external/coremltools_wrap/coremltools/coremltools/models/model.py
MLModel.predict
def predict(self, data, useCPUOnly=False, **kwargs): """ Return predictions for the model. The kwargs gets passed into the model as a dictionary. Parameters ---------- data : dict[str, value] Dictionary of data to make predictions from where the keys are the names of the input features. useCPUOnly : bool Set to true to restrict computation to use only the CPU. Defaults to False. Returns ------- out : dict[str, value] Predictions as a dictionary where each key is the output feature name. Examples -------- >>> data = {'bedroom': 1.0, 'bath': 1.0, 'size': 1240} >>> predictions = model.predict(data) """ if self.__proxy__: return self.__proxy__.predict(data,useCPUOnly) else: if _macos_version() < (10, 13): raise Exception('Model prediction is only supported on macOS version 10.13 or later.') try: from ..libcoremlpython import _MLModelProxy except: _MLModelProxy = None if not _MLModelProxy: raise Exception('Unable to load CoreML.framework. Cannot make predictions.') elif _MLModelProxy.maximum_supported_specification_version() < self._spec.specificationVersion: engineVersion = _MLModelProxy.maximum_supported_specification_version() raise Exception('The specification has version ' + str(self._spec.specificationVersion) + ' but the Core ML framework version installed only supports Core ML model specification version ' + str(engineVersion) + ' or older.') elif _has_custom_layer(self._spec): raise Exception('This model contains a custom neural network layer, so predict is not supported.') else: raise Exception('Unable to load CoreML.framework. Cannot make predictions.')
python
def predict(self, data, useCPUOnly=False, **kwargs): """ Return predictions for the model. The kwargs gets passed into the model as a dictionary. Parameters ---------- data : dict[str, value] Dictionary of data to make predictions from where the keys are the names of the input features. useCPUOnly : bool Set to true to restrict computation to use only the CPU. Defaults to False. Returns ------- out : dict[str, value] Predictions as a dictionary where each key is the output feature name. Examples -------- >>> data = {'bedroom': 1.0, 'bath': 1.0, 'size': 1240} >>> predictions = model.predict(data) """ if self.__proxy__: return self.__proxy__.predict(data,useCPUOnly) else: if _macos_version() < (10, 13): raise Exception('Model prediction is only supported on macOS version 10.13 or later.') try: from ..libcoremlpython import _MLModelProxy except: _MLModelProxy = None if not _MLModelProxy: raise Exception('Unable to load CoreML.framework. Cannot make predictions.') elif _MLModelProxy.maximum_supported_specification_version() < self._spec.specificationVersion: engineVersion = _MLModelProxy.maximum_supported_specification_version() raise Exception('The specification has version ' + str(self._spec.specificationVersion) + ' but the Core ML framework version installed only supports Core ML model specification version ' + str(engineVersion) + ' or older.') elif _has_custom_layer(self._spec): raise Exception('This model contains a custom neural network layer, so predict is not supported.') else: raise Exception('Unable to load CoreML.framework. Cannot make predictions.')
[ "def", "predict", "(", "self", ",", "data", ",", "useCPUOnly", "=", "False", ",", "*", "*", "kwargs", ")", ":", "if", "self", ".", "__proxy__", ":", "return", "self", ".", "__proxy__", ".", "predict", "(", "data", ",", "useCPUOnly", ")", "else", ":", "if", "_macos_version", "(", ")", "<", "(", "10", ",", "13", ")", ":", "raise", "Exception", "(", "'Model prediction is only supported on macOS version 10.13 or later.'", ")", "try", ":", "from", ".", ".", "libcoremlpython", "import", "_MLModelProxy", "except", ":", "_MLModelProxy", "=", "None", "if", "not", "_MLModelProxy", ":", "raise", "Exception", "(", "'Unable to load CoreML.framework. Cannot make predictions.'", ")", "elif", "_MLModelProxy", ".", "maximum_supported_specification_version", "(", ")", "<", "self", ".", "_spec", ".", "specificationVersion", ":", "engineVersion", "=", "_MLModelProxy", ".", "maximum_supported_specification_version", "(", ")", "raise", "Exception", "(", "'The specification has version '", "+", "str", "(", "self", ".", "_spec", ".", "specificationVersion", ")", "+", "' but the Core ML framework version installed only supports Core ML model specification version '", "+", "str", "(", "engineVersion", ")", "+", "' or older.'", ")", "elif", "_has_custom_layer", "(", "self", ".", "_spec", ")", ":", "raise", "Exception", "(", "'This model contains a custom neural network layer, so predict is not supported.'", ")", "else", ":", "raise", "Exception", "(", "'Unable to load CoreML.framework. Cannot make predictions.'", ")" ]
Return predictions for the model. The kwargs gets passed into the model as a dictionary. Parameters ---------- data : dict[str, value] Dictionary of data to make predictions from where the keys are the names of the input features. useCPUOnly : bool Set to true to restrict computation to use only the CPU. Defaults to False. Returns ------- out : dict[str, value] Predictions as a dictionary where each key is the output feature name. Examples -------- >>> data = {'bedroom': 1.0, 'bath': 1.0, 'size': 1240} >>> predictions = model.predict(data)
[ "Return", "predictions", "for", "the", "model", ".", "The", "kwargs", "gets", "passed", "into", "the", "model", "as", "a", "dictionary", "." ]
74514c3f99e25b46f22c6e02977fe3da69221c2e
https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/external/coremltools_wrap/coremltools/coremltools/models/model.py#L300-L347
28,712
apple/turicreate
src/external/coremltools_wrap/coremltools/coremltools/models/model.py
MLModel.visualize_spec
def visualize_spec(self, port=None, input_shape_dict=None): """ Visualize the model. Parameters ---------- port : int if server is to be hosted on specific localhost port input_shape_dict : dict The shapes are calculated assuming the batch and sequence are 1 i.e. (1, 1, C, H, W). If either is not 1, then provide full input shape Returns ------- None Examples -------- >>> model = coreml.models.MLModel('HousePricer.mlmodel') >>> model.visualize_spec() """ spec = self._spec model_type = spec.WhichOneof('Type') model_description = spec.description input_spec = model_description.input output_spec = model_description.output spec_inputs = [] for model_input in input_spec: spec_inputs.append((model_input.name, str(model_input.type))) spec_outputs = [] for model_output in output_spec: spec_outputs.append((model_output.name, str(model_output.type))) cy_nodes = [] cy_edges = [] cy_nodes.append({ 'data': { 'id': 'input_node', 'name': '', 'info': { 'type': 'input node' }, 'classes': 'input', } }) for model_input, input_type in spec_inputs: cy_nodes.append({ 'data': { 'id': str(model_input), 'name': str(model_input), 'info': { 'type': "\n".join(str(input_type).split("\n")), 'inputs': str([]), 'outputs': str([model_input]) }, 'parent': 'input_node' }, 'classes': 'input' }) if model_type == 'pipeline': pipeline_spec = spec.pipeline cy_data = _pipeline_nodes_and_edges(cy_nodes, cy_edges, pipeline_spec, spec_outputs ) elif model_type == 'pipelineRegressor': pipeline_spec = spec.pipelineRegressor.pipeline cy_data = _pipeline_nodes_and_edges(cy_nodes, cy_edges, pipeline_spec, spec_outputs ) elif model_type == 'pipelineClassifier': pipeline_spec = spec.pipelineClassifier.pipeline cy_data = _pipeline_nodes_and_edges(cy_nodes, cy_edges, pipeline_spec, spec_outputs ) elif model_type == 'neuralNetwork': nn_spec = spec.neuralNetwork cy_data = _neural_network_nodes_and_edges(nn_spec, cy_nodes, cy_edges, spec_outputs, input_spec, input_shape_dict=input_shape_dict ) elif model_type == 
'neuralNetworkClassifier': nn_spec = spec.neuralNetworkClassifier cy_data = _neural_network_nodes_and_edges(nn_spec, cy_nodes, cy_edges, spec_outputs, input_spec, input_shape_dict=input_shape_dict ) elif model_type == 'neuralNetworkRegressor': nn_spec = spec.neuralNetworkRegressor cy_data = _neural_network_nodes_and_edges(nn_spec, cy_nodes, cy_edges, spec_outputs, input_spec, input_shape_dict=input_shape_dict ) else: print("Model is not of type Pipeline or Neural Network " "and cannot be visualized") return import coremltools web_dir = _os.path.join(_os.path.dirname(coremltools.__file__), 'graph_visualization') with open('{}/model.json'.format(web_dir), 'w') as file: _json.dump(cy_data, file) _start_server(port, web_dir)
python
def visualize_spec(self, port=None, input_shape_dict=None): """ Visualize the model. Parameters ---------- port : int if server is to be hosted on specific localhost port input_shape_dict : dict The shapes are calculated assuming the batch and sequence are 1 i.e. (1, 1, C, H, W). If either is not 1, then provide full input shape Returns ------- None Examples -------- >>> model = coreml.models.MLModel('HousePricer.mlmodel') >>> model.visualize_spec() """ spec = self._spec model_type = spec.WhichOneof('Type') model_description = spec.description input_spec = model_description.input output_spec = model_description.output spec_inputs = [] for model_input in input_spec: spec_inputs.append((model_input.name, str(model_input.type))) spec_outputs = [] for model_output in output_spec: spec_outputs.append((model_output.name, str(model_output.type))) cy_nodes = [] cy_edges = [] cy_nodes.append({ 'data': { 'id': 'input_node', 'name': '', 'info': { 'type': 'input node' }, 'classes': 'input', } }) for model_input, input_type in spec_inputs: cy_nodes.append({ 'data': { 'id': str(model_input), 'name': str(model_input), 'info': { 'type': "\n".join(str(input_type).split("\n")), 'inputs': str([]), 'outputs': str([model_input]) }, 'parent': 'input_node' }, 'classes': 'input' }) if model_type == 'pipeline': pipeline_spec = spec.pipeline cy_data = _pipeline_nodes_and_edges(cy_nodes, cy_edges, pipeline_spec, spec_outputs ) elif model_type == 'pipelineRegressor': pipeline_spec = spec.pipelineRegressor.pipeline cy_data = _pipeline_nodes_and_edges(cy_nodes, cy_edges, pipeline_spec, spec_outputs ) elif model_type == 'pipelineClassifier': pipeline_spec = spec.pipelineClassifier.pipeline cy_data = _pipeline_nodes_and_edges(cy_nodes, cy_edges, pipeline_spec, spec_outputs ) elif model_type == 'neuralNetwork': nn_spec = spec.neuralNetwork cy_data = _neural_network_nodes_and_edges(nn_spec, cy_nodes, cy_edges, spec_outputs, input_spec, input_shape_dict=input_shape_dict ) elif model_type == 
'neuralNetworkClassifier': nn_spec = spec.neuralNetworkClassifier cy_data = _neural_network_nodes_and_edges(nn_spec, cy_nodes, cy_edges, spec_outputs, input_spec, input_shape_dict=input_shape_dict ) elif model_type == 'neuralNetworkRegressor': nn_spec = spec.neuralNetworkRegressor cy_data = _neural_network_nodes_and_edges(nn_spec, cy_nodes, cy_edges, spec_outputs, input_spec, input_shape_dict=input_shape_dict ) else: print("Model is not of type Pipeline or Neural Network " "and cannot be visualized") return import coremltools web_dir = _os.path.join(_os.path.dirname(coremltools.__file__), 'graph_visualization') with open('{}/model.json'.format(web_dir), 'w') as file: _json.dump(cy_data, file) _start_server(port, web_dir)
[ "def", "visualize_spec", "(", "self", ",", "port", "=", "None", ",", "input_shape_dict", "=", "None", ")", ":", "spec", "=", "self", ".", "_spec", "model_type", "=", "spec", ".", "WhichOneof", "(", "'Type'", ")", "model_description", "=", "spec", ".", "description", "input_spec", "=", "model_description", ".", "input", "output_spec", "=", "model_description", ".", "output", "spec_inputs", "=", "[", "]", "for", "model_input", "in", "input_spec", ":", "spec_inputs", ".", "append", "(", "(", "model_input", ".", "name", ",", "str", "(", "model_input", ".", "type", ")", ")", ")", "spec_outputs", "=", "[", "]", "for", "model_output", "in", "output_spec", ":", "spec_outputs", ".", "append", "(", "(", "model_output", ".", "name", ",", "str", "(", "model_output", ".", "type", ")", ")", ")", "cy_nodes", "=", "[", "]", "cy_edges", "=", "[", "]", "cy_nodes", ".", "append", "(", "{", "'data'", ":", "{", "'id'", ":", "'input_node'", ",", "'name'", ":", "''", ",", "'info'", ":", "{", "'type'", ":", "'input node'", "}", ",", "'classes'", ":", "'input'", ",", "}", "}", ")", "for", "model_input", ",", "input_type", "in", "spec_inputs", ":", "cy_nodes", ".", "append", "(", "{", "'data'", ":", "{", "'id'", ":", "str", "(", "model_input", ")", ",", "'name'", ":", "str", "(", "model_input", ")", ",", "'info'", ":", "{", "'type'", ":", "\"\\n\"", ".", "join", "(", "str", "(", "input_type", ")", ".", "split", "(", "\"\\n\"", ")", ")", ",", "'inputs'", ":", "str", "(", "[", "]", ")", ",", "'outputs'", ":", "str", "(", "[", "model_input", "]", ")", "}", ",", "'parent'", ":", "'input_node'", "}", ",", "'classes'", ":", "'input'", "}", ")", "if", "model_type", "==", "'pipeline'", ":", "pipeline_spec", "=", "spec", ".", "pipeline", "cy_data", "=", "_pipeline_nodes_and_edges", "(", "cy_nodes", ",", "cy_edges", ",", "pipeline_spec", ",", "spec_outputs", ")", "elif", "model_type", "==", "'pipelineRegressor'", ":", "pipeline_spec", "=", "spec", ".", "pipelineRegressor", ".", "pipeline", 
"cy_data", "=", "_pipeline_nodes_and_edges", "(", "cy_nodes", ",", "cy_edges", ",", "pipeline_spec", ",", "spec_outputs", ")", "elif", "model_type", "==", "'pipelineClassifier'", ":", "pipeline_spec", "=", "spec", ".", "pipelineClassifier", ".", "pipeline", "cy_data", "=", "_pipeline_nodes_and_edges", "(", "cy_nodes", ",", "cy_edges", ",", "pipeline_spec", ",", "spec_outputs", ")", "elif", "model_type", "==", "'neuralNetwork'", ":", "nn_spec", "=", "spec", ".", "neuralNetwork", "cy_data", "=", "_neural_network_nodes_and_edges", "(", "nn_spec", ",", "cy_nodes", ",", "cy_edges", ",", "spec_outputs", ",", "input_spec", ",", "input_shape_dict", "=", "input_shape_dict", ")", "elif", "model_type", "==", "'neuralNetworkClassifier'", ":", "nn_spec", "=", "spec", ".", "neuralNetworkClassifier", "cy_data", "=", "_neural_network_nodes_and_edges", "(", "nn_spec", ",", "cy_nodes", ",", "cy_edges", ",", "spec_outputs", ",", "input_spec", ",", "input_shape_dict", "=", "input_shape_dict", ")", "elif", "model_type", "==", "'neuralNetworkRegressor'", ":", "nn_spec", "=", "spec", ".", "neuralNetworkRegressor", "cy_data", "=", "_neural_network_nodes_and_edges", "(", "nn_spec", ",", "cy_nodes", ",", "cy_edges", ",", "spec_outputs", ",", "input_spec", ",", "input_shape_dict", "=", "input_shape_dict", ")", "else", ":", "print", "(", "\"Model is not of type Pipeline or Neural Network \"", "\"and cannot be visualized\"", ")", "return", "import", "coremltools", "web_dir", "=", "_os", ".", "path", ".", "join", "(", "_os", ".", "path", ".", "dirname", "(", "coremltools", ".", "__file__", ")", ",", "'graph_visualization'", ")", "with", "open", "(", "'{}/model.json'", ".", "format", "(", "web_dir", ")", ",", "'w'", ")", "as", "file", ":", "_json", ".", "dump", "(", "cy_data", ",", "file", ")", "_start_server", "(", "port", ",", "web_dir", ")" ]
Visualize the model. Parameters ---------- port : int if server is to be hosted on specific localhost port input_shape_dict : dict The shapes are calculated assuming the batch and sequence are 1 i.e. (1, 1, C, H, W). If either is not 1, then provide full input shape Returns ------- None Examples -------- >>> model = coreml.models.MLModel('HousePricer.mlmodel') >>> model.visualize_spec()
[ "Visualize", "the", "model", "." ]
74514c3f99e25b46f22c6e02977fe3da69221c2e
https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/external/coremltools_wrap/coremltools/coremltools/models/model.py#L349-L478
28,713
apple/turicreate
src/unity/python/turicreate/toolkits/nearest_neighbors/_nearest_neighbors.py
_construct_auto_distance
def _construct_auto_distance(feature_names, column_names, column_types, sample): """ Construct composite distance parameters based on selected features and their types. """ ## Make a dictionary from the column_names and column_types col_type_dict = {k: v for k, v in zip(column_names, column_types)} ## Loop through feature names, appending a distance component if the # feature's type is *not* numeric. If the type *is* numeric, append it to # the numeric_cols list, then at the end make a numeric columns distance # component. composite_distance_params = [] numeric_cols = [] for c in feature_names: if col_type_dict[c] == str: composite_distance_params.append([[c], _turicreate.distances.levenshtein, 1]) elif col_type_dict[c] == dict: composite_distance_params.append([[c], _turicreate.distances.jaccard, 1]) elif col_type_dict[c] == array.array: composite_distance_params.append([[c], _turicreate.distances.euclidean, 1]) elif col_type_dict[c] == list: only_str_lists = _validate_lists(sample[c], allowed_types=[str]) if not only_str_lists: raise TypeError("Only lists of all str objects are currently supported") composite_distance_params.append([[c], _turicreate.distances.jaccard, 1]) elif col_type_dict[c] in [int, float, array.array, list]: numeric_cols.append(c) else: raise TypeError("Unable to automatically determine a distance "+\ "for column {}".format(c)) # Make the standalone numeric column distance component if len(numeric_cols) > 0: composite_distance_params.append([numeric_cols, _turicreate.distances.euclidean, 1]) return composite_distance_params
python
def _construct_auto_distance(feature_names, column_names, column_types, sample): """ Construct composite distance parameters based on selected features and their types. """ ## Make a dictionary from the column_names and column_types col_type_dict = {k: v for k, v in zip(column_names, column_types)} ## Loop through feature names, appending a distance component if the # feature's type is *not* numeric. If the type *is* numeric, append it to # the numeric_cols list, then at the end make a numeric columns distance # component. composite_distance_params = [] numeric_cols = [] for c in feature_names: if col_type_dict[c] == str: composite_distance_params.append([[c], _turicreate.distances.levenshtein, 1]) elif col_type_dict[c] == dict: composite_distance_params.append([[c], _turicreate.distances.jaccard, 1]) elif col_type_dict[c] == array.array: composite_distance_params.append([[c], _turicreate.distances.euclidean, 1]) elif col_type_dict[c] == list: only_str_lists = _validate_lists(sample[c], allowed_types=[str]) if not only_str_lists: raise TypeError("Only lists of all str objects are currently supported") composite_distance_params.append([[c], _turicreate.distances.jaccard, 1]) elif col_type_dict[c] in [int, float, array.array, list]: numeric_cols.append(c) else: raise TypeError("Unable to automatically determine a distance "+\ "for column {}".format(c)) # Make the standalone numeric column distance component if len(numeric_cols) > 0: composite_distance_params.append([numeric_cols, _turicreate.distances.euclidean, 1]) return composite_distance_params
[ "def", "_construct_auto_distance", "(", "feature_names", ",", "column_names", ",", "column_types", ",", "sample", ")", ":", "## Make a dictionary from the column_names and column_types", "col_type_dict", "=", "{", "k", ":", "v", "for", "k", ",", "v", "in", "zip", "(", "column_names", ",", "column_types", ")", "}", "## Loop through feature names, appending a distance component if the", "# feature's type is *not* numeric. If the type *is* numeric, append it to", "# the numeric_cols list, then at the end make a numeric columns distance", "# component.", "composite_distance_params", "=", "[", "]", "numeric_cols", "=", "[", "]", "for", "c", "in", "feature_names", ":", "if", "col_type_dict", "[", "c", "]", "==", "str", ":", "composite_distance_params", ".", "append", "(", "[", "[", "c", "]", ",", "_turicreate", ".", "distances", ".", "levenshtein", ",", "1", "]", ")", "elif", "col_type_dict", "[", "c", "]", "==", "dict", ":", "composite_distance_params", ".", "append", "(", "[", "[", "c", "]", ",", "_turicreate", ".", "distances", ".", "jaccard", ",", "1", "]", ")", "elif", "col_type_dict", "[", "c", "]", "==", "array", ".", "array", ":", "composite_distance_params", ".", "append", "(", "[", "[", "c", "]", ",", "_turicreate", ".", "distances", ".", "euclidean", ",", "1", "]", ")", "elif", "col_type_dict", "[", "c", "]", "==", "list", ":", "only_str_lists", "=", "_validate_lists", "(", "sample", "[", "c", "]", ",", "allowed_types", "=", "[", "str", "]", ")", "if", "not", "only_str_lists", ":", "raise", "TypeError", "(", "\"Only lists of all str objects are currently supported\"", ")", "composite_distance_params", ".", "append", "(", "[", "[", "c", "]", ",", "_turicreate", ".", "distances", ".", "jaccard", ",", "1", "]", ")", "elif", "col_type_dict", "[", "c", "]", "in", "[", "int", ",", "float", ",", "array", ".", "array", ",", "list", "]", ":", "numeric_cols", ".", "append", "(", "c", ")", "else", ":", "raise", "TypeError", "(", "\"Unable to automatically determine a 
distance \"", "+", "\"for column {}\"", ".", "format", "(", "c", ")", ")", "# Make the standalone numeric column distance component", "if", "len", "(", "numeric_cols", ")", ">", "0", ":", "composite_distance_params", ".", "append", "(", "[", "numeric_cols", ",", "_turicreate", ".", "distances", ".", "euclidean", ",", "1", "]", ")", "return", "composite_distance_params" ]
Construct composite distance parameters based on selected features and their types.
[ "Construct", "composite", "distance", "parameters", "based", "on", "selected", "features", "and", "their", "types", "." ]
74514c3f99e25b46f22c6e02977fe3da69221c2e
https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/unity/python/turicreate/toolkits/nearest_neighbors/_nearest_neighbors.py#L33-L71
28,714
apple/turicreate
src/unity/python/turicreate/toolkits/nearest_neighbors/_nearest_neighbors.py
NearestNeighborsModel._list_fields
def _list_fields(self): """ List the fields stored in the model, including data, model, and training options. Each field can be queried with the ``get`` method. Returns ------- out : list List of fields queryable with the ``get`` method. """ opts = {'model': self.__proxy__, 'model_name': self.__name__} response = _turicreate.extensions._nearest_neighbors.list_fields(opts) return sorted(response.keys())
python
def _list_fields(self): """ List the fields stored in the model, including data, model, and training options. Each field can be queried with the ``get`` method. Returns ------- out : list List of fields queryable with the ``get`` method. """ opts = {'model': self.__proxy__, 'model_name': self.__name__} response = _turicreate.extensions._nearest_neighbors.list_fields(opts) return sorted(response.keys())
[ "def", "_list_fields", "(", "self", ")", ":", "opts", "=", "{", "'model'", ":", "self", ".", "__proxy__", ",", "'model_name'", ":", "self", ".", "__name__", "}", "response", "=", "_turicreate", ".", "extensions", ".", "_nearest_neighbors", ".", "list_fields", "(", "opts", ")", "return", "sorted", "(", "response", ".", "keys", "(", ")", ")" ]
List the fields stored in the model, including data, model, and training options. Each field can be queried with the ``get`` method. Returns ------- out : list List of fields queryable with the ``get`` method.
[ "List", "the", "fields", "stored", "in", "the", "model", "including", "data", "model", "and", "training", "options", ".", "Each", "field", "can", "be", "queried", "with", "the", "get", "method", "." ]
74514c3f99e25b46f22c6e02977fe3da69221c2e
https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/unity/python/turicreate/toolkits/nearest_neighbors/_nearest_neighbors.py#L675-L688
28,715
apple/turicreate
src/unity/python/turicreate/toolkits/nearest_neighbors/_nearest_neighbors.py
NearestNeighborsModel.query
def query(self, dataset, label=None, k=5, radius=None, verbose=True): """ For each row of the input 'dataset', retrieve the nearest neighbors from the model's stored data. In general, the query dataset does not need to be the same as the reference data stored in the model, but if it is, the 'include_self_edges' parameter can be set to False to exclude results that match query points to themselves. Parameters ---------- dataset : SFrame Query data. Must contain columns with the same names and types as the features used to train the model. Additional columns are allowed, but ignored. Please see the nearest neighbors :func:`~turicreate.nearest_neighbors.create` documentation for more detail on allowable data types. label : str, optional Name of the query SFrame column with row labels. If 'label' is not specified, row numbers are used to identify query dataset rows in the output SFrame. k : int, optional Number of nearest neighbors to return from the reference set for each query observation. The default is 5 neighbors, but setting it to ``None`` will return all neighbors within ``radius`` of the query point. radius : float, optional Only neighbors whose distance to a query point is smaller than this value are returned. The default is ``None``, in which case the ``k`` nearest neighbors are returned for each query point, regardless of distance. verbose: bool, optional If True, print progress updates and model details. Returns ------- out : SFrame An SFrame with the k-nearest neighbors of each query observation. The result contains four columns: the first is the label of the query observation, the second is the label of the nearby reference observation, the third is the distance between the query and reference observations, and the fourth is the rank of the reference observation among the query's k-nearest neighbors. 
See Also -------- similarity_graph Notes ----- - The `dataset` input to this method *can* have missing values (in contrast to the reference dataset used to create the nearest neighbors model). Missing numeric values are imputed to be the mean of the corresponding feature in the reference dataset, and missing strings are imputed to be empty strings. - If both ``k`` and ``radius`` are set to ``None``, each query point returns all of the reference set. If the reference dataset has :math:`n` rows and the query dataset has :math:`m` rows, the output is an SFrame with :math:`nm` rows. - For models created with the 'lsh' method, the query results may have fewer query labels than input query points. Because LSH is an approximate method, a query point may have fewer than 'k' neighbors. If LSH returns no neighbors at all for a query, the query point is omitted from the results. Examples -------- First construct a toy SFrame and create a nearest neighbors model: >>> sf = turicreate.SFrame({'label': range(3), ... 'feature1': [0.98, 0.62, 0.11], ... 'feature2': [0.69, 0.58, 0.36]}) >>> model = turicreate.nearest_neighbors.create(sf, 'label') A new SFrame contains query observations with same schema as the reference SFrame. This SFrame is passed to the ``query`` method. >>> queries = turicreate.SFrame({'label': range(3), ... 'feature1': [0.05, 0.61, 0.99], ... 
'feature2': [0.06, 0.97, 0.86]}) >>> model.query(queries, 'label', k=2) +-------------+-----------------+----------------+------+ | query_label | reference_label | distance | rank | +-------------+-----------------+----------------+------+ | 0 | 2 | 0.305941170816 | 1 | | 0 | 1 | 0.771556867638 | 2 | | 1 | 1 | 0.390128184063 | 1 | | 1 | 0 | 0.464004310325 | 2 | | 2 | 0 | 0.170293863659 | 1 | | 2 | 1 | 0.464004310325 | 2 | +-------------+-----------------+----------------+------+ """ ## Validate the 'dataset' input _tkutl._raise_error_if_not_sframe(dataset, "dataset") _tkutl._raise_error_if_sframe_empty(dataset, "dataset") ## Get model features ref_features = self.features sf_features = _tkutl._toolkits_select_columns(dataset, ref_features) ## Validate and preprocess the 'label' input if label is None: query_labels = _turicreate.SArray.from_sequence(len(dataset)) else: if not label in dataset.column_names(): raise ValueError( "Input 'label' must be a string matching the name of a " +\ "column in the reference SFrame 'dataset'.") if not dataset[label].dtype == str and not dataset[label].dtype == int: raise TypeError("The label column must contain integers or strings.") if label in ref_features: raise ValueError("The label column cannot be one of the features.") query_labels = dataset[label] ## Validate neighborhood parameters 'k' and 'radius' if k is not None: if not isinstance(k, int): raise ValueError("Input 'k' must be an integer.") if k <= 0: raise ValueError("Input 'k' must be larger than 0.") if radius is not None: if not isinstance(radius, (int, float)): raise ValueError("Input 'radius' must be an integer or float.") if radius < 0: raise ValueError("Input 'radius' must be non-negative.") ## Set k and radius to special values to indicate 'None' if k is None: k = -1 if radius is None: radius = -1.0 opts = {'model': self.__proxy__, 'model_name': self.__name__, 'features': sf_features, 'query_labels': query_labels, 'k': k, 'radius': radius} with 
QuietProgress(verbose): result = _turicreate.extensions._nearest_neighbors.query(opts) return result['neighbors']
python
def query(self, dataset, label=None, k=5, radius=None, verbose=True): """ For each row of the input 'dataset', retrieve the nearest neighbors from the model's stored data. In general, the query dataset does not need to be the same as the reference data stored in the model, but if it is, the 'include_self_edges' parameter can be set to False to exclude results that match query points to themselves. Parameters ---------- dataset : SFrame Query data. Must contain columns with the same names and types as the features used to train the model. Additional columns are allowed, but ignored. Please see the nearest neighbors :func:`~turicreate.nearest_neighbors.create` documentation for more detail on allowable data types. label : str, optional Name of the query SFrame column with row labels. If 'label' is not specified, row numbers are used to identify query dataset rows in the output SFrame. k : int, optional Number of nearest neighbors to return from the reference set for each query observation. The default is 5 neighbors, but setting it to ``None`` will return all neighbors within ``radius`` of the query point. radius : float, optional Only neighbors whose distance to a query point is smaller than this value are returned. The default is ``None``, in which case the ``k`` nearest neighbors are returned for each query point, regardless of distance. verbose: bool, optional If True, print progress updates and model details. Returns ------- out : SFrame An SFrame with the k-nearest neighbors of each query observation. The result contains four columns: the first is the label of the query observation, the second is the label of the nearby reference observation, the third is the distance between the query and reference observations, and the fourth is the rank of the reference observation among the query's k-nearest neighbors. 
See Also -------- similarity_graph Notes ----- - The `dataset` input to this method *can* have missing values (in contrast to the reference dataset used to create the nearest neighbors model). Missing numeric values are imputed to be the mean of the corresponding feature in the reference dataset, and missing strings are imputed to be empty strings. - If both ``k`` and ``radius`` are set to ``None``, each query point returns all of the reference set. If the reference dataset has :math:`n` rows and the query dataset has :math:`m` rows, the output is an SFrame with :math:`nm` rows. - For models created with the 'lsh' method, the query results may have fewer query labels than input query points. Because LSH is an approximate method, a query point may have fewer than 'k' neighbors. If LSH returns no neighbors at all for a query, the query point is omitted from the results. Examples -------- First construct a toy SFrame and create a nearest neighbors model: >>> sf = turicreate.SFrame({'label': range(3), ... 'feature1': [0.98, 0.62, 0.11], ... 'feature2': [0.69, 0.58, 0.36]}) >>> model = turicreate.nearest_neighbors.create(sf, 'label') A new SFrame contains query observations with same schema as the reference SFrame. This SFrame is passed to the ``query`` method. >>> queries = turicreate.SFrame({'label': range(3), ... 'feature1': [0.05, 0.61, 0.99], ... 
'feature2': [0.06, 0.97, 0.86]}) >>> model.query(queries, 'label', k=2) +-------------+-----------------+----------------+------+ | query_label | reference_label | distance | rank | +-------------+-----------------+----------------+------+ | 0 | 2 | 0.305941170816 | 1 | | 0 | 1 | 0.771556867638 | 2 | | 1 | 1 | 0.390128184063 | 1 | | 1 | 0 | 0.464004310325 | 2 | | 2 | 0 | 0.170293863659 | 1 | | 2 | 1 | 0.464004310325 | 2 | +-------------+-----------------+----------------+------+ """ ## Validate the 'dataset' input _tkutl._raise_error_if_not_sframe(dataset, "dataset") _tkutl._raise_error_if_sframe_empty(dataset, "dataset") ## Get model features ref_features = self.features sf_features = _tkutl._toolkits_select_columns(dataset, ref_features) ## Validate and preprocess the 'label' input if label is None: query_labels = _turicreate.SArray.from_sequence(len(dataset)) else: if not label in dataset.column_names(): raise ValueError( "Input 'label' must be a string matching the name of a " +\ "column in the reference SFrame 'dataset'.") if not dataset[label].dtype == str and not dataset[label].dtype == int: raise TypeError("The label column must contain integers or strings.") if label in ref_features: raise ValueError("The label column cannot be one of the features.") query_labels = dataset[label] ## Validate neighborhood parameters 'k' and 'radius' if k is not None: if not isinstance(k, int): raise ValueError("Input 'k' must be an integer.") if k <= 0: raise ValueError("Input 'k' must be larger than 0.") if radius is not None: if not isinstance(radius, (int, float)): raise ValueError("Input 'radius' must be an integer or float.") if radius < 0: raise ValueError("Input 'radius' must be non-negative.") ## Set k and radius to special values to indicate 'None' if k is None: k = -1 if radius is None: radius = -1.0 opts = {'model': self.__proxy__, 'model_name': self.__name__, 'features': sf_features, 'query_labels': query_labels, 'k': k, 'radius': radius} with 
QuietProgress(verbose): result = _turicreate.extensions._nearest_neighbors.query(opts) return result['neighbors']
[ "def", "query", "(", "self", ",", "dataset", ",", "label", "=", "None", ",", "k", "=", "5", ",", "radius", "=", "None", ",", "verbose", "=", "True", ")", ":", "## Validate the 'dataset' input", "_tkutl", ".", "_raise_error_if_not_sframe", "(", "dataset", ",", "\"dataset\"", ")", "_tkutl", ".", "_raise_error_if_sframe_empty", "(", "dataset", ",", "\"dataset\"", ")", "## Get model features", "ref_features", "=", "self", ".", "features", "sf_features", "=", "_tkutl", ".", "_toolkits_select_columns", "(", "dataset", ",", "ref_features", ")", "## Validate and preprocess the 'label' input", "if", "label", "is", "None", ":", "query_labels", "=", "_turicreate", ".", "SArray", ".", "from_sequence", "(", "len", "(", "dataset", ")", ")", "else", ":", "if", "not", "label", "in", "dataset", ".", "column_names", "(", ")", ":", "raise", "ValueError", "(", "\"Input 'label' must be a string matching the name of a \"", "+", "\"column in the reference SFrame 'dataset'.\"", ")", "if", "not", "dataset", "[", "label", "]", ".", "dtype", "==", "str", "and", "not", "dataset", "[", "label", "]", ".", "dtype", "==", "int", ":", "raise", "TypeError", "(", "\"The label column must contain integers or strings.\"", ")", "if", "label", "in", "ref_features", ":", "raise", "ValueError", "(", "\"The label column cannot be one of the features.\"", ")", "query_labels", "=", "dataset", "[", "label", "]", "## Validate neighborhood parameters 'k' and 'radius'", "if", "k", "is", "not", "None", ":", "if", "not", "isinstance", "(", "k", ",", "int", ")", ":", "raise", "ValueError", "(", "\"Input 'k' must be an integer.\"", ")", "if", "k", "<=", "0", ":", "raise", "ValueError", "(", "\"Input 'k' must be larger than 0.\"", ")", "if", "radius", "is", "not", "None", ":", "if", "not", "isinstance", "(", "radius", ",", "(", "int", ",", "float", ")", ")", ":", "raise", "ValueError", "(", "\"Input 'radius' must be an integer or float.\"", ")", "if", "radius", "<", "0", ":", "raise", "ValueError", "(", "\"Input 
'radius' must be non-negative.\"", ")", "## Set k and radius to special values to indicate 'None'", "if", "k", "is", "None", ":", "k", "=", "-", "1", "if", "radius", "is", "None", ":", "radius", "=", "-", "1.0", "opts", "=", "{", "'model'", ":", "self", ".", "__proxy__", ",", "'model_name'", ":", "self", ".", "__name__", ",", "'features'", ":", "sf_features", ",", "'query_labels'", ":", "query_labels", ",", "'k'", ":", "k", ",", "'radius'", ":", "radius", "}", "with", "QuietProgress", "(", "verbose", ")", ":", "result", "=", "_turicreate", ".", "extensions", ".", "_nearest_neighbors", ".", "query", "(", "opts", ")", "return", "result", "[", "'neighbors'", "]" ]
For each row of the input 'dataset', retrieve the nearest neighbors from the model's stored data. In general, the query dataset does not need to be the same as the reference data stored in the model, but if it is, the 'include_self_edges' parameter can be set to False to exclude results that match query points to themselves. Parameters ---------- dataset : SFrame Query data. Must contain columns with the same names and types as the features used to train the model. Additional columns are allowed, but ignored. Please see the nearest neighbors :func:`~turicreate.nearest_neighbors.create` documentation for more detail on allowable data types. label : str, optional Name of the query SFrame column with row labels. If 'label' is not specified, row numbers are used to identify query dataset rows in the output SFrame. k : int, optional Number of nearest neighbors to return from the reference set for each query observation. The default is 5 neighbors, but setting it to ``None`` will return all neighbors within ``radius`` of the query point. radius : float, optional Only neighbors whose distance to a query point is smaller than this value are returned. The default is ``None``, in which case the ``k`` nearest neighbors are returned for each query point, regardless of distance. verbose: bool, optional If True, print progress updates and model details. Returns ------- out : SFrame An SFrame with the k-nearest neighbors of each query observation. The result contains four columns: the first is the label of the query observation, the second is the label of the nearby reference observation, the third is the distance between the query and reference observations, and the fourth is the rank of the reference observation among the query's k-nearest neighbors. See Also -------- similarity_graph Notes ----- - The `dataset` input to this method *can* have missing values (in contrast to the reference dataset used to create the nearest neighbors model). 
Missing numeric values are imputed to be the mean of the corresponding feature in the reference dataset, and missing strings are imputed to be empty strings. - If both ``k`` and ``radius`` are set to ``None``, each query point returns all of the reference set. If the reference dataset has :math:`n` rows and the query dataset has :math:`m` rows, the output is an SFrame with :math:`nm` rows. - For models created with the 'lsh' method, the query results may have fewer query labels than input query points. Because LSH is an approximate method, a query point may have fewer than 'k' neighbors. If LSH returns no neighbors at all for a query, the query point is omitted from the results. Examples -------- First construct a toy SFrame and create a nearest neighbors model: >>> sf = turicreate.SFrame({'label': range(3), ... 'feature1': [0.98, 0.62, 0.11], ... 'feature2': [0.69, 0.58, 0.36]}) >>> model = turicreate.nearest_neighbors.create(sf, 'label') A new SFrame contains query observations with same schema as the reference SFrame. This SFrame is passed to the ``query`` method. >>> queries = turicreate.SFrame({'label': range(3), ... 'feature1': [0.05, 0.61, 0.99], ... 'feature2': [0.06, 0.97, 0.86]}) >>> model.query(queries, 'label', k=2) +-------------+-----------------+----------------+------+ | query_label | reference_label | distance | rank | +-------------+-----------------+----------------+------+ | 0 | 2 | 0.305941170816 | 1 | | 0 | 1 | 0.771556867638 | 2 | | 1 | 1 | 0.390128184063 | 1 | | 1 | 0 | 0.464004310325 | 2 | | 2 | 0 | 0.170293863659 | 1 | | 2 | 1 | 0.464004310325 | 2 | +-------------+-----------------+----------------+------+
[ "For", "each", "row", "of", "the", "input", "dataset", "retrieve", "the", "nearest", "neighbors", "from", "the", "model", "s", "stored", "data", ".", "In", "general", "the", "query", "dataset", "does", "not", "need", "to", "be", "the", "same", "as", "the", "reference", "data", "stored", "in", "the", "model", "but", "if", "it", "is", "the", "include_self_edges", "parameter", "can", "be", "set", "to", "False", "to", "exclude", "results", "that", "match", "query", "points", "to", "themselves", "." ]
74514c3f99e25b46f22c6e02977fe3da69221c2e
https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/unity/python/turicreate/toolkits/nearest_neighbors/_nearest_neighbors.py#L777-L935
28,716
apple/turicreate
src/unity/python/turicreate/toolkits/nearest_neighbors/_nearest_neighbors.py
NearestNeighborsModel.similarity_graph
def similarity_graph(self, k=5, radius=None, include_self_edges=False, output_type='SGraph', verbose=True): """ Construct the similarity graph on the reference dataset, which is already stored in the model. This is conceptually very similar to running `query` with the reference set, but this method is optimized for the purpose, syntactically simpler, and automatically removes self-edges. Parameters ---------- k : int, optional Maximum number of neighbors to return for each point in the dataset. Setting this to ``None`` deactivates the constraint, so that all neighbors are returned within ``radius`` of a given point. radius : float, optional For a given point, only neighbors within this distance are returned. The default is ``None``, in which case the ``k`` nearest neighbors are returned for each query point, regardless of distance. include_self_edges : bool, optional For most distance functions, each point in the model's reference dataset is its own nearest neighbor. If this parameter is set to False, this result is ignored, and the nearest neighbors are returned *excluding* the point itself. output_type : {'SGraph', 'SFrame'}, optional By default, the results are returned in the form of an SGraph, where each point in the reference dataset is a vertex and an edge A -> B indicates that vertex B is a nearest neighbor of vertex A. If 'output_type' is set to 'SFrame', the output is in the same form as the results of the 'query' method: an SFrame with columns indicating the query label (in this case the query data is the same as the reference data), reference label, distance between the two points, and the rank of the neighbor. verbose : bool, optional If True, print progress updates and model details. Returns ------- out : SFrame or SGraph The type of the output object depends on the 'output_type' parameter. See the parameter description for more detail. Notes ----- - If both ``k`` and ``radius`` are set to ``None``, each data point is matched to the entire dataset. 
If the reference dataset has :math:`n` rows, the output is an SFrame with :math:`n^2` rows (or an SGraph with :math:`n^2` edges). - For models created with the 'lsh' method, the output similarity graph may have fewer vertices than there are data points in the original reference set. Because LSH is an approximate method, a query point may have fewer than 'k' neighbors. If LSH returns no neighbors at all for a query and self-edges are excluded, the query point is omitted from the results. Examples -------- First construct an SFrame and create a nearest neighbors model: >>> sf = turicreate.SFrame({'x1': [0.98, 0.62, 0.11], ... 'x2': [0.69, 0.58, 0.36]}) ... >>> model = turicreate.nearest_neighbors.create(sf, distance='euclidean') Unlike the ``query`` method, there is no need for a second dataset with ``similarity_graph``. >>> g = model.similarity_graph(k=1) # an SGraph >>> g.edges +----------+----------+----------------+------+ | __src_id | __dst_id | distance | rank | +----------+----------+----------------+------+ | 0 | 1 | 0.376430604494 | 1 | | 2 | 1 | 0.55542776308 | 1 | | 1 | 0 | 0.376430604494 | 1 | +----------+----------+----------------+------+ """ ## Validate inputs. 
if k is not None: if not isinstance(k, int): raise ValueError("Input 'k' must be an integer.") if k <= 0: raise ValueError("Input 'k' must be larger than 0.") if radius is not None: if not isinstance(radius, (int, float)): raise ValueError("Input 'radius' must be an integer or float.") if radius < 0: raise ValueError("Input 'radius' must be non-negative.") ## Set k and radius to special values to indicate 'None' if k is None: k = -1 if radius is None: radius = -1.0 opts = {'model': self.__proxy__, 'model_name': self.__name__, 'k': k, 'radius': radius, 'include_self_edges': include_self_edges} with QuietProgress(verbose): result = _turicreate.extensions._nearest_neighbors.similarity_graph(opts) knn = result['neighbors'] if output_type == "SFrame": return knn else: sg = _SGraph(edges=knn, src_field='query_label', dst_field='reference_label') return sg
python
def similarity_graph(self, k=5, radius=None, include_self_edges=False, output_type='SGraph', verbose=True): """ Construct the similarity graph on the reference dataset, which is already stored in the model. This is conceptually very similar to running `query` with the reference set, but this method is optimized for the purpose, syntactically simpler, and automatically removes self-edges. Parameters ---------- k : int, optional Maximum number of neighbors to return for each point in the dataset. Setting this to ``None`` deactivates the constraint, so that all neighbors are returned within ``radius`` of a given point. radius : float, optional For a given point, only neighbors within this distance are returned. The default is ``None``, in which case the ``k`` nearest neighbors are returned for each query point, regardless of distance. include_self_edges : bool, optional For most distance functions, each point in the model's reference dataset is its own nearest neighbor. If this parameter is set to False, this result is ignored, and the nearest neighbors are returned *excluding* the point itself. output_type : {'SGraph', 'SFrame'}, optional By default, the results are returned in the form of an SGraph, where each point in the reference dataset is a vertex and an edge A -> B indicates that vertex B is a nearest neighbor of vertex A. If 'output_type' is set to 'SFrame', the output is in the same form as the results of the 'query' method: an SFrame with columns indicating the query label (in this case the query data is the same as the reference data), reference label, distance between the two points, and the rank of the neighbor. verbose : bool, optional If True, print progress updates and model details. Returns ------- out : SFrame or SGraph The type of the output object depends on the 'output_type' parameter. See the parameter description for more detail. Notes ----- - If both ``k`` and ``radius`` are set to ``None``, each data point is matched to the entire dataset. 
If the reference dataset has :math:`n` rows, the output is an SFrame with :math:`n^2` rows (or an SGraph with :math:`n^2` edges). - For models created with the 'lsh' method, the output similarity graph may have fewer vertices than there are data points in the original reference set. Because LSH is an approximate method, a query point may have fewer than 'k' neighbors. If LSH returns no neighbors at all for a query and self-edges are excluded, the query point is omitted from the results. Examples -------- First construct an SFrame and create a nearest neighbors model: >>> sf = turicreate.SFrame({'x1': [0.98, 0.62, 0.11], ... 'x2': [0.69, 0.58, 0.36]}) ... >>> model = turicreate.nearest_neighbors.create(sf, distance='euclidean') Unlike the ``query`` method, there is no need for a second dataset with ``similarity_graph``. >>> g = model.similarity_graph(k=1) # an SGraph >>> g.edges +----------+----------+----------------+------+ | __src_id | __dst_id | distance | rank | +----------+----------+----------------+------+ | 0 | 1 | 0.376430604494 | 1 | | 2 | 1 | 0.55542776308 | 1 | | 1 | 0 | 0.376430604494 | 1 | +----------+----------+----------------+------+ """ ## Validate inputs. 
if k is not None: if not isinstance(k, int): raise ValueError("Input 'k' must be an integer.") if k <= 0: raise ValueError("Input 'k' must be larger than 0.") if radius is not None: if not isinstance(radius, (int, float)): raise ValueError("Input 'radius' must be an integer or float.") if radius < 0: raise ValueError("Input 'radius' must be non-negative.") ## Set k and radius to special values to indicate 'None' if k is None: k = -1 if radius is None: radius = -1.0 opts = {'model': self.__proxy__, 'model_name': self.__name__, 'k': k, 'radius': radius, 'include_self_edges': include_self_edges} with QuietProgress(verbose): result = _turicreate.extensions._nearest_neighbors.similarity_graph(opts) knn = result['neighbors'] if output_type == "SFrame": return knn else: sg = _SGraph(edges=knn, src_field='query_label', dst_field='reference_label') return sg
[ "def", "similarity_graph", "(", "self", ",", "k", "=", "5", ",", "radius", "=", "None", ",", "include_self_edges", "=", "False", ",", "output_type", "=", "'SGraph'", ",", "verbose", "=", "True", ")", ":", "## Validate inputs.", "if", "k", "is", "not", "None", ":", "if", "not", "isinstance", "(", "k", ",", "int", ")", ":", "raise", "ValueError", "(", "\"Input 'k' must be an integer.\"", ")", "if", "k", "<=", "0", ":", "raise", "ValueError", "(", "\"Input 'k' must be larger than 0.\"", ")", "if", "radius", "is", "not", "None", ":", "if", "not", "isinstance", "(", "radius", ",", "(", "int", ",", "float", ")", ")", ":", "raise", "ValueError", "(", "\"Input 'radius' must be an integer or float.\"", ")", "if", "radius", "<", "0", ":", "raise", "ValueError", "(", "\"Input 'radius' must be non-negative.\"", ")", "## Set k and radius to special values to indicate 'None'", "if", "k", "is", "None", ":", "k", "=", "-", "1", "if", "radius", "is", "None", ":", "radius", "=", "-", "1.0", "opts", "=", "{", "'model'", ":", "self", ".", "__proxy__", ",", "'model_name'", ":", "self", ".", "__name__", ",", "'k'", ":", "k", ",", "'radius'", ":", "radius", ",", "'include_self_edges'", ":", "include_self_edges", "}", "with", "QuietProgress", "(", "verbose", ")", ":", "result", "=", "_turicreate", ".", "extensions", ".", "_nearest_neighbors", ".", "similarity_graph", "(", "opts", ")", "knn", "=", "result", "[", "'neighbors'", "]", "if", "output_type", "==", "\"SFrame\"", ":", "return", "knn", "else", ":", "sg", "=", "_SGraph", "(", "edges", "=", "knn", ",", "src_field", "=", "'query_label'", ",", "dst_field", "=", "'reference_label'", ")", "return", "sg" ]
Construct the similarity graph on the reference dataset, which is already stored in the model. This is conceptually very similar to running `query` with the reference set, but this method is optimized for the purpose, syntactically simpler, and automatically removes self-edges. Parameters ---------- k : int, optional Maximum number of neighbors to return for each point in the dataset. Setting this to ``None`` deactivates the constraint, so that all neighbors are returned within ``radius`` of a given point. radius : float, optional For a given point, only neighbors within this distance are returned. The default is ``None``, in which case the ``k`` nearest neighbors are returned for each query point, regardless of distance. include_self_edges : bool, optional For most distance functions, each point in the model's reference dataset is its own nearest neighbor. If this parameter is set to False, this result is ignored, and the nearest neighbors are returned *excluding* the point itself. output_type : {'SGraph', 'SFrame'}, optional By default, the results are returned in the form of an SGraph, where each point in the reference dataset is a vertex and an edge A -> B indicates that vertex B is a nearest neighbor of vertex A. If 'output_type' is set to 'SFrame', the output is in the same form as the results of the 'query' method: an SFrame with columns indicating the query label (in this case the query data is the same as the reference data), reference label, distance between the two points, and the rank of the neighbor. verbose : bool, optional If True, print progress updates and model details. Returns ------- out : SFrame or SGraph The type of the output object depends on the 'output_type' parameter. See the parameter description for more detail. Notes ----- - If both ``k`` and ``radius`` are set to ``None``, each data point is matched to the entire dataset. 
If the reference dataset has :math:`n` rows, the output is an SFrame with :math:`n^2` rows (or an SGraph with :math:`n^2` edges). - For models created with the 'lsh' method, the output similarity graph may have fewer vertices than there are data points in the original reference set. Because LSH is an approximate method, a query point may have fewer than 'k' neighbors. If LSH returns no neighbors at all for a query and self-edges are excluded, the query point is omitted from the results. Examples -------- First construct an SFrame and create a nearest neighbors model: >>> sf = turicreate.SFrame({'x1': [0.98, 0.62, 0.11], ... 'x2': [0.69, 0.58, 0.36]}) ... >>> model = turicreate.nearest_neighbors.create(sf, distance='euclidean') Unlike the ``query`` method, there is no need for a second dataset with ``similarity_graph``. >>> g = model.similarity_graph(k=1) # an SGraph >>> g.edges +----------+----------+----------------+------+ | __src_id | __dst_id | distance | rank | +----------+----------+----------------+------+ | 0 | 1 | 0.376430604494 | 1 | | 2 | 1 | 0.55542776308 | 1 | | 1 | 0 | 0.376430604494 | 1 | +----------+----------+----------------+------+
[ "Construct", "the", "similarity", "graph", "on", "the", "reference", "dataset", "which", "is", "already", "stored", "in", "the", "model", ".", "This", "is", "conceptually", "very", "similar", "to", "running", "query", "with", "the", "reference", "set", "but", "this", "method", "is", "optimized", "for", "the", "purpose", "syntactically", "simpler", "and", "automatically", "removes", "self", "-", "edges", "." ]
74514c3f99e25b46f22c6e02977fe3da69221c2e
https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/unity/python/turicreate/toolkits/nearest_neighbors/_nearest_neighbors.py#L937-L1060
28,717
apple/turicreate
src/unity/python/turicreate/toolkits/activity_classifier/util.py
random_split_by_session
def random_split_by_session(dataset, session_id, fraction=0.9, seed=None): """ Randomly split an SFrame into two SFrames based on the `session_id` such that one split contains data for a `fraction` of the sessions while the second split contains all data for the rest of the sessions. Parameters ---------- dataset : SFrame Dataset to split. It must contain a column of session ids. session_id : string, optional The name of the column in `dataset` that corresponds to the a unique identifier for each session. fraction : float, optional Fraction of the sessions to fetch for the first returned SFrame. Must be between 0 and 1. Once the sessions are split, all data from a single session is in the same SFrame. seed : int, optional Seed for the random number generator used to split. Examples -------- .. sourcecode:: python # Split the data so that train has 90% of the users. >>> train, valid = tc.activity_classifier.util.random_split_by_session( ... dataset, session_id='session_id', fraction=0.9) # For example: If dataset has 2055 sessions >>> len(dataset['session_id'].unique()) 2055 # The training set now has 90% of the sessions >>> len(train['session_id'].unique()) 1850 # The validation set has the remaining 10% of the sessions >>> len(valid['session_id'].unique()) 205 """ from random import Random _raise_error_if_not_of_type(dataset, _SFrame, 'dataset') _raise_error_if_not_of_type(session_id, str, 'session_id') _raise_error_if_not_of_type(fraction, float, 'fraction') _raise_error_if_not_of_type(seed, [int, type(None)], 'seed') _numeric_param_check_range('fraction', fraction, 0, 1) if session_id not in dataset.column_names(): raise _ToolkitError( 'Input "dataset" must contain a column called %s.' % session_id) if seed is None: # Include the nanosecond component as well. import time seed = abs(hash("%0.20f" % time.time())) % (2 ** 31) # The cython bindings require this to be an int, so cast if we can. 
try: seed = int(seed) except ValueError: raise ValueError('The \'seed\' parameter must be of type int.') random = Random() # Create a random binary filter (boolean SArray), using the same probability across all lines # that belong to the same session. In expectancy - the desired fraction of the sessions will # go to the training set. # Since boolean filters preserve order - there is no need to re-sort the lines within each session. # The boolean filter is a pseudorandom function of the session_id and the # global seed above, allowing the train-test split to vary across runs using # the same dataset. def random_session_pick(session_id_hash): random.seed(session_id_hash) return random.uniform(0, 1) < fraction chosen_filter = dataset[session_id].hash(seed).apply(random_session_pick) train = dataset[chosen_filter] valid = dataset[1 - chosen_filter] return train, valid
python
def random_split_by_session(dataset, session_id, fraction=0.9, seed=None): """ Randomly split an SFrame into two SFrames based on the `session_id` such that one split contains data for a `fraction` of the sessions while the second split contains all data for the rest of the sessions. Parameters ---------- dataset : SFrame Dataset to split. It must contain a column of session ids. session_id : string, optional The name of the column in `dataset` that corresponds to the a unique identifier for each session. fraction : float, optional Fraction of the sessions to fetch for the first returned SFrame. Must be between 0 and 1. Once the sessions are split, all data from a single session is in the same SFrame. seed : int, optional Seed for the random number generator used to split. Examples -------- .. sourcecode:: python # Split the data so that train has 90% of the users. >>> train, valid = tc.activity_classifier.util.random_split_by_session( ... dataset, session_id='session_id', fraction=0.9) # For example: If dataset has 2055 sessions >>> len(dataset['session_id'].unique()) 2055 # The training set now has 90% of the sessions >>> len(train['session_id'].unique()) 1850 # The validation set has the remaining 10% of the sessions >>> len(valid['session_id'].unique()) 205 """ from random import Random _raise_error_if_not_of_type(dataset, _SFrame, 'dataset') _raise_error_if_not_of_type(session_id, str, 'session_id') _raise_error_if_not_of_type(fraction, float, 'fraction') _raise_error_if_not_of_type(seed, [int, type(None)], 'seed') _numeric_param_check_range('fraction', fraction, 0, 1) if session_id not in dataset.column_names(): raise _ToolkitError( 'Input "dataset" must contain a column called %s.' % session_id) if seed is None: # Include the nanosecond component as well. import time seed = abs(hash("%0.20f" % time.time())) % (2 ** 31) # The cython bindings require this to be an int, so cast if we can. 
try: seed = int(seed) except ValueError: raise ValueError('The \'seed\' parameter must be of type int.') random = Random() # Create a random binary filter (boolean SArray), using the same probability across all lines # that belong to the same session. In expectancy - the desired fraction of the sessions will # go to the training set. # Since boolean filters preserve order - there is no need to re-sort the lines within each session. # The boolean filter is a pseudorandom function of the session_id and the # global seed above, allowing the train-test split to vary across runs using # the same dataset. def random_session_pick(session_id_hash): random.seed(session_id_hash) return random.uniform(0, 1) < fraction chosen_filter = dataset[session_id].hash(seed).apply(random_session_pick) train = dataset[chosen_filter] valid = dataset[1 - chosen_filter] return train, valid
[ "def", "random_split_by_session", "(", "dataset", ",", "session_id", ",", "fraction", "=", "0.9", ",", "seed", "=", "None", ")", ":", "from", "random", "import", "Random", "_raise_error_if_not_of_type", "(", "dataset", ",", "_SFrame", ",", "'dataset'", ")", "_raise_error_if_not_of_type", "(", "session_id", ",", "str", ",", "'session_id'", ")", "_raise_error_if_not_of_type", "(", "fraction", ",", "float", ",", "'fraction'", ")", "_raise_error_if_not_of_type", "(", "seed", ",", "[", "int", ",", "type", "(", "None", ")", "]", ",", "'seed'", ")", "_numeric_param_check_range", "(", "'fraction'", ",", "fraction", ",", "0", ",", "1", ")", "if", "session_id", "not", "in", "dataset", ".", "column_names", "(", ")", ":", "raise", "_ToolkitError", "(", "'Input \"dataset\" must contain a column called %s.'", "%", "session_id", ")", "if", "seed", "is", "None", ":", "# Include the nanosecond component as well.", "import", "time", "seed", "=", "abs", "(", "hash", "(", "\"%0.20f\"", "%", "time", ".", "time", "(", ")", ")", ")", "%", "(", "2", "**", "31", ")", "# The cython bindings require this to be an int, so cast if we can.", "try", ":", "seed", "=", "int", "(", "seed", ")", "except", "ValueError", ":", "raise", "ValueError", "(", "'The \\'seed\\' parameter must be of type int.'", ")", "random", "=", "Random", "(", ")", "# Create a random binary filter (boolean SArray), using the same probability across all lines", "# that belong to the same session. 
In expectancy - the desired fraction of the sessions will", "# go to the training set.", "# Since boolean filters preserve order - there is no need to re-sort the lines within each session.", "# The boolean filter is a pseudorandom function of the session_id and the", "# global seed above, allowing the train-test split to vary across runs using", "# the same dataset.", "def", "random_session_pick", "(", "session_id_hash", ")", ":", "random", ".", "seed", "(", "session_id_hash", ")", "return", "random", ".", "uniform", "(", "0", ",", "1", ")", "<", "fraction", "chosen_filter", "=", "dataset", "[", "session_id", "]", ".", "hash", "(", "seed", ")", ".", "apply", "(", "random_session_pick", ")", "train", "=", "dataset", "[", "chosen_filter", "]", "valid", "=", "dataset", "[", "1", "-", "chosen_filter", "]", "return", "train", ",", "valid" ]
Randomly split an SFrame into two SFrames based on the `session_id` such that one split contains data for a `fraction` of the sessions while the second split contains all data for the rest of the sessions. Parameters ---------- dataset : SFrame Dataset to split. It must contain a column of session ids. session_id : string, optional The name of the column in `dataset` that corresponds to the a unique identifier for each session. fraction : float, optional Fraction of the sessions to fetch for the first returned SFrame. Must be between 0 and 1. Once the sessions are split, all data from a single session is in the same SFrame. seed : int, optional Seed for the random number generator used to split. Examples -------- .. sourcecode:: python # Split the data so that train has 90% of the users. >>> train, valid = tc.activity_classifier.util.random_split_by_session( ... dataset, session_id='session_id', fraction=0.9) # For example: If dataset has 2055 sessions >>> len(dataset['session_id'].unique()) 2055 # The training set now has 90% of the sessions >>> len(train['session_id'].unique()) 1850 # The validation set has the remaining 10% of the sessions >>> len(valid['session_id'].unique()) 205
[ "Randomly", "split", "an", "SFrame", "into", "two", "SFrames", "based", "on", "the", "session_id", "such", "that", "one", "split", "contains", "data", "for", "a", "fraction", "of", "the", "sessions", "while", "the", "second", "split", "contains", "all", "data", "for", "the", "rest", "of", "the", "sessions", "." ]
74514c3f99e25b46f22c6e02977fe3da69221c2e
https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/unity/python/turicreate/toolkits/activity_classifier/util.py#L20-L104
28,718
apple/turicreate
deps/src/cmake-3.13.4/Source/cmConvertMSBuildXMLToJSON.py
read_msbuild_xml
def read_msbuild_xml(path, values={}): """Reads the MS Build XML file at the path and returns its contents. Keyword arguments: values -- The map to append the contents to (default {}) """ # Attempt to read the file contents try: document = parse(path) except Exception as e: logging.exception('Could not read MS Build XML file at %s', path) return values # Convert the XML to JSON format logging.info('Processing MS Build XML file at %s', path) # Get the rule node rule = document.getElementsByTagName('Rule')[0] rule_name = rule.attributes['Name'].value logging.info('Found rules for %s', rule_name) # Proprocess Argument values __preprocess_arguments(rule) # Get all the values converted_values = [] __convert(rule, 'EnumProperty', converted_values, __convert_enum) __convert(rule, 'BoolProperty', converted_values, __convert_bool) __convert(rule, 'StringListProperty', converted_values, __convert_string_list) __convert(rule, 'StringProperty', converted_values, __convert_string) __convert(rule, 'IntProperty', converted_values, __convert_string) values[rule_name] = converted_values return values
python
def read_msbuild_xml(path, values={}): """Reads the MS Build XML file at the path and returns its contents. Keyword arguments: values -- The map to append the contents to (default {}) """ # Attempt to read the file contents try: document = parse(path) except Exception as e: logging.exception('Could not read MS Build XML file at %s', path) return values # Convert the XML to JSON format logging.info('Processing MS Build XML file at %s', path) # Get the rule node rule = document.getElementsByTagName('Rule')[0] rule_name = rule.attributes['Name'].value logging.info('Found rules for %s', rule_name) # Proprocess Argument values __preprocess_arguments(rule) # Get all the values converted_values = [] __convert(rule, 'EnumProperty', converted_values, __convert_enum) __convert(rule, 'BoolProperty', converted_values, __convert_bool) __convert(rule, 'StringListProperty', converted_values, __convert_string_list) __convert(rule, 'StringProperty', converted_values, __convert_string) __convert(rule, 'IntProperty', converted_values, __convert_string) values[rule_name] = converted_values return values
[ "def", "read_msbuild_xml", "(", "path", ",", "values", "=", "{", "}", ")", ":", "# Attempt to read the file contents", "try", ":", "document", "=", "parse", "(", "path", ")", "except", "Exception", "as", "e", ":", "logging", ".", "exception", "(", "'Could not read MS Build XML file at %s'", ",", "path", ")", "return", "values", "# Convert the XML to JSON format", "logging", ".", "info", "(", "'Processing MS Build XML file at %s'", ",", "path", ")", "# Get the rule node", "rule", "=", "document", ".", "getElementsByTagName", "(", "'Rule'", ")", "[", "0", "]", "rule_name", "=", "rule", ".", "attributes", "[", "'Name'", "]", ".", "value", "logging", ".", "info", "(", "'Found rules for %s'", ",", "rule_name", ")", "# Proprocess Argument values", "__preprocess_arguments", "(", "rule", ")", "# Get all the values", "converted_values", "=", "[", "]", "__convert", "(", "rule", ",", "'EnumProperty'", ",", "converted_values", ",", "__convert_enum", ")", "__convert", "(", "rule", ",", "'BoolProperty'", ",", "converted_values", ",", "__convert_bool", ")", "__convert", "(", "rule", ",", "'StringListProperty'", ",", "converted_values", ",", "__convert_string_list", ")", "__convert", "(", "rule", ",", "'StringProperty'", ",", "converted_values", ",", "__convert_string", ")", "__convert", "(", "rule", ",", "'IntProperty'", ",", "converted_values", ",", "__convert_string", ")", "values", "[", "rule_name", "]", "=", "converted_values", "return", "values" ]
Reads the MS Build XML file at the path and returns its contents. Keyword arguments: values -- The map to append the contents to (default {})
[ "Reads", "the", "MS", "Build", "XML", "file", "at", "the", "path", "and", "returns", "its", "contents", "." ]
74514c3f99e25b46f22c6e02977fe3da69221c2e
https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/deps/src/cmake-3.13.4/Source/cmConvertMSBuildXMLToJSON.py#L38-L76
28,719
apple/turicreate
deps/src/cmake-3.13.4/Source/cmConvertMSBuildXMLToJSON.py
read_msbuild_json
def read_msbuild_json(path, values=[]): """Reads the MS Build JSON file at the path and returns its contents. Keyword arguments: values -- The list to append the contents to (default []) """ if not os.path.exists(path): logging.info('Could not find MS Build JSON file at %s', path) return values try: values.extend(__read_json_file(path)) except Exception as e: logging.exception('Could not read MS Build JSON file at %s', path) return values logging.info('Processing MS Build JSON file at %s', path) return values
python
def read_msbuild_json(path, values=[]): """Reads the MS Build JSON file at the path and returns its contents. Keyword arguments: values -- The list to append the contents to (default []) """ if not os.path.exists(path): logging.info('Could not find MS Build JSON file at %s', path) return values try: values.extend(__read_json_file(path)) except Exception as e: logging.exception('Could not read MS Build JSON file at %s', path) return values logging.info('Processing MS Build JSON file at %s', path) return values
[ "def", "read_msbuild_json", "(", "path", ",", "values", "=", "[", "]", ")", ":", "if", "not", "os", ".", "path", ".", "exists", "(", "path", ")", ":", "logging", ".", "info", "(", "'Could not find MS Build JSON file at %s'", ",", "path", ")", "return", "values", "try", ":", "values", ".", "extend", "(", "__read_json_file", "(", "path", ")", ")", "except", "Exception", "as", "e", ":", "logging", ".", "exception", "(", "'Could not read MS Build JSON file at %s'", ",", "path", ")", "return", "values", "logging", ".", "info", "(", "'Processing MS Build JSON file at %s'", ",", "path", ")", "return", "values" ]
Reads the MS Build JSON file at the path and returns its contents. Keyword arguments: values -- The list to append the contents to (default [])
[ "Reads", "the", "MS", "Build", "JSON", "file", "at", "the", "path", "and", "returns", "its", "contents", "." ]
74514c3f99e25b46f22c6e02977fe3da69221c2e
https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/deps/src/cmake-3.13.4/Source/cmConvertMSBuildXMLToJSON.py#L79-L97
28,720
apple/turicreate
deps/src/cmake-3.13.4/Source/cmConvertMSBuildXMLToJSON.py
main
def main(): """Script entrypoint.""" # Parse the arguments parser = argparse.ArgumentParser( description='Convert MSBuild XML to JSON format') parser.add_argument( '-t', '--toolchain', help='The name of the toolchain', required=True) parser.add_argument( '-o', '--output', help='The output directory', default='') parser.add_argument( '-r', '--overwrite', help='Whether previously output should be overwritten', dest='overwrite', action='store_true') parser.set_defaults(overwrite=False) parser.add_argument( '-d', '--debug', help="Debug tool output", action="store_const", dest="loglevel", const=logging.DEBUG, default=logging.WARNING) parser.add_argument( '-v', '--verbose', help="Verbose output", action="store_const", dest="loglevel", const=logging.INFO) parser.add_argument('input', help='The input files', nargs='+') args = parser.parse_args() toolchain = args.toolchain logging.basicConfig(level=args.loglevel) logging.info('Creating %s toolchain files', toolchain) values = {} # Iterate through the inputs for input in args.input: input = __get_path(input) read_msbuild_xml(input, values) # Determine if the output directory needs to be created output_dir = __get_path(args.output) if not os.path.exists(output_dir): os.mkdir(output_dir) logging.info('Created output directory %s', output_dir) for key, value in values.items(): output_path = __output_path(toolchain, key, output_dir) if os.path.exists(output_path) and not args.overwrite: logging.info('Comparing previous output to current') __merge_json_values(value, read_msbuild_json(output_path)) else: logging.info('Original output will be overwritten') logging.info('Writing MS Build JSON file at %s', output_path) __write_json_file(output_path, value)
python
def main(): """Script entrypoint.""" # Parse the arguments parser = argparse.ArgumentParser( description='Convert MSBuild XML to JSON format') parser.add_argument( '-t', '--toolchain', help='The name of the toolchain', required=True) parser.add_argument( '-o', '--output', help='The output directory', default='') parser.add_argument( '-r', '--overwrite', help='Whether previously output should be overwritten', dest='overwrite', action='store_true') parser.set_defaults(overwrite=False) parser.add_argument( '-d', '--debug', help="Debug tool output", action="store_const", dest="loglevel", const=logging.DEBUG, default=logging.WARNING) parser.add_argument( '-v', '--verbose', help="Verbose output", action="store_const", dest="loglevel", const=logging.INFO) parser.add_argument('input', help='The input files', nargs='+') args = parser.parse_args() toolchain = args.toolchain logging.basicConfig(level=args.loglevel) logging.info('Creating %s toolchain files', toolchain) values = {} # Iterate through the inputs for input in args.input: input = __get_path(input) read_msbuild_xml(input, values) # Determine if the output directory needs to be created output_dir = __get_path(args.output) if not os.path.exists(output_dir): os.mkdir(output_dir) logging.info('Created output directory %s', output_dir) for key, value in values.items(): output_path = __output_path(toolchain, key, output_dir) if os.path.exists(output_path) and not args.overwrite: logging.info('Comparing previous output to current') __merge_json_values(value, read_msbuild_json(output_path)) else: logging.info('Original output will be overwritten') logging.info('Writing MS Build JSON file at %s', output_path) __write_json_file(output_path, value)
[ "def", "main", "(", ")", ":", "# Parse the arguments", "parser", "=", "argparse", ".", "ArgumentParser", "(", "description", "=", "'Convert MSBuild XML to JSON format'", ")", "parser", ".", "add_argument", "(", "'-t'", ",", "'--toolchain'", ",", "help", "=", "'The name of the toolchain'", ",", "required", "=", "True", ")", "parser", ".", "add_argument", "(", "'-o'", ",", "'--output'", ",", "help", "=", "'The output directory'", ",", "default", "=", "''", ")", "parser", ".", "add_argument", "(", "'-r'", ",", "'--overwrite'", ",", "help", "=", "'Whether previously output should be overwritten'", ",", "dest", "=", "'overwrite'", ",", "action", "=", "'store_true'", ")", "parser", ".", "set_defaults", "(", "overwrite", "=", "False", ")", "parser", ".", "add_argument", "(", "'-d'", ",", "'--debug'", ",", "help", "=", "\"Debug tool output\"", ",", "action", "=", "\"store_const\"", ",", "dest", "=", "\"loglevel\"", ",", "const", "=", "logging", ".", "DEBUG", ",", "default", "=", "logging", ".", "WARNING", ")", "parser", ".", "add_argument", "(", "'-v'", ",", "'--verbose'", ",", "help", "=", "\"Verbose output\"", ",", "action", "=", "\"store_const\"", ",", "dest", "=", "\"loglevel\"", ",", "const", "=", "logging", ".", "INFO", ")", "parser", ".", "add_argument", "(", "'input'", ",", "help", "=", "'The input files'", ",", "nargs", "=", "'+'", ")", "args", "=", "parser", ".", "parse_args", "(", ")", "toolchain", "=", "args", ".", "toolchain", "logging", ".", "basicConfig", "(", "level", "=", "args", ".", "loglevel", ")", "logging", ".", "info", "(", "'Creating %s toolchain files'", ",", "toolchain", ")", "values", "=", "{", "}", "# Iterate through the inputs", "for", "input", "in", "args", ".", "input", ":", "input", "=", "__get_path", "(", "input", ")", "read_msbuild_xml", "(", "input", ",", "values", ")", "# Determine if the output directory needs to be created", "output_dir", "=", "__get_path", "(", "args", ".", "output", ")", "if", "not", "os", ".", "path", ".", 
"exists", "(", "output_dir", ")", ":", "os", ".", "mkdir", "(", "output_dir", ")", "logging", ".", "info", "(", "'Created output directory %s'", ",", "output_dir", ")", "for", "key", ",", "value", "in", "values", ".", "items", "(", ")", ":", "output_path", "=", "__output_path", "(", "toolchain", ",", "key", ",", "output_dir", ")", "if", "os", ".", "path", ".", "exists", "(", "output_path", ")", "and", "not", "args", ".", "overwrite", ":", "logging", ".", "info", "(", "'Comparing previous output to current'", ")", "__merge_json_values", "(", "value", ",", "read_msbuild_json", "(", "output_path", ")", ")", "else", ":", "logging", ".", "info", "(", "'Original output will be overwritten'", ")", "logging", ".", "info", "(", "'Writing MS Build JSON file at %s'", ",", "output_path", ")", "__write_json_file", "(", "output_path", ",", "value", ")" ]
Script entrypoint.
[ "Script", "entrypoint", "." ]
74514c3f99e25b46f22c6e02977fe3da69221c2e
https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/deps/src/cmake-3.13.4/Source/cmConvertMSBuildXMLToJSON.py#L100-L168
28,721
apple/turicreate
deps/src/cmake-3.13.4/Source/cmConvertMSBuildXMLToJSON.py
__merge_json_values
def __merge_json_values(current, previous): """Merges the values between the current and previous run of the script.""" for value in current: name = value['name'] # Find the previous value previous_value = __find_and_remove_value(previous, value) if previous_value is not None: flags = value['flags'] previous_flags = previous_value['flags'] if flags != previous_flags: logging.warning( 'Flags for %s are different. Using previous value.', name) value['flags'] = previous_flags else: logging.warning('Value %s is a new value', name) for value in previous: name = value['name'] logging.warning( 'Value %s not present in current run. Appending value.', name) current.append(value)
python
def __merge_json_values(current, previous): """Merges the values between the current and previous run of the script.""" for value in current: name = value['name'] # Find the previous value previous_value = __find_and_remove_value(previous, value) if previous_value is not None: flags = value['flags'] previous_flags = previous_value['flags'] if flags != previous_flags: logging.warning( 'Flags for %s are different. Using previous value.', name) value['flags'] = previous_flags else: logging.warning('Value %s is a new value', name) for value in previous: name = value['name'] logging.warning( 'Value %s not present in current run. Appending value.', name) current.append(value)
[ "def", "__merge_json_values", "(", "current", ",", "previous", ")", ":", "for", "value", "in", "current", ":", "name", "=", "value", "[", "'name'", "]", "# Find the previous value", "previous_value", "=", "__find_and_remove_value", "(", "previous", ",", "value", ")", "if", "previous_value", "is", "not", "None", ":", "flags", "=", "value", "[", "'flags'", "]", "previous_flags", "=", "previous_value", "[", "'flags'", "]", "if", "flags", "!=", "previous_flags", ":", "logging", ".", "warning", "(", "'Flags for %s are different. Using previous value.'", ",", "name", ")", "value", "[", "'flags'", "]", "=", "previous_flags", "else", ":", "logging", ".", "warning", "(", "'Value %s is a new value'", ",", "name", ")", "for", "value", "in", "previous", ":", "name", "=", "value", "[", "'name'", "]", "logging", ".", "warning", "(", "'Value %s not present in current run. Appending value.'", ",", "name", ")", "current", ".", "append", "(", "value", ")" ]
Merges the values between the current and previous run of the script.
[ "Merges", "the", "values", "between", "the", "current", "and", "previous", "run", "of", "the", "script", "." ]
74514c3f99e25b46f22c6e02977fe3da69221c2e
https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/deps/src/cmake-3.13.4/Source/cmConvertMSBuildXMLToJSON.py#L173-L198
28,722
apple/turicreate
deps/src/cmake-3.13.4/Source/cmConvertMSBuildXMLToJSON.py
__find_and_remove_value
def __find_and_remove_value(list, compare): """Finds the value in the list that corresponds with the value of compare.""" # next throws if there are no matches try: found = next(value for value in list if value['name'] == compare['name'] and value['switch'] == compare['switch']) except: return None list.remove(found) return found
python
def __find_and_remove_value(list, compare): """Finds the value in the list that corresponds with the value of compare.""" # next throws if there are no matches try: found = next(value for value in list if value['name'] == compare['name'] and value['switch'] == compare['switch']) except: return None list.remove(found) return found
[ "def", "__find_and_remove_value", "(", "list", ",", "compare", ")", ":", "# next throws if there are no matches", "try", ":", "found", "=", "next", "(", "value", "for", "value", "in", "list", "if", "value", "[", "'name'", "]", "==", "compare", "[", "'name'", "]", "and", "value", "[", "'switch'", "]", "==", "compare", "[", "'switch'", "]", ")", "except", ":", "return", "None", "list", ".", "remove", "(", "found", ")", "return", "found" ]
Finds the value in the list that corresponds with the value of compare.
[ "Finds", "the", "value", "in", "the", "list", "that", "corresponds", "with", "the", "value", "of", "compare", "." ]
74514c3f99e25b46f22c6e02977fe3da69221c2e
https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/deps/src/cmake-3.13.4/Source/cmConvertMSBuildXMLToJSON.py#L201-L213
28,723
apple/turicreate
deps/src/cmake-3.13.4/Source/cmConvertMSBuildXMLToJSON.py
__convert
def __convert(root, tag, values, func): """Converts the tag type found in the root and converts them using the func and appends them to the values. """ elements = root.getElementsByTagName(tag) for element in elements: converted = func(element) # Append to the list __append_list(values, converted)
python
def __convert(root, tag, values, func): """Converts the tag type found in the root and converts them using the func and appends them to the values. """ elements = root.getElementsByTagName(tag) for element in elements: converted = func(element) # Append to the list __append_list(values, converted)
[ "def", "__convert", "(", "root", ",", "tag", ",", "values", ",", "func", ")", ":", "elements", "=", "root", ".", "getElementsByTagName", "(", "tag", ")", "for", "element", "in", "elements", ":", "converted", "=", "func", "(", "element", ")", "# Append to the list", "__append_list", "(", "values", ",", "converted", ")" ]
Converts the tag type found in the root and converts them using the func and appends them to the values.
[ "Converts", "the", "tag", "type", "found", "in", "the", "root", "and", "converts", "them", "using", "the", "func", "and", "appends", "them", "to", "the", "values", "." ]
74514c3f99e25b46f22c6e02977fe3da69221c2e
https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/deps/src/cmake-3.13.4/Source/cmConvertMSBuildXMLToJSON.py#L218-L228
28,724
apple/turicreate
deps/src/cmake-3.13.4/Source/cmConvertMSBuildXMLToJSON.py
__convert_enum
def __convert_enum(node): """Converts an EnumProperty node to JSON format.""" name = __get_attribute(node, 'Name') logging.debug('Found EnumProperty named %s', name) converted_values = [] for value in node.getElementsByTagName('EnumValue'): converted = __convert_node(value) converted['value'] = converted['name'] converted['name'] = name # Modify flags when there is an argument child __with_argument(value, converted) converted_values.append(converted) return converted_values
python
def __convert_enum(node): """Converts an EnumProperty node to JSON format.""" name = __get_attribute(node, 'Name') logging.debug('Found EnumProperty named %s', name) converted_values = [] for value in node.getElementsByTagName('EnumValue'): converted = __convert_node(value) converted['value'] = converted['name'] converted['name'] = name # Modify flags when there is an argument child __with_argument(value, converted) converted_values.append(converted) return converted_values
[ "def", "__convert_enum", "(", "node", ")", ":", "name", "=", "__get_attribute", "(", "node", ",", "'Name'", ")", "logging", ".", "debug", "(", "'Found EnumProperty named %s'", ",", "name", ")", "converted_values", "=", "[", "]", "for", "value", "in", "node", ".", "getElementsByTagName", "(", "'EnumValue'", ")", ":", "converted", "=", "__convert_node", "(", "value", ")", "converted", "[", "'value'", "]", "=", "converted", "[", "'name'", "]", "converted", "[", "'name'", "]", "=", "name", "# Modify flags when there is an argument child", "__with_argument", "(", "value", ",", "converted", ")", "converted_values", ".", "append", "(", "converted", ")", "return", "converted_values" ]
Converts an EnumProperty node to JSON format.
[ "Converts", "an", "EnumProperty", "node", "to", "JSON", "format", "." ]
74514c3f99e25b46f22c6e02977fe3da69221c2e
https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/deps/src/cmake-3.13.4/Source/cmConvertMSBuildXMLToJSON.py#L231-L249
28,725
apple/turicreate
deps/src/cmake-3.13.4/Source/cmConvertMSBuildXMLToJSON.py
__convert_bool
def __convert_bool(node): """Converts an BoolProperty node to JSON format.""" converted = __convert_node(node, default_value='true') # Check for a switch for reversing the value reverse_switch = __get_attribute(node, 'ReverseSwitch') if reverse_switch: converted_reverse = copy.deepcopy(converted) converted_reverse['switch'] = reverse_switch converted_reverse['value'] = 'false' return [converted_reverse, converted] # Modify flags when there is an argument child __with_argument(node, converted) return __check_for_flag(converted)
python
def __convert_bool(node): """Converts an BoolProperty node to JSON format.""" converted = __convert_node(node, default_value='true') # Check for a switch for reversing the value reverse_switch = __get_attribute(node, 'ReverseSwitch') if reverse_switch: converted_reverse = copy.deepcopy(converted) converted_reverse['switch'] = reverse_switch converted_reverse['value'] = 'false' return [converted_reverse, converted] # Modify flags when there is an argument child __with_argument(node, converted) return __check_for_flag(converted)
[ "def", "__convert_bool", "(", "node", ")", ":", "converted", "=", "__convert_node", "(", "node", ",", "default_value", "=", "'true'", ")", "# Check for a switch for reversing the value", "reverse_switch", "=", "__get_attribute", "(", "node", ",", "'ReverseSwitch'", ")", "if", "reverse_switch", ":", "converted_reverse", "=", "copy", ".", "deepcopy", "(", "converted", ")", "converted_reverse", "[", "'switch'", "]", "=", "reverse_switch", "converted_reverse", "[", "'value'", "]", "=", "'false'", "return", "[", "converted_reverse", ",", "converted", "]", "# Modify flags when there is an argument child", "__with_argument", "(", "node", ",", "converted", ")", "return", "__check_for_flag", "(", "converted", ")" ]
Converts an BoolProperty node to JSON format.
[ "Converts", "an", "BoolProperty", "node", "to", "JSON", "format", "." ]
74514c3f99e25b46f22c6e02977fe3da69221c2e
https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/deps/src/cmake-3.13.4/Source/cmConvertMSBuildXMLToJSON.py#L252-L270
28,726
apple/turicreate
deps/src/cmake-3.13.4/Source/cmConvertMSBuildXMLToJSON.py
__convert_string_list
def __convert_string_list(node): """Converts a StringListProperty node to JSON format.""" converted = __convert_node(node) # Determine flags for the string list flags = vsflags(VSFlags.UserValue) # Check for a separator to determine if it is semicolon appendable # If not present assume the value should be ; separator = __get_attribute(node, 'Separator', default_value=';') if separator == ';': flags = vsflags(flags, VSFlags.SemicolonAppendable) converted['flags'] = flags return __check_for_flag(converted)
python
def __convert_string_list(node): """Converts a StringListProperty node to JSON format.""" converted = __convert_node(node) # Determine flags for the string list flags = vsflags(VSFlags.UserValue) # Check for a separator to determine if it is semicolon appendable # If not present assume the value should be ; separator = __get_attribute(node, 'Separator', default_value=';') if separator == ';': flags = vsflags(flags, VSFlags.SemicolonAppendable) converted['flags'] = flags return __check_for_flag(converted)
[ "def", "__convert_string_list", "(", "node", ")", ":", "converted", "=", "__convert_node", "(", "node", ")", "# Determine flags for the string list", "flags", "=", "vsflags", "(", "VSFlags", ".", "UserValue", ")", "# Check for a separator to determine if it is semicolon appendable", "# If not present assume the value should be ;", "separator", "=", "__get_attribute", "(", "node", ",", "'Separator'", ",", "default_value", "=", "';'", ")", "if", "separator", "==", "';'", ":", "flags", "=", "vsflags", "(", "flags", ",", "VSFlags", ".", "SemicolonAppendable", ")", "converted", "[", "'flags'", "]", "=", "flags", "return", "__check_for_flag", "(", "converted", ")" ]
Converts a StringListProperty node to JSON format.
[ "Converts", "a", "StringListProperty", "node", "to", "JSON", "format", "." ]
74514c3f99e25b46f22c6e02977fe3da69221c2e
https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/deps/src/cmake-3.13.4/Source/cmConvertMSBuildXMLToJSON.py#L273-L289
28,727
apple/turicreate
deps/src/cmake-3.13.4/Source/cmConvertMSBuildXMLToJSON.py
__convert_string
def __convert_string(node): """Converts a StringProperty node to JSON format.""" converted = __convert_node(node, default_flags=vsflags(VSFlags.UserValue)) return __check_for_flag(converted)
python
def __convert_string(node): """Converts a StringProperty node to JSON format.""" converted = __convert_node(node, default_flags=vsflags(VSFlags.UserValue)) return __check_for_flag(converted)
[ "def", "__convert_string", "(", "node", ")", ":", "converted", "=", "__convert_node", "(", "node", ",", "default_flags", "=", "vsflags", "(", "VSFlags", ".", "UserValue", ")", ")", "return", "__check_for_flag", "(", "converted", ")" ]
Converts a StringProperty node to JSON format.
[ "Converts", "a", "StringProperty", "node", "to", "JSON", "format", "." ]
74514c3f99e25b46f22c6e02977fe3da69221c2e
https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/deps/src/cmake-3.13.4/Source/cmConvertMSBuildXMLToJSON.py#L292-L296
28,728
apple/turicreate
deps/src/cmake-3.13.4/Source/cmConvertMSBuildXMLToJSON.py
__convert_node
def __convert_node(node, default_value='', default_flags=vsflags()): """Converts a XML node to a JSON equivalent.""" name = __get_attribute(node, 'Name') logging.debug('Found %s named %s', node.tagName, name) converted = {} converted['name'] = name converted['switch'] = __get_attribute(node, 'Switch') converted['comment'] = __get_attribute(node, 'DisplayName') converted['value'] = default_value # Check for the Flags attribute in case it was created during preprocessing flags = __get_attribute(node, 'Flags') if flags: flags = flags.split(',') else: flags = default_flags converted['flags'] = flags return converted
python
def __convert_node(node, default_value='', default_flags=vsflags()): """Converts a XML node to a JSON equivalent.""" name = __get_attribute(node, 'Name') logging.debug('Found %s named %s', node.tagName, name) converted = {} converted['name'] = name converted['switch'] = __get_attribute(node, 'Switch') converted['comment'] = __get_attribute(node, 'DisplayName') converted['value'] = default_value # Check for the Flags attribute in case it was created during preprocessing flags = __get_attribute(node, 'Flags') if flags: flags = flags.split(',') else: flags = default_flags converted['flags'] = flags return converted
[ "def", "__convert_node", "(", "node", ",", "default_value", "=", "''", ",", "default_flags", "=", "vsflags", "(", ")", ")", ":", "name", "=", "__get_attribute", "(", "node", ",", "'Name'", ")", "logging", ".", "debug", "(", "'Found %s named %s'", ",", "node", ".", "tagName", ",", "name", ")", "converted", "=", "{", "}", "converted", "[", "'name'", "]", "=", "name", "converted", "[", "'switch'", "]", "=", "__get_attribute", "(", "node", ",", "'Switch'", ")", "converted", "[", "'comment'", "]", "=", "__get_attribute", "(", "node", ",", "'DisplayName'", ")", "converted", "[", "'value'", "]", "=", "default_value", "# Check for the Flags attribute in case it was created during preprocessing", "flags", "=", "__get_attribute", "(", "node", ",", "'Flags'", ")", "if", "flags", ":", "flags", "=", "flags", ".", "split", "(", "','", ")", "else", ":", "flags", "=", "default_flags", "converted", "[", "'flags'", "]", "=", "flags", "return", "converted" ]
Converts a XML node to a JSON equivalent.
[ "Converts", "a", "XML", "node", "to", "a", "JSON", "equivalent", "." ]
74514c3f99e25b46f22c6e02977fe3da69221c2e
https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/deps/src/cmake-3.13.4/Source/cmConvertMSBuildXMLToJSON.py#L299-L320
28,729
apple/turicreate
deps/src/cmake-3.13.4/Source/cmConvertMSBuildXMLToJSON.py
__with_argument
def __with_argument(node, value): """Modifies the flags in value if the node contains an Argument.""" arguments = node.getElementsByTagName('Argument') if arguments: logging.debug('Found argument within %s', value['name']) value['flags'] = vsflags(VSFlags.UserValueIgnored, VSFlags.Continue)
python
def __with_argument(node, value): """Modifies the flags in value if the node contains an Argument.""" arguments = node.getElementsByTagName('Argument') if arguments: logging.debug('Found argument within %s', value['name']) value['flags'] = vsflags(VSFlags.UserValueIgnored, VSFlags.Continue)
[ "def", "__with_argument", "(", "node", ",", "value", ")", ":", "arguments", "=", "node", ".", "getElementsByTagName", "(", "'Argument'", ")", "if", "arguments", ":", "logging", ".", "debug", "(", "'Found argument within %s'", ",", "value", "[", "'name'", "]", ")", "value", "[", "'flags'", "]", "=", "vsflags", "(", "VSFlags", ".", "UserValueIgnored", ",", "VSFlags", ".", "Continue", ")" ]
Modifies the flags in value if the node contains an Argument.
[ "Modifies", "the", "flags", "in", "value", "if", "the", "node", "contains", "an", "Argument", "." ]
74514c3f99e25b46f22c6e02977fe3da69221c2e
https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/deps/src/cmake-3.13.4/Source/cmConvertMSBuildXMLToJSON.py#L336-L342
28,730
apple/turicreate
deps/src/cmake-3.13.4/Source/cmConvertMSBuildXMLToJSON.py
__preprocess_arguments
def __preprocess_arguments(root): """Preprocesses occurrences of Argument within the root. Argument XML values reference other values within the document by name. The referenced value does not contain a switch. This function will add the switch associated with the argument. """ # Set the flags to require a value flags = ','.join(vsflags(VSFlags.UserValueRequired)) # Search through the arguments arguments = root.getElementsByTagName('Argument') for argument in arguments: reference = __get_attribute(argument, 'Property') found = None # Look for the argument within the root's children for child in root.childNodes: # Ignore Text nodes if isinstance(child, Element): name = __get_attribute(child, 'Name') if name == reference: found = child break if found is not None: logging.info('Found property named %s', reference) # Get the associated switch switch = __get_attribute(argument.parentNode, 'Switch') # See if there is already a switch associated with the element. if __get_attribute(found, 'Switch'): logging.debug('Copying node %s', reference) clone = found.cloneNode(True) root.insertBefore(clone, found) found = clone found.setAttribute('Switch', switch) found.setAttribute('Flags', flags) else: logging.warning('Could not find property named %s', reference)
python
def __preprocess_arguments(root): """Preprocesses occurrences of Argument within the root. Argument XML values reference other values within the document by name. The referenced value does not contain a switch. This function will add the switch associated with the argument. """ # Set the flags to require a value flags = ','.join(vsflags(VSFlags.UserValueRequired)) # Search through the arguments arguments = root.getElementsByTagName('Argument') for argument in arguments: reference = __get_attribute(argument, 'Property') found = None # Look for the argument within the root's children for child in root.childNodes: # Ignore Text nodes if isinstance(child, Element): name = __get_attribute(child, 'Name') if name == reference: found = child break if found is not None: logging.info('Found property named %s', reference) # Get the associated switch switch = __get_attribute(argument.parentNode, 'Switch') # See if there is already a switch associated with the element. if __get_attribute(found, 'Switch'): logging.debug('Copying node %s', reference) clone = found.cloneNode(True) root.insertBefore(clone, found) found = clone found.setAttribute('Switch', switch) found.setAttribute('Flags', flags) else: logging.warning('Could not find property named %s', reference)
[ "def", "__preprocess_arguments", "(", "root", ")", ":", "# Set the flags to require a value", "flags", "=", "','", ".", "join", "(", "vsflags", "(", "VSFlags", ".", "UserValueRequired", ")", ")", "# Search through the arguments", "arguments", "=", "root", ".", "getElementsByTagName", "(", "'Argument'", ")", "for", "argument", "in", "arguments", ":", "reference", "=", "__get_attribute", "(", "argument", ",", "'Property'", ")", "found", "=", "None", "# Look for the argument within the root's children", "for", "child", "in", "root", ".", "childNodes", ":", "# Ignore Text nodes", "if", "isinstance", "(", "child", ",", "Element", ")", ":", "name", "=", "__get_attribute", "(", "child", ",", "'Name'", ")", "if", "name", "==", "reference", ":", "found", "=", "child", "break", "if", "found", "is", "not", "None", ":", "logging", ".", "info", "(", "'Found property named %s'", ",", "reference", ")", "# Get the associated switch", "switch", "=", "__get_attribute", "(", "argument", ".", "parentNode", ",", "'Switch'", ")", "# See if there is already a switch associated with the element.", "if", "__get_attribute", "(", "found", ",", "'Switch'", ")", ":", "logging", ".", "debug", "(", "'Copying node %s'", ",", "reference", ")", "clone", "=", "found", ".", "cloneNode", "(", "True", ")", "root", ".", "insertBefore", "(", "clone", ",", "found", ")", "found", "=", "clone", "found", ".", "setAttribute", "(", "'Switch'", ",", "switch", ")", "found", ".", "setAttribute", "(", "'Flags'", ",", "flags", ")", "else", ":", "logging", ".", "warning", "(", "'Could not find property named %s'", ",", "reference", ")" ]
Preprocesses occurrences of Argument within the root. Argument XML values reference other values within the document by name. The referenced value does not contain a switch. This function will add the switch associated with the argument.
[ "Preprocesses", "occurrences", "of", "Argument", "within", "the", "root", "." ]
74514c3f99e25b46f22c6e02977fe3da69221c2e
https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/deps/src/cmake-3.13.4/Source/cmConvertMSBuildXMLToJSON.py#L345-L387
28,731
apple/turicreate
deps/src/cmake-3.13.4/Source/cmConvertMSBuildXMLToJSON.py
__get_attribute
def __get_attribute(node, name, default_value=''): """Retrieves the attribute of the given name from the node. If not present then the default_value is used. """ if node.hasAttribute(name): return node.attributes[name].value.strip() else: return default_value
python
def __get_attribute(node, name, default_value=''): """Retrieves the attribute of the given name from the node. If not present then the default_value is used. """ if node.hasAttribute(name): return node.attributes[name].value.strip() else: return default_value
[ "def", "__get_attribute", "(", "node", ",", "name", ",", "default_value", "=", "''", ")", ":", "if", "node", ".", "hasAttribute", "(", "name", ")", ":", "return", "node", ".", "attributes", "[", "name", "]", ".", "value", ".", "strip", "(", ")", "else", ":", "return", "default_value" ]
Retrieves the attribute of the given name from the node. If not present then the default_value is used.
[ "Retrieves", "the", "attribute", "of", "the", "given", "name", "from", "the", "node", "." ]
74514c3f99e25b46f22c6e02977fe3da69221c2e
https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/deps/src/cmake-3.13.4/Source/cmConvertMSBuildXMLToJSON.py#L390-L398
28,732
apple/turicreate
deps/src/cmake-3.13.4/Source/cmConvertMSBuildXMLToJSON.py
__get_path
def __get_path(path): """Gets the path to the file.""" if not os.path.isabs(path): path = os.path.join(os.getcwd(), path) return os.path.normpath(path)
python
def __get_path(path): """Gets the path to the file.""" if not os.path.isabs(path): path = os.path.join(os.getcwd(), path) return os.path.normpath(path)
[ "def", "__get_path", "(", "path", ")", ":", "if", "not", "os", ".", "path", ".", "isabs", "(", "path", ")", ":", "path", "=", "os", ".", "path", ".", "join", "(", "os", ".", "getcwd", "(", ")", ",", "path", ")", "return", "os", ".", "path", ".", "normpath", "(", "path", ")" ]
Gets the path to the file.
[ "Gets", "the", "path", "to", "the", "file", "." ]
74514c3f99e25b46f22c6e02977fe3da69221c2e
https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/deps/src/cmake-3.13.4/Source/cmConvertMSBuildXMLToJSON.py#L403-L408
28,733
apple/turicreate
deps/src/cmake-3.13.4/Source/cmConvertMSBuildXMLToJSON.py
__output_path
def __output_path(toolchain, rule, output_dir): """Gets the output path for a file given the toolchain, rule and output_dir""" filename = '%s_%s.json' % (toolchain, rule) return os.path.join(output_dir, filename)
python
def __output_path(toolchain, rule, output_dir): """Gets the output path for a file given the toolchain, rule and output_dir""" filename = '%s_%s.json' % (toolchain, rule) return os.path.join(output_dir, filename)
[ "def", "__output_path", "(", "toolchain", ",", "rule", ",", "output_dir", ")", ":", "filename", "=", "'%s_%s.json'", "%", "(", "toolchain", ",", "rule", ")", "return", "os", ".", "path", ".", "join", "(", "output_dir", ",", "filename", ")" ]
Gets the output path for a file given the toolchain, rule and output_dir
[ "Gets", "the", "output", "path", "for", "a", "file", "given", "the", "toolchain", "rule", "and", "output_dir" ]
74514c3f99e25b46f22c6e02977fe3da69221c2e
https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/deps/src/cmake-3.13.4/Source/cmConvertMSBuildXMLToJSON.py#L411-L414
28,734
apple/turicreate
deps/src/cmake-3.13.4/Source/cmConvertMSBuildXMLToJSON.py
__write_json_file
def __write_json_file(path, values): """Writes a JSON file at the path with the values provided.""" # Sort the keys to ensure ordering sort_order = ['name', 'switch', 'comment', 'value', 'flags'] sorted_values = [ OrderedDict( sorted( value.items(), key=lambda value: sort_order.index(value[0]))) for value in values ] with open(path, 'w') as f: json.dump(sorted_values, f, indent=2, separators=(',', ': '))
python
def __write_json_file(path, values): """Writes a JSON file at the path with the values provided.""" # Sort the keys to ensure ordering sort_order = ['name', 'switch', 'comment', 'value', 'flags'] sorted_values = [ OrderedDict( sorted( value.items(), key=lambda value: sort_order.index(value[0]))) for value in values ] with open(path, 'w') as f: json.dump(sorted_values, f, indent=2, separators=(',', ': '))
[ "def", "__write_json_file", "(", "path", ",", "values", ")", ":", "# Sort the keys to ensure ordering", "sort_order", "=", "[", "'name'", ",", "'switch'", ",", "'comment'", ",", "'value'", ",", "'flags'", "]", "sorted_values", "=", "[", "OrderedDict", "(", "sorted", "(", "value", ".", "items", "(", ")", ",", "key", "=", "lambda", "value", ":", "sort_order", ".", "index", "(", "value", "[", "0", "]", ")", ")", ")", "for", "value", "in", "values", "]", "with", "open", "(", "path", ",", "'w'", ")", "as", "f", ":", "json", ".", "dump", "(", "sorted_values", ",", "f", ",", "indent", "=", "2", ",", "separators", "=", "(", "','", ",", "': '", ")", ")" ]
Writes a JSON file at the path with the values provided.
[ "Writes", "a", "JSON", "file", "at", "the", "path", "with", "the", "values", "provided", "." ]
74514c3f99e25b46f22c6e02977fe3da69221c2e
https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/deps/src/cmake-3.13.4/Source/cmConvertMSBuildXMLToJSON.py#L425-L437
28,735
apple/turicreate
deps/src/cmake-3.13.4/Source/cmConvertMSBuildXMLToJSON.py
__append_list
def __append_list(append_to, value): """Appends the value to the list.""" if value is not None: if isinstance(value, list): append_to.extend(value) else: append_to.append(value)
python
def __append_list(append_to, value): """Appends the value to the list.""" if value is not None: if isinstance(value, list): append_to.extend(value) else: append_to.append(value)
[ "def", "__append_list", "(", "append_to", ",", "value", ")", ":", "if", "value", "is", "not", "None", ":", "if", "isinstance", "(", "value", ",", "list", ")", ":", "append_to", ".", "extend", "(", "value", ")", "else", ":", "append_to", ".", "append", "(", "value", ")" ]
Appends the value to the list.
[ "Appends", "the", "value", "to", "the", "list", "." ]
74514c3f99e25b46f22c6e02977fe3da69221c2e
https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/deps/src/cmake-3.13.4/Source/cmConvertMSBuildXMLToJSON.py#L442-L448
28,736
apple/turicreate
src/external/coremltools_wrap/coremltools/deps/protobuf/objectivec/DevTools/pddm.py
MacroCollection.ParseInput
def ParseInput(self, a_file): """Consumes input extracting definitions. Args: a_file: The file like stream to parse. Raises: PDDMError if there are any issues. """ input_lines = a_file.read().splitlines() self.ParseLines(input_lines)
python
def ParseInput(self, a_file): """Consumes input extracting definitions. Args: a_file: The file like stream to parse. Raises: PDDMError if there are any issues. """ input_lines = a_file.read().splitlines() self.ParseLines(input_lines)
[ "def", "ParseInput", "(", "self", ",", "a_file", ")", ":", "input_lines", "=", "a_file", ".", "read", "(", ")", ".", "splitlines", "(", ")", "self", ".", "ParseLines", "(", "input_lines", ")" ]
Consumes input extracting definitions. Args: a_file: The file like stream to parse. Raises: PDDMError if there are any issues.
[ "Consumes", "input", "extracting", "definitions", "." ]
74514c3f99e25b46f22c6e02977fe3da69221c2e
https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/external/coremltools_wrap/coremltools/deps/protobuf/objectivec/DevTools/pddm.py#L182-L192
28,737
apple/turicreate
src/external/coremltools_wrap/coremltools/deps/protobuf/objectivec/DevTools/pddm.py
MacroCollection.ParseLines
def ParseLines(self, input_lines): """Parses list of lines. Args: input_lines: A list of strings of input to parse (no newlines on the strings). Raises: PDDMError if there are any issues. """ current_macro = None for line in input_lines: if line.startswith('PDDM-'): directive = line.split(' ', 1)[0] if directive == 'PDDM-DEFINE': name, args = self._ParseDefineLine(line) if self._macros.get(name): raise PDDMError('Attempt to redefine macro: "%s"' % line) current_macro = self.MacroDefinition(name, args) self._macros[name] = current_macro continue if directive == 'PDDM-DEFINE-END': if not current_macro: raise PDDMError('Got DEFINE-END directive without an active macro:' ' "%s"' % line) current_macro = None continue raise PDDMError('Hit a line with an unknown directive: "%s"' % line) if current_macro: current_macro.AppendLine(line) continue # Allow blank lines between macro definitions. if line.strip() == '': continue raise PDDMError('Hit a line that wasn\'t a directive and no open macro' ' definition: "%s"' % line)
python
def ParseLines(self, input_lines): """Parses list of lines. Args: input_lines: A list of strings of input to parse (no newlines on the strings). Raises: PDDMError if there are any issues. """ current_macro = None for line in input_lines: if line.startswith('PDDM-'): directive = line.split(' ', 1)[0] if directive == 'PDDM-DEFINE': name, args = self._ParseDefineLine(line) if self._macros.get(name): raise PDDMError('Attempt to redefine macro: "%s"' % line) current_macro = self.MacroDefinition(name, args) self._macros[name] = current_macro continue if directive == 'PDDM-DEFINE-END': if not current_macro: raise PDDMError('Got DEFINE-END directive without an active macro:' ' "%s"' % line) current_macro = None continue raise PDDMError('Hit a line with an unknown directive: "%s"' % line) if current_macro: current_macro.AppendLine(line) continue # Allow blank lines between macro definitions. if line.strip() == '': continue raise PDDMError('Hit a line that wasn\'t a directive and no open macro' ' definition: "%s"' % line)
[ "def", "ParseLines", "(", "self", ",", "input_lines", ")", ":", "current_macro", "=", "None", "for", "line", "in", "input_lines", ":", "if", "line", ".", "startswith", "(", "'PDDM-'", ")", ":", "directive", "=", "line", ".", "split", "(", "' '", ",", "1", ")", "[", "0", "]", "if", "directive", "==", "'PDDM-DEFINE'", ":", "name", ",", "args", "=", "self", ".", "_ParseDefineLine", "(", "line", ")", "if", "self", ".", "_macros", ".", "get", "(", "name", ")", ":", "raise", "PDDMError", "(", "'Attempt to redefine macro: \"%s\"'", "%", "line", ")", "current_macro", "=", "self", ".", "MacroDefinition", "(", "name", ",", "args", ")", "self", ".", "_macros", "[", "name", "]", "=", "current_macro", "continue", "if", "directive", "==", "'PDDM-DEFINE-END'", ":", "if", "not", "current_macro", ":", "raise", "PDDMError", "(", "'Got DEFINE-END directive without an active macro:'", "' \"%s\"'", "%", "line", ")", "current_macro", "=", "None", "continue", "raise", "PDDMError", "(", "'Hit a line with an unknown directive: \"%s\"'", "%", "line", ")", "if", "current_macro", ":", "current_macro", ".", "AppendLine", "(", "line", ")", "continue", "# Allow blank lines between macro definitions.", "if", "line", ".", "strip", "(", ")", "==", "''", ":", "continue", "raise", "PDDMError", "(", "'Hit a line that wasn\\'t a directive and no open macro'", "' definition: \"%s\"'", "%", "line", ")" ]
Parses list of lines. Args: input_lines: A list of strings of input to parse (no newlines on the strings). Raises: PDDMError if there are any issues.
[ "Parses", "list", "of", "lines", "." ]
74514c3f99e25b46f22c6e02977fe3da69221c2e
https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/external/coremltools_wrap/coremltools/deps/protobuf/objectivec/DevTools/pddm.py#L194-L232
28,738
apple/turicreate
src/external/coremltools_wrap/coremltools/deps/protobuf/objectivec/DevTools/pddm.py
MacroCollection.Expand
def Expand(self, macro_ref_str): """Expands the macro reference. Args: macro_ref_str: String of a macro reference (i.e. foo(a, b)). Returns: The text from the expansion. Raises: PDDMError if there are any issues. """ match = _MACRO_RE.match(macro_ref_str) if match is None or match.group(0) != macro_ref_str: raise PDDMError('Failed to parse macro reference: "%s"' % macro_ref_str) if match.group('name') not in self._macros: raise PDDMError('No macro named "%s".' % match.group('name')) return self._Expand(match, [], macro_ref_str)
python
def Expand(self, macro_ref_str): """Expands the macro reference. Args: macro_ref_str: String of a macro reference (i.e. foo(a, b)). Returns: The text from the expansion. Raises: PDDMError if there are any issues. """ match = _MACRO_RE.match(macro_ref_str) if match is None or match.group(0) != macro_ref_str: raise PDDMError('Failed to parse macro reference: "%s"' % macro_ref_str) if match.group('name') not in self._macros: raise PDDMError('No macro named "%s".' % match.group('name')) return self._Expand(match, [], macro_ref_str)
[ "def", "Expand", "(", "self", ",", "macro_ref_str", ")", ":", "match", "=", "_MACRO_RE", ".", "match", "(", "macro_ref_str", ")", "if", "match", "is", "None", "or", "match", ".", "group", "(", "0", ")", "!=", "macro_ref_str", ":", "raise", "PDDMError", "(", "'Failed to parse macro reference: \"%s\"'", "%", "macro_ref_str", ")", "if", "match", ".", "group", "(", "'name'", ")", "not", "in", "self", ".", "_macros", ":", "raise", "PDDMError", "(", "'No macro named \"%s\".'", "%", "match", ".", "group", "(", "'name'", ")", ")", "return", "self", ".", "_Expand", "(", "match", ",", "[", "]", ",", "macro_ref_str", ")" ]
Expands the macro reference. Args: macro_ref_str: String of a macro reference (i.e. foo(a, b)). Returns: The text from the expansion. Raises: PDDMError if there are any issues.
[ "Expands", "the", "macro", "reference", "." ]
74514c3f99e25b46f22c6e02977fe3da69221c2e
https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/external/coremltools_wrap/coremltools/deps/protobuf/objectivec/DevTools/pddm.py#L259-L276
28,739
apple/turicreate
src/external/coremltools_wrap/coremltools/deps/protobuf/objectivec/DevTools/pddm.py
SourceFile.ProcessContent
def ProcessContent(self, strip_expansion=False): """Processes the file contents.""" self._ParseFile() if strip_expansion: # Without a collection the expansions become blank, removing them. collection = None else: collection = MacroCollection() for section in self._sections: section.BindMacroCollection(collection) result = '' for section in self._sections: result += section.text self._processed_content = result
python
def ProcessContent(self, strip_expansion=False): """Processes the file contents.""" self._ParseFile() if strip_expansion: # Without a collection the expansions become blank, removing them. collection = None else: collection = MacroCollection() for section in self._sections: section.BindMacroCollection(collection) result = '' for section in self._sections: result += section.text self._processed_content = result
[ "def", "ProcessContent", "(", "self", ",", "strip_expansion", "=", "False", ")", ":", "self", ".", "_ParseFile", "(", ")", "if", "strip_expansion", ":", "# Without a collection the expansions become blank, removing them.", "collection", "=", "None", "else", ":", "collection", "=", "MacroCollection", "(", ")", "for", "section", "in", "self", ".", "_sections", ":", "section", ".", "BindMacroCollection", "(", "collection", ")", "result", "=", "''", "for", "section", "in", "self", ".", "_sections", ":", "result", "+=", "section", ".", "text", "self", ".", "_processed_content", "=", "result" ]
Processes the file contents.
[ "Processes", "the", "file", "contents", "." ]
74514c3f99e25b46f22c6e02977fe3da69221c2e
https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/external/coremltools_wrap/coremltools/deps/protobuf/objectivec/DevTools/pddm.py#L601-L614
28,740
apple/turicreate
deps/src/boost_1_68_0/tools/build/src/build/feature.py
defaults
def defaults(features): """ Returns the default property values for the given features. """ assert is_iterable_typed(features, Feature) # FIXME: should merge feature and property modules. from . import property result = [] for f in features: if not f.free and not f.optional and f.default: result.append(property.Property(f, f.default)) return result
python
def defaults(features): """ Returns the default property values for the given features. """ assert is_iterable_typed(features, Feature) # FIXME: should merge feature and property modules. from . import property result = [] for f in features: if not f.free and not f.optional and f.default: result.append(property.Property(f, f.default)) return result
[ "def", "defaults", "(", "features", ")", ":", "assert", "is_iterable_typed", "(", "features", ",", "Feature", ")", "# FIXME: should merge feature and property modules.", "from", ".", "import", "property", "result", "=", "[", "]", "for", "f", "in", "features", ":", "if", "not", "f", ".", "free", "and", "not", "f", ".", "optional", "and", "f", ".", "default", ":", "result", ".", "append", "(", "property", ".", "Property", "(", "f", ",", "f", ".", "default", ")", ")", "return", "result" ]
Returns the default property values for the given features.
[ "Returns", "the", "default", "property", "values", "for", "the", "given", "features", "." ]
74514c3f99e25b46f22c6e02977fe3da69221c2e
https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/deps/src/boost_1_68_0/tools/build/src/build/feature.py#L186-L198
28,741
apple/turicreate
deps/src/boost_1_68_0/tools/build/src/build/feature.py
valid
def valid (names): """ Returns true iff all elements of names are valid features. """ if isinstance(names, str): names = [names] assert is_iterable_typed(names, basestring) return all(name in __all_features for name in names)
python
def valid (names): """ Returns true iff all elements of names are valid features. """ if isinstance(names, str): names = [names] assert is_iterable_typed(names, basestring) return all(name in __all_features for name in names)
[ "def", "valid", "(", "names", ")", ":", "if", "isinstance", "(", "names", ",", "str", ")", ":", "names", "=", "[", "names", "]", "assert", "is_iterable_typed", "(", "names", ",", "basestring", ")", "return", "all", "(", "name", "in", "__all_features", "for", "name", "in", "names", ")" ]
Returns true iff all elements of names are valid features.
[ "Returns", "true", "iff", "all", "elements", "of", "names", "are", "valid", "features", "." ]
74514c3f99e25b46f22c6e02977fe3da69221c2e
https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/deps/src/boost_1_68_0/tools/build/src/build/feature.py#L200-L207
28,742
apple/turicreate
deps/src/boost_1_68_0/tools/build/src/build/feature.py
values
def values (feature): """ Return the values of the given feature. """ assert isinstance(feature, basestring) validate_feature (feature) return __all_features[feature].values
python
def values (feature): """ Return the values of the given feature. """ assert isinstance(feature, basestring) validate_feature (feature) return __all_features[feature].values
[ "def", "values", "(", "feature", ")", ":", "assert", "isinstance", "(", "feature", ",", "basestring", ")", "validate_feature", "(", "feature", ")", "return", "__all_features", "[", "feature", "]", ".", "values" ]
Return the values of the given feature.
[ "Return", "the", "values", "of", "the", "given", "feature", "." ]
74514c3f99e25b46f22c6e02977fe3da69221c2e
https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/deps/src/boost_1_68_0/tools/build/src/build/feature.py#L215-L220
28,743
apple/turicreate
deps/src/boost_1_68_0/tools/build/src/build/feature.py
is_implicit_value
def is_implicit_value (value_string): """ Returns true iff 'value_string' is a value_string of an implicit feature. """ assert isinstance(value_string, basestring) if value_string in __implicit_features: return __implicit_features[value_string] v = value_string.split('-') if v[0] not in __implicit_features: return False feature = __implicit_features[v[0]] for subvalue in (v[1:]): if not __find_implied_subfeature(feature, subvalue, v[0]): return False return True
python
def is_implicit_value (value_string): """ Returns true iff 'value_string' is a value_string of an implicit feature. """ assert isinstance(value_string, basestring) if value_string in __implicit_features: return __implicit_features[value_string] v = value_string.split('-') if v[0] not in __implicit_features: return False feature = __implicit_features[v[0]] for subvalue in (v[1:]): if not __find_implied_subfeature(feature, subvalue, v[0]): return False return True
[ "def", "is_implicit_value", "(", "value_string", ")", ":", "assert", "isinstance", "(", "value_string", ",", "basestring", ")", "if", "value_string", "in", "__implicit_features", ":", "return", "__implicit_features", "[", "value_string", "]", "v", "=", "value_string", ".", "split", "(", "'-'", ")", "if", "v", "[", "0", "]", "not", "in", "__implicit_features", ":", "return", "False", "feature", "=", "__implicit_features", "[", "v", "[", "0", "]", "]", "for", "subvalue", "in", "(", "v", "[", "1", ":", "]", ")", ":", "if", "not", "__find_implied_subfeature", "(", "feature", ",", "subvalue", ",", "v", "[", "0", "]", ")", ":", "return", "False", "return", "True" ]
Returns true iff 'value_string' is a value_string of an implicit feature.
[ "Returns", "true", "iff", "value_string", "is", "a", "value_string", "of", "an", "implicit", "feature", "." ]
74514c3f99e25b46f22c6e02977fe3da69221c2e
https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/deps/src/boost_1_68_0/tools/build/src/build/feature.py#L222-L241
28,744
apple/turicreate
deps/src/boost_1_68_0/tools/build/src/build/feature.py
implied_feature
def implied_feature (implicit_value): """ Returns the implicit feature associated with the given implicit value. """ assert isinstance(implicit_value, basestring) components = implicit_value.split('-') if components[0] not in __implicit_features: raise InvalidValue ("'%s' is not a value of an implicit feature" % implicit_value) return __implicit_features[components[0]]
python
def implied_feature (implicit_value): """ Returns the implicit feature associated with the given implicit value. """ assert isinstance(implicit_value, basestring) components = implicit_value.split('-') if components[0] not in __implicit_features: raise InvalidValue ("'%s' is not a value of an implicit feature" % implicit_value) return __implicit_features[components[0]]
[ "def", "implied_feature", "(", "implicit_value", ")", ":", "assert", "isinstance", "(", "implicit_value", ",", "basestring", ")", "components", "=", "implicit_value", ".", "split", "(", "'-'", ")", "if", "components", "[", "0", "]", "not", "in", "__implicit_features", ":", "raise", "InvalidValue", "(", "\"'%s' is not a value of an implicit feature\"", "%", "implicit_value", ")", "return", "__implicit_features", "[", "components", "[", "0", "]", "]" ]
Returns the implicit feature associated with the given implicit value.
[ "Returns", "the", "implicit", "feature", "associated", "with", "the", "given", "implicit", "value", "." ]
74514c3f99e25b46f22c6e02977fe3da69221c2e
https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/deps/src/boost_1_68_0/tools/build/src/build/feature.py#L243-L252
28,745
apple/turicreate
deps/src/boost_1_68_0/tools/build/src/build/feature.py
validate_feature
def validate_feature (name): """ Checks if all name is a valid feature. Otherwise, raises an exception. """ assert isinstance(name, basestring) if name not in __all_features: raise InvalidFeature ("'%s' is not a valid feature name" % name) else: return __all_features[name]
python
def validate_feature (name): """ Checks if all name is a valid feature. Otherwise, raises an exception. """ assert isinstance(name, basestring) if name not in __all_features: raise InvalidFeature ("'%s' is not a valid feature name" % name) else: return __all_features[name]
[ "def", "validate_feature", "(", "name", ")", ":", "assert", "isinstance", "(", "name", ",", "basestring", ")", "if", "name", "not", "in", "__all_features", ":", "raise", "InvalidFeature", "(", "\"'%s' is not a valid feature name\"", "%", "name", ")", "else", ":", "return", "__all_features", "[", "name", "]" ]
Checks if all name is a valid feature. Otherwise, raises an exception.
[ "Checks", "if", "all", "name", "is", "a", "valid", "feature", ".", "Otherwise", "raises", "an", "exception", "." ]
74514c3f99e25b46f22c6e02977fe3da69221c2e
https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/deps/src/boost_1_68_0/tools/build/src/build/feature.py#L281-L288
28,746
apple/turicreate
deps/src/boost_1_68_0/tools/build/src/build/feature.py
expand_subfeatures
def expand_subfeatures(properties, dont_validate = False): """ Make all elements of properties corresponding to implicit features explicit, and express all subfeature values as separate properties in their own right. For example, the property gcc-2.95.2-linux-x86 might expand to <toolset>gcc <toolset-version>2.95.2 <toolset-os>linux <toolset-cpu>x86 properties: A sequence with elements of the form <feature>value-string or just value-string in the case of implicit features. : dont_validate: If True, no validation of value string will be done. """ if __debug__: from .property import Property assert is_iterable_typed(properties, Property) assert isinstance(dont_validate, int) # matches bools result = [] for p in properties: # Don't expand subfeatures in subfeatures if p.feature.subfeature: result.append (p) else: result.extend(__expand_subfeatures_aux (p, dont_validate)) return result
python
def expand_subfeatures(properties, dont_validate = False): """ Make all elements of properties corresponding to implicit features explicit, and express all subfeature values as separate properties in their own right. For example, the property gcc-2.95.2-linux-x86 might expand to <toolset>gcc <toolset-version>2.95.2 <toolset-os>linux <toolset-cpu>x86 properties: A sequence with elements of the form <feature>value-string or just value-string in the case of implicit features. : dont_validate: If True, no validation of value string will be done. """ if __debug__: from .property import Property assert is_iterable_typed(properties, Property) assert isinstance(dont_validate, int) # matches bools result = [] for p in properties: # Don't expand subfeatures in subfeatures if p.feature.subfeature: result.append (p) else: result.extend(__expand_subfeatures_aux (p, dont_validate)) return result
[ "def", "expand_subfeatures", "(", "properties", ",", "dont_validate", "=", "False", ")", ":", "if", "__debug__", ":", "from", ".", "property", "import", "Property", "assert", "is_iterable_typed", "(", "properties", ",", "Property", ")", "assert", "isinstance", "(", "dont_validate", ",", "int", ")", "# matches bools", "result", "=", "[", "]", "for", "p", "in", "properties", ":", "# Don't expand subfeatures in subfeatures", "if", "p", ".", "feature", ".", "subfeature", ":", "result", ".", "append", "(", "p", ")", "else", ":", "result", ".", "extend", "(", "__expand_subfeatures_aux", "(", "p", ",", "dont_validate", ")", ")", "return", "result" ]
Make all elements of properties corresponding to implicit features explicit, and express all subfeature values as separate properties in their own right. For example, the property gcc-2.95.2-linux-x86 might expand to <toolset>gcc <toolset-version>2.95.2 <toolset-os>linux <toolset-cpu>x86 properties: A sequence with elements of the form <feature>value-string or just value-string in the case of implicit features. : dont_validate: If True, no validation of value string will be done.
[ "Make", "all", "elements", "of", "properties", "corresponding", "to", "implicit", "features", "explicit", "and", "express", "all", "subfeature", "values", "as", "separate", "properties", "in", "their", "own", "right", ".", "For", "example", "the", "property" ]
74514c3f99e25b46f22c6e02977fe3da69221c2e
https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/deps/src/boost_1_68_0/tools/build/src/build/feature.py#L338-L367
28,747
apple/turicreate
deps/src/boost_1_68_0/tools/build/src/build/feature.py
extend
def extend (name, values): """ Adds the given values to the given feature. """ assert isinstance(name, basestring) assert is_iterable_typed(values, basestring) name = add_grist (name) __validate_feature (name) feature = __all_features [name] if feature.implicit: for v in values: if v in __implicit_features: raise BaseException ("'%s' is already associated with the feature '%s'" % (v, __implicit_features [v])) __implicit_features[v] = feature if values and not feature.values and not(feature.free or feature.optional): # This is the first value specified for this feature, # take it as default value feature.set_default(values[0]) feature.add_values(values)
python
def extend (name, values): """ Adds the given values to the given feature. """ assert isinstance(name, basestring) assert is_iterable_typed(values, basestring) name = add_grist (name) __validate_feature (name) feature = __all_features [name] if feature.implicit: for v in values: if v in __implicit_features: raise BaseException ("'%s' is already associated with the feature '%s'" % (v, __implicit_features [v])) __implicit_features[v] = feature if values and not feature.values and not(feature.free or feature.optional): # This is the first value specified for this feature, # take it as default value feature.set_default(values[0]) feature.add_values(values)
[ "def", "extend", "(", "name", ",", "values", ")", ":", "assert", "isinstance", "(", "name", ",", "basestring", ")", "assert", "is_iterable_typed", "(", "values", ",", "basestring", ")", "name", "=", "add_grist", "(", "name", ")", "__validate_feature", "(", "name", ")", "feature", "=", "__all_features", "[", "name", "]", "if", "feature", ".", "implicit", ":", "for", "v", "in", "values", ":", "if", "v", "in", "__implicit_features", ":", "raise", "BaseException", "(", "\"'%s' is already associated with the feature '%s'\"", "%", "(", "v", ",", "__implicit_features", "[", "v", "]", ")", ")", "__implicit_features", "[", "v", "]", "=", "feature", "if", "values", "and", "not", "feature", ".", "values", "and", "not", "(", "feature", ".", "free", "or", "feature", ".", "optional", ")", ":", "# This is the first value specified for this feature,", "# take it as default value", "feature", ".", "set_default", "(", "values", "[", "0", "]", ")", "feature", ".", "add_values", "(", "values", ")" ]
Adds the given values to the given feature.
[ "Adds", "the", "given", "values", "to", "the", "given", "feature", "." ]
74514c3f99e25b46f22c6e02977fe3da69221c2e
https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/deps/src/boost_1_68_0/tools/build/src/build/feature.py#L389-L410
28,748
apple/turicreate
deps/src/boost_1_68_0/tools/build/src/build/feature.py
validate_value_string
def validate_value_string (f, value_string): """ Checks that value-string is a valid value-string for the given feature. """ assert isinstance(f, Feature) assert isinstance(value_string, basestring) if f.free or value_string in f.values: return values = [value_string] if f.subfeatures: if not value_string in f.values and \ not value_string in f.subfeatures: values = value_string.split('-') # An empty value is allowed for optional features if not values[0] in f.values and \ (values[0] or not f.optional): raise InvalidValue ("'%s' is not a known value of feature '%s'\nlegal values: '%s'" % (values [0], f.name, f.values)) for v in values [1:]: # this will validate any subfeature values in value-string implied_subfeature(f, v, values[0])
python
def validate_value_string (f, value_string): """ Checks that value-string is a valid value-string for the given feature. """ assert isinstance(f, Feature) assert isinstance(value_string, basestring) if f.free or value_string in f.values: return values = [value_string] if f.subfeatures: if not value_string in f.values and \ not value_string in f.subfeatures: values = value_string.split('-') # An empty value is allowed for optional features if not values[0] in f.values and \ (values[0] or not f.optional): raise InvalidValue ("'%s' is not a known value of feature '%s'\nlegal values: '%s'" % (values [0], f.name, f.values)) for v in values [1:]: # this will validate any subfeature values in value-string implied_subfeature(f, v, values[0])
[ "def", "validate_value_string", "(", "f", ",", "value_string", ")", ":", "assert", "isinstance", "(", "f", ",", "Feature", ")", "assert", "isinstance", "(", "value_string", ",", "basestring", ")", "if", "f", ".", "free", "or", "value_string", "in", "f", ".", "values", ":", "return", "values", "=", "[", "value_string", "]", "if", "f", ".", "subfeatures", ":", "if", "not", "value_string", "in", "f", ".", "values", "and", "not", "value_string", "in", "f", ".", "subfeatures", ":", "values", "=", "value_string", ".", "split", "(", "'-'", ")", "# An empty value is allowed for optional features", "if", "not", "values", "[", "0", "]", "in", "f", ".", "values", "and", "(", "values", "[", "0", "]", "or", "not", "f", ".", "optional", ")", ":", "raise", "InvalidValue", "(", "\"'%s' is not a known value of feature '%s'\\nlegal values: '%s'\"", "%", "(", "values", "[", "0", "]", ",", "f", ".", "name", ",", "f", ".", "values", ")", ")", "for", "v", "in", "values", "[", "1", ":", "]", ":", "# this will validate any subfeature values in value-string", "implied_subfeature", "(", "f", ",", "v", ",", "values", "[", "0", "]", ")" ]
Checks that value-string is a valid value-string for the given feature.
[ "Checks", "that", "value", "-", "string", "is", "a", "valid", "value", "-", "string", "for", "the", "given", "feature", "." ]
74514c3f99e25b46f22c6e02977fe3da69221c2e
https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/deps/src/boost_1_68_0/tools/build/src/build/feature.py#L412-L434
28,749
apple/turicreate
deps/src/boost_1_68_0/tools/build/src/build/feature.py
compose
def compose (composite_property_s, component_properties_s): """ Sets the components of the given composite property. All parameters are <feature>value strings """ from . import property component_properties_s = to_seq (component_properties_s) composite_property = property.create_from_string(composite_property_s) f = composite_property.feature if len(component_properties_s) > 0 and isinstance(component_properties_s[0], property.Property): component_properties = component_properties_s else: component_properties = [property.create_from_string(p) for p in component_properties_s] if not f.composite: raise BaseException ("'%s' is not a composite feature" % f) if property in __composite_properties: raise BaseException ('components of "%s" already set: %s' % (composite_property, str (__composite_properties[composite_property]))) if composite_property in component_properties: raise BaseException ('composite property "%s" cannot have itself as a component' % composite_property) __composite_properties[composite_property] = component_properties
python
def compose (composite_property_s, component_properties_s): """ Sets the components of the given composite property. All parameters are <feature>value strings """ from . import property component_properties_s = to_seq (component_properties_s) composite_property = property.create_from_string(composite_property_s) f = composite_property.feature if len(component_properties_s) > 0 and isinstance(component_properties_s[0], property.Property): component_properties = component_properties_s else: component_properties = [property.create_from_string(p) for p in component_properties_s] if not f.composite: raise BaseException ("'%s' is not a composite feature" % f) if property in __composite_properties: raise BaseException ('components of "%s" already set: %s' % (composite_property, str (__composite_properties[composite_property]))) if composite_property in component_properties: raise BaseException ('composite property "%s" cannot have itself as a component' % composite_property) __composite_properties[composite_property] = component_properties
[ "def", "compose", "(", "composite_property_s", ",", "component_properties_s", ")", ":", "from", ".", "import", "property", "component_properties_s", "=", "to_seq", "(", "component_properties_s", ")", "composite_property", "=", "property", ".", "create_from_string", "(", "composite_property_s", ")", "f", "=", "composite_property", ".", "feature", "if", "len", "(", "component_properties_s", ")", ">", "0", "and", "isinstance", "(", "component_properties_s", "[", "0", "]", ",", "property", ".", "Property", ")", ":", "component_properties", "=", "component_properties_s", "else", ":", "component_properties", "=", "[", "property", ".", "create_from_string", "(", "p", ")", "for", "p", "in", "component_properties_s", "]", "if", "not", "f", ".", "composite", ":", "raise", "BaseException", "(", "\"'%s' is not a composite feature\"", "%", "f", ")", "if", "property", "in", "__composite_properties", ":", "raise", "BaseException", "(", "'components of \"%s\" already set: %s'", "%", "(", "composite_property", ",", "str", "(", "__composite_properties", "[", "composite_property", "]", ")", ")", ")", "if", "composite_property", "in", "component_properties", ":", "raise", "BaseException", "(", "'composite property \"%s\" cannot have itself as a component'", "%", "composite_property", ")", "__composite_properties", "[", "composite_property", "]", "=", "component_properties" ]
Sets the components of the given composite property. All parameters are <feature>value strings
[ "Sets", "the", "components", "of", "the", "given", "composite", "property", "." ]
74514c3f99e25b46f22c6e02977fe3da69221c2e
https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/deps/src/boost_1_68_0/tools/build/src/build/feature.py#L513-L538
28,750
apple/turicreate
deps/src/boost_1_68_0/tools/build/src/build/feature.py
get_values
def get_values (feature, properties): """ Returns all values of the given feature specified by the given property set. """ if feature[0] != '<': feature = '<' + feature + '>' result = [] for p in properties: if get_grist (p) == feature: result.append (replace_grist (p, '')) return result
python
def get_values (feature, properties): """ Returns all values of the given feature specified by the given property set. """ if feature[0] != '<': feature = '<' + feature + '>' result = [] for p in properties: if get_grist (p) == feature: result.append (replace_grist (p, '')) return result
[ "def", "get_values", "(", "feature", ",", "properties", ")", ":", "if", "feature", "[", "0", "]", "!=", "'<'", ":", "feature", "=", "'<'", "+", "feature", "+", "'>'", "result", "=", "[", "]", "for", "p", "in", "properties", ":", "if", "get_grist", "(", "p", ")", "==", "feature", ":", "result", ".", "append", "(", "replace_grist", "(", "p", ",", "''", ")", ")", "return", "result" ]
Returns all values of the given feature specified by the given property set.
[ "Returns", "all", "values", "of", "the", "given", "feature", "specified", "by", "the", "given", "property", "set", "." ]
74514c3f99e25b46f22c6e02977fe3da69221c2e
https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/deps/src/boost_1_68_0/tools/build/src/build/feature.py#L552-L562
28,751
apple/turicreate
deps/src/boost_1_68_0/tools/build/src/build/feature.py
expand_composites
def expand_composites (properties): """ Expand all composite properties in the set so that all components are explicitly expressed. """ if __debug__: from .property import Property assert is_iterable_typed(properties, Property) explicit_features = set(p.feature for p in properties) result = [] # now expand composite features for p in properties: expanded = expand_composite(p) for x in expanded: if not x in result: f = x.feature if f.free: result.append (x) elif not x in properties: # x is the result of expansion if not f in explicit_features: # not explicitly-specified if any(r.feature == f for r in result): raise FeatureConflict( "expansions of composite features result in " "conflicting values for '%s'\nvalues: '%s'\none contributing composite property was '%s'" % (f.name, [r.value for r in result if r.feature == f] + [x.value], p)) else: result.append (x) elif any(r.feature == f for r in result): raise FeatureConflict ("explicitly-specified values of non-free feature '%s' conflict\n" "existing values: '%s'\nvalue from expanding '%s': '%s'" % (f, [r.value for r in result if r.feature == f], p, x.value)) else: result.append (x) return result
python
def expand_composites (properties): """ Expand all composite properties in the set so that all components are explicitly expressed. """ if __debug__: from .property import Property assert is_iterable_typed(properties, Property) explicit_features = set(p.feature for p in properties) result = [] # now expand composite features for p in properties: expanded = expand_composite(p) for x in expanded: if not x in result: f = x.feature if f.free: result.append (x) elif not x in properties: # x is the result of expansion if not f in explicit_features: # not explicitly-specified if any(r.feature == f for r in result): raise FeatureConflict( "expansions of composite features result in " "conflicting values for '%s'\nvalues: '%s'\none contributing composite property was '%s'" % (f.name, [r.value for r in result if r.feature == f] + [x.value], p)) else: result.append (x) elif any(r.feature == f for r in result): raise FeatureConflict ("explicitly-specified values of non-free feature '%s' conflict\n" "existing values: '%s'\nvalue from expanding '%s': '%s'" % (f, [r.value for r in result if r.feature == f], p, x.value)) else: result.append (x) return result
[ "def", "expand_composites", "(", "properties", ")", ":", "if", "__debug__", ":", "from", ".", "property", "import", "Property", "assert", "is_iterable_typed", "(", "properties", ",", "Property", ")", "explicit_features", "=", "set", "(", "p", ".", "feature", "for", "p", "in", "properties", ")", "result", "=", "[", "]", "# now expand composite features", "for", "p", "in", "properties", ":", "expanded", "=", "expand_composite", "(", "p", ")", "for", "x", "in", "expanded", ":", "if", "not", "x", "in", "result", ":", "f", "=", "x", ".", "feature", "if", "f", ".", "free", ":", "result", ".", "append", "(", "x", ")", "elif", "not", "x", "in", "properties", ":", "# x is the result of expansion", "if", "not", "f", "in", "explicit_features", ":", "# not explicitly-specified", "if", "any", "(", "r", ".", "feature", "==", "f", "for", "r", "in", "result", ")", ":", "raise", "FeatureConflict", "(", "\"expansions of composite features result in \"", "\"conflicting values for '%s'\\nvalues: '%s'\\none contributing composite property was '%s'\"", "%", "(", "f", ".", "name", ",", "[", "r", ".", "value", "for", "r", "in", "result", "if", "r", ".", "feature", "==", "f", "]", "+", "[", "x", ".", "value", "]", ",", "p", ")", ")", "else", ":", "result", ".", "append", "(", "x", ")", "elif", "any", "(", "r", ".", "feature", "==", "f", "for", "r", "in", "result", ")", ":", "raise", "FeatureConflict", "(", "\"explicitly-specified values of non-free feature '%s' conflict\\n\"", "\"existing values: '%s'\\nvalue from expanding '%s': '%s'\"", "%", "(", "f", ",", "[", "r", ".", "value", "for", "r", "in", "result", "if", "r", ".", "feature", "==", "f", "]", ",", "p", ",", "x", ".", "value", ")", ")", "else", ":", "result", ".", "append", "(", "x", ")", "return", "result" ]
Expand all composite properties in the set so that all components are explicitly expressed.
[ "Expand", "all", "composite", "properties", "in", "the", "set", "so", "that", "all", "components", "are", "explicitly", "expressed", "." ]
74514c3f99e25b46f22c6e02977fe3da69221c2e
https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/deps/src/boost_1_68_0/tools/build/src/build/feature.py#L569-L606
28,752
apple/turicreate
deps/src/boost_1_68_0/tools/build/src/build/feature.py
is_subfeature_of
def is_subfeature_of (parent_property, f): """ Return true iff f is an ordinary subfeature of the parent_property's feature, or if f is a subfeature of the parent_property's feature specific to the parent_property's value. """ if __debug__: from .property import Property assert isinstance(parent_property, Property) assert isinstance(f, Feature) if not f.subfeature: return False p = f.parent if not p: return False parent_feature = p[0] parent_value = p[1] if parent_feature != parent_property.feature: return False if parent_value and parent_value != parent_property.value: return False return True
python
def is_subfeature_of (parent_property, f): """ Return true iff f is an ordinary subfeature of the parent_property's feature, or if f is a subfeature of the parent_property's feature specific to the parent_property's value. """ if __debug__: from .property import Property assert isinstance(parent_property, Property) assert isinstance(f, Feature) if not f.subfeature: return False p = f.parent if not p: return False parent_feature = p[0] parent_value = p[1] if parent_feature != parent_property.feature: return False if parent_value and parent_value != parent_property.value: return False return True
[ "def", "is_subfeature_of", "(", "parent_property", ",", "f", ")", ":", "if", "__debug__", ":", "from", ".", "property", "import", "Property", "assert", "isinstance", "(", "parent_property", ",", "Property", ")", "assert", "isinstance", "(", "f", ",", "Feature", ")", "if", "not", "f", ".", "subfeature", ":", "return", "False", "p", "=", "f", ".", "parent", "if", "not", "p", ":", "return", "False", "parent_feature", "=", "p", "[", "0", "]", "parent_value", "=", "p", "[", "1", "]", "if", "parent_feature", "!=", "parent_property", ".", "feature", ":", "return", "False", "if", "parent_value", "and", "parent_value", "!=", "parent_property", ".", "value", ":", "return", "False", "return", "True" ]
Return true iff f is an ordinary subfeature of the parent_property's feature, or if f is a subfeature of the parent_property's feature specific to the parent_property's value.
[ "Return", "true", "iff", "f", "is", "an", "ordinary", "subfeature", "of", "the", "parent_property", "s", "feature", "or", "if", "f", "is", "a", "subfeature", "of", "the", "parent_property", "s", "feature", "specific", "to", "the", "parent_property", "s", "value", "." ]
74514c3f99e25b46f22c6e02977fe3da69221c2e
https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/deps/src/boost_1_68_0/tools/build/src/build/feature.py#L609-L635
28,753
apple/turicreate
deps/src/boost_1_68_0/tools/build/src/build/feature.py
__is_subproperty_of
def __is_subproperty_of (parent_property, p): """ As is_subfeature_of, for subproperties. """ if __debug__: from .property import Property assert isinstance(parent_property, Property) assert isinstance(p, Property) return is_subfeature_of (parent_property, p.feature)
python
def __is_subproperty_of (parent_property, p): """ As is_subfeature_of, for subproperties. """ if __debug__: from .property import Property assert isinstance(parent_property, Property) assert isinstance(p, Property) return is_subfeature_of (parent_property, p.feature)
[ "def", "__is_subproperty_of", "(", "parent_property", ",", "p", ")", ":", "if", "__debug__", ":", "from", ".", "property", "import", "Property", "assert", "isinstance", "(", "parent_property", ",", "Property", ")", "assert", "isinstance", "(", "p", ",", "Property", ")", "return", "is_subfeature_of", "(", "parent_property", ",", "p", ".", "feature", ")" ]
As is_subfeature_of, for subproperties.
[ "As", "is_subfeature_of", "for", "subproperties", "." ]
74514c3f99e25b46f22c6e02977fe3da69221c2e
https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/deps/src/boost_1_68_0/tools/build/src/build/feature.py#L637-L644
28,754
apple/turicreate
deps/src/boost_1_68_0/tools/build/src/build/feature.py
expand
def expand (properties): """ Given a property set which may consist of composite and implicit properties and combined subfeature values, returns an expanded, normalized property set with all implicit features expressed explicitly, all subfeature values individually expressed, and all components of composite properties expanded. Non-free features directly expressed in the input properties cause any values of those features due to composite feature expansion to be dropped. If two values of a given non-free feature are directly expressed in the input, an error is issued. """ if __debug__: from .property import Property assert is_iterable_typed(properties, Property) expanded = expand_subfeatures(properties) return expand_composites (expanded)
python
def expand (properties): """ Given a property set which may consist of composite and implicit properties and combined subfeature values, returns an expanded, normalized property set with all implicit features expressed explicitly, all subfeature values individually expressed, and all components of composite properties expanded. Non-free features directly expressed in the input properties cause any values of those features due to composite feature expansion to be dropped. If two values of a given non-free feature are directly expressed in the input, an error is issued. """ if __debug__: from .property import Property assert is_iterable_typed(properties, Property) expanded = expand_subfeatures(properties) return expand_composites (expanded)
[ "def", "expand", "(", "properties", ")", ":", "if", "__debug__", ":", "from", ".", "property", "import", "Property", "assert", "is_iterable_typed", "(", "properties", ",", "Property", ")", "expanded", "=", "expand_subfeatures", "(", "properties", ")", "return", "expand_composites", "(", "expanded", ")" ]
Given a property set which may consist of composite and implicit properties and combined subfeature values, returns an expanded, normalized property set with all implicit features expressed explicitly, all subfeature values individually expressed, and all components of composite properties expanded. Non-free features directly expressed in the input properties cause any values of those features due to composite feature expansion to be dropped. If two values of a given non-free feature are directly expressed in the input, an error is issued.
[ "Given", "a", "property", "set", "which", "may", "consist", "of", "composite", "and", "implicit", "properties", "and", "combined", "subfeature", "values", "returns", "an", "expanded", "normalized", "property", "set", "with", "all", "implicit", "features", "expressed", "explicitly", "all", "subfeature", "values", "individually", "expressed", "and", "all", "components", "of", "composite", "properties", "expanded", ".", "Non", "-", "free", "features", "directly", "expressed", "in", "the", "input", "properties", "cause", "any", "values", "of", "those", "features", "due", "to", "composite", "feature", "expansion", "to", "be", "dropped", ".", "If", "two", "values", "of", "a", "given", "non", "-", "free", "feature", "are", "directly", "expressed", "in", "the", "input", "an", "error", "is", "issued", "." ]
74514c3f99e25b46f22c6e02977fe3da69221c2e
https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/deps/src/boost_1_68_0/tools/build/src/build/feature.py#L664-L679
28,755
apple/turicreate
deps/src/boost_1_68_0/tools/build/src/build/feature.py
compress_subproperties
def compress_subproperties (properties): """ Combine all subproperties into their parent properties Requires: for every subproperty, there is a parent property. All features are explicitly expressed. This rule probably shouldn't be needed, but build-request.expand-no-defaults is being abused for unintended purposes and it needs help """ from .property import Property assert is_iterable_typed(properties, Property) result = [] matched_subs = set() all_subs = set() for p in properties: f = p.feature if not f.subfeature: subs = [x for x in properties if is_subfeature_of(p, x.feature)] if subs: matched_subs.update(subs) subvalues = '-'.join (sub.value for sub in subs) result.append(Property( p.feature, p.value + '-' + subvalues, p.condition)) else: result.append(p) else: all_subs.add(p) # TODO: this variables are used just for debugging. What's the overhead? assert all_subs == matched_subs return result
python
def compress_subproperties (properties): """ Combine all subproperties into their parent properties Requires: for every subproperty, there is a parent property. All features are explicitly expressed. This rule probably shouldn't be needed, but build-request.expand-no-defaults is being abused for unintended purposes and it needs help """ from .property import Property assert is_iterable_typed(properties, Property) result = [] matched_subs = set() all_subs = set() for p in properties: f = p.feature if not f.subfeature: subs = [x for x in properties if is_subfeature_of(p, x.feature)] if subs: matched_subs.update(subs) subvalues = '-'.join (sub.value for sub in subs) result.append(Property( p.feature, p.value + '-' + subvalues, p.condition)) else: result.append(p) else: all_subs.add(p) # TODO: this variables are used just for debugging. What's the overhead? assert all_subs == matched_subs return result
[ "def", "compress_subproperties", "(", "properties", ")", ":", "from", ".", "property", "import", "Property", "assert", "is_iterable_typed", "(", "properties", ",", "Property", ")", "result", "=", "[", "]", "matched_subs", "=", "set", "(", ")", "all_subs", "=", "set", "(", ")", "for", "p", "in", "properties", ":", "f", "=", "p", ".", "feature", "if", "not", "f", ".", "subfeature", ":", "subs", "=", "[", "x", "for", "x", "in", "properties", "if", "is_subfeature_of", "(", "p", ",", "x", ".", "feature", ")", "]", "if", "subs", ":", "matched_subs", ".", "update", "(", "subs", ")", "subvalues", "=", "'-'", ".", "join", "(", "sub", ".", "value", "for", "sub", "in", "subs", ")", "result", ".", "append", "(", "Property", "(", "p", ".", "feature", ",", "p", ".", "value", "+", "'-'", "+", "subvalues", ",", "p", ".", "condition", ")", ")", "else", ":", "result", ".", "append", "(", "p", ")", "else", ":", "all_subs", ".", "add", "(", "p", ")", "# TODO: this variables are used just for debugging. What's the overhead?", "assert", "all_subs", "==", "matched_subs", "return", "result" ]
Combine all subproperties into their parent properties Requires: for every subproperty, there is a parent property. All features are explicitly expressed. This rule probably shouldn't be needed, but build-request.expand-no-defaults is being abused for unintended purposes and it needs help
[ "Combine", "all", "subproperties", "into", "their", "parent", "properties" ]
74514c3f99e25b46f22c6e02977fe3da69221c2e
https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/deps/src/boost_1_68_0/tools/build/src/build/feature.py#L819-L856
28,756
apple/turicreate
deps/src/boost_1_68_0/tools/build/src/build/feature.py
__select_subfeatures
def __select_subfeatures (parent_property, features): """ Given a property, return the subset of features consisting of all ordinary subfeatures of the property's feature, and all specific subfeatures of the property's feature which are conditional on the property's value. """ if __debug__: from .property import Property assert isinstance(parent_property, Property) assert is_iterable_typed(features, Feature) return [f for f in features if is_subfeature_of (parent_property, f)]
python
def __select_subfeatures (parent_property, features): """ Given a property, return the subset of features consisting of all ordinary subfeatures of the property's feature, and all specific subfeatures of the property's feature which are conditional on the property's value. """ if __debug__: from .property import Property assert isinstance(parent_property, Property) assert is_iterable_typed(features, Feature) return [f for f in features if is_subfeature_of (parent_property, f)]
[ "def", "__select_subfeatures", "(", "parent_property", ",", "features", ")", ":", "if", "__debug__", ":", "from", ".", "property", "import", "Property", "assert", "isinstance", "(", "parent_property", ",", "Property", ")", "assert", "is_iterable_typed", "(", "features", ",", "Feature", ")", "return", "[", "f", "for", "f", "in", "features", "if", "is_subfeature_of", "(", "parent_property", ",", "f", ")", "]" ]
Given a property, return the subset of features consisting of all ordinary subfeatures of the property's feature, and all specific subfeatures of the property's feature which are conditional on the property's value.
[ "Given", "a", "property", "return", "the", "subset", "of", "features", "consisting", "of", "all", "ordinary", "subfeatures", "of", "the", "property", "s", "feature", "and", "all", "specific", "subfeatures", "of", "the", "property", "s", "feature", "which", "are", "conditional", "on", "the", "property", "s", "value", "." ]
74514c3f99e25b46f22c6e02977fe3da69221c2e
https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/deps/src/boost_1_68_0/tools/build/src/build/feature.py#L902-L912
28,757
apple/turicreate
src/unity/python/turicreate/toolkits/_feature_engineering/_autovectorizer.py
_get_interpretation_function
def _get_interpretation_function(interpretation, dtype): """ Retrieves the interpretation function used. """ type_string = dtype.__name__ name = "%s__%s" % (interpretation, type_string) global _interpretations if not hasattr(_interpretations, name): raise ValueError("No transform available for type '%s' with interpretation '%s'." % (type_string, interpretation)) return getattr(_interpretations, name)
python
def _get_interpretation_function(interpretation, dtype): """ Retrieves the interpretation function used. """ type_string = dtype.__name__ name = "%s__%s" % (interpretation, type_string) global _interpretations if not hasattr(_interpretations, name): raise ValueError("No transform available for type '%s' with interpretation '%s'." % (type_string, interpretation)) return getattr(_interpretations, name)
[ "def", "_get_interpretation_function", "(", "interpretation", ",", "dtype", ")", ":", "type_string", "=", "dtype", ".", "__name__", "name", "=", "\"%s__%s\"", "%", "(", "interpretation", ",", "type_string", ")", "global", "_interpretations", "if", "not", "hasattr", "(", "_interpretations", ",", "name", ")", ":", "raise", "ValueError", "(", "\"No transform available for type '%s' with interpretation '%s'.\"", "%", "(", "type_string", ",", "interpretation", ")", ")", "return", "getattr", "(", "_interpretations", ",", "name", ")" ]
Retrieves the interpretation function used.
[ "Retrieves", "the", "interpretation", "function", "used", "." ]
74514c3f99e25b46f22c6e02977fe3da69221c2e
https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/unity/python/turicreate/toolkits/_feature_engineering/_autovectorizer.py#L380-L394
28,758
apple/turicreate
src/unity/python/turicreate/toolkits/_feature_engineering/_autovectorizer.py
_get_interpretation_description_and_output_type
def _get_interpretation_description_and_output_type(interpretation, dtype): """ Returns the description and output type for a given interpretation. """ type_string = dtype.__name__ name = "%s__%s" % (interpretation, type_string) if not hasattr(_interpretations_class, name): raise ValueError("No transform available for type '%s' with interpretation '%s'." % (type_string, interpretation)) # Need unbound method to get the attributes func = getattr(_interpretations_class, name) return func.description, func.output_type
python
def _get_interpretation_description_and_output_type(interpretation, dtype): """ Returns the description and output type for a given interpretation. """ type_string = dtype.__name__ name = "%s__%s" % (interpretation, type_string) if not hasattr(_interpretations_class, name): raise ValueError("No transform available for type '%s' with interpretation '%s'." % (type_string, interpretation)) # Need unbound method to get the attributes func = getattr(_interpretations_class, name) return func.description, func.output_type
[ "def", "_get_interpretation_description_and_output_type", "(", "interpretation", ",", "dtype", ")", ":", "type_string", "=", "dtype", ".", "__name__", "name", "=", "\"%s__%s\"", "%", "(", "interpretation", ",", "type_string", ")", "if", "not", "hasattr", "(", "_interpretations_class", ",", "name", ")", ":", "raise", "ValueError", "(", "\"No transform available for type '%s' with interpretation '%s'.\"", "%", "(", "type_string", ",", "interpretation", ")", ")", "# Need unbound method to get the attributes", "func", "=", "getattr", "(", "_interpretations_class", ",", "name", ")", "return", "func", ".", "description", ",", "func", ".", "output_type" ]
Returns the description and output type for a given interpretation.
[ "Returns", "the", "description", "and", "output", "type", "for", "a", "given", "interpretation", "." ]
74514c3f99e25b46f22c6e02977fe3da69221c2e
https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/unity/python/turicreate/toolkits/_feature_engineering/_autovectorizer.py#L396-L411
28,759
apple/turicreate
src/unity/python/turicreate/toolkits/_feature_engineering/_autovectorizer.py
_get_embeddable_interpretation_doc
def _get_embeddable_interpretation_doc(indent = 0): """ Returns a list of the available interpretations and what they do. If indent is specified, then the entire doc string is indented by that amount. """ output_rows = [] # Pull out the doc string and put it in a table. for name in sorted(dir(_interpretations)): if name.startswith("_") or "__" not in name: continue interpretation, type_str = name.split("__") func = getattr(_interpretations, name) output_rows.append("%s (%s type):" % (interpretation, type_str)) output_rows += [(" " + line) for line in _textwrap.dedent(func.__doc__).strip().split("\n")] output_rows.append("") return "\n".join(" "*indent + line for line in output_rows)
python
def _get_embeddable_interpretation_doc(indent = 0): """ Returns a list of the available interpretations and what they do. If indent is specified, then the entire doc string is indented by that amount. """ output_rows = [] # Pull out the doc string and put it in a table. for name in sorted(dir(_interpretations)): if name.startswith("_") or "__" not in name: continue interpretation, type_str = name.split("__") func = getattr(_interpretations, name) output_rows.append("%s (%s type):" % (interpretation, type_str)) output_rows += [(" " + line) for line in _textwrap.dedent(func.__doc__).strip().split("\n")] output_rows.append("") return "\n".join(" "*indent + line for line in output_rows)
[ "def", "_get_embeddable_interpretation_doc", "(", "indent", "=", "0", ")", ":", "output_rows", "=", "[", "]", "# Pull out the doc string and put it in a table.", "for", "name", "in", "sorted", "(", "dir", "(", "_interpretations", ")", ")", ":", "if", "name", ".", "startswith", "(", "\"_\"", ")", "or", "\"__\"", "not", "in", "name", ":", "continue", "interpretation", ",", "type_str", "=", "name", ".", "split", "(", "\"__\"", ")", "func", "=", "getattr", "(", "_interpretations", ",", "name", ")", "output_rows", ".", "append", "(", "\"%s (%s type):\"", "%", "(", "interpretation", ",", "type_str", ")", ")", "output_rows", "+=", "[", "(", "\" \"", "+", "line", ")", "for", "line", "in", "_textwrap", ".", "dedent", "(", "func", ".", "__doc__", ")", ".", "strip", "(", ")", ".", "split", "(", "\"\\n\"", ")", "]", "output_rows", ".", "append", "(", "\"\"", ")", "return", "\"\\n\"", ".", "join", "(", "\" \"", "*", "indent", "+", "line", "for", "line", "in", "output_rows", ")" ]
Returns a list of the available interpretations and what they do. If indent is specified, then the entire doc string is indented by that amount.
[ "Returns", "a", "list", "of", "the", "available", "interpretations", "and", "what", "they", "do", "." ]
74514c3f99e25b46f22c6e02977fe3da69221c2e
https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/unity/python/turicreate/toolkits/_feature_engineering/_autovectorizer.py#L413-L436
28,760
apple/turicreate
src/unity/python/turicreate/toolkits/_feature_engineering/_autovectorizer.py
_ColumnFunctionTransformation._load_version
def _load_version(cls, unpickler, version): """ A function to load a previously saved SentenceSplitter instance. Parameters ---------- unpickler : GLUnpickler A GLUnpickler file handler. version : int Version number maintained by the class writer. """ state, _exclude, _features = unpickler.load() features = state['features'] excluded_features = state['excluded_features'] model = cls.__new__(cls) model._setup() model.__proxy__.update(state) model._exclude = _exclude model._features = _features return model
python
def _load_version(cls, unpickler, version): """ A function to load a previously saved SentenceSplitter instance. Parameters ---------- unpickler : GLUnpickler A GLUnpickler file handler. version : int Version number maintained by the class writer. """ state, _exclude, _features = unpickler.load() features = state['features'] excluded_features = state['excluded_features'] model = cls.__new__(cls) model._setup() model.__proxy__.update(state) model._exclude = _exclude model._features = _features return model
[ "def", "_load_version", "(", "cls", ",", "unpickler", ",", "version", ")", ":", "state", ",", "_exclude", ",", "_features", "=", "unpickler", ".", "load", "(", ")", "features", "=", "state", "[", "'features'", "]", "excluded_features", "=", "state", "[", "'excluded_features'", "]", "model", "=", "cls", ".", "__new__", "(", "cls", ")", "model", ".", "_setup", "(", ")", "model", ".", "__proxy__", ".", "update", "(", "state", ")", "model", ".", "_exclude", "=", "_exclude", "model", ".", "_features", "=", "_features", "return", "model" ]
A function to load a previously saved SentenceSplitter instance. Parameters ---------- unpickler : GLUnpickler A GLUnpickler file handler. version : int Version number maintained by the class writer.
[ "A", "function", "to", "load", "a", "previously", "saved", "SentenceSplitter", "instance", "." ]
74514c3f99e25b46f22c6e02977fe3da69221c2e
https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/unity/python/turicreate/toolkits/_feature_engineering/_autovectorizer.py#L72-L95
28,761
apple/turicreate
src/unity/python/turicreate/toolkits/_feature_engineering/_autovectorizer.py
_ColumnFunctionTransformation.fit
def fit(self, data): """ Fits the transformer using the given data. """ _raise_error_if_not_sframe(data, "data") fitted_state = {} feature_columns = _internal_utils.get_column_names(data, self._exclude, self._features) if not feature_columns: raise RuntimeError("No valid feature columns specified in transformation.") fitted_state['features'] = feature_columns fitted_state['fitted'] = True self.__proxy__.update(fitted_state) return self
python
def fit(self, data): """ Fits the transformer using the given data. """ _raise_error_if_not_sframe(data, "data") fitted_state = {} feature_columns = _internal_utils.get_column_names(data, self._exclude, self._features) if not feature_columns: raise RuntimeError("No valid feature columns specified in transformation.") fitted_state['features'] = feature_columns fitted_state['fitted'] = True self.__proxy__.update(fitted_state) return self
[ "def", "fit", "(", "self", ",", "data", ")", ":", "_raise_error_if_not_sframe", "(", "data", ",", "\"data\"", ")", "fitted_state", "=", "{", "}", "feature_columns", "=", "_internal_utils", ".", "get_column_names", "(", "data", ",", "self", ".", "_exclude", ",", "self", ".", "_features", ")", "if", "not", "feature_columns", ":", "raise", "RuntimeError", "(", "\"No valid feature columns specified in transformation.\"", ")", "fitted_state", "[", "'features'", "]", "=", "feature_columns", "fitted_state", "[", "'fitted'", "]", "=", "True", "self", ".", "__proxy__", ".", "update", "(", "fitted_state", ")", "return", "self" ]
Fits the transformer using the given data.
[ "Fits", "the", "transformer", "using", "the", "given", "data", "." ]
74514c3f99e25b46f22c6e02977fe3da69221c2e
https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/unity/python/turicreate/toolkits/_feature_engineering/_autovectorizer.py#L151-L169
28,762
apple/turicreate
src/unity/python/turicreate/toolkits/_feature_engineering/_autovectorizer.py
_ColumnFunctionTransformation.transform
def transform(self, data): """ Transforms the data. """ if not self._get("fitted"): raise RuntimeError("`transform` called before `fit` or `fit_transform`.") data = data.copy() output_column_prefix = self._get("output_column_prefix") if output_column_prefix is None: prefix = "" else: prefix = output_column_prefix + '.' transform_function = self._get("transform_function") feature_columns = self._get("features") feature_columns = _internal_utils.select_feature_subset(data, feature_columns) for f in feature_columns: data[prefix + f] = transform_function(data[f]) return data
python
def transform(self, data): """ Transforms the data. """ if not self._get("fitted"): raise RuntimeError("`transform` called before `fit` or `fit_transform`.") data = data.copy() output_column_prefix = self._get("output_column_prefix") if output_column_prefix is None: prefix = "" else: prefix = output_column_prefix + '.' transform_function = self._get("transform_function") feature_columns = self._get("features") feature_columns = _internal_utils.select_feature_subset(data, feature_columns) for f in feature_columns: data[prefix + f] = transform_function(data[f]) return data
[ "def", "transform", "(", "self", ",", "data", ")", ":", "if", "not", "self", ".", "_get", "(", "\"fitted\"", ")", ":", "raise", "RuntimeError", "(", "\"`transform` called before `fit` or `fit_transform`.\"", ")", "data", "=", "data", ".", "copy", "(", ")", "output_column_prefix", "=", "self", ".", "_get", "(", "\"output_column_prefix\"", ")", "if", "output_column_prefix", "is", "None", ":", "prefix", "=", "\"\"", "else", ":", "prefix", "=", "output_column_prefix", "+", "'.'", "transform_function", "=", "self", ".", "_get", "(", "\"transform_function\"", ")", "feature_columns", "=", "self", ".", "_get", "(", "\"features\"", ")", "feature_columns", "=", "_internal_utils", ".", "select_feature_subset", "(", "data", ",", "feature_columns", ")", "for", "f", "in", "feature_columns", ":", "data", "[", "prefix", "+", "f", "]", "=", "transform_function", "(", "data", "[", "f", "]", ")", "return", "data" ]
Transforms the data.
[ "Transforms", "the", "data", "." ]
74514c3f99e25b46f22c6e02977fe3da69221c2e
https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/unity/python/turicreate/toolkits/_feature_engineering/_autovectorizer.py#L171-L195
28,763
apple/turicreate
src/unity/python/turicreate/toolkits/_feature_engineering/_autovectorizer.py
_interpretations_class.short_text__str
def short_text__str(self, column_name, output_column_prefix): """ Transforms short text into a dictionary of TFIDF-weighted 3-gram character counts. """ from ._ngram_counter import NGramCounter from ._tfidf import TFIDF return [NGramCounter(features=[column_name], n = 3, method = "character", output_column_prefix = output_column_prefix), TFIDF(features=[column_name], min_document_frequency=0.01, max_document_frequency=0.5, output_column_prefix = output_column_prefix)]
python
def short_text__str(self, column_name, output_column_prefix): """ Transforms short text into a dictionary of TFIDF-weighted 3-gram character counts. """ from ._ngram_counter import NGramCounter from ._tfidf import TFIDF return [NGramCounter(features=[column_name], n = 3, method = "character", output_column_prefix = output_column_prefix), TFIDF(features=[column_name], min_document_frequency=0.01, max_document_frequency=0.5, output_column_prefix = output_column_prefix)]
[ "def", "short_text__str", "(", "self", ",", "column_name", ",", "output_column_prefix", ")", ":", "from", ".", "_ngram_counter", "import", "NGramCounter", "from", ".", "_tfidf", "import", "TFIDF", "return", "[", "NGramCounter", "(", "features", "=", "[", "column_name", "]", ",", "n", "=", "3", ",", "method", "=", "\"character\"", ",", "output_column_prefix", "=", "output_column_prefix", ")", ",", "TFIDF", "(", "features", "=", "[", "column_name", "]", ",", "min_document_frequency", "=", "0.01", ",", "max_document_frequency", "=", "0.5", ",", "output_column_prefix", "=", "output_column_prefix", ")", "]" ]
Transforms short text into a dictionary of TFIDF-weighted 3-gram character counts.
[ "Transforms", "short", "text", "into", "a", "dictionary", "of", "TFIDF", "-", "weighted", "3", "-", "gram", "character", "counts", "." ]
74514c3f99e25b46f22c6e02977fe3da69221c2e
https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/unity/python/turicreate/toolkits/_feature_engineering/_autovectorizer.py#L224-L241
28,764
apple/turicreate
src/unity/python/turicreate/toolkits/_feature_engineering/_autovectorizer.py
_interpretations_class.categorical__int
def categorical__int(self, column_name, output_column_prefix): """ Interprets an integer column as a categorical variable. """ return [_ColumnFunctionTransformation( features = [column_name], output_column_prefix = output_column_prefix, transform_function = lambda col: col.astype(str), transform_function_name = "astype(str)")]
python
def categorical__int(self, column_name, output_column_prefix): """ Interprets an integer column as a categorical variable. """ return [_ColumnFunctionTransformation( features = [column_name], output_column_prefix = output_column_prefix, transform_function = lambda col: col.astype(str), transform_function_name = "astype(str)")]
[ "def", "categorical__int", "(", "self", ",", "column_name", ",", "output_column_prefix", ")", ":", "return", "[", "_ColumnFunctionTransformation", "(", "features", "=", "[", "column_name", "]", ",", "output_column_prefix", "=", "output_column_prefix", ",", "transform_function", "=", "lambda", "col", ":", "col", ".", "astype", "(", "str", ")", ",", "transform_function_name", "=", "\"astype(str)\"", ")", "]" ]
Interprets an integer column as a categorical variable.
[ "Interprets", "an", "integer", "column", "as", "a", "categorical", "variable", "." ]
74514c3f99e25b46f22c6e02977fe3da69221c2e
https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/unity/python/turicreate/toolkits/_feature_engineering/_autovectorizer.py#L283-L292
28,765
apple/turicreate
src/unity/python/turicreate/toolkits/_feature_engineering/_autovectorizer.py
AutoVectorizer._setup_from_data
def _setup_from_data(self, data): """ Sets up the content transforms. """ fitted_state = {} _raise_error_if_not_of_type(data, [_SFrame]) feature_columns = _internal_utils.get_column_names(data, self._exclude, self._features) if not feature_columns: raise RuntimeError("No valid feature columns specified in transformation.") fitted_state["features"] = feature_columns ################################################################################ # Helper functions def get_valid_interpretations(): return list(n.split("__")[0] for n in dir(_interpretations) if not n.startswith("_")) ################################################################################ # Check input data. if not isinstance(data, _SFrame): raise TypeError("`data` parameter must be an SFrame.") all_col_names = set(feature_columns) column_interpretations = self._get("column_interpretations").copy() # Make sure all the interpretations are valid. for k, v in column_interpretations.items(): if k not in all_col_names: raise ValueError("Column '%s' in column_interpretations, but not found in `data`." % k) # Get the automatic column interpretations. for col_name in feature_columns: if col_name not in column_interpretations: n = column_interpretations[col_name] = infer_column_interpretation(data[col_name]) if n.startswith("unknown"): raise ValueError("Interpretation inference failed on column '%s'; %s" % (col_name, n[len("unknown"):].strip())) # Now, build up the feature transforms. 
transforms = {} input_types = {} output_column_prefix = self._get("output_column_prefix") assert output_column_prefix is None or type(output_column_prefix) is str tr_chain = [] for col_name in feature_columns: in_type = input_types[col_name] = data[col_name].dtype intr_func = _get_interpretation_function(column_interpretations[col_name], in_type) tr_list = intr_func(col_name, output_column_prefix) transforms[col_name] = tr_list tr_chain += tr_list fitted_state["transform_chain"] = _TransformerChain(tr_chain) fitted_state["transforms"] = transforms fitted_state["input_types"] = input_types fitted_state["column_interpretations"] = column_interpretations self.__proxy__.update(fitted_state)
python
def _setup_from_data(self, data): """ Sets up the content transforms. """ fitted_state = {} _raise_error_if_not_of_type(data, [_SFrame]) feature_columns = _internal_utils.get_column_names(data, self._exclude, self._features) if not feature_columns: raise RuntimeError("No valid feature columns specified in transformation.") fitted_state["features"] = feature_columns ################################################################################ # Helper functions def get_valid_interpretations(): return list(n.split("__")[0] for n in dir(_interpretations) if not n.startswith("_")) ################################################################################ # Check input data. if not isinstance(data, _SFrame): raise TypeError("`data` parameter must be an SFrame.") all_col_names = set(feature_columns) column_interpretations = self._get("column_interpretations").copy() # Make sure all the interpretations are valid. for k, v in column_interpretations.items(): if k not in all_col_names: raise ValueError("Column '%s' in column_interpretations, but not found in `data`." % k) # Get the automatic column interpretations. for col_name in feature_columns: if col_name not in column_interpretations: n = column_interpretations[col_name] = infer_column_interpretation(data[col_name]) if n.startswith("unknown"): raise ValueError("Interpretation inference failed on column '%s'; %s" % (col_name, n[len("unknown"):].strip())) # Now, build up the feature transforms. 
transforms = {} input_types = {} output_column_prefix = self._get("output_column_prefix") assert output_column_prefix is None or type(output_column_prefix) is str tr_chain = [] for col_name in feature_columns: in_type = input_types[col_name] = data[col_name].dtype intr_func = _get_interpretation_function(column_interpretations[col_name], in_type) tr_list = intr_func(col_name, output_column_prefix) transforms[col_name] = tr_list tr_chain += tr_list fitted_state["transform_chain"] = _TransformerChain(tr_chain) fitted_state["transforms"] = transforms fitted_state["input_types"] = input_types fitted_state["column_interpretations"] = column_interpretations self.__proxy__.update(fitted_state)
[ "def", "_setup_from_data", "(", "self", ",", "data", ")", ":", "fitted_state", "=", "{", "}", "_raise_error_if_not_of_type", "(", "data", ",", "[", "_SFrame", "]", ")", "feature_columns", "=", "_internal_utils", ".", "get_column_names", "(", "data", ",", "self", ".", "_exclude", ",", "self", ".", "_features", ")", "if", "not", "feature_columns", ":", "raise", "RuntimeError", "(", "\"No valid feature columns specified in transformation.\"", ")", "fitted_state", "[", "\"features\"", "]", "=", "feature_columns", "################################################################################", "# Helper functions", "def", "get_valid_interpretations", "(", ")", ":", "return", "list", "(", "n", ".", "split", "(", "\"__\"", ")", "[", "0", "]", "for", "n", "in", "dir", "(", "_interpretations", ")", "if", "not", "n", ".", "startswith", "(", "\"_\"", ")", ")", "################################################################################", "# Check input data.", "if", "not", "isinstance", "(", "data", ",", "_SFrame", ")", ":", "raise", "TypeError", "(", "\"`data` parameter must be an SFrame.\"", ")", "all_col_names", "=", "set", "(", "feature_columns", ")", "column_interpretations", "=", "self", ".", "_get", "(", "\"column_interpretations\"", ")", ".", "copy", "(", ")", "# Make sure all the interpretations are valid.", "for", "k", ",", "v", "in", "column_interpretations", ".", "items", "(", ")", ":", "if", "k", "not", "in", "all_col_names", ":", "raise", "ValueError", "(", "\"Column '%s' in column_interpretations, but not found in `data`.\"", "%", "k", ")", "# Get the automatic column interpretations.", "for", "col_name", "in", "feature_columns", ":", "if", "col_name", "not", "in", "column_interpretations", ":", "n", "=", "column_interpretations", "[", "col_name", "]", "=", "infer_column_interpretation", "(", "data", "[", "col_name", "]", ")", "if", "n", ".", "startswith", "(", "\"unknown\"", ")", ":", "raise", "ValueError", "(", "\"Interpretation inference failed on 
column '%s'; %s\"", "%", "(", "col_name", ",", "n", "[", "len", "(", "\"unknown\"", ")", ":", "]", ".", "strip", "(", ")", ")", ")", "# Now, build up the feature transforms.", "transforms", "=", "{", "}", "input_types", "=", "{", "}", "output_column_prefix", "=", "self", ".", "_get", "(", "\"output_column_prefix\"", ")", "assert", "output_column_prefix", "is", "None", "or", "type", "(", "output_column_prefix", ")", "is", "str", "tr_chain", "=", "[", "]", "for", "col_name", "in", "feature_columns", ":", "in_type", "=", "input_types", "[", "col_name", "]", "=", "data", "[", "col_name", "]", ".", "dtype", "intr_func", "=", "_get_interpretation_function", "(", "column_interpretations", "[", "col_name", "]", ",", "in_type", ")", "tr_list", "=", "intr_func", "(", "col_name", ",", "output_column_prefix", ")", "transforms", "[", "col_name", "]", "=", "tr_list", "tr_chain", "+=", "tr_list", "fitted_state", "[", "\"transform_chain\"", "]", "=", "_TransformerChain", "(", "tr_chain", ")", "fitted_state", "[", "\"transforms\"", "]", "=", "transforms", "fitted_state", "[", "\"input_types\"", "]", "=", "input_types", "fitted_state", "[", "\"column_interpretations\"", "]", "=", "column_interpretations", "self", ".", "__proxy__", ".", "update", "(", "fitted_state", ")" ]
Sets up the content transforms.
[ "Sets", "up", "the", "content", "transforms", "." ]
74514c3f99e25b46f22c6e02977fe3da69221c2e
https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/unity/python/turicreate/toolkits/_feature_engineering/_autovectorizer.py#L550-L619
28,766
apple/turicreate
src/unity/python/turicreate/toolkits/_feature_engineering/_autovectorizer.py
AutoVectorizer.fit_transform
def fit_transform(self, data): """ Fits and transforms the SFrame `data` using a fitted model. Parameters ---------- data : SFrame The data to be transformed. Returns ------- A transformed SFrame. Returns ------- out: SFrame A transformed SFrame. See Also -------- fit, transform """ self._setup_from_data(data) ret = self.transform_chain.fit_transform(data) self.__proxy__.update({"fitted" : True}) return ret
python
def fit_transform(self, data): """ Fits and transforms the SFrame `data` using a fitted model. Parameters ---------- data : SFrame The data to be transformed. Returns ------- A transformed SFrame. Returns ------- out: SFrame A transformed SFrame. See Also -------- fit, transform """ self._setup_from_data(data) ret = self.transform_chain.fit_transform(data) self.__proxy__.update({"fitted" : True}) return ret
[ "def", "fit_transform", "(", "self", ",", "data", ")", ":", "self", ".", "_setup_from_data", "(", "data", ")", "ret", "=", "self", ".", "transform_chain", ".", "fit_transform", "(", "data", ")", "self", ".", "__proxy__", ".", "update", "(", "{", "\"fitted\"", ":", "True", "}", ")", "return", "ret" ]
Fits and transforms the SFrame `data` using a fitted model. Parameters ---------- data : SFrame The data to be transformed. Returns ------- A transformed SFrame. Returns ------- out: SFrame A transformed SFrame. See Also -------- fit, transform
[ "Fits", "and", "transforms", "the", "SFrame", "data", "using", "a", "fitted", "model", "." ]
74514c3f99e25b46f22c6e02977fe3da69221c2e
https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/unity/python/turicreate/toolkits/_feature_engineering/_autovectorizer.py#L645-L671
28,767
apple/turicreate
src/external/coremltools_wrap/coremltools/deps/protobuf/python/mox.py
Mox.CreateMock
def CreateMock(self, class_to_mock): """Create a new mock object. Args: # class_to_mock: the class to be mocked class_to_mock: class Returns: MockObject that can be used as the class_to_mock would be. """ new_mock = MockObject(class_to_mock) self._mock_objects.append(new_mock) return new_mock
python
def CreateMock(self, class_to_mock): """Create a new mock object. Args: # class_to_mock: the class to be mocked class_to_mock: class Returns: MockObject that can be used as the class_to_mock would be. """ new_mock = MockObject(class_to_mock) self._mock_objects.append(new_mock) return new_mock
[ "def", "CreateMock", "(", "self", ",", "class_to_mock", ")", ":", "new_mock", "=", "MockObject", "(", "class_to_mock", ")", "self", ".", "_mock_objects", ".", "append", "(", "new_mock", ")", "return", "new_mock" ]
Create a new mock object. Args: # class_to_mock: the class to be mocked class_to_mock: class Returns: MockObject that can be used as the class_to_mock would be.
[ "Create", "a", "new", "mock", "object", "." ]
74514c3f99e25b46f22c6e02977fe3da69221c2e
https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/external/coremltools_wrap/coremltools/deps/protobuf/python/mox.py#L164-L177
28,768
apple/turicreate
src/external/coremltools_wrap/coremltools/deps/protobuf/python/mox.py
Mox.StubOutWithMock
def StubOutWithMock(self, obj, attr_name, use_mock_anything=False): """Replace a method, attribute, etc. with a Mock. This will replace a class or module with a MockObject, and everything else (method, function, etc) with a MockAnything. This can be overridden to always use a MockAnything by setting use_mock_anything to True. Args: obj: A Python object (class, module, instance, callable). attr_name: str. The name of the attribute to replace with a mock. use_mock_anything: bool. True if a MockAnything should be used regardless of the type of attribute. """ attr_to_replace = getattr(obj, attr_name) if type(attr_to_replace) in self._USE_MOCK_OBJECT and not use_mock_anything: stub = self.CreateMock(attr_to_replace) else: stub = self.CreateMockAnything() self.stubs.Set(obj, attr_name, stub)
python
def StubOutWithMock(self, obj, attr_name, use_mock_anything=False): """Replace a method, attribute, etc. with a Mock. This will replace a class or module with a MockObject, and everything else (method, function, etc) with a MockAnything. This can be overridden to always use a MockAnything by setting use_mock_anything to True. Args: obj: A Python object (class, module, instance, callable). attr_name: str. The name of the attribute to replace with a mock. use_mock_anything: bool. True if a MockAnything should be used regardless of the type of attribute. """ attr_to_replace = getattr(obj, attr_name) if type(attr_to_replace) in self._USE_MOCK_OBJECT and not use_mock_anything: stub = self.CreateMock(attr_to_replace) else: stub = self.CreateMockAnything() self.stubs.Set(obj, attr_name, stub)
[ "def", "StubOutWithMock", "(", "self", ",", "obj", ",", "attr_name", ",", "use_mock_anything", "=", "False", ")", ":", "attr_to_replace", "=", "getattr", "(", "obj", ",", "attr_name", ")", "if", "type", "(", "attr_to_replace", ")", "in", "self", ".", "_USE_MOCK_OBJECT", "and", "not", "use_mock_anything", ":", "stub", "=", "self", ".", "CreateMock", "(", "attr_to_replace", ")", "else", ":", "stub", "=", "self", ".", "CreateMockAnything", "(", ")", "self", ".", "stubs", ".", "Set", "(", "obj", ",", "attr_name", ",", "stub", ")" ]
Replace a method, attribute, etc. with a Mock. This will replace a class or module with a MockObject, and everything else (method, function, etc) with a MockAnything. This can be overridden to always use a MockAnything by setting use_mock_anything to True. Args: obj: A Python object (class, module, instance, callable). attr_name: str. The name of the attribute to replace with a mock. use_mock_anything: bool. True if a MockAnything should be used regardless of the type of attribute.
[ "Replace", "a", "method", "attribute", "etc", ".", "with", "a", "Mock", "." ]
74514c3f99e25b46f22c6e02977fe3da69221c2e
https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/external/coremltools_wrap/coremltools/deps/protobuf/python/mox.py#L208-L228
28,769
apple/turicreate
src/external/coremltools_wrap/coremltools/deps/protobuf/python/mox.py
MockAnything._Verify
def _Verify(self): """Verify that all of the expected calls have been made. Raises: ExpectedMethodCallsError: if there are still more method calls in the expected queue. """ # If the list of expected calls is not empty, raise an exception if self._expected_calls_queue: # The last MultipleTimesGroup is not popped from the queue. if (len(self._expected_calls_queue) == 1 and isinstance(self._expected_calls_queue[0], MultipleTimesGroup) and self._expected_calls_queue[0].IsSatisfied()): pass else: raise ExpectedMethodCallsError(self._expected_calls_queue)
python
def _Verify(self): """Verify that all of the expected calls have been made. Raises: ExpectedMethodCallsError: if there are still more method calls in the expected queue. """ # If the list of expected calls is not empty, raise an exception if self._expected_calls_queue: # The last MultipleTimesGroup is not popped from the queue. if (len(self._expected_calls_queue) == 1 and isinstance(self._expected_calls_queue[0], MultipleTimesGroup) and self._expected_calls_queue[0].IsSatisfied()): pass else: raise ExpectedMethodCallsError(self._expected_calls_queue)
[ "def", "_Verify", "(", "self", ")", ":", "# If the list of expected calls is not empty, raise an exception", "if", "self", ".", "_expected_calls_queue", ":", "# The last MultipleTimesGroup is not popped from the queue.", "if", "(", "len", "(", "self", ".", "_expected_calls_queue", ")", "==", "1", "and", "isinstance", "(", "self", ".", "_expected_calls_queue", "[", "0", "]", ",", "MultipleTimesGroup", ")", "and", "self", ".", "_expected_calls_queue", "[", "0", "]", ".", "IsSatisfied", "(", ")", ")", ":", "pass", "else", ":", "raise", "ExpectedMethodCallsError", "(", "self", ".", "_expected_calls_queue", ")" ]
Verify that all of the expected calls have been made. Raises: ExpectedMethodCallsError: if there are still more method calls in the expected queue.
[ "Verify", "that", "all", "of", "the", "expected", "calls", "have", "been", "made", "." ]
74514c3f99e25b46f22c6e02977fe3da69221c2e
https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/external/coremltools_wrap/coremltools/deps/protobuf/python/mox.py#L331-L347
28,770
apple/turicreate
src/external/coremltools_wrap/coremltools/deps/protobuf/python/mox.py
MockMethod._VerifyMethodCall
def _VerifyMethodCall(self): """Verify the called method is expected. This can be an ordered method, or part of an unordered set. Returns: The expected mock method. Raises: UnexpectedMethodCall if the method called was not expected. """ expected = self._PopNextMethod() # Loop here, because we might have a MethodGroup followed by another # group. while isinstance(expected, MethodGroup): expected, method = expected.MethodCalled(self) if method is not None: return method # This is a mock method, so just check equality. if expected != self: raise UnexpectedMethodCallError(self, expected) return expected
python
def _VerifyMethodCall(self): """Verify the called method is expected. This can be an ordered method, or part of an unordered set. Returns: The expected mock method. Raises: UnexpectedMethodCall if the method called was not expected. """ expected = self._PopNextMethod() # Loop here, because we might have a MethodGroup followed by another # group. while isinstance(expected, MethodGroup): expected, method = expected.MethodCalled(self) if method is not None: return method # This is a mock method, so just check equality. if expected != self: raise UnexpectedMethodCallError(self, expected) return expected
[ "def", "_VerifyMethodCall", "(", "self", ")", ":", "expected", "=", "self", ".", "_PopNextMethod", "(", ")", "# Loop here, because we might have a MethodGroup followed by another", "# group.", "while", "isinstance", "(", "expected", ",", "MethodGroup", ")", ":", "expected", ",", "method", "=", "expected", ".", "MethodCalled", "(", "self", ")", "if", "method", "is", "not", "None", ":", "return", "method", "# This is a mock method, so just check equality.", "if", "expected", "!=", "self", ":", "raise", "UnexpectedMethodCallError", "(", "self", ",", "expected", ")", "return", "expected" ]
Verify the called method is expected. This can be an ordered method, or part of an unordered set. Returns: The expected mock method. Raises: UnexpectedMethodCall if the method called was not expected.
[ "Verify", "the", "called", "method", "is", "expected", "." ]
74514c3f99e25b46f22c6e02977fe3da69221c2e
https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/external/coremltools_wrap/coremltools/deps/protobuf/python/mox.py#L588-L613
28,771
apple/turicreate
src/external/coremltools_wrap/coremltools/deps/protobuf/python/mox.py
MockMethod.GetPossibleGroup
def GetPossibleGroup(self): """Returns a possible group from the end of the call queue or None if no other methods are on the stack. """ # Remove this method from the tail of the queue so we can add it to a group. this_method = self._call_queue.pop() assert this_method == self # Determine if the tail of the queue is a group, or just a regular ordered # mock method. group = None try: group = self._call_queue[-1] except IndexError: pass return group
python
def GetPossibleGroup(self): """Returns a possible group from the end of the call queue or None if no other methods are on the stack. """ # Remove this method from the tail of the queue so we can add it to a group. this_method = self._call_queue.pop() assert this_method == self # Determine if the tail of the queue is a group, or just a regular ordered # mock method. group = None try: group = self._call_queue[-1] except IndexError: pass return group
[ "def", "GetPossibleGroup", "(", "self", ")", ":", "# Remove this method from the tail of the queue so we can add it to a group.", "this_method", "=", "self", ".", "_call_queue", ".", "pop", "(", ")", "assert", "this_method", "==", "self", "# Determine if the tail of the queue is a group, or just a regular ordered", "# mock method.", "group", "=", "None", "try", ":", "group", "=", "self", ".", "_call_queue", "[", "-", "1", "]", "except", "IndexError", ":", "pass", "return", "group" ]
Returns a possible group from the end of the call queue or None if no other methods are on the stack.
[ "Returns", "a", "possible", "group", "from", "the", "end", "of", "the", "call", "queue", "or", "None", "if", "no", "other", "methods", "are", "on", "the", "stack", "." ]
74514c3f99e25b46f22c6e02977fe3da69221c2e
https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/external/coremltools_wrap/coremltools/deps/protobuf/python/mox.py#L645-L662
28,772
apple/turicreate
src/external/coremltools_wrap/coremltools/deps/protobuf/python/mox.py
IsA.equals
def equals(self, rhs): """Check to see if the RHS is an instance of class_name. Args: # rhs: the right hand side of the test rhs: object Returns: bool """ try: return isinstance(rhs, self._class_name) except TypeError: # Check raw types if there was a type error. This is helpful for # things like cStringIO.StringIO. return type(rhs) == type(self._class_name)
python
def equals(self, rhs): """Check to see if the RHS is an instance of class_name. Args: # rhs: the right hand side of the test rhs: object Returns: bool """ try: return isinstance(rhs, self._class_name) except TypeError: # Check raw types if there was a type error. This is helpful for # things like cStringIO.StringIO. return type(rhs) == type(self._class_name)
[ "def", "equals", "(", "self", ",", "rhs", ")", ":", "try", ":", "return", "isinstance", "(", "rhs", ",", "self", ".", "_class_name", ")", "except", "TypeError", ":", "# Check raw types if there was a type error. This is helpful for", "# things like cStringIO.StringIO.", "return", "type", "(", "rhs", ")", "==", "type", "(", "self", ".", "_class_name", ")" ]
Check to see if the RHS is an instance of class_name. Args: # rhs: the right hand side of the test rhs: object Returns: bool
[ "Check", "to", "see", "if", "the", "RHS", "is", "an", "instance", "of", "class_name", "." ]
74514c3f99e25b46f22c6e02977fe3da69221c2e
https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/external/coremltools_wrap/coremltools/deps/protobuf/python/mox.py#L807-L823
28,773
apple/turicreate
src/external/coremltools_wrap/coremltools/deps/protobuf/python/mox.py
IsAlmost.equals
def equals(self, rhs): """Check to see if RHS is almost equal to float_value Args: rhs: the value to compare to float_value Returns: bool """ try: return round(rhs-self._float_value, self._places) == 0 except TypeError: # This is probably because either float_value or rhs is not a number. return False
python
def equals(self, rhs): """Check to see if RHS is almost equal to float_value Args: rhs: the value to compare to float_value Returns: bool """ try: return round(rhs-self._float_value, self._places) == 0 except TypeError: # This is probably because either float_value or rhs is not a number. return False
[ "def", "equals", "(", "self", ",", "rhs", ")", ":", "try", ":", "return", "round", "(", "rhs", "-", "self", ".", "_float_value", ",", "self", ".", "_places", ")", "==", "0", "except", "TypeError", ":", "# This is probably because either float_value or rhs is not a number.", "return", "False" ]
Check to see if RHS is almost equal to float_value Args: rhs: the value to compare to float_value Returns: bool
[ "Check", "to", "see", "if", "RHS", "is", "almost", "equal", "to", "float_value" ]
74514c3f99e25b46f22c6e02977fe3da69221c2e
https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/external/coremltools_wrap/coremltools/deps/protobuf/python/mox.py#L846-L860
28,774
apple/turicreate
src/external/coremltools_wrap/coremltools/deps/protobuf/python/mox.py
SameElementsAs.equals
def equals(self, actual_seq): """Check to see whether actual_seq has same elements as expected_seq. Args: actual_seq: sequence Returns: bool """ try: expected = dict([(element, None) for element in self._expected_seq]) actual = dict([(element, None) for element in actual_seq]) except TypeError: # Fall back to slower list-compare if any of the objects are unhashable. expected = list(self._expected_seq) actual = list(actual_seq) expected.sort() actual.sort() return expected == actual
python
def equals(self, actual_seq): """Check to see whether actual_seq has same elements as expected_seq. Args: actual_seq: sequence Returns: bool """ try: expected = dict([(element, None) for element in self._expected_seq]) actual = dict([(element, None) for element in actual_seq]) except TypeError: # Fall back to slower list-compare if any of the objects are unhashable. expected = list(self._expected_seq) actual = list(actual_seq) expected.sort() actual.sort() return expected == actual
[ "def", "equals", "(", "self", ",", "actual_seq", ")", ":", "try", ":", "expected", "=", "dict", "(", "[", "(", "element", ",", "None", ")", "for", "element", "in", "self", ".", "_expected_seq", "]", ")", "actual", "=", "dict", "(", "[", "(", "element", ",", "None", ")", "for", "element", "in", "actual_seq", "]", ")", "except", "TypeError", ":", "# Fall back to slower list-compare if any of the objects are unhashable.", "expected", "=", "list", "(", "self", ".", "_expected_seq", ")", "actual", "=", "list", "(", "actual_seq", ")", "expected", ".", "sort", "(", ")", "actual", ".", "sort", "(", ")", "return", "expected", "==", "actual" ]
Check to see whether actual_seq has same elements as expected_seq. Args: actual_seq: sequence Returns: bool
[ "Check", "to", "see", "whether", "actual_seq", "has", "same", "elements", "as", "expected_seq", "." ]
74514c3f99e25b46f22c6e02977fe3da69221c2e
https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/external/coremltools_wrap/coremltools/deps/protobuf/python/mox.py#L1021-L1040
28,775
apple/turicreate
src/external/coremltools_wrap/coremltools/deps/protobuf/python/mox.py
Or.equals
def equals(self, rhs): """Checks whether any Comparator is equal to rhs. Args: # rhs: can be anything Returns: bool """ for comparator in self._comparators: if comparator.equals(rhs): return True return False
python
def equals(self, rhs): """Checks whether any Comparator is equal to rhs. Args: # rhs: can be anything Returns: bool """ for comparator in self._comparators: if comparator.equals(rhs): return True return False
[ "def", "equals", "(", "self", ",", "rhs", ")", ":", "for", "comparator", "in", "self", ".", "_comparators", ":", "if", "comparator", ".", "equals", "(", "rhs", ")", ":", "return", "True", "return", "False" ]
Checks whether any Comparator is equal to rhs. Args: # rhs: can be anything Returns: bool
[ "Checks", "whether", "any", "Comparator", "is", "equal", "to", "rhs", "." ]
74514c3f99e25b46f22c6e02977fe3da69221c2e
https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/external/coremltools_wrap/coremltools/deps/protobuf/python/mox.py#L1092-L1106
28,776
apple/turicreate
src/external/coremltools_wrap/coremltools/deps/protobuf/python/mox.py
MultipleTimesGroup.IsSatisfied
def IsSatisfied(self): """Return True if all methods in this group are called at least once.""" # NOTE(psycho): We can't use the simple set difference here because we want # to match different parameters which are considered the same e.g. IsA(str) # and some string. This solution is O(n^2) but n should be small. tmp = self._methods.copy() for called in self._methods_called: for expected in tmp: if called == expected: tmp.remove(expected) if not tmp: return True break return False
python
def IsSatisfied(self): """Return True if all methods in this group are called at least once.""" # NOTE(psycho): We can't use the simple set difference here because we want # to match different parameters which are considered the same e.g. IsA(str) # and some string. This solution is O(n^2) but n should be small. tmp = self._methods.copy() for called in self._methods_called: for expected in tmp: if called == expected: tmp.remove(expected) if not tmp: return True break return False
[ "def", "IsSatisfied", "(", "self", ")", ":", "# NOTE(psycho): We can't use the simple set difference here because we want", "# to match different parameters which are considered the same e.g. IsA(str)", "# and some string. This solution is O(n^2) but n should be small.", "tmp", "=", "self", ".", "_methods", ".", "copy", "(", ")", "for", "called", "in", "self", ".", "_methods_called", ":", "for", "expected", "in", "tmp", ":", "if", "called", "==", "expected", ":", "tmp", ".", "remove", "(", "expected", ")", "if", "not", "tmp", ":", "return", "True", "break", "return", "False" ]
Return True if all methods in this group are called at least once.
[ "Return", "True", "if", "all", "methods", "in", "this", "group", "are", "called", "at", "least", "once", "." ]
74514c3f99e25b46f22c6e02977fe3da69221c2e
https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/external/coremltools_wrap/coremltools/deps/protobuf/python/mox.py#L1318-L1331
28,777
apple/turicreate
src/external/coremltools_wrap/coremltools/coremltools/models/_interface_management.py
set_classifier_interface_params
def set_classifier_interface_params(spec, features, class_labels, model_accessor_for_class_labels, output_features = None): """ Common utilities to set the regression interface params. """ # Normalize the features list. features = _fm.process_or_validate_features(features) if class_labels is None: raise ValueError("List of class labels must be provided.") n_classes = len(class_labels) output_features = _fm.process_or_validate_classifier_output_features(output_features, class_labels) if len(output_features) == 1: predicted_class_output, pred_cl_type = output_features[0] score_output = None elif len(output_features) == 2: predicted_class_output, pred_cl_type = output_features[0] score_output, score_output_type = output_features[1] else: raise ValueError("Provided output classes for a classifier must be " "a list of features, predicted class and (optionally) class_score.") spec.description.predictedFeatureName = predicted_class_output # Are they out of order? if not (pred_cl_type == datatypes.Int64() or pred_cl_type == datatypes.String()): raise ValueError("Provided predicted class output type not Int64 or String (%s)." % repr(pred_cl_type)) if score_output is not None: if not isinstance(score_output_type, datatypes.Dictionary): raise ValueError("Provided class score output type not a Dictionary (%s)." 
% repr(score_output_type)) if score_output_type.key_type != pred_cl_type: raise ValueError(("Provided class score output (%s) key_type (%s) does not " "match type of class prediction (%s).") % (score_output, repr(score_output_type.key_type), repr(pred_cl_type))) spec.description.predictedProbabilitiesName = score_output # add input for index, (cur_input_name, input_type) in enumerate(features): input_ = spec.description.input.add() input_.name = cur_input_name datatypes._set_datatype(input_.type, input_type) # add output for index, (cur_output_name, output_type) in enumerate(output_features): output_ = spec.description.output.add() output_.name = cur_output_name datatypes._set_datatype(output_.type, output_type) # Worry about the class labels if pred_cl_type == datatypes.String(): try: for c in class_labels: getattr(spec, model_accessor_for_class_labels).stringClassLabels.vector.append(str(c)) # Not all the classifiers have class labels; in particular the pipeline # classifier. Thus it's not an error if we can't actually set them. except AttributeError: pass else: for c in class_labels: conv_error = False try: if not (int(c) == c): conv_error = True except: conv_error = True if conv_error: raise TypeError(("Cannot cast '%s' class to an int type " % str(c)) + "(class type determined by type of first class).") try: getattr(spec, model_accessor_for_class_labels).int64ClassLabels.vector.append(int(c)) # Not all the classifiers have class labels; in particular the pipeline # classifier. Thus it's not an error if we can't actually set them. except AttributeError: break # And we are done! return spec
python
def set_classifier_interface_params(spec, features, class_labels, model_accessor_for_class_labels, output_features = None): """ Common utilities to set the regression interface params. """ # Normalize the features list. features = _fm.process_or_validate_features(features) if class_labels is None: raise ValueError("List of class labels must be provided.") n_classes = len(class_labels) output_features = _fm.process_or_validate_classifier_output_features(output_features, class_labels) if len(output_features) == 1: predicted_class_output, pred_cl_type = output_features[0] score_output = None elif len(output_features) == 2: predicted_class_output, pred_cl_type = output_features[0] score_output, score_output_type = output_features[1] else: raise ValueError("Provided output classes for a classifier must be " "a list of features, predicted class and (optionally) class_score.") spec.description.predictedFeatureName = predicted_class_output # Are they out of order? if not (pred_cl_type == datatypes.Int64() or pred_cl_type == datatypes.String()): raise ValueError("Provided predicted class output type not Int64 or String (%s)." % repr(pred_cl_type)) if score_output is not None: if not isinstance(score_output_type, datatypes.Dictionary): raise ValueError("Provided class score output type not a Dictionary (%s)." 
% repr(score_output_type)) if score_output_type.key_type != pred_cl_type: raise ValueError(("Provided class score output (%s) key_type (%s) does not " "match type of class prediction (%s).") % (score_output, repr(score_output_type.key_type), repr(pred_cl_type))) spec.description.predictedProbabilitiesName = score_output # add input for index, (cur_input_name, input_type) in enumerate(features): input_ = spec.description.input.add() input_.name = cur_input_name datatypes._set_datatype(input_.type, input_type) # add output for index, (cur_output_name, output_type) in enumerate(output_features): output_ = spec.description.output.add() output_.name = cur_output_name datatypes._set_datatype(output_.type, output_type) # Worry about the class labels if pred_cl_type == datatypes.String(): try: for c in class_labels: getattr(spec, model_accessor_for_class_labels).stringClassLabels.vector.append(str(c)) # Not all the classifiers have class labels; in particular the pipeline # classifier. Thus it's not an error if we can't actually set them. except AttributeError: pass else: for c in class_labels: conv_error = False try: if not (int(c) == c): conv_error = True except: conv_error = True if conv_error: raise TypeError(("Cannot cast '%s' class to an int type " % str(c)) + "(class type determined by type of first class).") try: getattr(spec, model_accessor_for_class_labels).int64ClassLabels.vector.append(int(c)) # Not all the classifiers have class labels; in particular the pipeline # classifier. Thus it's not an error if we can't actually set them. except AttributeError: break # And we are done! return spec
[ "def", "set_classifier_interface_params", "(", "spec", ",", "features", ",", "class_labels", ",", "model_accessor_for_class_labels", ",", "output_features", "=", "None", ")", ":", "# Normalize the features list.", "features", "=", "_fm", ".", "process_or_validate_features", "(", "features", ")", "if", "class_labels", "is", "None", ":", "raise", "ValueError", "(", "\"List of class labels must be provided.\"", ")", "n_classes", "=", "len", "(", "class_labels", ")", "output_features", "=", "_fm", ".", "process_or_validate_classifier_output_features", "(", "output_features", ",", "class_labels", ")", "if", "len", "(", "output_features", ")", "==", "1", ":", "predicted_class_output", ",", "pred_cl_type", "=", "output_features", "[", "0", "]", "score_output", "=", "None", "elif", "len", "(", "output_features", ")", "==", "2", ":", "predicted_class_output", ",", "pred_cl_type", "=", "output_features", "[", "0", "]", "score_output", ",", "score_output_type", "=", "output_features", "[", "1", "]", "else", ":", "raise", "ValueError", "(", "\"Provided output classes for a classifier must be \"", "\"a list of features, predicted class and (optionally) class_score.\"", ")", "spec", ".", "description", ".", "predictedFeatureName", "=", "predicted_class_output", "# Are they out of order?", "if", "not", "(", "pred_cl_type", "==", "datatypes", ".", "Int64", "(", ")", "or", "pred_cl_type", "==", "datatypes", ".", "String", "(", ")", ")", ":", "raise", "ValueError", "(", "\"Provided predicted class output type not Int64 or String (%s).\"", "%", "repr", "(", "pred_cl_type", ")", ")", "if", "score_output", "is", "not", "None", ":", "if", "not", "isinstance", "(", "score_output_type", ",", "datatypes", ".", "Dictionary", ")", ":", "raise", "ValueError", "(", "\"Provided class score output type not a Dictionary (%s).\"", "%", "repr", "(", "score_output_type", ")", ")", "if", "score_output_type", ".", "key_type", "!=", "pred_cl_type", ":", "raise", "ValueError", "(", "(", "\"Provided 
class score output (%s) key_type (%s) does not \"", "\"match type of class prediction (%s).\"", ")", "%", "(", "score_output", ",", "repr", "(", "score_output_type", ".", "key_type", ")", ",", "repr", "(", "pred_cl_type", ")", ")", ")", "spec", ".", "description", ".", "predictedProbabilitiesName", "=", "score_output", "# add input", "for", "index", ",", "(", "cur_input_name", ",", "input_type", ")", "in", "enumerate", "(", "features", ")", ":", "input_", "=", "spec", ".", "description", ".", "input", ".", "add", "(", ")", "input_", ".", "name", "=", "cur_input_name", "datatypes", ".", "_set_datatype", "(", "input_", ".", "type", ",", "input_type", ")", "# add output", "for", "index", ",", "(", "cur_output_name", ",", "output_type", ")", "in", "enumerate", "(", "output_features", ")", ":", "output_", "=", "spec", ".", "description", ".", "output", ".", "add", "(", ")", "output_", ".", "name", "=", "cur_output_name", "datatypes", ".", "_set_datatype", "(", "output_", ".", "type", ",", "output_type", ")", "# Worry about the class labels", "if", "pred_cl_type", "==", "datatypes", ".", "String", "(", ")", ":", "try", ":", "for", "c", "in", "class_labels", ":", "getattr", "(", "spec", ",", "model_accessor_for_class_labels", ")", ".", "stringClassLabels", ".", "vector", ".", "append", "(", "str", "(", "c", ")", ")", "# Not all the classifiers have class labels; in particular the pipeline", "# classifier. 
Thus it's not an error if we can't actually set them.", "except", "AttributeError", ":", "pass", "else", ":", "for", "c", "in", "class_labels", ":", "conv_error", "=", "False", "try", ":", "if", "not", "(", "int", "(", "c", ")", "==", "c", ")", ":", "conv_error", "=", "True", "except", ":", "conv_error", "=", "True", "if", "conv_error", ":", "raise", "TypeError", "(", "(", "\"Cannot cast '%s' class to an int type \"", "%", "str", "(", "c", ")", ")", "+", "\"(class type determined by type of first class).\"", ")", "try", ":", "getattr", "(", "spec", ",", "model_accessor_for_class_labels", ")", ".", "int64ClassLabels", ".", "vector", ".", "append", "(", "int", "(", "c", ")", ")", "# Not all the classifiers have class labels; in particular the pipeline", "# classifier. Thus it's not an error if we can't actually set them.", "except", "AttributeError", ":", "break", "# And we are done!", "return", "spec" ]
Common utilities to set the regression interface params.
[ "Common", "utilities", "to", "set", "the", "regression", "interface", "params", "." ]
74514c3f99e25b46f22c6e02977fe3da69221c2e
https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/external/coremltools_wrap/coremltools/coremltools/models/_interface_management.py#L13-L100
28,778
apple/turicreate
src/external/coremltools_wrap/coremltools/coremltools/models/_interface_management.py
set_regressor_interface_params
def set_regressor_interface_params(spec, features, output_features): """ Common utilities to set the regressor interface params. """ if output_features is None: output_features = [("predicted_class", datatypes.Double())] else: output_features = _fm.process_or_validate_features(output_features, 1) if len(output_features) != 1: raise ValueError("Provided output features for a regressor must be " "one Double feature.") if output_features[0][1] != datatypes.Double(): raise ValueError("Output type of a regressor must be a Double.") prediction_name = output_features[0][0] spec.description.predictedFeatureName = prediction_name # Normalize the features list. features = _fm.process_or_validate_features(features) # add input and output features for cur_input_name, feature_type in features: input_ = spec.description.input.add() input_.name = cur_input_name datatypes._set_datatype(input_.type, feature_type) output_ = spec.description.output.add() output_.name = prediction_name datatypes._set_datatype(output_.type, 'Double') return spec
python
def set_regressor_interface_params(spec, features, output_features): """ Common utilities to set the regressor interface params. """ if output_features is None: output_features = [("predicted_class", datatypes.Double())] else: output_features = _fm.process_or_validate_features(output_features, 1) if len(output_features) != 1: raise ValueError("Provided output features for a regressor must be " "one Double feature.") if output_features[0][1] != datatypes.Double(): raise ValueError("Output type of a regressor must be a Double.") prediction_name = output_features[0][0] spec.description.predictedFeatureName = prediction_name # Normalize the features list. features = _fm.process_or_validate_features(features) # add input and output features for cur_input_name, feature_type in features: input_ = spec.description.input.add() input_.name = cur_input_name datatypes._set_datatype(input_.type, feature_type) output_ = spec.description.output.add() output_.name = prediction_name datatypes._set_datatype(output_.type, 'Double') return spec
[ "def", "set_regressor_interface_params", "(", "spec", ",", "features", ",", "output_features", ")", ":", "if", "output_features", "is", "None", ":", "output_features", "=", "[", "(", "\"predicted_class\"", ",", "datatypes", ".", "Double", "(", ")", ")", "]", "else", ":", "output_features", "=", "_fm", ".", "process_or_validate_features", "(", "output_features", ",", "1", ")", "if", "len", "(", "output_features", ")", "!=", "1", ":", "raise", "ValueError", "(", "\"Provided output features for a regressor must be \"", "\"one Double feature.\"", ")", "if", "output_features", "[", "0", "]", "[", "1", "]", "!=", "datatypes", ".", "Double", "(", ")", ":", "raise", "ValueError", "(", "\"Output type of a regressor must be a Double.\"", ")", "prediction_name", "=", "output_features", "[", "0", "]", "[", "0", "]", "spec", ".", "description", ".", "predictedFeatureName", "=", "prediction_name", "# Normalize the features list.", "features", "=", "_fm", ".", "process_or_validate_features", "(", "features", ")", "# add input and output features", "for", "cur_input_name", ",", "feature_type", "in", "features", ":", "input_", "=", "spec", ".", "description", ".", "input", ".", "add", "(", ")", "input_", ".", "name", "=", "cur_input_name", "datatypes", ".", "_set_datatype", "(", "input_", ".", "type", ",", "feature_type", ")", "output_", "=", "spec", ".", "description", ".", "output", ".", "add", "(", ")", "output_", ".", "name", "=", "prediction_name", "datatypes", ".", "_set_datatype", "(", "output_", ".", "type", ",", "'Double'", ")", "return", "spec" ]
Common utilities to set the regressor interface params.
[ "Common", "utilities", "to", "set", "the", "regressor", "interface", "params", "." ]
74514c3f99e25b46f22c6e02977fe3da69221c2e
https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/external/coremltools_wrap/coremltools/coremltools/models/_interface_management.py#L102-L132
28,779
apple/turicreate
src/external/coremltools_wrap/coremltools/coremltools/models/_interface_management.py
set_transform_interface_params
def set_transform_interface_params(spec, input_features, output_features, are_optional = False): """ Common utilities to set transform interface params. """ input_features = _fm.process_or_validate_features(input_features) output_features = _fm.process_or_validate_features(output_features) # Add input and output features for (fname, ftype) in input_features: input_ = spec.description.input.add() input_.name = fname datatypes._set_datatype(input_.type, ftype) if are_optional: input_.type.isOptional = are_optional for (fname, ftype) in output_features: output_ = spec.description.output.add() output_.name = fname datatypes._set_datatype(output_.type, ftype) return spec
python
def set_transform_interface_params(spec, input_features, output_features, are_optional = False): """ Common utilities to set transform interface params. """ input_features = _fm.process_or_validate_features(input_features) output_features = _fm.process_or_validate_features(output_features) # Add input and output features for (fname, ftype) in input_features: input_ = spec.description.input.add() input_.name = fname datatypes._set_datatype(input_.type, ftype) if are_optional: input_.type.isOptional = are_optional for (fname, ftype) in output_features: output_ = spec.description.output.add() output_.name = fname datatypes._set_datatype(output_.type, ftype) return spec
[ "def", "set_transform_interface_params", "(", "spec", ",", "input_features", ",", "output_features", ",", "are_optional", "=", "False", ")", ":", "input_features", "=", "_fm", ".", "process_or_validate_features", "(", "input_features", ")", "output_features", "=", "_fm", ".", "process_or_validate_features", "(", "output_features", ")", "# Add input and output features", "for", "(", "fname", ",", "ftype", ")", "in", "input_features", ":", "input_", "=", "spec", ".", "description", ".", "input", ".", "add", "(", ")", "input_", ".", "name", "=", "fname", "datatypes", ".", "_set_datatype", "(", "input_", ".", "type", ",", "ftype", ")", "if", "are_optional", ":", "input_", ".", "type", ".", "isOptional", "=", "are_optional", "for", "(", "fname", ",", "ftype", ")", "in", "output_features", ":", "output_", "=", "spec", ".", "description", ".", "output", ".", "add", "(", ")", "output_", ".", "name", "=", "fname", "datatypes", ".", "_set_datatype", "(", "output_", ".", "type", ",", "ftype", ")", "return", "spec" ]
Common utilities to set transform interface params.
[ "Common", "utilities", "to", "set", "transform", "interface", "params", "." ]
74514c3f99e25b46f22c6e02977fe3da69221c2e
https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/external/coremltools_wrap/coremltools/coremltools/models/_interface_management.py#L134-L153
28,780
apple/turicreate
src/unity/python/turicreate/toolkits/activity_classifier/_sframe_sequence_iterator.py
_load_into_numpy
def _load_into_numpy(sf, np_array, start, end, strides=None, shape=None): """Loads into numpy array from SFrame, assuming SFrame stores data flattened""" np_array[:] = 0.0 np_array_2d = np_array.reshape((np_array.shape[0], np_array.shape[1] * np_array.shape[2])) _extensions.sframe_load_to_numpy(sf, np_array.ctypes.data, np_array_2d.strides, np_array_2d.shape, start, end)
python
def _load_into_numpy(sf, np_array, start, end, strides=None, shape=None): """Loads into numpy array from SFrame, assuming SFrame stores data flattened""" np_array[:] = 0.0 np_array_2d = np_array.reshape((np_array.shape[0], np_array.shape[1] * np_array.shape[2])) _extensions.sframe_load_to_numpy(sf, np_array.ctypes.data, np_array_2d.strides, np_array_2d.shape, start, end)
[ "def", "_load_into_numpy", "(", "sf", ",", "np_array", ",", "start", ",", "end", ",", "strides", "=", "None", ",", "shape", "=", "None", ")", ":", "np_array", "[", ":", "]", "=", "0.0", "np_array_2d", "=", "np_array", ".", "reshape", "(", "(", "np_array", ".", "shape", "[", "0", "]", ",", "np_array", ".", "shape", "[", "1", "]", "*", "np_array", ".", "shape", "[", "2", "]", ")", ")", "_extensions", ".", "sframe_load_to_numpy", "(", "sf", ",", "np_array", ".", "ctypes", ".", "data", ",", "np_array_2d", ".", "strides", ",", "np_array_2d", ".", "shape", ",", "start", ",", "end", ")" ]
Loads into numpy array from SFrame, assuming SFrame stores data flattened
[ "Loads", "into", "numpy", "array", "from", "SFrame", "assuming", "SFrame", "stores", "data", "flattened" ]
74514c3f99e25b46f22c6e02977fe3da69221c2e
https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/unity/python/turicreate/toolkits/activity_classifier/_sframe_sequence_iterator.py#L49-L55
28,781
apple/turicreate
src/external/coremltools_wrap/coremltools/coremltools/models/neural_network.py
NeuralNetworkBuilder.set_input
def set_input(self, input_names, input_dims): """ Set the inputs of the network spec. Parameters ---------- input_names: [str] List of input names of the network. input_dims: [tuple] List of input dimensions of the network. The ordering of input_dims is the same as input_names. Examples -------- .. sourcecode:: python # Set the neural network spec inputs to be 3 dimensional vector data1 and # 4 dimensional vector data2. >>> builder.set_input(input_names = ['data1', 'data2'], [(3,), (4,)]) See Also -------- set_output, set_class_labels """ spec = self.spec nn_spec = self.nn_spec for idx, dim in enumerate(input_dims): if len(dim) == 3: input_shape = (dim[0], dim[1], dim[2]) elif len(dim) == 2: input_shape = (dim[1], ) elif len(dim) == 1: input_shape = tuple(dim) else: raise RuntimeError("Attempting to add a neural network " + "input with rank " + str(len(dim)) + ". All networks should take inputs of rank 1 or 3.") spec.description.input[idx].type.multiArrayType.ClearField("shape") spec.description.input[idx].type.multiArrayType.shape.extend(input_shape) # TODO: if it's an embedding, this should be integer spec.description.input[idx].type.multiArrayType.dataType = _Model_pb2.ArrayFeatureType.DOUBLE
python
def set_input(self, input_names, input_dims): """ Set the inputs of the network spec. Parameters ---------- input_names: [str] List of input names of the network. input_dims: [tuple] List of input dimensions of the network. The ordering of input_dims is the same as input_names. Examples -------- .. sourcecode:: python # Set the neural network spec inputs to be 3 dimensional vector data1 and # 4 dimensional vector data2. >>> builder.set_input(input_names = ['data1', 'data2'], [(3,), (4,)]) See Also -------- set_output, set_class_labels """ spec = self.spec nn_spec = self.nn_spec for idx, dim in enumerate(input_dims): if len(dim) == 3: input_shape = (dim[0], dim[1], dim[2]) elif len(dim) == 2: input_shape = (dim[1], ) elif len(dim) == 1: input_shape = tuple(dim) else: raise RuntimeError("Attempting to add a neural network " + "input with rank " + str(len(dim)) + ". All networks should take inputs of rank 1 or 3.") spec.description.input[idx].type.multiArrayType.ClearField("shape") spec.description.input[idx].type.multiArrayType.shape.extend(input_shape) # TODO: if it's an embedding, this should be integer spec.description.input[idx].type.multiArrayType.dataType = _Model_pb2.ArrayFeatureType.DOUBLE
[ "def", "set_input", "(", "self", ",", "input_names", ",", "input_dims", ")", ":", "spec", "=", "self", ".", "spec", "nn_spec", "=", "self", ".", "nn_spec", "for", "idx", ",", "dim", "in", "enumerate", "(", "input_dims", ")", ":", "if", "len", "(", "dim", ")", "==", "3", ":", "input_shape", "=", "(", "dim", "[", "0", "]", ",", "dim", "[", "1", "]", ",", "dim", "[", "2", "]", ")", "elif", "len", "(", "dim", ")", "==", "2", ":", "input_shape", "=", "(", "dim", "[", "1", "]", ",", ")", "elif", "len", "(", "dim", ")", "==", "1", ":", "input_shape", "=", "tuple", "(", "dim", ")", "else", ":", "raise", "RuntimeError", "(", "\"Attempting to add a neural network \"", "+", "\"input with rank \"", "+", "str", "(", "len", "(", "dim", ")", ")", "+", "\". All networks should take inputs of rank 1 or 3.\"", ")", "spec", ".", "description", ".", "input", "[", "idx", "]", ".", "type", ".", "multiArrayType", ".", "ClearField", "(", "\"shape\"", ")", "spec", ".", "description", ".", "input", "[", "idx", "]", ".", "type", ".", "multiArrayType", ".", "shape", ".", "extend", "(", "input_shape", ")", "# TODO: if it's an embedding, this should be integer", "spec", ".", "description", ".", "input", "[", "idx", "]", ".", "type", ".", "multiArrayType", ".", "dataType", "=", "_Model_pb2", ".", "ArrayFeatureType", ".", "DOUBLE" ]
Set the inputs of the network spec. Parameters ---------- input_names: [str] List of input names of the network. input_dims: [tuple] List of input dimensions of the network. The ordering of input_dims is the same as input_names. Examples -------- .. sourcecode:: python # Set the neural network spec inputs to be 3 dimensional vector data1 and # 4 dimensional vector data2. >>> builder.set_input(input_names = ['data1', 'data2'], [(3,), (4,)]) See Also -------- set_output, set_class_labels
[ "Set", "the", "inputs", "of", "the", "network", "spec", "." ]
74514c3f99e25b46f22c6e02977fe3da69221c2e
https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/external/coremltools_wrap/coremltools/coremltools/models/neural_network.py#L159-L202
28,782
apple/turicreate
src/external/coremltools_wrap/coremltools/coremltools/models/neural_network.py
NeuralNetworkBuilder.set_output
def set_output(self, output_names, output_dims): """ Set the outputs of the network spec. Parameters ---------- output_names: [str] List of output names of the network. output_dims: [tuple] List of output dimensions of the network. The ordering of output_dims is the same as output_names. Examples -------- .. sourcecode:: python # Set the neural network spec outputs to be 3 dimensional vector feature1 and # 4 dimensional vector feature2. >>> builder.set_output(output_names = ['feature1', 'feature2'], [(3,), (4,)]) See Also -------- set_input, set_class_labels """ spec = self.spec nn_spec = self.nn_spec for idx, dim in enumerate(output_dims): spec.description.output[idx].type.multiArrayType.ClearField("shape") spec.description.output[idx].type.multiArrayType.shape.extend(dim) spec.description.output[idx].type.multiArrayType.dataType = \ _Model_pb2.ArrayFeatureType.DOUBLE
python
def set_output(self, output_names, output_dims): """ Set the outputs of the network spec. Parameters ---------- output_names: [str] List of output names of the network. output_dims: [tuple] List of output dimensions of the network. The ordering of output_dims is the same as output_names. Examples -------- .. sourcecode:: python # Set the neural network spec outputs to be 3 dimensional vector feature1 and # 4 dimensional vector feature2. >>> builder.set_output(output_names = ['feature1', 'feature2'], [(3,), (4,)]) See Also -------- set_input, set_class_labels """ spec = self.spec nn_spec = self.nn_spec for idx, dim in enumerate(output_dims): spec.description.output[idx].type.multiArrayType.ClearField("shape") spec.description.output[idx].type.multiArrayType.shape.extend(dim) spec.description.output[idx].type.multiArrayType.dataType = \ _Model_pb2.ArrayFeatureType.DOUBLE
[ "def", "set_output", "(", "self", ",", "output_names", ",", "output_dims", ")", ":", "spec", "=", "self", ".", "spec", "nn_spec", "=", "self", ".", "nn_spec", "for", "idx", ",", "dim", "in", "enumerate", "(", "output_dims", ")", ":", "spec", ".", "description", ".", "output", "[", "idx", "]", ".", "type", ".", "multiArrayType", ".", "ClearField", "(", "\"shape\"", ")", "spec", ".", "description", ".", "output", "[", "idx", "]", ".", "type", ".", "multiArrayType", ".", "shape", ".", "extend", "(", "dim", ")", "spec", ".", "description", ".", "output", "[", "idx", "]", ".", "type", ".", "multiArrayType", ".", "dataType", "=", "_Model_pb2", ".", "ArrayFeatureType", ".", "DOUBLE" ]
Set the outputs of the network spec. Parameters ---------- output_names: [str] List of output names of the network. output_dims: [tuple] List of output dimensions of the network. The ordering of output_dims is the same as output_names. Examples -------- .. sourcecode:: python # Set the neural network spec outputs to be 3 dimensional vector feature1 and # 4 dimensional vector feature2. >>> builder.set_output(output_names = ['feature1', 'feature2'], [(3,), (4,)]) See Also -------- set_input, set_class_labels
[ "Set", "the", "outputs", "of", "the", "network", "spec", "." ]
74514c3f99e25b46f22c6e02977fe3da69221c2e
https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/external/coremltools_wrap/coremltools/coremltools/models/neural_network.py#L204-L235
28,783
apple/turicreate
src/external/coremltools_wrap/coremltools/coremltools/models/neural_network.py
NeuralNetworkBuilder.set_class_labels
def set_class_labels(self, class_labels, predicted_feature_name = 'classLabel', prediction_blob = ''): """ Set class labels to the model spec to make it a neural network classifier. Parameters ---------- class_labels: list[int or str] A list of integers or strings that map the index of the output of a neural network to labels in a classifier. predicted_feature_name: str Name of the output feature for the class labels exposed in the Core ML neural network classifier. Defaults to 'class_output'. prediction_blob: str If provided, then this is the name of the neural network blob which generates the probabilities for each class label (typically the output of a softmax layer). If not provided, then the last output layer is assumed. See Also -------- set_input, set_output, set_pre_processing_parameters """ spec = self.spec nn_spec = self.nn_spec if len(spec.description.output) == 0: raise ValueError( "Model should have at least one output (the probabilities) to automatically make it a classifier.") probOutput = spec.description.output[0] probOutput.type.dictionaryType.MergeFromString(b'') if len(class_labels) == 0: return class_type = type(class_labels[0]) if class_type not in [int, str]: raise TypeError("Class labels must be of type Integer or String. 
(not %s)" % class_type) spec.description.predictedProbabilitiesName = probOutput.name spec.description.predictedFeatureName = predicted_feature_name classLabel = spec.description.output.add() classLabel.name = predicted_feature_name if class_type == int: nn_spec.ClearField('int64ClassLabels') probOutput.type.dictionaryType.int64KeyType.MergeFromString(b'') classLabel.type.int64Type.MergeFromString(b'') for c in class_labels: nn_spec.int64ClassLabels.vector.append(c) else: nn_spec.ClearField('stringClassLabels') probOutput.type.dictionaryType.stringKeyType.MergeFromString(b'') classLabel.type.stringType.MergeFromString(b'') for c in class_labels: nn_spec.stringClassLabels.vector.append(c) if prediction_blob != '': # correctness here will be checked in the validator -- i.e. to # make sure this string corresponds to a real blob nn_spec.labelProbabilityLayerName = prediction_blob else: #not provided # assume it's the last blob produced in the network nn_spec.labelProbabilityLayerName = nn_spec.layers[-1].output[0]
python
def set_class_labels(self, class_labels, predicted_feature_name = 'classLabel', prediction_blob = ''): """ Set class labels to the model spec to make it a neural network classifier. Parameters ---------- class_labels: list[int or str] A list of integers or strings that map the index of the output of a neural network to labels in a classifier. predicted_feature_name: str Name of the output feature for the class labels exposed in the Core ML neural network classifier. Defaults to 'class_output'. prediction_blob: str If provided, then this is the name of the neural network blob which generates the probabilities for each class label (typically the output of a softmax layer). If not provided, then the last output layer is assumed. See Also -------- set_input, set_output, set_pre_processing_parameters """ spec = self.spec nn_spec = self.nn_spec if len(spec.description.output) == 0: raise ValueError( "Model should have at least one output (the probabilities) to automatically make it a classifier.") probOutput = spec.description.output[0] probOutput.type.dictionaryType.MergeFromString(b'') if len(class_labels) == 0: return class_type = type(class_labels[0]) if class_type not in [int, str]: raise TypeError("Class labels must be of type Integer or String. 
(not %s)" % class_type) spec.description.predictedProbabilitiesName = probOutput.name spec.description.predictedFeatureName = predicted_feature_name classLabel = spec.description.output.add() classLabel.name = predicted_feature_name if class_type == int: nn_spec.ClearField('int64ClassLabels') probOutput.type.dictionaryType.int64KeyType.MergeFromString(b'') classLabel.type.int64Type.MergeFromString(b'') for c in class_labels: nn_spec.int64ClassLabels.vector.append(c) else: nn_spec.ClearField('stringClassLabels') probOutput.type.dictionaryType.stringKeyType.MergeFromString(b'') classLabel.type.stringType.MergeFromString(b'') for c in class_labels: nn_spec.stringClassLabels.vector.append(c) if prediction_blob != '': # correctness here will be checked in the validator -- i.e. to # make sure this string corresponds to a real blob nn_spec.labelProbabilityLayerName = prediction_blob else: #not provided # assume it's the last blob produced in the network nn_spec.labelProbabilityLayerName = nn_spec.layers[-1].output[0]
[ "def", "set_class_labels", "(", "self", ",", "class_labels", ",", "predicted_feature_name", "=", "'classLabel'", ",", "prediction_blob", "=", "''", ")", ":", "spec", "=", "self", ".", "spec", "nn_spec", "=", "self", ".", "nn_spec", "if", "len", "(", "spec", ".", "description", ".", "output", ")", "==", "0", ":", "raise", "ValueError", "(", "\"Model should have at least one output (the probabilities) to automatically make it a classifier.\"", ")", "probOutput", "=", "spec", ".", "description", ".", "output", "[", "0", "]", "probOutput", ".", "type", ".", "dictionaryType", ".", "MergeFromString", "(", "b''", ")", "if", "len", "(", "class_labels", ")", "==", "0", ":", "return", "class_type", "=", "type", "(", "class_labels", "[", "0", "]", ")", "if", "class_type", "not", "in", "[", "int", ",", "str", "]", ":", "raise", "TypeError", "(", "\"Class labels must be of type Integer or String. (not %s)\"", "%", "class_type", ")", "spec", ".", "description", ".", "predictedProbabilitiesName", "=", "probOutput", ".", "name", "spec", ".", "description", ".", "predictedFeatureName", "=", "predicted_feature_name", "classLabel", "=", "spec", ".", "description", ".", "output", ".", "add", "(", ")", "classLabel", ".", "name", "=", "predicted_feature_name", "if", "class_type", "==", "int", ":", "nn_spec", ".", "ClearField", "(", "'int64ClassLabels'", ")", "probOutput", ".", "type", ".", "dictionaryType", ".", "int64KeyType", ".", "MergeFromString", "(", "b''", ")", "classLabel", ".", "type", ".", "int64Type", ".", "MergeFromString", "(", "b''", ")", "for", "c", "in", "class_labels", ":", "nn_spec", ".", "int64ClassLabels", ".", "vector", ".", "append", "(", "c", ")", "else", ":", "nn_spec", ".", "ClearField", "(", "'stringClassLabels'", ")", "probOutput", ".", "type", ".", "dictionaryType", ".", "stringKeyType", ".", "MergeFromString", "(", "b''", ")", "classLabel", ".", "type", ".", "stringType", ".", "MergeFromString", "(", "b''", ")", "for", "c", "in", "class_labels", ":", 
"nn_spec", ".", "stringClassLabels", ".", "vector", ".", "append", "(", "c", ")", "if", "prediction_blob", "!=", "''", ":", "# correctness here will be checked in the validator -- i.e. to", "# make sure this string corresponds to a real blob", "nn_spec", ".", "labelProbabilityLayerName", "=", "prediction_blob", "else", ":", "#not provided", "# assume it's the last blob produced in the network", "nn_spec", ".", "labelProbabilityLayerName", "=", "nn_spec", ".", "layers", "[", "-", "1", "]", ".", "output", "[", "0", "]" ]
Set class labels to the model spec to make it a neural network classifier. Parameters ---------- class_labels: list[int or str] A list of integers or strings that map the index of the output of a neural network to labels in a classifier. predicted_feature_name: str Name of the output feature for the class labels exposed in the Core ML neural network classifier. Defaults to 'class_output'. prediction_blob: str If provided, then this is the name of the neural network blob which generates the probabilities for each class label (typically the output of a softmax layer). If not provided, then the last output layer is assumed. See Also -------- set_input, set_output, set_pre_processing_parameters
[ "Set", "class", "labels", "to", "the", "model", "spec", "to", "make", "it", "a", "neural", "network", "classifier", "." ]
74514c3f99e25b46f22c6e02977fe3da69221c2e
https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/external/coremltools_wrap/coremltools/coremltools/models/neural_network.py#L237-L299
28,784
apple/turicreate
src/external/coremltools_wrap/coremltools/coremltools/models/neural_network.py
NeuralNetworkBuilder.add_optionals
def add_optionals(self, optionals_in, optionals_out): """ Add optional inputs and outputs to the model spec. Parameters ---------- optionals_in: [str] List of inputs that are optionals. optionals_out: [str] List of outputs that are optionals. See Also -------- set_input, set_output """ spec = self.spec if (not optionals_in) and (not optionals_out): return # assuming single sizes here input_types = [datatypes.Array(dim) for (name, dim) in optionals_in] output_types = [datatypes.Array(dim) for (name, dim) in optionals_out] input_names = [str(name) for (name, dim) in optionals_in] output_names = [str(name) for (name, dim) in optionals_out] input_features = list(zip(input_names, input_types)) output_features = list(zip(output_names, output_types)) len_before_in = len(spec.description.input) len_before_out = len(spec.description.output) # this appends to the existing model interface set_transform_interface_params(spec, input_features, output_features, True) # add types for any extra hidden inputs for idx in range(len_before_in, len(spec.description.input)): spec.description.input[idx].type.multiArrayType.dataType = _Model_pb2.ArrayFeatureType.DOUBLE for idx in range(len_before_out, len(spec.description.output)): spec.description.output[idx].type.multiArrayType.dataType = _Model_pb2.ArrayFeatureType.DOUBLE
python
def add_optionals(self, optionals_in, optionals_out): """ Add optional inputs and outputs to the model spec. Parameters ---------- optionals_in: [str] List of inputs that are optionals. optionals_out: [str] List of outputs that are optionals. See Also -------- set_input, set_output """ spec = self.spec if (not optionals_in) and (not optionals_out): return # assuming single sizes here input_types = [datatypes.Array(dim) for (name, dim) in optionals_in] output_types = [datatypes.Array(dim) for (name, dim) in optionals_out] input_names = [str(name) for (name, dim) in optionals_in] output_names = [str(name) for (name, dim) in optionals_out] input_features = list(zip(input_names, input_types)) output_features = list(zip(output_names, output_types)) len_before_in = len(spec.description.input) len_before_out = len(spec.description.output) # this appends to the existing model interface set_transform_interface_params(spec, input_features, output_features, True) # add types for any extra hidden inputs for idx in range(len_before_in, len(spec.description.input)): spec.description.input[idx].type.multiArrayType.dataType = _Model_pb2.ArrayFeatureType.DOUBLE for idx in range(len_before_out, len(spec.description.output)): spec.description.output[idx].type.multiArrayType.dataType = _Model_pb2.ArrayFeatureType.DOUBLE
[ "def", "add_optionals", "(", "self", ",", "optionals_in", ",", "optionals_out", ")", ":", "spec", "=", "self", ".", "spec", "if", "(", "not", "optionals_in", ")", "and", "(", "not", "optionals_out", ")", ":", "return", "# assuming single sizes here", "input_types", "=", "[", "datatypes", ".", "Array", "(", "dim", ")", "for", "(", "name", ",", "dim", ")", "in", "optionals_in", "]", "output_types", "=", "[", "datatypes", ".", "Array", "(", "dim", ")", "for", "(", "name", ",", "dim", ")", "in", "optionals_out", "]", "input_names", "=", "[", "str", "(", "name", ")", "for", "(", "name", ",", "dim", ")", "in", "optionals_in", "]", "output_names", "=", "[", "str", "(", "name", ")", "for", "(", "name", ",", "dim", ")", "in", "optionals_out", "]", "input_features", "=", "list", "(", "zip", "(", "input_names", ",", "input_types", ")", ")", "output_features", "=", "list", "(", "zip", "(", "output_names", ",", "output_types", ")", ")", "len_before_in", "=", "len", "(", "spec", ".", "description", ".", "input", ")", "len_before_out", "=", "len", "(", "spec", ".", "description", ".", "output", ")", "# this appends to the existing model interface", "set_transform_interface_params", "(", "spec", ",", "input_features", ",", "output_features", ",", "True", ")", "# add types for any extra hidden inputs", "for", "idx", "in", "range", "(", "len_before_in", ",", "len", "(", "spec", ".", "description", ".", "input", ")", ")", ":", "spec", ".", "description", ".", "input", "[", "idx", "]", ".", "type", ".", "multiArrayType", ".", "dataType", "=", "_Model_pb2", ".", "ArrayFeatureType", ".", "DOUBLE", "for", "idx", "in", "range", "(", "len_before_out", ",", "len", "(", "spec", ".", "description", ".", "output", ")", ")", ":", "spec", ".", "description", ".", "output", "[", "idx", "]", ".", "type", ".", "multiArrayType", ".", "dataType", "=", "_Model_pb2", ".", "ArrayFeatureType", ".", "DOUBLE" ]
Add optional inputs and outputs to the model spec. Parameters ---------- optionals_in: [str] List of inputs that are optionals. optionals_out: [str] List of outputs that are optionals. See Also -------- set_input, set_output
[ "Add", "optional", "inputs", "and", "outputs", "to", "the", "model", "spec", "." ]
74514c3f99e25b46f22c6e02977fe3da69221c2e
https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/external/coremltools_wrap/coremltools/coremltools/models/neural_network.py#L302-L343
28,785
apple/turicreate
src/external/coremltools_wrap/coremltools/coremltools/models/neural_network.py
NeuralNetworkBuilder.add_embedding
def add_embedding(self, name, W, b, input_dim, output_channels, has_bias, input_name, output_name): """ Add an embedding layer to the model. Parameters ---------- name: str The name of this layer W: numpy.array Weight matrix of shape (output_channels, input_dim). b: numpy.array Bias vector of shape (output_channels, ). input_dim: int Size of the vocabulary (1 + maximum integer index of the words). output_channels: int Number of output channels. has_bias: boolean Whether the bias vector of this layer is ignored in the spec. - If True, the bias vector of this layer is not ignored. - If False, the bias vector is ignored. input_name: str The input blob name of this layer. output_name: str The output blob name of this layer. See Also -------- add_inner_product """ spec = self.spec nn_spec = self.nn_spec # Add a new layer spec_layer = nn_spec.layers.add() spec_layer.name = name spec_layer.input.append(input_name) spec_layer.output.append(output_name) # Fill in the parameters spec_layer_params = spec_layer.embedding spec_layer_params.inputDim = input_dim spec_layer_params.outputChannels = output_channels spec_layer_params.hasBias = has_bias weights = spec_layer_params.weights weights.floatValue.extend(map(float, W.flatten())) if has_bias: bias = spec_layer_params.bias bias.floatValue.extend(map(float, b.flatten()))
python
def add_embedding(self, name, W, b, input_dim, output_channels, has_bias, input_name, output_name): """ Add an embedding layer to the model. Parameters ---------- name: str The name of this layer W: numpy.array Weight matrix of shape (output_channels, input_dim). b: numpy.array Bias vector of shape (output_channels, ). input_dim: int Size of the vocabulary (1 + maximum integer index of the words). output_channels: int Number of output channels. has_bias: boolean Whether the bias vector of this layer is ignored in the spec. - If True, the bias vector of this layer is not ignored. - If False, the bias vector is ignored. input_name: str The input blob name of this layer. output_name: str The output blob name of this layer. See Also -------- add_inner_product """ spec = self.spec nn_spec = self.nn_spec # Add a new layer spec_layer = nn_spec.layers.add() spec_layer.name = name spec_layer.input.append(input_name) spec_layer.output.append(output_name) # Fill in the parameters spec_layer_params = spec_layer.embedding spec_layer_params.inputDim = input_dim spec_layer_params.outputChannels = output_channels spec_layer_params.hasBias = has_bias weights = spec_layer_params.weights weights.floatValue.extend(map(float, W.flatten())) if has_bias: bias = spec_layer_params.bias bias.floatValue.extend(map(float, b.flatten()))
[ "def", "add_embedding", "(", "self", ",", "name", ",", "W", ",", "b", ",", "input_dim", ",", "output_channels", ",", "has_bias", ",", "input_name", ",", "output_name", ")", ":", "spec", "=", "self", ".", "spec", "nn_spec", "=", "self", ".", "nn_spec", "# Add a new layer", "spec_layer", "=", "nn_spec", ".", "layers", ".", "add", "(", ")", "spec_layer", ".", "name", "=", "name", "spec_layer", ".", "input", ".", "append", "(", "input_name", ")", "spec_layer", ".", "output", ".", "append", "(", "output_name", ")", "# Fill in the parameters", "spec_layer_params", "=", "spec_layer", ".", "embedding", "spec_layer_params", ".", "inputDim", "=", "input_dim", "spec_layer_params", ".", "outputChannels", "=", "output_channels", "spec_layer_params", ".", "hasBias", "=", "has_bias", "weights", "=", "spec_layer_params", ".", "weights", "weights", ".", "floatValue", ".", "extend", "(", "map", "(", "float", ",", "W", ".", "flatten", "(", ")", ")", ")", "if", "has_bias", ":", "bias", "=", "spec_layer_params", ".", "bias", "bias", ".", "floatValue", ".", "extend", "(", "map", "(", "float", ",", "b", ".", "flatten", "(", ")", ")", ")" ]
Add an embedding layer to the model. Parameters ---------- name: str The name of this layer W: numpy.array Weight matrix of shape (output_channels, input_dim). b: numpy.array Bias vector of shape (output_channels, ). input_dim: int Size of the vocabulary (1 + maximum integer index of the words). output_channels: int Number of output channels. has_bias: boolean Whether the bias vector of this layer is ignored in the spec. - If True, the bias vector of this layer is not ignored. - If False, the bias vector is ignored. input_name: str The input blob name of this layer. output_name: str The output blob name of this layer. See Also -------- add_inner_product
[ "Add", "an", "embedding", "layer", "to", "the", "model", "." ]
74514c3f99e25b46f22c6e02977fe3da69221c2e
https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/external/coremltools_wrap/coremltools/coremltools/models/neural_network.py#L400-L453
28,786
apple/turicreate
src/external/coremltools_wrap/coremltools/coremltools/models/neural_network.py
NeuralNetworkBuilder.add_softmax
def add_softmax(self, name, input_name, output_name): """ Add a softmax layer to the model. Parameters ---------- name: str The name of this layer. input_name: str The input blob name of this layer. output_name: str The output blob name of this layer. See Also -------- add_activation, add_inner_product, add_convolution """ spec = self.spec nn_spec = self.nn_spec # Add a new layer spec_layer = nn_spec.layers.add() spec_layer.name = name spec_layer.input.append(input_name) spec_layer.output.append(output_name) spec_layer_params = spec_layer.softmax.MergeFromString(b'')
python
def add_softmax(self, name, input_name, output_name): """ Add a softmax layer to the model. Parameters ---------- name: str The name of this layer. input_name: str The input blob name of this layer. output_name: str The output blob name of this layer. See Also -------- add_activation, add_inner_product, add_convolution """ spec = self.spec nn_spec = self.nn_spec # Add a new layer spec_layer = nn_spec.layers.add() spec_layer.name = name spec_layer.input.append(input_name) spec_layer.output.append(output_name) spec_layer_params = spec_layer.softmax.MergeFromString(b'')
[ "def", "add_softmax", "(", "self", ",", "name", ",", "input_name", ",", "output_name", ")", ":", "spec", "=", "self", ".", "spec", "nn_spec", "=", "self", ".", "nn_spec", "# Add a new layer", "spec_layer", "=", "nn_spec", ".", "layers", ".", "add", "(", ")", "spec_layer", ".", "name", "=", "name", "spec_layer", ".", "input", ".", "append", "(", "input_name", ")", "spec_layer", ".", "output", ".", "append", "(", "output_name", ")", "spec_layer_params", "=", "spec_layer", ".", "softmax", ".", "MergeFromString", "(", "b''", ")" ]
Add a softmax layer to the model. Parameters ---------- name: str The name of this layer. input_name: str The input blob name of this layer. output_name: str The output blob name of this layer. See Also -------- add_activation, add_inner_product, add_convolution
[ "Add", "a", "softmax", "layer", "to", "the", "model", "." ]
74514c3f99e25b46f22c6e02977fe3da69221c2e
https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/external/coremltools_wrap/coremltools/coremltools/models/neural_network.py#L456-L482
28,787
apple/turicreate
src/external/coremltools_wrap/coremltools/coremltools/models/neural_network.py
NeuralNetworkBuilder.add_activation
def add_activation(self, name, non_linearity, input_name, output_name, params=None): """ Add an activation layer to the model. Parameters ---------- name: str The name of this layer non_linearity: str The non_linearity (activation) function of this layer. It can be one of the following: - 'RELU': Rectified Linear Unit (ReLU) function. - 'SIGMOID': sigmoid function. - 'TANH': tanh function. - 'SCALED_TANH': scaled tanh function, defined as: `f(x) = alpha * tanh(beta * x)` where alpha and beta are constant scalars. - 'SOFTPLUS': softplus function. - 'SOFTSIGN': softsign function. - 'SIGMOID_HARD': hard sigmoid function, defined as: `f(x) = min(max(alpha * x + beta, -1), 1)` where alpha and beta are constant scalars. - 'LEAKYRELU': leaky relu function, defined as: `f(x) = (x >= 0) * x + (x < 0) * alpha * x` where alpha is a constant scalar. - 'PRELU': Parametric ReLU function, defined as: `f(x) = (x >= 0) * x + (x < 0) * alpha * x` where alpha is a multi-dimensional array of same size as x. - 'ELU': Exponential linear unit function, defined as: `f(x) = (x >= 0) * x + (x < 0) * (alpha * exp(x) - 1)` where alpha is a constant scalar. - 'PARAMETRICSOFTPLUS': Parametric softplus function, defined as: `f(x) = alpha * log(1 + exp(beta * x))` where alpha and beta are two multi-dimensional arrays of same size as x. - 'THRESHOLDEDRELU': Thresholded ReLU function, defined as: `f(x) = (x >= alpha) * x` where alpha is a constant scalar. - 'LINEAR': linear function. `f(x) = alpha * x + beta` input_name: str The input blob name of this layer. output_name: str The output blob name of this layer. params: [float] | [numpy.array] Parameters for the activation, depending on non_linearity. Kindly refer to NeuralNetwork.proto for details. - When non_linearity is one of ['RELU', 'SIGMOID', 'TANH', 'SCALED_TANH', 'SOFTPLUS', 'SOFTSIGN'], params is ignored. - When non_linearity is one of ['SCALED_TANH', 'SIGMOID_HARD', 'LINEAR'], param is a list of 2 floats [alpha, beta]. 
- When non_linearity is one of ['LEAKYRELU', 'ELU', 'THRESHOLDEDRELU'], param is a list of 1 float [alpha]. - When non_linearity is 'PRELU', param is a list of 1 numpy array [alpha]. The shape of alpha is (C,), where C is either the number of input channels or 1. When C = 1, same alpha is applied to all channels. - When non_linearity is 'PARAMETRICSOFTPLUS', param is a list of 2 numpy arrays [alpha, beta]. The shape of alpha and beta is (C, ), where C is either the number of input channels or 1. When C = 1, same alpha and beta are applied to all channels. See Also -------- add_convolution, add_softmax """ spec = self.spec nn_spec = self.nn_spec # Add a new layer spec_layer = nn_spec.layers.add() spec_layer.name = name spec_layer.input.append(input_name) spec_layer.output.append(output_name) spec_layer_params = spec_layer.activation # Fill in the parameters if non_linearity == 'RELU': spec_layer_params.ReLU.MergeFromString(b'') elif non_linearity == 'SIGMOID': spec_layer_params.sigmoid.MergeFromString(b'') elif non_linearity == 'TANH': spec_layer_params.tanh.MergeFromString(b'') elif non_linearity == 'SCALED_TANH': spec_layer_params.scaledTanh.MergeFromString(b'') if params is None: alpha, beta = (0.0, 0.0) else: alpha, beta = params[0], params[1] spec_layer_params.scaledTanh.alpha = alpha spec_layer_params.scaledTanh.beta = beta elif non_linearity == 'SOFTPLUS': spec_layer_params.softplus.MergeFromString(b'') elif non_linearity == 'SOFTSIGN': spec_layer_params.softsign.MergeFromString(b'') elif non_linearity == 'SIGMOID_HARD': if params is None: alpha, beta = (0.2, 0.5) else: alpha, beta = params[0], params[1] spec_layer_params.sigmoidHard.alpha = alpha spec_layer_params.sigmoidHard.beta = beta elif non_linearity == 'LEAKYRELU': if params is None: alpha = 0.3 else: alpha = params[0] spec_layer_params.leakyReLU.alpha = float(alpha) elif non_linearity == 'PRELU': # PReLU must provide an np array in params[0] spec_layer_params.PReLU.alpha.floatValue.extend(map(float, 
params.flatten())) elif non_linearity == 'ELU': # ELU must provide an alpha in params[0] spec_layer_params.ELU.alpha = float(params) elif non_linearity == 'PARAMETRICSOFTPLUS': # Parametric softplus must provide two np arrays for alpha and beta alphas, betas = (params[0], params[1]) # Weight alignment: Keras [H,W,C,F], Espresso [ spec_layer_params.parametricSoftplus.alpha.floatValue.extend(map(float, alphas.flatten())) spec_layer_params.parametricSoftplus.beta.floatValue.extend(map(float, betas.flatten())) elif non_linearity == 'THRESHOLDEDRELU': if params is None: theta = 1.0 else: theta = params spec_layer_params.thresholdedReLU.alpha = float(theta) elif non_linearity == 'LINEAR': if params is None: alpha, beta = (1.0, 0.0) else: alpha, beta = params[0], params[1] spec_layer_params.linear.alpha = alpha spec_layer_params.linear.beta = beta else: raise TypeError("Unknown activation type %s." %(non_linearity))
python
def add_activation(self, name, non_linearity, input_name, output_name, params=None): """ Add an activation layer to the model. Parameters ---------- name: str The name of this layer non_linearity: str The non_linearity (activation) function of this layer. It can be one of the following: - 'RELU': Rectified Linear Unit (ReLU) function. - 'SIGMOID': sigmoid function. - 'TANH': tanh function. - 'SCALED_TANH': scaled tanh function, defined as: `f(x) = alpha * tanh(beta * x)` where alpha and beta are constant scalars. - 'SOFTPLUS': softplus function. - 'SOFTSIGN': softsign function. - 'SIGMOID_HARD': hard sigmoid function, defined as: `f(x) = min(max(alpha * x + beta, -1), 1)` where alpha and beta are constant scalars. - 'LEAKYRELU': leaky relu function, defined as: `f(x) = (x >= 0) * x + (x < 0) * alpha * x` where alpha is a constant scalar. - 'PRELU': Parametric ReLU function, defined as: `f(x) = (x >= 0) * x + (x < 0) * alpha * x` where alpha is a multi-dimensional array of same size as x. - 'ELU': Exponential linear unit function, defined as: `f(x) = (x >= 0) * x + (x < 0) * (alpha * exp(x) - 1)` where alpha is a constant scalar. - 'PARAMETRICSOFTPLUS': Parametric softplus function, defined as: `f(x) = alpha * log(1 + exp(beta * x))` where alpha and beta are two multi-dimensional arrays of same size as x. - 'THRESHOLDEDRELU': Thresholded ReLU function, defined as: `f(x) = (x >= alpha) * x` where alpha is a constant scalar. - 'LINEAR': linear function. `f(x) = alpha * x + beta` input_name: str The input blob name of this layer. output_name: str The output blob name of this layer. params: [float] | [numpy.array] Parameters for the activation, depending on non_linearity. Kindly refer to NeuralNetwork.proto for details. - When non_linearity is one of ['RELU', 'SIGMOID', 'TANH', 'SCALED_TANH', 'SOFTPLUS', 'SOFTSIGN'], params is ignored. - When non_linearity is one of ['SCALED_TANH', 'SIGMOID_HARD', 'LINEAR'], param is a list of 2 floats [alpha, beta]. 
- When non_linearity is one of ['LEAKYRELU', 'ELU', 'THRESHOLDEDRELU'], param is a list of 1 float [alpha]. - When non_linearity is 'PRELU', param is a list of 1 numpy array [alpha]. The shape of alpha is (C,), where C is either the number of input channels or 1. When C = 1, same alpha is applied to all channels. - When non_linearity is 'PARAMETRICSOFTPLUS', param is a list of 2 numpy arrays [alpha, beta]. The shape of alpha and beta is (C, ), where C is either the number of input channels or 1. When C = 1, same alpha and beta are applied to all channels. See Also -------- add_convolution, add_softmax """ spec = self.spec nn_spec = self.nn_spec # Add a new layer spec_layer = nn_spec.layers.add() spec_layer.name = name spec_layer.input.append(input_name) spec_layer.output.append(output_name) spec_layer_params = spec_layer.activation # Fill in the parameters if non_linearity == 'RELU': spec_layer_params.ReLU.MergeFromString(b'') elif non_linearity == 'SIGMOID': spec_layer_params.sigmoid.MergeFromString(b'') elif non_linearity == 'TANH': spec_layer_params.tanh.MergeFromString(b'') elif non_linearity == 'SCALED_TANH': spec_layer_params.scaledTanh.MergeFromString(b'') if params is None: alpha, beta = (0.0, 0.0) else: alpha, beta = params[0], params[1] spec_layer_params.scaledTanh.alpha = alpha spec_layer_params.scaledTanh.beta = beta elif non_linearity == 'SOFTPLUS': spec_layer_params.softplus.MergeFromString(b'') elif non_linearity == 'SOFTSIGN': spec_layer_params.softsign.MergeFromString(b'') elif non_linearity == 'SIGMOID_HARD': if params is None: alpha, beta = (0.2, 0.5) else: alpha, beta = params[0], params[1] spec_layer_params.sigmoidHard.alpha = alpha spec_layer_params.sigmoidHard.beta = beta elif non_linearity == 'LEAKYRELU': if params is None: alpha = 0.3 else: alpha = params[0] spec_layer_params.leakyReLU.alpha = float(alpha) elif non_linearity == 'PRELU': # PReLU must provide an np array in params[0] spec_layer_params.PReLU.alpha.floatValue.extend(map(float, 
params.flatten())) elif non_linearity == 'ELU': # ELU must provide an alpha in params[0] spec_layer_params.ELU.alpha = float(params) elif non_linearity == 'PARAMETRICSOFTPLUS': # Parametric softplus must provide two np arrays for alpha and beta alphas, betas = (params[0], params[1]) # Weight alignment: Keras [H,W,C,F], Espresso [ spec_layer_params.parametricSoftplus.alpha.floatValue.extend(map(float, alphas.flatten())) spec_layer_params.parametricSoftplus.beta.floatValue.extend(map(float, betas.flatten())) elif non_linearity == 'THRESHOLDEDRELU': if params is None: theta = 1.0 else: theta = params spec_layer_params.thresholdedReLU.alpha = float(theta) elif non_linearity == 'LINEAR': if params is None: alpha, beta = (1.0, 0.0) else: alpha, beta = params[0], params[1] spec_layer_params.linear.alpha = alpha spec_layer_params.linear.beta = beta else: raise TypeError("Unknown activation type %s." %(non_linearity))
[ "def", "add_activation", "(", "self", ",", "name", ",", "non_linearity", ",", "input_name", ",", "output_name", ",", "params", "=", "None", ")", ":", "spec", "=", "self", ".", "spec", "nn_spec", "=", "self", ".", "nn_spec", "# Add a new layer", "spec_layer", "=", "nn_spec", ".", "layers", ".", "add", "(", ")", "spec_layer", ".", "name", "=", "name", "spec_layer", ".", "input", ".", "append", "(", "input_name", ")", "spec_layer", ".", "output", ".", "append", "(", "output_name", ")", "spec_layer_params", "=", "spec_layer", ".", "activation", "# Fill in the parameters", "if", "non_linearity", "==", "'RELU'", ":", "spec_layer_params", ".", "ReLU", ".", "MergeFromString", "(", "b''", ")", "elif", "non_linearity", "==", "'SIGMOID'", ":", "spec_layer_params", ".", "sigmoid", ".", "MergeFromString", "(", "b''", ")", "elif", "non_linearity", "==", "'TANH'", ":", "spec_layer_params", ".", "tanh", ".", "MergeFromString", "(", "b''", ")", "elif", "non_linearity", "==", "'SCALED_TANH'", ":", "spec_layer_params", ".", "scaledTanh", ".", "MergeFromString", "(", "b''", ")", "if", "params", "is", "None", ":", "alpha", ",", "beta", "=", "(", "0.0", ",", "0.0", ")", "else", ":", "alpha", ",", "beta", "=", "params", "[", "0", "]", ",", "params", "[", "1", "]", "spec_layer_params", ".", "scaledTanh", ".", "alpha", "=", "alpha", "spec_layer_params", ".", "scaledTanh", ".", "beta", "=", "beta", "elif", "non_linearity", "==", "'SOFTPLUS'", ":", "spec_layer_params", ".", "softplus", ".", "MergeFromString", "(", "b''", ")", "elif", "non_linearity", "==", "'SOFTSIGN'", ":", "spec_layer_params", ".", "softsign", ".", "MergeFromString", "(", "b''", ")", "elif", "non_linearity", "==", "'SIGMOID_HARD'", ":", "if", "params", "is", "None", ":", "alpha", ",", "beta", "=", "(", "0.2", ",", "0.5", ")", "else", ":", "alpha", ",", "beta", "=", "params", "[", "0", "]", ",", "params", "[", "1", "]", "spec_layer_params", ".", "sigmoidHard", ".", "alpha", "=", "alpha", "spec_layer_params", ".", 
"sigmoidHard", ".", "beta", "=", "beta", "elif", "non_linearity", "==", "'LEAKYRELU'", ":", "if", "params", "is", "None", ":", "alpha", "=", "0.3", "else", ":", "alpha", "=", "params", "[", "0", "]", "spec_layer_params", ".", "leakyReLU", ".", "alpha", "=", "float", "(", "alpha", ")", "elif", "non_linearity", "==", "'PRELU'", ":", "# PReLU must provide an np array in params[0]", "spec_layer_params", ".", "PReLU", ".", "alpha", ".", "floatValue", ".", "extend", "(", "map", "(", "float", ",", "params", ".", "flatten", "(", ")", ")", ")", "elif", "non_linearity", "==", "'ELU'", ":", "# ELU must provide an alpha in params[0]", "spec_layer_params", ".", "ELU", ".", "alpha", "=", "float", "(", "params", ")", "elif", "non_linearity", "==", "'PARAMETRICSOFTPLUS'", ":", "# Parametric softplus must provide two np arrays for alpha and beta", "alphas", ",", "betas", "=", "(", "params", "[", "0", "]", ",", "params", "[", "1", "]", ")", "# Weight alignment: Keras [H,W,C,F], Espresso [", "spec_layer_params", ".", "parametricSoftplus", ".", "alpha", ".", "floatValue", ".", "extend", "(", "map", "(", "float", ",", "alphas", ".", "flatten", "(", ")", ")", ")", "spec_layer_params", ".", "parametricSoftplus", ".", "beta", ".", "floatValue", ".", "extend", "(", "map", "(", "float", ",", "betas", ".", "flatten", "(", ")", ")", ")", "elif", "non_linearity", "==", "'THRESHOLDEDRELU'", ":", "if", "params", "is", "None", ":", "theta", "=", "1.0", "else", ":", "theta", "=", "params", "spec_layer_params", ".", "thresholdedReLU", ".", "alpha", "=", "float", "(", "theta", ")", "elif", "non_linearity", "==", "'LINEAR'", ":", "if", "params", "is", "None", ":", "alpha", ",", "beta", "=", "(", "1.0", ",", "0.0", ")", "else", ":", "alpha", ",", "beta", "=", "params", "[", "0", "]", ",", "params", "[", "1", "]", "spec_layer_params", ".", "linear", ".", "alpha", "=", "alpha", "spec_layer_params", ".", "linear", ".", "beta", "=", "beta", "else", ":", "raise", "TypeError", "(", "\"Unknown activation 
type %s.\"", "%", "(", "non_linearity", ")", ")" ]
Add an activation layer to the model. Parameters ---------- name: str The name of this layer non_linearity: str The non_linearity (activation) function of this layer. It can be one of the following: - 'RELU': Rectified Linear Unit (ReLU) function. - 'SIGMOID': sigmoid function. - 'TANH': tanh function. - 'SCALED_TANH': scaled tanh function, defined as: `f(x) = alpha * tanh(beta * x)` where alpha and beta are constant scalars. - 'SOFTPLUS': softplus function. - 'SOFTSIGN': softsign function. - 'SIGMOID_HARD': hard sigmoid function, defined as: `f(x) = min(max(alpha * x + beta, -1), 1)` where alpha and beta are constant scalars. - 'LEAKYRELU': leaky relu function, defined as: `f(x) = (x >= 0) * x + (x < 0) * alpha * x` where alpha is a constant scalar. - 'PRELU': Parametric ReLU function, defined as: `f(x) = (x >= 0) * x + (x < 0) * alpha * x` where alpha is a multi-dimensional array of same size as x. - 'ELU': Exponential linear unit function, defined as: `f(x) = (x >= 0) * x + (x < 0) * (alpha * exp(x) - 1)` where alpha is a constant scalar. - 'PARAMETRICSOFTPLUS': Parametric softplus function, defined as: `f(x) = alpha * log(1 + exp(beta * x))` where alpha and beta are two multi-dimensional arrays of same size as x. - 'THRESHOLDEDRELU': Thresholded ReLU function, defined as: `f(x) = (x >= alpha) * x` where alpha is a constant scalar. - 'LINEAR': linear function. `f(x) = alpha * x + beta` input_name: str The input blob name of this layer. output_name: str The output blob name of this layer. params: [float] | [numpy.array] Parameters for the activation, depending on non_linearity. Kindly refer to NeuralNetwork.proto for details. - When non_linearity is one of ['RELU', 'SIGMOID', 'TANH', 'SCALED_TANH', 'SOFTPLUS', 'SOFTSIGN'], params is ignored. - When non_linearity is one of ['SCALED_TANH', 'SIGMOID_HARD', 'LINEAR'], param is a list of 2 floats [alpha, beta]. - When non_linearity is one of ['LEAKYRELU', 'ELU', 'THRESHOLDEDRELU'], param is a list of 1 float [alpha]. 
- When non_linearity is 'PRELU', param is a list of 1 numpy array [alpha]. The shape of alpha is (C,), where C is either the number of input channels or 1. When C = 1, same alpha is applied to all channels. - When non_linearity is 'PARAMETRICSOFTPLUS', param is a list of 2 numpy arrays [alpha, beta]. The shape of alpha and beta is (C, ), where C is either the number of input channels or 1. When C = 1, same alpha and beta are applied to all channels. See Also -------- add_convolution, add_softmax
[ "Add", "an", "activation", "layer", "to", "the", "model", "." ]
74514c3f99e25b46f22c6e02977fe3da69221c2e
https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/external/coremltools_wrap/coremltools/coremltools/models/neural_network.py#L484-L648
28,788
apple/turicreate
src/external/coremltools_wrap/coremltools/coremltools/models/neural_network.py
NeuralNetworkBuilder.add_elementwise
def add_elementwise(self, name, input_names, output_name, mode, alpha = None): """ Add an element-wise operation layer to the model. Parameters ---------- The name of this layer name: str input_names: [str] A list of input blob names of this layer. The input blobs should have the same shape. output_name: str The output blob name of this layer. mode: str A string specifying the mode of the elementwise layer. It can be one of the following: - 'CONCAT': concatenate input blobs along the channel axis. - 'SEQUENCE_CONCAT': concatenate input blobs along the sequence axis. - 'ADD': perform an element-wise summation over the input blobs. - 'MULTIPLY': perform an element-wise multiplication over the input blobs. - 'DOT': compute the dot product of the two input blobs. In this mode, the length of input_names should be 2. - 'COS': compute the cosine similarity of the two input blobs. In this mode, the length of input_names should be 2. - 'MAX': compute the element-wise maximum over the input blobs. - 'MIN': compute the element-wise minimum over the input blobs. - 'AVE': compute the element-wise average over the input blobs. alpha: float if mode == 'ADD' and there is only one input_name, alpha is added to the input if mode == 'MULTIPLY' and there is only one input_name, alpha is multiplied to the input See Also -------- add_upsample, add_sequence_repeat """ spec = self.spec nn_spec = self.nn_spec spec_layer = nn_spec.layers.add() spec_layer.name = name if isinstance(input_names, list): for input_name in input_names: spec_layer.input.append(input_name) else: spec_layer.input.append(input_names) spec_layer.output.append(output_name) ## Add the following layers. 
if mode == 'CONCAT': spec_layer.concat.sequenceConcat = False elif mode == 'SEQUENCE_CONCAT': spec_layer.concat.sequenceConcat = True elif mode == 'ADD': spec_layer.add.MergeFromString(b'') if alpha: spec_layer.add.alpha = alpha elif mode == 'MULTIPLY': spec_layer.multiply.MergeFromString(b'') if alpha: spec_layer.multiply.alpha = alpha elif mode == 'COS': spec_layer.dot.cosineSimilarity = True elif mode == 'DOT': spec_layer.dot.cosineSimilarity = False elif mode == 'MAX': spec_layer.max.MergeFromString(b'') elif mode == 'MIN': spec_layer.min.MergeFromString(b'') elif mode == 'AVE': spec_layer.average.MergeFromString(b'') else: raise ValueError("Unsupported elementwise mode %s" % mode)
python
def add_elementwise(self, name, input_names, output_name, mode, alpha = None): """ Add an element-wise operation layer to the model. Parameters ---------- The name of this layer name: str input_names: [str] A list of input blob names of this layer. The input blobs should have the same shape. output_name: str The output blob name of this layer. mode: str A string specifying the mode of the elementwise layer. It can be one of the following: - 'CONCAT': concatenate input blobs along the channel axis. - 'SEQUENCE_CONCAT': concatenate input blobs along the sequence axis. - 'ADD': perform an element-wise summation over the input blobs. - 'MULTIPLY': perform an element-wise multiplication over the input blobs. - 'DOT': compute the dot product of the two input blobs. In this mode, the length of input_names should be 2. - 'COS': compute the cosine similarity of the two input blobs. In this mode, the length of input_names should be 2. - 'MAX': compute the element-wise maximum over the input blobs. - 'MIN': compute the element-wise minimum over the input blobs. - 'AVE': compute the element-wise average over the input blobs. alpha: float if mode == 'ADD' and there is only one input_name, alpha is added to the input if mode == 'MULTIPLY' and there is only one input_name, alpha is multiplied to the input See Also -------- add_upsample, add_sequence_repeat """ spec = self.spec nn_spec = self.nn_spec spec_layer = nn_spec.layers.add() spec_layer.name = name if isinstance(input_names, list): for input_name in input_names: spec_layer.input.append(input_name) else: spec_layer.input.append(input_names) spec_layer.output.append(output_name) ## Add the following layers. 
if mode == 'CONCAT': spec_layer.concat.sequenceConcat = False elif mode == 'SEQUENCE_CONCAT': spec_layer.concat.sequenceConcat = True elif mode == 'ADD': spec_layer.add.MergeFromString(b'') if alpha: spec_layer.add.alpha = alpha elif mode == 'MULTIPLY': spec_layer.multiply.MergeFromString(b'') if alpha: spec_layer.multiply.alpha = alpha elif mode == 'COS': spec_layer.dot.cosineSimilarity = True elif mode == 'DOT': spec_layer.dot.cosineSimilarity = False elif mode == 'MAX': spec_layer.max.MergeFromString(b'') elif mode == 'MIN': spec_layer.min.MergeFromString(b'') elif mode == 'AVE': spec_layer.average.MergeFromString(b'') else: raise ValueError("Unsupported elementwise mode %s" % mode)
[ "def", "add_elementwise", "(", "self", ",", "name", ",", "input_names", ",", "output_name", ",", "mode", ",", "alpha", "=", "None", ")", ":", "spec", "=", "self", ".", "spec", "nn_spec", "=", "self", ".", "nn_spec", "spec_layer", "=", "nn_spec", ".", "layers", ".", "add", "(", ")", "spec_layer", ".", "name", "=", "name", "if", "isinstance", "(", "input_names", ",", "list", ")", ":", "for", "input_name", "in", "input_names", ":", "spec_layer", ".", "input", ".", "append", "(", "input_name", ")", "else", ":", "spec_layer", ".", "input", ".", "append", "(", "input_names", ")", "spec_layer", ".", "output", ".", "append", "(", "output_name", ")", "## Add the following layers.", "if", "mode", "==", "'CONCAT'", ":", "spec_layer", ".", "concat", ".", "sequenceConcat", "=", "False", "elif", "mode", "==", "'SEQUENCE_CONCAT'", ":", "spec_layer", ".", "concat", ".", "sequenceConcat", "=", "True", "elif", "mode", "==", "'ADD'", ":", "spec_layer", ".", "add", ".", "MergeFromString", "(", "b''", ")", "if", "alpha", ":", "spec_layer", ".", "add", ".", "alpha", "=", "alpha", "elif", "mode", "==", "'MULTIPLY'", ":", "spec_layer", ".", "multiply", ".", "MergeFromString", "(", "b''", ")", "if", "alpha", ":", "spec_layer", ".", "multiply", ".", "alpha", "=", "alpha", "elif", "mode", "==", "'COS'", ":", "spec_layer", ".", "dot", ".", "cosineSimilarity", "=", "True", "elif", "mode", "==", "'DOT'", ":", "spec_layer", ".", "dot", ".", "cosineSimilarity", "=", "False", "elif", "mode", "==", "'MAX'", ":", "spec_layer", ".", "max", ".", "MergeFromString", "(", "b''", ")", "elif", "mode", "==", "'MIN'", ":", "spec_layer", ".", "min", ".", "MergeFromString", "(", "b''", ")", "elif", "mode", "==", "'AVE'", ":", "spec_layer", ".", "average", ".", "MergeFromString", "(", "b''", ")", "else", ":", "raise", "ValueError", "(", "\"Unsupported elementwise mode %s\"", "%", "mode", ")" ]
Add an element-wise operation layer to the model. Parameters ---------- The name of this layer name: str input_names: [str] A list of input blob names of this layer. The input blobs should have the same shape. output_name: str The output blob name of this layer. mode: str A string specifying the mode of the elementwise layer. It can be one of the following: - 'CONCAT': concatenate input blobs along the channel axis. - 'SEQUENCE_CONCAT': concatenate input blobs along the sequence axis. - 'ADD': perform an element-wise summation over the input blobs. - 'MULTIPLY': perform an element-wise multiplication over the input blobs. - 'DOT': compute the dot product of the two input blobs. In this mode, the length of input_names should be 2. - 'COS': compute the cosine similarity of the two input blobs. In this mode, the length of input_names should be 2. - 'MAX': compute the element-wise maximum over the input blobs. - 'MIN': compute the element-wise minimum over the input blobs. - 'AVE': compute the element-wise average over the input blobs. alpha: float if mode == 'ADD' and there is only one input_name, alpha is added to the input if mode == 'MULTIPLY' and there is only one input_name, alpha is multiplied to the input See Also -------- add_upsample, add_sequence_repeat
[ "Add", "an", "element", "-", "wise", "operation", "layer", "to", "the", "model", "." ]
74514c3f99e25b46f22c6e02977fe3da69221c2e
https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/external/coremltools_wrap/coremltools/coremltools/models/neural_network.py#L650-L721
28,789
apple/turicreate
src/external/coremltools_wrap/coremltools/coremltools/models/neural_network.py
NeuralNetworkBuilder.add_upsample
def add_upsample(self, name, scaling_factor_h, scaling_factor_w, input_name, output_name, mode = 'NN'): """ Add upsample layer to the model. Parameters ---------- name: str The name of this layer. scaling_factor_h: int Scaling factor on the vertical direction. scaling_factor_w: int Scaling factor on the horizontal direction. input_name: str The input blob name of this layer. output_name: str The output blob name of this layer. mode: str Following values are supported: 'NN': nearest neighbour 'BILINEAR' : bilinear interpolation See Also -------- add_sequence_repeat, add_elementwise """ spec = self.spec nn_spec = self.nn_spec # Add a new inner-product layer spec_layer = nn_spec.layers.add() spec_layer.name = name spec_layer.input.append(input_name) spec_layer.output.append(output_name) spec_layer_params = spec_layer.upsample spec_layer_params.scalingFactor.append(scaling_factor_h) spec_layer_params.scalingFactor.append(scaling_factor_w) if mode == 'NN': spec_layer_params.mode = _NeuralNetwork_pb2.UpsampleLayerParams.InterpolationMode.Value('NN') elif mode == 'BILINEAR': spec_layer_params.mode = _NeuralNetwork_pb2.UpsampleLayerParams.InterpolationMode.Value('BILINEAR') else: raise ValueError("Unsupported upsampling mode %s" % mode)
python
def add_upsample(self, name, scaling_factor_h, scaling_factor_w, input_name, output_name, mode = 'NN'): """ Add upsample layer to the model. Parameters ---------- name: str The name of this layer. scaling_factor_h: int Scaling factor on the vertical direction. scaling_factor_w: int Scaling factor on the horizontal direction. input_name: str The input blob name of this layer. output_name: str The output blob name of this layer. mode: str Following values are supported: 'NN': nearest neighbour 'BILINEAR' : bilinear interpolation See Also -------- add_sequence_repeat, add_elementwise """ spec = self.spec nn_spec = self.nn_spec # Add a new inner-product layer spec_layer = nn_spec.layers.add() spec_layer.name = name spec_layer.input.append(input_name) spec_layer.output.append(output_name) spec_layer_params = spec_layer.upsample spec_layer_params.scalingFactor.append(scaling_factor_h) spec_layer_params.scalingFactor.append(scaling_factor_w) if mode == 'NN': spec_layer_params.mode = _NeuralNetwork_pb2.UpsampleLayerParams.InterpolationMode.Value('NN') elif mode == 'BILINEAR': spec_layer_params.mode = _NeuralNetwork_pb2.UpsampleLayerParams.InterpolationMode.Value('BILINEAR') else: raise ValueError("Unsupported upsampling mode %s" % mode)
[ "def", "add_upsample", "(", "self", ",", "name", ",", "scaling_factor_h", ",", "scaling_factor_w", ",", "input_name", ",", "output_name", ",", "mode", "=", "'NN'", ")", ":", "spec", "=", "self", ".", "spec", "nn_spec", "=", "self", ".", "nn_spec", "# Add a new inner-product layer", "spec_layer", "=", "nn_spec", ".", "layers", ".", "add", "(", ")", "spec_layer", ".", "name", "=", "name", "spec_layer", ".", "input", ".", "append", "(", "input_name", ")", "spec_layer", ".", "output", ".", "append", "(", "output_name", ")", "spec_layer_params", "=", "spec_layer", ".", "upsample", "spec_layer_params", ".", "scalingFactor", ".", "append", "(", "scaling_factor_h", ")", "spec_layer_params", ".", "scalingFactor", ".", "append", "(", "scaling_factor_w", ")", "if", "mode", "==", "'NN'", ":", "spec_layer_params", ".", "mode", "=", "_NeuralNetwork_pb2", ".", "UpsampleLayerParams", ".", "InterpolationMode", ".", "Value", "(", "'NN'", ")", "elif", "mode", "==", "'BILINEAR'", ":", "spec_layer_params", ".", "mode", "=", "_NeuralNetwork_pb2", ".", "UpsampleLayerParams", ".", "InterpolationMode", ".", "Value", "(", "'BILINEAR'", ")", "else", ":", "raise", "ValueError", "(", "\"Unsupported upsampling mode %s\"", "%", "mode", ")" ]
Add upsample layer to the model. Parameters ---------- name: str The name of this layer. scaling_factor_h: int Scaling factor on the vertical direction. scaling_factor_w: int Scaling factor on the horizontal direction. input_name: str The input blob name of this layer. output_name: str The output blob name of this layer. mode: str Following values are supported: 'NN': nearest neighbour 'BILINEAR' : bilinear interpolation See Also -------- add_sequence_repeat, add_elementwise
[ "Add", "upsample", "layer", "to", "the", "model", "." ]
74514c3f99e25b46f22c6e02977fe3da69221c2e
https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/external/coremltools_wrap/coremltools/coremltools/models/neural_network.py#L723-L764
28,790
apple/turicreate
src/external/coremltools_wrap/coremltools/coremltools/models/neural_network.py
NeuralNetworkBuilder.add_scale
def add_scale(self, name, W, b, has_bias, input_name, output_name, shape_scale = [1], shape_bias = [1]): """ Add scale layer to the model. Parameters ---------- name: str The name of this layer. W: int | numpy.array Scale of the input. b: int | numpy.array Bias to add to the input. has_bias: boolean Whether the bias vector of this layer is ignored in the spec. input_name: str The input blob name of this layer. output_name: str The output blob name of this layer. shape_scale: [int] List of ints that specifies the shape of the scale parameter. Can be [1] or [C] or [1,H,W] or [C,H,W]. shape_bias: [int] List of ints that specifies the shape of the bias parameter (if present). Can be [1] or [C] or [1,H,W] or [C,H,W]. See Also -------- add_bias """ spec = self.spec nn_spec = self.nn_spec spec_layer = nn_spec.layers.add() spec_layer.name = name spec_layer.input.append(input_name) spec_layer.output.append(output_name) spec_layer_params = spec_layer.scale spec_layer_params.hasBias = has_bias #add scale and its shape scale = spec_layer_params.scale spec_layer_params.shapeScale.extend(shape_scale) if isinstance(W, int): scale.floatValue.append(float(W)) else: scale.floatValue.extend(map(float, W.flatten())) if len(scale.floatValue) != np.prod(shape_scale): raise ValueError("Dimensions of 'shape_scale' do not match the size of the provided 'scale' parameter") #add bias and its shape if has_bias: bias = spec_layer_params.bias spec_layer_params.shapeBias.extend(shape_bias) if isinstance(b, int): bias.floatValue.append(float(b)) else: bias.floatValue.extend(map(float, b.flatten())) if len(bias.floatValue) != np.prod(shape_bias): raise ValueError("Dimensions of 'shape_bias' do not match the size of the provided 'b' parameter")
python
def add_scale(self, name, W, b, has_bias, input_name, output_name, shape_scale = [1], shape_bias = [1]): """ Add scale layer to the model. Parameters ---------- name: str The name of this layer. W: int | numpy.array Scale of the input. b: int | numpy.array Bias to add to the input. has_bias: boolean Whether the bias vector of this layer is ignored in the spec. input_name: str The input blob name of this layer. output_name: str The output blob name of this layer. shape_scale: [int] List of ints that specifies the shape of the scale parameter. Can be [1] or [C] or [1,H,W] or [C,H,W]. shape_bias: [int] List of ints that specifies the shape of the bias parameter (if present). Can be [1] or [C] or [1,H,W] or [C,H,W]. See Also -------- add_bias """ spec = self.spec nn_spec = self.nn_spec spec_layer = nn_spec.layers.add() spec_layer.name = name spec_layer.input.append(input_name) spec_layer.output.append(output_name) spec_layer_params = spec_layer.scale spec_layer_params.hasBias = has_bias #add scale and its shape scale = spec_layer_params.scale spec_layer_params.shapeScale.extend(shape_scale) if isinstance(W, int): scale.floatValue.append(float(W)) else: scale.floatValue.extend(map(float, W.flatten())) if len(scale.floatValue) != np.prod(shape_scale): raise ValueError("Dimensions of 'shape_scale' do not match the size of the provided 'scale' parameter") #add bias and its shape if has_bias: bias = spec_layer_params.bias spec_layer_params.shapeBias.extend(shape_bias) if isinstance(b, int): bias.floatValue.append(float(b)) else: bias.floatValue.extend(map(float, b.flatten())) if len(bias.floatValue) != np.prod(shape_bias): raise ValueError("Dimensions of 'shape_bias' do not match the size of the provided 'b' parameter")
[ "def", "add_scale", "(", "self", ",", "name", ",", "W", ",", "b", ",", "has_bias", ",", "input_name", ",", "output_name", ",", "shape_scale", "=", "[", "1", "]", ",", "shape_bias", "=", "[", "1", "]", ")", ":", "spec", "=", "self", ".", "spec", "nn_spec", "=", "self", ".", "nn_spec", "spec_layer", "=", "nn_spec", ".", "layers", ".", "add", "(", ")", "spec_layer", ".", "name", "=", "name", "spec_layer", ".", "input", ".", "append", "(", "input_name", ")", "spec_layer", ".", "output", ".", "append", "(", "output_name", ")", "spec_layer_params", "=", "spec_layer", ".", "scale", "spec_layer_params", ".", "hasBias", "=", "has_bias", "#add scale and its shape", "scale", "=", "spec_layer_params", ".", "scale", "spec_layer_params", ".", "shapeScale", ".", "extend", "(", "shape_scale", ")", "if", "isinstance", "(", "W", ",", "int", ")", ":", "scale", ".", "floatValue", ".", "append", "(", "float", "(", "W", ")", ")", "else", ":", "scale", ".", "floatValue", ".", "extend", "(", "map", "(", "float", ",", "W", ".", "flatten", "(", ")", ")", ")", "if", "len", "(", "scale", ".", "floatValue", ")", "!=", "np", ".", "prod", "(", "shape_scale", ")", ":", "raise", "ValueError", "(", "\"Dimensions of 'shape_scale' do not match the size of the provided 'scale' parameter\"", ")", "#add bias and its shape", "if", "has_bias", ":", "bias", "=", "spec_layer_params", ".", "bias", "spec_layer_params", ".", "shapeBias", ".", "extend", "(", "shape_bias", ")", "if", "isinstance", "(", "b", ",", "int", ")", ":", "bias", ".", "floatValue", ".", "append", "(", "float", "(", "b", ")", ")", "else", ":", "bias", ".", "floatValue", ".", "extend", "(", "map", "(", "float", ",", "b", ".", "flatten", "(", ")", ")", ")", "if", "len", "(", "bias", ".", "floatValue", ")", "!=", "np", ".", "prod", "(", "shape_bias", ")", ":", "raise", "ValueError", "(", "\"Dimensions of 'shape_bias' do not match the size of the provided 'b' parameter\"", ")" ]
Add scale layer to the model. Parameters ---------- name: str The name of this layer. W: int | numpy.array Scale of the input. b: int | numpy.array Bias to add to the input. has_bias: boolean Whether the bias vector of this layer is ignored in the spec. input_name: str The input blob name of this layer. output_name: str The output blob name of this layer. shape_scale: [int] List of ints that specifies the shape of the scale parameter. Can be [1] or [C] or [1,H,W] or [C,H,W]. shape_bias: [int] List of ints that specifies the shape of the bias parameter (if present). Can be [1] or [C] or [1,H,W] or [C,H,W]. See Also -------- add_bias
[ "Add", "scale", "layer", "to", "the", "model", "." ]
74514c3f99e25b46f22c6e02977fe3da69221c2e
https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/external/coremltools_wrap/coremltools/coremltools/models/neural_network.py#L766-L824
28,791
apple/turicreate
src/external/coremltools_wrap/coremltools/coremltools/models/neural_network.py
NeuralNetworkBuilder.add_bias
def add_bias(self, name, b, input_name, output_name, shape_bias = [1]): """ Add bias layer to the model. Parameters ---------- name: str The name of this layer. b: int | numpy.array Bias to add to the input. input_name: str The input blob name of this layer. output_name: str The output blob name of this layer. shape_bias: [int] List of ints that specifies the shape of the bias parameter (if present). Can be [1] or [C] or [1,H,W] or [C,H,W]. See Also -------- add_scale """ spec = self.spec nn_spec = self.nn_spec spec_layer = nn_spec.layers.add() spec_layer.name = name spec_layer.input.append(input_name) spec_layer.output.append(output_name) spec_layer_params = spec_layer.bias #add bias and its shape bias = spec_layer_params.bias spec_layer_params.shape.extend(shape_bias) if isinstance(b, int): bias.floatValue.append(float(b)) else: bias.floatValue.extend(map(float, b.flatten())) if len(bias.floatValue) != np.prod(shape_bias): raise ValueError("Dimensions of 'shape_bias' do not match the size of the provided 'b' parameter")
python
def add_bias(self, name, b, input_name, output_name, shape_bias = [1]): """ Add bias layer to the model. Parameters ---------- name: str The name of this layer. b: int | numpy.array Bias to add to the input. input_name: str The input blob name of this layer. output_name: str The output blob name of this layer. shape_bias: [int] List of ints that specifies the shape of the bias parameter (if present). Can be [1] or [C] or [1,H,W] or [C,H,W]. See Also -------- add_scale """ spec = self.spec nn_spec = self.nn_spec spec_layer = nn_spec.layers.add() spec_layer.name = name spec_layer.input.append(input_name) spec_layer.output.append(output_name) spec_layer_params = spec_layer.bias #add bias and its shape bias = spec_layer_params.bias spec_layer_params.shape.extend(shape_bias) if isinstance(b, int): bias.floatValue.append(float(b)) else: bias.floatValue.extend(map(float, b.flatten())) if len(bias.floatValue) != np.prod(shape_bias): raise ValueError("Dimensions of 'shape_bias' do not match the size of the provided 'b' parameter")
[ "def", "add_bias", "(", "self", ",", "name", ",", "b", ",", "input_name", ",", "output_name", ",", "shape_bias", "=", "[", "1", "]", ")", ":", "spec", "=", "self", ".", "spec", "nn_spec", "=", "self", ".", "nn_spec", "spec_layer", "=", "nn_spec", ".", "layers", ".", "add", "(", ")", "spec_layer", ".", "name", "=", "name", "spec_layer", ".", "input", ".", "append", "(", "input_name", ")", "spec_layer", ".", "output", ".", "append", "(", "output_name", ")", "spec_layer_params", "=", "spec_layer", ".", "bias", "#add bias and its shape", "bias", "=", "spec_layer_params", ".", "bias", "spec_layer_params", ".", "shape", ".", "extend", "(", "shape_bias", ")", "if", "isinstance", "(", "b", ",", "int", ")", ":", "bias", ".", "floatValue", ".", "append", "(", "float", "(", "b", ")", ")", "else", ":", "bias", ".", "floatValue", ".", "extend", "(", "map", "(", "float", ",", "b", ".", "flatten", "(", ")", ")", ")", "if", "len", "(", "bias", ".", "floatValue", ")", "!=", "np", ".", "prod", "(", "shape_bias", ")", ":", "raise", "ValueError", "(", "\"Dimensions of 'shape_bias' do not match the size of the provided 'b' parameter\"", ")" ]
Add bias layer to the model. Parameters ---------- name: str The name of this layer. b: int | numpy.array Bias to add to the input. input_name: str The input blob name of this layer. output_name: str The output blob name of this layer. shape_bias: [int] List of ints that specifies the shape of the bias parameter (if present). Can be [1] or [C] or [1,H,W] or [C,H,W]. See Also -------- add_scale
[ "Add", "bias", "layer", "to", "the", "model", "." ]
74514c3f99e25b46f22c6e02977fe3da69221c2e
https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/external/coremltools_wrap/coremltools/coremltools/models/neural_network.py#L826-L864
28,792
apple/turicreate
src/external/coremltools_wrap/coremltools/coremltools/models/neural_network.py
NeuralNetworkBuilder.add_sequence_repeat
def add_sequence_repeat(self, name, nrep, input_name, output_name): """ Add sequence repeat layer to the model. Parameters ---------- name: str The name of this layer. nrep: int Number of repetitions of the input blob along the sequence axis. input_name: str The input blob name of this layer. output_name: str The output blob name of this layer. See Also -------- add_upsample, add_elementwise """ spec = self.spec nn_spec = self.nn_spec spec_layer = nn_spec.layers.add() spec_layer.name = name spec_layer.input.append(input_name) spec_layer.output.append(output_name) spec_layer_params = spec_layer.sequenceRepeat spec_layer_params.nRepetitions = nrep
python
def add_sequence_repeat(self, name, nrep, input_name, output_name): """ Add sequence repeat layer to the model. Parameters ---------- name: str The name of this layer. nrep: int Number of repetitions of the input blob along the sequence axis. input_name: str The input blob name of this layer. output_name: str The output blob name of this layer. See Also -------- add_upsample, add_elementwise """ spec = self.spec nn_spec = self.nn_spec spec_layer = nn_spec.layers.add() spec_layer.name = name spec_layer.input.append(input_name) spec_layer.output.append(output_name) spec_layer_params = spec_layer.sequenceRepeat spec_layer_params.nRepetitions = nrep
[ "def", "add_sequence_repeat", "(", "self", ",", "name", ",", "nrep", ",", "input_name", ",", "output_name", ")", ":", "spec", "=", "self", ".", "spec", "nn_spec", "=", "self", ".", "nn_spec", "spec_layer", "=", "nn_spec", ".", "layers", ".", "add", "(", ")", "spec_layer", ".", "name", "=", "name", "spec_layer", ".", "input", ".", "append", "(", "input_name", ")", "spec_layer", ".", "output", ".", "append", "(", "output_name", ")", "spec_layer_params", "=", "spec_layer", ".", "sequenceRepeat", "spec_layer_params", ".", "nRepetitions", "=", "nrep" ]
Add sequence repeat layer to the model. Parameters ---------- name: str The name of this layer. nrep: int Number of repetitions of the input blob along the sequence axis. input_name: str The input blob name of this layer. output_name: str The output blob name of this layer. See Also -------- add_upsample, add_elementwise
[ "Add", "sequence", "repeat", "layer", "to", "the", "model", "." ]
74514c3f99e25b46f22c6e02977fe3da69221c2e
https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/external/coremltools_wrap/coremltools/coremltools/models/neural_network.py#L866-L892
28,793
apple/turicreate
src/external/coremltools_wrap/coremltools/coremltools/models/neural_network.py
NeuralNetworkBuilder.add_padding
def add_padding(self, name, left = 0, right = 0, top = 0, bottom = 0, value = 0, input_name = 'data', output_name = 'out', padding_type = 'constant'): """ Add a padding layer to the model. Kindly refer to NeuralNetwork.proto for details. Parameters ---------- name: str The name of this layer. left: int Number of elements to be padded on the left side of the input blob. right: int Number of elements to be padded on the right side of the input blob. top: int Number of elements to be padded on the top of the input blob. bottom: int Number of elements to be padded on the bottom of the input blob. value: float Value of the elements padded. Used only when padding_type = 'constant' input_name: str The input blob name of this layer. output_name: str The output blob name of this layer. padding_type: str Type of the padding. Can be one of 'constant', 'reflection' or 'replication' See Also -------- add_crop, add_convolution, add_pooling """ # Currently only constant padding is supported. spec = self.spec nn_spec = self.nn_spec # Add a new layer spec_layer = nn_spec.layers.add() spec_layer.name = name spec_layer.input.append(input_name) spec_layer.output.append(output_name) spec_layer_params = spec_layer.padding # Set the parameters if padding_type == 'constant': spec_layer_params.constant.value = value elif padding_type == 'reflection': spec_layer_params.reflection.MergeFromString(b'') elif padding_type == 'replication': spec_layer_params.replication.MergeFromString(b'') else: raise ValueError("Unknown padding_type %s" %(padding_type)) height_border = spec_layer_params.paddingAmounts.borderAmounts.add() height_border.startEdgeSize = top height_border.endEdgeSize = bottom width_border = spec_layer_params.paddingAmounts.borderAmounts.add() width_border.startEdgeSize = left width_border.endEdgeSize = right
python
def add_padding(self, name, left = 0, right = 0, top = 0, bottom = 0, value = 0, input_name = 'data', output_name = 'out', padding_type = 'constant'): """ Add a padding layer to the model. Kindly refer to NeuralNetwork.proto for details. Parameters ---------- name: str The name of this layer. left: int Number of elements to be padded on the left side of the input blob. right: int Number of elements to be padded on the right side of the input blob. top: int Number of elements to be padded on the top of the input blob. bottom: int Number of elements to be padded on the bottom of the input blob. value: float Value of the elements padded. Used only when padding_type = 'constant' input_name: str The input blob name of this layer. output_name: str The output blob name of this layer. padding_type: str Type of the padding. Can be one of 'constant', 'reflection' or 'replication' See Also -------- add_crop, add_convolution, add_pooling """ # Currently only constant padding is supported. spec = self.spec nn_spec = self.nn_spec # Add a new layer spec_layer = nn_spec.layers.add() spec_layer.name = name spec_layer.input.append(input_name) spec_layer.output.append(output_name) spec_layer_params = spec_layer.padding # Set the parameters if padding_type == 'constant': spec_layer_params.constant.value = value elif padding_type == 'reflection': spec_layer_params.reflection.MergeFromString(b'') elif padding_type == 'replication': spec_layer_params.replication.MergeFromString(b'') else: raise ValueError("Unknown padding_type %s" %(padding_type)) height_border = spec_layer_params.paddingAmounts.borderAmounts.add() height_border.startEdgeSize = top height_border.endEdgeSize = bottom width_border = spec_layer_params.paddingAmounts.borderAmounts.add() width_border.startEdgeSize = left width_border.endEdgeSize = right
[ "def", "add_padding", "(", "self", ",", "name", ",", "left", "=", "0", ",", "right", "=", "0", ",", "top", "=", "0", ",", "bottom", "=", "0", ",", "value", "=", "0", ",", "input_name", "=", "'data'", ",", "output_name", "=", "'out'", ",", "padding_type", "=", "'constant'", ")", ":", "# Currently only constant padding is supported.", "spec", "=", "self", ".", "spec", "nn_spec", "=", "self", ".", "nn_spec", "# Add a new layer", "spec_layer", "=", "nn_spec", ".", "layers", ".", "add", "(", ")", "spec_layer", ".", "name", "=", "name", "spec_layer", ".", "input", ".", "append", "(", "input_name", ")", "spec_layer", ".", "output", ".", "append", "(", "output_name", ")", "spec_layer_params", "=", "spec_layer", ".", "padding", "# Set the parameters", "if", "padding_type", "==", "'constant'", ":", "spec_layer_params", ".", "constant", ".", "value", "=", "value", "elif", "padding_type", "==", "'reflection'", ":", "spec_layer_params", ".", "reflection", ".", "MergeFromString", "(", "b''", ")", "elif", "padding_type", "==", "'replication'", ":", "spec_layer_params", ".", "replication", ".", "MergeFromString", "(", "b''", ")", "else", ":", "raise", "ValueError", "(", "\"Unknown padding_type %s\"", "%", "(", "padding_type", ")", ")", "height_border", "=", "spec_layer_params", ".", "paddingAmounts", ".", "borderAmounts", ".", "add", "(", ")", "height_border", ".", "startEdgeSize", "=", "top", "height_border", ".", "endEdgeSize", "=", "bottom", "width_border", "=", "spec_layer_params", ".", "paddingAmounts", ".", "borderAmounts", ".", "add", "(", ")", "width_border", ".", "startEdgeSize", "=", "left", "width_border", ".", "endEdgeSize", "=", "right" ]
Add a padding layer to the model. Kindly refer to NeuralNetwork.proto for details. Parameters ---------- name: str The name of this layer. left: int Number of elements to be padded on the left side of the input blob. right: int Number of elements to be padded on the right side of the input blob. top: int Number of elements to be padded on the top of the input blob. bottom: int Number of elements to be padded on the bottom of the input blob. value: float Value of the elements padded. Used only when padding_type = 'constant' input_name: str The input blob name of this layer. output_name: str The output blob name of this layer. padding_type: str Type of the padding. Can be one of 'constant', 'reflection' or 'replication' See Also -------- add_crop, add_convolution, add_pooling
[ "Add", "a", "padding", "layer", "to", "the", "model", ".", "Kindly", "refer", "to", "NeuralNetwork", ".", "proto", "for", "details", "." ]
74514c3f99e25b46f22c6e02977fe3da69221c2e
https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/external/coremltools_wrap/coremltools/coremltools/models/neural_network.py#L1148-L1208
28,794
apple/turicreate
src/external/coremltools_wrap/coremltools/coremltools/models/neural_network.py
NeuralNetworkBuilder.add_simple_rnn
def add_simple_rnn(self,name, W_h, W_x, b, hidden_size, input_size, activation, input_names, output_names, output_all = False, reverse_input = False): """ Add a simple recurrent layer to the model. Parameters ---------- name: str The name of this layer. W_h: numpy.array Weights of the recurrent layer's hidden state. Must be of shape (hidden_size, hidden_size). W_x: numpy.array Weights of the recurrent layer's input. Must be of shape (hidden_size, input_size). b: numpy.array | None Bias of the recurrent layer's output. If None, bias is ignored. Otherwise it must be of shape (hidden_size, ). hidden_size: int Number of hidden units. This is equal to the number of channels of output shape. input_size: int Number of the number of channels of input shape. activation: str Activation function name. Can be one of the following option: ['RELU', 'TANH', 'SIGMOID', 'SCALED_TANH', 'SIGMOID_HARD', 'LINEAR']. See add_activation for more detailed description. input_names: [str] The input blob name list of this layer, in the order of [x, h_input]. output_name: [str] The output blob name list of this layer, in the order of [y, h_output]. output_all: boolean Whether the recurrent layer should output at every time step. - If False, the output is the result after the final state update. - If True, the output is a sequence, containing outputs at all time steps. reverse_input: boolean Whether the recurrent layer should process the input sequence in the reverse order. - If False, the input sequence order is not reversed. - If True, the input sequence order is reversed. 
See Also -------- add_activation, add_gru, add_unilstm, add_bidirlstm """ spec = self.spec nn_spec = self.nn_spec # Add a new Layer spec_layer = nn_spec.layers.add() spec_layer.name = name for name in input_names: spec_layer.input.append(name) for name in output_names: spec_layer.output.append(name) spec_layer_params = spec_layer.simpleRecurrent spec_layer_params.reverseInput = reverse_input #set the parameters spec_layer_params.inputVectorSize = input_size spec_layer_params.outputVectorSize = hidden_size if b is not None: spec_layer_params.hasBiasVector = True spec_layer_params.sequenceOutput = output_all activation_f = spec_layer_params.activation _set_recurrent_activation(activation_f, activation) # Write the weights spec_layer_params.weightMatrix.floatValue.extend(map(float, W_x.flatten())) spec_layer_params.recursionMatrix.floatValue.extend(map(float, W_h.flatten())) if b is not None: spec_layer_params.biasVector.floatValue.extend(map(float, b.flatten()))
python
def add_simple_rnn(self,name, W_h, W_x, b, hidden_size, input_size, activation, input_names, output_names, output_all = False, reverse_input = False): """ Add a simple recurrent layer to the model. Parameters ---------- name: str The name of this layer. W_h: numpy.array Weights of the recurrent layer's hidden state. Must be of shape (hidden_size, hidden_size). W_x: numpy.array Weights of the recurrent layer's input. Must be of shape (hidden_size, input_size). b: numpy.array | None Bias of the recurrent layer's output. If None, bias is ignored. Otherwise it must be of shape (hidden_size, ). hidden_size: int Number of hidden units. This is equal to the number of channels of output shape. input_size: int Number of the number of channels of input shape. activation: str Activation function name. Can be one of the following option: ['RELU', 'TANH', 'SIGMOID', 'SCALED_TANH', 'SIGMOID_HARD', 'LINEAR']. See add_activation for more detailed description. input_names: [str] The input blob name list of this layer, in the order of [x, h_input]. output_name: [str] The output blob name list of this layer, in the order of [y, h_output]. output_all: boolean Whether the recurrent layer should output at every time step. - If False, the output is the result after the final state update. - If True, the output is a sequence, containing outputs at all time steps. reverse_input: boolean Whether the recurrent layer should process the input sequence in the reverse order. - If False, the input sequence order is not reversed. - If True, the input sequence order is reversed. 
See Also -------- add_activation, add_gru, add_unilstm, add_bidirlstm """ spec = self.spec nn_spec = self.nn_spec # Add a new Layer spec_layer = nn_spec.layers.add() spec_layer.name = name for name in input_names: spec_layer.input.append(name) for name in output_names: spec_layer.output.append(name) spec_layer_params = spec_layer.simpleRecurrent spec_layer_params.reverseInput = reverse_input #set the parameters spec_layer_params.inputVectorSize = input_size spec_layer_params.outputVectorSize = hidden_size if b is not None: spec_layer_params.hasBiasVector = True spec_layer_params.sequenceOutput = output_all activation_f = spec_layer_params.activation _set_recurrent_activation(activation_f, activation) # Write the weights spec_layer_params.weightMatrix.floatValue.extend(map(float, W_x.flatten())) spec_layer_params.recursionMatrix.floatValue.extend(map(float, W_h.flatten())) if b is not None: spec_layer_params.biasVector.floatValue.extend(map(float, b.flatten()))
[ "def", "add_simple_rnn", "(", "self", ",", "name", ",", "W_h", ",", "W_x", ",", "b", ",", "hidden_size", ",", "input_size", ",", "activation", ",", "input_names", ",", "output_names", ",", "output_all", "=", "False", ",", "reverse_input", "=", "False", ")", ":", "spec", "=", "self", ".", "spec", "nn_spec", "=", "self", ".", "nn_spec", "# Add a new Layer", "spec_layer", "=", "nn_spec", ".", "layers", ".", "add", "(", ")", "spec_layer", ".", "name", "=", "name", "for", "name", "in", "input_names", ":", "spec_layer", ".", "input", ".", "append", "(", "name", ")", "for", "name", "in", "output_names", ":", "spec_layer", ".", "output", ".", "append", "(", "name", ")", "spec_layer_params", "=", "spec_layer", ".", "simpleRecurrent", "spec_layer_params", ".", "reverseInput", "=", "reverse_input", "#set the parameters", "spec_layer_params", ".", "inputVectorSize", "=", "input_size", "spec_layer_params", ".", "outputVectorSize", "=", "hidden_size", "if", "b", "is", "not", "None", ":", "spec_layer_params", ".", "hasBiasVector", "=", "True", "spec_layer_params", ".", "sequenceOutput", "=", "output_all", "activation_f", "=", "spec_layer_params", ".", "activation", "_set_recurrent_activation", "(", "activation_f", ",", "activation", ")", "# Write the weights", "spec_layer_params", ".", "weightMatrix", ".", "floatValue", ".", "extend", "(", "map", "(", "float", ",", "W_x", ".", "flatten", "(", ")", ")", ")", "spec_layer_params", ".", "recursionMatrix", ".", "floatValue", ".", "extend", "(", "map", "(", "float", ",", "W_h", ".", "flatten", "(", ")", ")", ")", "if", "b", "is", "not", "None", ":", "spec_layer_params", ".", "biasVector", ".", "floatValue", ".", "extend", "(", "map", "(", "float", ",", "b", ".", "flatten", "(", ")", ")", ")" ]
Add a simple recurrent layer to the model. Parameters ---------- name: str The name of this layer. W_h: numpy.array Weights of the recurrent layer's hidden state. Must be of shape (hidden_size, hidden_size). W_x: numpy.array Weights of the recurrent layer's input. Must be of shape (hidden_size, input_size). b: numpy.array | None Bias of the recurrent layer's output. If None, bias is ignored. Otherwise it must be of shape (hidden_size, ). hidden_size: int Number of hidden units. This is equal to the number of channels of output shape. input_size: int Number of the number of channels of input shape. activation: str Activation function name. Can be one of the following option: ['RELU', 'TANH', 'SIGMOID', 'SCALED_TANH', 'SIGMOID_HARD', 'LINEAR']. See add_activation for more detailed description. input_names: [str] The input blob name list of this layer, in the order of [x, h_input]. output_name: [str] The output blob name list of this layer, in the order of [y, h_output]. output_all: boolean Whether the recurrent layer should output at every time step. - If False, the output is the result after the final state update. - If True, the output is a sequence, containing outputs at all time steps. reverse_input: boolean Whether the recurrent layer should process the input sequence in the reverse order. - If False, the input sequence order is not reversed. - If True, the input sequence order is reversed. See Also -------- add_activation, add_gru, add_unilstm, add_bidirlstm
[ "Add", "a", "simple", "recurrent", "layer", "to", "the", "model", "." ]
74514c3f99e25b46f22c6e02977fe3da69221c2e
https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/external/coremltools_wrap/coremltools/coremltools/models/neural_network.py#L1271-L1341
28,795
apple/turicreate
src/external/coremltools_wrap/coremltools/coremltools/models/neural_network.py
NeuralNetworkBuilder.add_flatten
def add_flatten(self, name, mode, input_name, output_name): """ Add a flatten layer. Only flattens the channel, height and width axis. Leaves the sequence axis as is. Parameters ---------- name: str The name of this layer. mode: int - If mode == 0, the flatten layer is in CHANNEL_FIRST mode. - If mode == 1, the flatten layer is in CHANNEL_LAST mode. input_name: str The input blob name of this layer. output_name: str The output blob name of this layer. See Also -------- add_permute, add_reshape """ spec = self.spec nn_spec = self.nn_spec # Add a new layer spec_layer = nn_spec.layers.add() spec_layer.name = name spec_layer.input.append(input_name) spec_layer.output.append(output_name) spec_layer_params = spec_layer.flatten # Set the parameters if mode == 0: spec_layer_params.mode = \ _NeuralNetwork_pb2.FlattenLayerParams.FlattenOrder.Value('CHANNEL_FIRST') elif mode == 1: spec_layer_params.mode = \ _NeuralNetwork_pb2.FlattenLayerParams.FlattenOrder.Value('CHANNEL_LAST') else: raise NotImplementedError( 'Unknown flatten mode %d ' % mode)
python
def add_flatten(self, name, mode, input_name, output_name): """ Add a flatten layer. Only flattens the channel, height and width axis. Leaves the sequence axis as is. Parameters ---------- name: str The name of this layer. mode: int - If mode == 0, the flatten layer is in CHANNEL_FIRST mode. - If mode == 1, the flatten layer is in CHANNEL_LAST mode. input_name: str The input blob name of this layer. output_name: str The output blob name of this layer. See Also -------- add_permute, add_reshape """ spec = self.spec nn_spec = self.nn_spec # Add a new layer spec_layer = nn_spec.layers.add() spec_layer.name = name spec_layer.input.append(input_name) spec_layer.output.append(output_name) spec_layer_params = spec_layer.flatten # Set the parameters if mode == 0: spec_layer_params.mode = \ _NeuralNetwork_pb2.FlattenLayerParams.FlattenOrder.Value('CHANNEL_FIRST') elif mode == 1: spec_layer_params.mode = \ _NeuralNetwork_pb2.FlattenLayerParams.FlattenOrder.Value('CHANNEL_LAST') else: raise NotImplementedError( 'Unknown flatten mode %d ' % mode)
[ "def", "add_flatten", "(", "self", ",", "name", ",", "mode", ",", "input_name", ",", "output_name", ")", ":", "spec", "=", "self", ".", "spec", "nn_spec", "=", "self", ".", "nn_spec", "# Add a new layer", "spec_layer", "=", "nn_spec", ".", "layers", ".", "add", "(", ")", "spec_layer", ".", "name", "=", "name", "spec_layer", ".", "input", ".", "append", "(", "input_name", ")", "spec_layer", ".", "output", ".", "append", "(", "output_name", ")", "spec_layer_params", "=", "spec_layer", ".", "flatten", "# Set the parameters", "if", "mode", "==", "0", ":", "spec_layer_params", ".", "mode", "=", "_NeuralNetwork_pb2", ".", "FlattenLayerParams", ".", "FlattenOrder", ".", "Value", "(", "'CHANNEL_FIRST'", ")", "elif", "mode", "==", "1", ":", "spec_layer_params", ".", "mode", "=", "_NeuralNetwork_pb2", ".", "FlattenLayerParams", ".", "FlattenOrder", ".", "Value", "(", "'CHANNEL_LAST'", ")", "else", ":", "raise", "NotImplementedError", "(", "'Unknown flatten mode %d '", "%", "mode", ")" ]
Add a flatten layer. Only flattens the channel, height and width axis. Leaves the sequence axis as is. Parameters ---------- name: str The name of this layer. mode: int - If mode == 0, the flatten layer is in CHANNEL_FIRST mode. - If mode == 1, the flatten layer is in CHANNEL_LAST mode. input_name: str The input blob name of this layer. output_name: str The output blob name of this layer. See Also -------- add_permute, add_reshape
[ "Add", "a", "flatten", "layer", ".", "Only", "flattens", "the", "channel", "height", "and", "width", "axis", ".", "Leaves", "the", "sequence", "axis", "as", "is", "." ]
74514c3f99e25b46f22c6e02977fe3da69221c2e
https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/external/coremltools_wrap/coremltools/coremltools/models/neural_network.py#L1761-L1802
28,796
apple/turicreate
src/external/coremltools_wrap/coremltools/coremltools/models/neural_network.py
NeuralNetworkBuilder.add_reorganize_data
def add_reorganize_data(self, name, input_name, output_name, mode = 'SPACE_TO_DEPTH', block_size = 2): """ Add a data reorganization layer of type "SPACE_TO_DEPTH" or "DEPTH_TO_SPACE". Parameters ---------- name: str The name of this layer. input_name: str The input blob name of this layer. output_name: str The output blob name of this layer. mode: str - If mode == 'SPACE_TO_DEPTH': data is moved from the spatial to the channel dimension. Input is spatially divided into non-overlapping blocks of size block_size X block_size and data from each block is moved to the channel dimension. Output CHW dimensions are: [C * block_size * block_size, H/block_size, C/block_size]. - If mode == 'DEPTH_TO_SPACE': data is moved from the channel to the spatial dimension. Reverse of the operation 'SPACE_TO_DEPTH'. Output CHW dimensions are: [C/(block_size * block_size), H * block_size, C * block_size]. block_size: int Must be greater than 1. Must divide H and W, when mode is 'SPACE_TO_DEPTH'. (block_size * block_size) must divide C when mode is 'DEPTH_TO_SPACE'. See Also -------- add_flatten, add_reshape """ spec = self.spec nn_spec = self.nn_spec # Add a new layer spec_layer = nn_spec.layers.add() spec_layer.name = name spec_layer.input.append(input_name) spec_layer.output.append(output_name) spec_layer_params = spec_layer.reorganizeData # Set the parameters if block_size < 2: raise ValueError("Invalid block_size value %d. Must be greater than 1." % block_size) spec_layer_params.blockSize = block_size if mode == 'SPACE_TO_DEPTH': spec_layer_params.mode = \ _NeuralNetwork_pb2.ReorganizeDataLayerParams.ReorganizationType.Value('SPACE_TO_DEPTH') elif mode == 'DEPTH_TO_SPACE': spec_layer_params.mode = \ _NeuralNetwork_pb2.ReorganizeDataLayerParams.ReorganizationType.Value('DEPTH_TO_SPACE') else: raise NotImplementedError( 'Unknown reorganization mode %s ' % mode)
python
def add_reorganize_data(self, name, input_name, output_name, mode = 'SPACE_TO_DEPTH', block_size = 2): """ Add a data reorganization layer of type "SPACE_TO_DEPTH" or "DEPTH_TO_SPACE". Parameters ---------- name: str The name of this layer. input_name: str The input blob name of this layer. output_name: str The output blob name of this layer. mode: str - If mode == 'SPACE_TO_DEPTH': data is moved from the spatial to the channel dimension. Input is spatially divided into non-overlapping blocks of size block_size X block_size and data from each block is moved to the channel dimension. Output CHW dimensions are: [C * block_size * block_size, H/block_size, C/block_size]. - If mode == 'DEPTH_TO_SPACE': data is moved from the channel to the spatial dimension. Reverse of the operation 'SPACE_TO_DEPTH'. Output CHW dimensions are: [C/(block_size * block_size), H * block_size, C * block_size]. block_size: int Must be greater than 1. Must divide H and W, when mode is 'SPACE_TO_DEPTH'. (block_size * block_size) must divide C when mode is 'DEPTH_TO_SPACE'. See Also -------- add_flatten, add_reshape """ spec = self.spec nn_spec = self.nn_spec # Add a new layer spec_layer = nn_spec.layers.add() spec_layer.name = name spec_layer.input.append(input_name) spec_layer.output.append(output_name) spec_layer_params = spec_layer.reorganizeData # Set the parameters if block_size < 2: raise ValueError("Invalid block_size value %d. Must be greater than 1." % block_size) spec_layer_params.blockSize = block_size if mode == 'SPACE_TO_DEPTH': spec_layer_params.mode = \ _NeuralNetwork_pb2.ReorganizeDataLayerParams.ReorganizationType.Value('SPACE_TO_DEPTH') elif mode == 'DEPTH_TO_SPACE': spec_layer_params.mode = \ _NeuralNetwork_pb2.ReorganizeDataLayerParams.ReorganizationType.Value('DEPTH_TO_SPACE') else: raise NotImplementedError( 'Unknown reorganization mode %s ' % mode)
[ "def", "add_reorganize_data", "(", "self", ",", "name", ",", "input_name", ",", "output_name", ",", "mode", "=", "'SPACE_TO_DEPTH'", ",", "block_size", "=", "2", ")", ":", "spec", "=", "self", ".", "spec", "nn_spec", "=", "self", ".", "nn_spec", "# Add a new layer", "spec_layer", "=", "nn_spec", ".", "layers", ".", "add", "(", ")", "spec_layer", ".", "name", "=", "name", "spec_layer", ".", "input", ".", "append", "(", "input_name", ")", "spec_layer", ".", "output", ".", "append", "(", "output_name", ")", "spec_layer_params", "=", "spec_layer", ".", "reorganizeData", "# Set the parameters", "if", "block_size", "<", "2", ":", "raise", "ValueError", "(", "\"Invalid block_size value %d. Must be greater than 1.\"", "%", "block_size", ")", "spec_layer_params", ".", "blockSize", "=", "block_size", "if", "mode", "==", "'SPACE_TO_DEPTH'", ":", "spec_layer_params", ".", "mode", "=", "_NeuralNetwork_pb2", ".", "ReorganizeDataLayerParams", ".", "ReorganizationType", ".", "Value", "(", "'SPACE_TO_DEPTH'", ")", "elif", "mode", "==", "'DEPTH_TO_SPACE'", ":", "spec_layer_params", ".", "mode", "=", "_NeuralNetwork_pb2", ".", "ReorganizeDataLayerParams", ".", "ReorganizationType", ".", "Value", "(", "'DEPTH_TO_SPACE'", ")", "else", ":", "raise", "NotImplementedError", "(", "'Unknown reorganization mode %s '", "%", "mode", ")" ]
Add a data reorganization layer of type "SPACE_TO_DEPTH" or "DEPTH_TO_SPACE". Parameters ---------- name: str The name of this layer. input_name: str The input blob name of this layer. output_name: str The output blob name of this layer. mode: str - If mode == 'SPACE_TO_DEPTH': data is moved from the spatial to the channel dimension. Input is spatially divided into non-overlapping blocks of size block_size X block_size and data from each block is moved to the channel dimension. Output CHW dimensions are: [C * block_size * block_size, H/block_size, C/block_size]. - If mode == 'DEPTH_TO_SPACE': data is moved from the channel to the spatial dimension. Reverse of the operation 'SPACE_TO_DEPTH'. Output CHW dimensions are: [C/(block_size * block_size), H * block_size, C * block_size]. block_size: int Must be greater than 1. Must divide H and W, when mode is 'SPACE_TO_DEPTH'. (block_size * block_size) must divide C when mode is 'DEPTH_TO_SPACE'. See Also -------- add_flatten, add_reshape
[ "Add", "a", "data", "reorganization", "layer", "of", "type", "SPACE_TO_DEPTH", "or", "DEPTH_TO_SPACE", "." ]
74514c3f99e25b46f22c6e02977fe3da69221c2e
https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/external/coremltools_wrap/coremltools/coremltools/models/neural_network.py#L1867-L1922
28,797
apple/turicreate
src/external/coremltools_wrap/coremltools/coremltools/models/neural_network.py
NeuralNetworkBuilder.add_reshape
def add_reshape(self, name, input_name, output_name, target_shape, mode):
    """
    Add a reshape layer. Kindly refer to NeuralNetwork.proto for details.

    Parameters
    ----------
    name: str
        The name of this layer.
    input_name: str
        The input blob name of this layer.
    output_name: str
        The output blob name of this layer.
    target_shape: tuple
        Shape of the output blob. The product of target_shape must equal the
        total number of elements in the input blob.
        Can be either length 3 (C,H,W) or length 4 (Seq,C,H,W).
    mode: int
        - If mode == 0, the reshape layer is in CHANNEL_FIRST mode.
        - If mode == 1, the reshape layer is in CHANNEL_LAST mode.

    Raises
    ------
    ValueError
        If target_shape does not have length 3 or 4.

    See Also
    --------
    add_flatten, add_permute
    """
    # Validate up front: the original ordering appended and mutated the
    # layer first, so an invalid target_shape left a corrupted layer in
    # the spec before raising.
    if len(target_shape) != 4 and len(target_shape) != 3:
        raise ValueError("Length of the 'target-shape' parameter must be equal to 3 or 4")

    nn_spec = self.nn_spec

    # Add a new layer and wire up its input/output blobs.
    spec_layer = nn_spec.layers.add()
    spec_layer.name = name
    spec_layer.input.append(input_name)
    spec_layer.output.append(output_name)
    spec_layer_params = spec_layer.reshape
    spec_layer_params.targetShape.extend(target_shape)

    # Any mode other than 0 maps to CHANNEL_LAST (matches original behavior).
    if mode == 0:
        spec_layer_params.mode = \
            _NeuralNetwork_pb2.ReshapeLayerParams.ReshapeOrder.Value('CHANNEL_FIRST')
    else:
        spec_layer_params.mode = \
            _NeuralNetwork_pb2.ReshapeLayerParams.ReshapeOrder.Value('CHANNEL_LAST')
python
def add_reshape(self, name, input_name, output_name, target_shape, mode): """ Add a reshape layer. Kindly refer to NeuralNetwork.proto for details. Parameters ---------- name: str The name of this layer. target_shape: tuple Shape of the output blob. The product of target_shape must be equal to the shape of the input blob. Can be either length 3 (C,H,W) or length 4 (Seq,C,H,W). mode: int - If mode == 0, the reshape layer is in CHANNEL_FIRST mode. - If mode == 1, the reshape layer is in CHANNEL_LAST mode. input_name: str The input blob name of this layer. output_name: str The output blob name of this layer. See Also -------- add_flatten, add_permute """ spec = self.spec nn_spec = self.nn_spec # Add a new layer spec_layer = nn_spec.layers.add() spec_layer.name = name spec_layer.input.append(input_name) spec_layer.output.append(output_name) spec_layer_params = spec_layer.reshape spec_layer_params.targetShape.extend(target_shape) if mode == 0: spec_layer_params.mode = \ _NeuralNetwork_pb2.ReshapeLayerParams.ReshapeOrder.Value('CHANNEL_FIRST') else: spec_layer_params.mode = \ _NeuralNetwork_pb2.ReshapeLayerParams.ReshapeOrder.Value('CHANNEL_LAST') if len(target_shape) != 4 and len(target_shape) != 3: raise ValueError("Length of the 'target-shape' parameter must be equal to 3 or 4")
[ "def", "add_reshape", "(", "self", ",", "name", ",", "input_name", ",", "output_name", ",", "target_shape", ",", "mode", ")", ":", "spec", "=", "self", ".", "spec", "nn_spec", "=", "self", ".", "nn_spec", "# Add a new layer", "spec_layer", "=", "nn_spec", ".", "layers", ".", "add", "(", ")", "spec_layer", ".", "name", "=", "name", "spec_layer", ".", "input", ".", "append", "(", "input_name", ")", "spec_layer", ".", "output", ".", "append", "(", "output_name", ")", "spec_layer_params", "=", "spec_layer", ".", "reshape", "spec_layer_params", ".", "targetShape", ".", "extend", "(", "target_shape", ")", "if", "mode", "==", "0", ":", "spec_layer_params", ".", "mode", "=", "_NeuralNetwork_pb2", ".", "ReshapeLayerParams", ".", "ReshapeOrder", ".", "Value", "(", "'CHANNEL_FIRST'", ")", "else", ":", "spec_layer_params", ".", "mode", "=", "_NeuralNetwork_pb2", ".", "ReshapeLayerParams", ".", "ReshapeOrder", ".", "Value", "(", "'CHANNEL_LAST'", ")", "if", "len", "(", "target_shape", ")", "!=", "4", "and", "len", "(", "target_shape", ")", "!=", "3", ":", "raise", "ValueError", "(", "\"Length of the 'target-shape' parameter must be equal to 3 or 4\"", ")" ]
Add a reshape layer. Kindly refer to NeuralNetwork.proto for details. Parameters ---------- name: str The name of this layer. target_shape: tuple Shape of the output blob. The product of target_shape must be equal to the shape of the input blob. Can be either length 3 (C,H,W) or length 4 (Seq,C,H,W). mode: int - If mode == 0, the reshape layer is in CHANNEL_FIRST mode. - If mode == 1, the reshape layer is in CHANNEL_LAST mode. input_name: str The input blob name of this layer. output_name: str The output blob name of this layer. See Also -------- add_flatten, add_permute
[ "Add", "a", "reshape", "layer", ".", "Kindly", "refer", "to", "NeuralNetwork", ".", "proto", "for", "details", "." ]
74514c3f99e25b46f22c6e02977fe3da69221c2e
https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/external/coremltools_wrap/coremltools/coremltools/models/neural_network.py#L2048-L2094
28,798
apple/turicreate
src/external/coremltools_wrap/coremltools/coremltools/models/neural_network.py
NeuralNetworkBuilder.add_l2_normalize
def add_l2_normalize(self, name, input_name, output_name, epsilon = 1e-5):
    """
    Add an L2-normalization layer. The input is divided by its L2 norm,
    i.e. by the square root of the sum of squares of all its elements
    taken over the C, H and W dimensions.

    Parameters
    ----------
    name: str
        The name of this layer.
    input_name: str
        The input blob name of this layer.
    output_name: str
        The output blob name of this layer.
    epsilon: float
        Small bias added to avoid division by zero.

    See Also
    --------
    add_mvn, add_lrn
    """
    spec = self.spec
    network = self.nn_spec

    # Append a fresh layer to the network and connect its blobs.
    new_layer = network.layers.add()
    new_layer.name = name
    new_layer.input.append(input_name)
    new_layer.output.append(output_name)

    # Configure the layer-specific parameters.
    params = new_layer.l2normalize
    params.epsilon = epsilon
python
def add_l2_normalize(self, name, input_name, output_name, epsilon = 1e-5): """ Add L2 normalize layer. Normalizes the input by the L2 norm, i.e. divides by the the square root of the sum of squares of all elements of the input along C, H and W dimensions. Parameters ---------- name: str The name of this layer. input_name: str The input blob name of this layer. output_name: str The output blob name of this layer. epsilon: float small bias to avoid division by zero. See Also -------- add_mvn, add_lrn """ spec = self.spec nn_spec = self.nn_spec # Add a new layer spec_layer = nn_spec.layers.add() spec_layer.name = name spec_layer.input.append(input_name) spec_layer.output.append(output_name) spec_layer_params = spec_layer.l2normalize spec_layer_params.epsilon = epsilon
[ "def", "add_l2_normalize", "(", "self", ",", "name", ",", "input_name", ",", "output_name", ",", "epsilon", "=", "1e-5", ")", ":", "spec", "=", "self", ".", "spec", "nn_spec", "=", "self", ".", "nn_spec", "# Add a new layer", "spec_layer", "=", "nn_spec", ".", "layers", ".", "add", "(", ")", "spec_layer", ".", "name", "=", "name", "spec_layer", ".", "input", ".", "append", "(", "input_name", ")", "spec_layer", ".", "output", ".", "append", "(", "output_name", ")", "spec_layer_params", "=", "spec_layer", ".", "l2normalize", "spec_layer_params", ".", "epsilon", "=", "epsilon" ]
Add L2 normalize layer. Normalizes the input by the L2 norm, i.e. divides by the the square root of the sum of squares of all elements of the input along C, H and W dimensions. Parameters ---------- name: str The name of this layer. input_name: str The input blob name of this layer. output_name: str The output blob name of this layer. epsilon: float small bias to avoid division by zero. See Also -------- add_mvn, add_lrn
[ "Add", "L2", "normalize", "layer", ".", "Normalizes", "the", "input", "by", "the", "L2", "norm", "i", ".", "e", ".", "divides", "by", "the", "the", "square", "root", "of", "the", "sum", "of", "squares", "of", "all", "elements", "of", "the", "input", "along", "C", "H", "and", "W", "dimensions", "." ]
74514c3f99e25b46f22c6e02977fe3da69221c2e
https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/external/coremltools_wrap/coremltools/coremltools/models/neural_network.py#L2271-L2305
28,799
apple/turicreate
src/external/coremltools_wrap/coremltools/coremltools/models/neural_network.py
NeuralNetworkBuilder.add_split
def add_split(self, name, input_name, output_names):
    """
    Add a Split layer that uniformly splits the input along the channel
    dimension to produce multiple outputs.

    Parameters
    ----------
    name: str
        The name of this layer.
    input_name: str
        The input blob name of this layer.
    output_names: [str]
        List of output blob names of this layer.

    See Also
    --------
    add_elementwise
    """
    spec = self.spec
    network = self.nn_spec

    # Append a fresh layer to the network; a split layer has a single
    # input blob but one output blob per partition.
    new_layer = network.layers.add()
    new_layer.name = name
    new_layer.input.append(input_name)
    new_layer.output.extend(output_names)

    # The number of partitions is implied by the output list.
    params = new_layer.split
    params.nOutputs = len(output_names)
python
def add_split(self, name, input_name, output_names): """ Add a Split layer that uniformly splits the input along the channel dimension to produce multiple outputs. Parameters ---------- name: str The name of this layer. input_name: str The input blob name of this layer. output_names: [str] List of output blob names of this layer. See Also -------- add_elementwise """ spec = self.spec nn_spec = self.nn_spec # Add a new layer spec_layer = nn_spec.layers.add() spec_layer.name = name spec_layer.input.append(input_name) spec_layer.output.extend(output_names) spec_layer_params = spec_layer.split spec_layer_params.nOutputs = len(output_names)
[ "def", "add_split", "(", "self", ",", "name", ",", "input_name", ",", "output_names", ")", ":", "spec", "=", "self", ".", "spec", "nn_spec", "=", "self", ".", "nn_spec", "# Add a new layer", "spec_layer", "=", "nn_spec", ".", "layers", ".", "add", "(", ")", "spec_layer", ".", "name", "=", "name", "spec_layer", ".", "input", ".", "append", "(", "input_name", ")", "spec_layer", ".", "output", ".", "extend", "(", "output_names", ")", "spec_layer_params", "=", "spec_layer", ".", "split", "spec_layer_params", ".", "nOutputs", "=", "len", "(", "output_names", ")" ]
Add a Split layer that uniformly splits the input along the channel dimension to produce multiple outputs. Parameters ---------- name: str The name of this layer. input_name: str The input blob name of this layer. output_names: [str] List of output blob names of this layer. See Also -------- add_elementwise
[ "Add", "a", "Split", "layer", "that", "uniformly", "splits", "the", "input", "along", "the", "channel", "dimension", "to", "produce", "multiple", "outputs", "." ]
74514c3f99e25b46f22c6e02977fe3da69221c2e
https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/external/coremltools_wrap/coremltools/coremltools/models/neural_network.py#L2379-L2409