id int32 0 252k | repo stringlengths 7 55 | path stringlengths 4 127 | func_name stringlengths 1 88 | original_string stringlengths 75 19.8k | language stringclasses 1 value | code stringlengths 75 19.8k | code_tokens list | docstring stringlengths 3 17.3k | docstring_tokens list | sha stringlengths 40 40 | url stringlengths 87 242 |
|---|---|---|---|---|---|---|---|---|---|---|---|
28,900 | apple/turicreate | deps/src/boost_1_68_0/libs/predef/tools/ci/build_log.py | BuildConsoleSummaryReport.print_action | def print_action(self, test_succeed, action):
'''
Print the detailed info of failed or always print tests.
'''
#self.info_print(">>> {0}",action.keys())
if not test_succeed or action['info']['always_show_run_output']:
output = action['output'].strip()
if output != "":
p = self.fail_print if action['result'] == 'fail' else self.p_print
self.info_print("")
self.info_print("({0}) {1}",action['info']['name'],action['info']['path'])
p("")
p("{0}",action['command'].strip())
p("")
for line in output.splitlines():
p("{0}",line.encode('utf-8')) | python | def print_action(self, test_succeed, action):
'''
Print the detailed info of failed or always print tests.
'''
#self.info_print(">>> {0}",action.keys())
if not test_succeed or action['info']['always_show_run_output']:
output = action['output'].strip()
if output != "":
p = self.fail_print if action['result'] == 'fail' else self.p_print
self.info_print("")
self.info_print("({0}) {1}",action['info']['name'],action['info']['path'])
p("")
p("{0}",action['command'].strip())
p("")
for line in output.splitlines():
p("{0}",line.encode('utf-8')) | [
"def",
"print_action",
"(",
"self",
",",
"test_succeed",
",",
"action",
")",
":",
"#self.info_print(\">>> {0}\",action.keys())",
"if",
"not",
"test_succeed",
"or",
"action",
"[",
"'info'",
"]",
"[",
"'always_show_run_output'",
"]",
":",
"output",
"=",
"action",
"[",
"'output'",
"]",
".",
"strip",
"(",
")",
"if",
"output",
"!=",
"\"\"",
":",
"p",
"=",
"self",
".",
"fail_print",
"if",
"action",
"[",
"'result'",
"]",
"==",
"'fail'",
"else",
"self",
".",
"p_print",
"self",
".",
"info_print",
"(",
"\"\"",
")",
"self",
".",
"info_print",
"(",
"\"({0}) {1}\"",
",",
"action",
"[",
"'info'",
"]",
"[",
"'name'",
"]",
",",
"action",
"[",
"'info'",
"]",
"[",
"'path'",
"]",
")",
"p",
"(",
"\"\"",
")",
"p",
"(",
"\"{0}\"",
",",
"action",
"[",
"'command'",
"]",
".",
"strip",
"(",
")",
")",
"p",
"(",
"\"\"",
")",
"for",
"line",
"in",
"output",
".",
"splitlines",
"(",
")",
":",
"p",
"(",
"\"{0}\"",
",",
"line",
".",
"encode",
"(",
"'utf-8'",
")",
")"
] | Print the detailed info of failed or always print tests. | [
"Print",
"the",
"detailed",
"info",
"of",
"failed",
"or",
"always",
"print",
"tests",
"."
] | 74514c3f99e25b46f22c6e02977fe3da69221c2e | https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/deps/src/boost_1_68_0/libs/predef/tools/ci/build_log.py#L363-L378 |
28,901 | apple/turicreate | src/external/coremltools_wrap/coremltools/coremltools/converters/sklearn/_SVC.py | _generate_base_svm_classifier_spec | def _generate_base_svm_classifier_spec(model):
"""
Takes an SVM classifier produces a starting spec using the parts. that are
shared between all SVMs.
"""
if not(_HAS_SKLEARN):
raise RuntimeError('scikit-learn not found. scikit-learn conversion API is disabled.')
check_fitted(model, lambda m: hasattr(m, 'support_vectors_'))
spec = _Model_pb2.Model()
spec.specificationVersion = SPECIFICATION_VERSION
svm = spec.supportVectorClassifier
_set_kernel(model, svm)
for cur_rho in model.intercept_:
if(len(model.classes_) == 2):
# For some reason Scikit Learn doesn't negate for binary classification
svm.rho.append(cur_rho)
else:
svm.rho.append(-cur_rho)
for i in range(len(model._dual_coef_)):
svm.coefficients.add()
for cur_alpha in model._dual_coef_[i]:
svm.coefficients[i].alpha.append(cur_alpha)
for cur_src_vector in model.support_vectors_:
cur_dest_vector = svm.denseSupportVectors.vectors.add()
for i in cur_src_vector:
cur_dest_vector.values.append(i)
return spec | python | def _generate_base_svm_classifier_spec(model):
"""
Takes an SVM classifier produces a starting spec using the parts. that are
shared between all SVMs.
"""
if not(_HAS_SKLEARN):
raise RuntimeError('scikit-learn not found. scikit-learn conversion API is disabled.')
check_fitted(model, lambda m: hasattr(m, 'support_vectors_'))
spec = _Model_pb2.Model()
spec.specificationVersion = SPECIFICATION_VERSION
svm = spec.supportVectorClassifier
_set_kernel(model, svm)
for cur_rho in model.intercept_:
if(len(model.classes_) == 2):
# For some reason Scikit Learn doesn't negate for binary classification
svm.rho.append(cur_rho)
else:
svm.rho.append(-cur_rho)
for i in range(len(model._dual_coef_)):
svm.coefficients.add()
for cur_alpha in model._dual_coef_[i]:
svm.coefficients[i].alpha.append(cur_alpha)
for cur_src_vector in model.support_vectors_:
cur_dest_vector = svm.denseSupportVectors.vectors.add()
for i in cur_src_vector:
cur_dest_vector.values.append(i)
return spec | [
"def",
"_generate_base_svm_classifier_spec",
"(",
"model",
")",
":",
"if",
"not",
"(",
"_HAS_SKLEARN",
")",
":",
"raise",
"RuntimeError",
"(",
"'scikit-learn not found. scikit-learn conversion API is disabled.'",
")",
"check_fitted",
"(",
"model",
",",
"lambda",
"m",
":",
"hasattr",
"(",
"m",
",",
"'support_vectors_'",
")",
")",
"spec",
"=",
"_Model_pb2",
".",
"Model",
"(",
")",
"spec",
".",
"specificationVersion",
"=",
"SPECIFICATION_VERSION",
"svm",
"=",
"spec",
".",
"supportVectorClassifier",
"_set_kernel",
"(",
"model",
",",
"svm",
")",
"for",
"cur_rho",
"in",
"model",
".",
"intercept_",
":",
"if",
"(",
"len",
"(",
"model",
".",
"classes_",
")",
"==",
"2",
")",
":",
"# For some reason Scikit Learn doesn't negate for binary classification",
"svm",
".",
"rho",
".",
"append",
"(",
"cur_rho",
")",
"else",
":",
"svm",
".",
"rho",
".",
"append",
"(",
"-",
"cur_rho",
")",
"for",
"i",
"in",
"range",
"(",
"len",
"(",
"model",
".",
"_dual_coef_",
")",
")",
":",
"svm",
".",
"coefficients",
".",
"add",
"(",
")",
"for",
"cur_alpha",
"in",
"model",
".",
"_dual_coef_",
"[",
"i",
"]",
":",
"svm",
".",
"coefficients",
"[",
"i",
"]",
".",
"alpha",
".",
"append",
"(",
"cur_alpha",
")",
"for",
"cur_src_vector",
"in",
"model",
".",
"support_vectors_",
":",
"cur_dest_vector",
"=",
"svm",
".",
"denseSupportVectors",
".",
"vectors",
".",
"add",
"(",
")",
"for",
"i",
"in",
"cur_src_vector",
":",
"cur_dest_vector",
".",
"values",
".",
"append",
"(",
"i",
")",
"return",
"spec"
] | Takes an SVM classifier produces a starting spec using the parts. that are
shared between all SVMs. | [
"Takes",
"an",
"SVM",
"classifier",
"produces",
"a",
"starting",
"spec",
"using",
"the",
"parts",
".",
"that",
"are",
"shared",
"between",
"all",
"SVMs",
"."
] | 74514c3f99e25b46f22c6e02977fe3da69221c2e | https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/external/coremltools_wrap/coremltools/coremltools/converters/sklearn/_SVC.py#L24-L56 |
28,902 | apple/turicreate | src/external/coremltools_wrap/coremltools/coremltools/converters/keras/_topology.py | NetGraph._insert_layer_after | def _insert_layer_after(self, layer_idx, new_layer, new_keras_layer):
"""
Insert the new_layer after layer, whose position is layer_idx. The new layer's
parameter is stored in a Keras layer called new_keras_layer
"""
# reminder: new_keras_layer is not part of the original Keras network,
# so it's input / output blob information is missing. It serves only as
# a parameter holder.
layer = self.layer_list[layer_idx]
self.layer_list.insert(layer_idx+1, new_layer)
self.keras_layer_map[new_layer] = new_keras_layer
successors = self.get_successors(layer)
# add edge layer -> new_layer
self._add_edge(layer, new_layer)
# add edges new_layer -> layer_successor, remove layer -> successor
for succ in successors:
self._add_edge(new_layer, succ)
self._remove_edge(layer, succ) | python | def _insert_layer_after(self, layer_idx, new_layer, new_keras_layer):
"""
Insert the new_layer after layer, whose position is layer_idx. The new layer's
parameter is stored in a Keras layer called new_keras_layer
"""
# reminder: new_keras_layer is not part of the original Keras network,
# so it's input / output blob information is missing. It serves only as
# a parameter holder.
layer = self.layer_list[layer_idx]
self.layer_list.insert(layer_idx+1, new_layer)
self.keras_layer_map[new_layer] = new_keras_layer
successors = self.get_successors(layer)
# add edge layer -> new_layer
self._add_edge(layer, new_layer)
# add edges new_layer -> layer_successor, remove layer -> successor
for succ in successors:
self._add_edge(new_layer, succ)
self._remove_edge(layer, succ) | [
"def",
"_insert_layer_after",
"(",
"self",
",",
"layer_idx",
",",
"new_layer",
",",
"new_keras_layer",
")",
":",
"# reminder: new_keras_layer is not part of the original Keras network,",
"# so it's input / output blob information is missing. It serves only as",
"# a parameter holder.",
"layer",
"=",
"self",
".",
"layer_list",
"[",
"layer_idx",
"]",
"self",
".",
"layer_list",
".",
"insert",
"(",
"layer_idx",
"+",
"1",
",",
"new_layer",
")",
"self",
".",
"keras_layer_map",
"[",
"new_layer",
"]",
"=",
"new_keras_layer",
"successors",
"=",
"self",
".",
"get_successors",
"(",
"layer",
")",
"# add edge layer -> new_layer",
"self",
".",
"_add_edge",
"(",
"layer",
",",
"new_layer",
")",
"# add edges new_layer -> layer_successor, remove layer -> successor",
"for",
"succ",
"in",
"successors",
":",
"self",
".",
"_add_edge",
"(",
"new_layer",
",",
"succ",
")",
"self",
".",
"_remove_edge",
"(",
"layer",
",",
"succ",
")"
] | Insert the new_layer after layer, whose position is layer_idx. The new layer's
parameter is stored in a Keras layer called new_keras_layer | [
"Insert",
"the",
"new_layer",
"after",
"layer",
"whose",
"position",
"is",
"layer_idx",
".",
"The",
"new",
"layer",
"s",
"parameter",
"is",
"stored",
"in",
"a",
"Keras",
"layer",
"called",
"new_keras_layer"
] | 74514c3f99e25b46f22c6e02977fe3da69221c2e | https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/external/coremltools_wrap/coremltools/coremltools/converters/keras/_topology.py#L361-L378 |
28,903 | apple/turicreate | src/external/coremltools_wrap/coremltools/coremltools/converters/keras/_topology.py | NetGraph._insert_layer_between | def _insert_layer_between(self, src, snk, new_layer, new_keras_layer):
"""
Insert the new_layer before layer, whose position is layer_idx. The new layer's
parameter is stored in a Keras layer called new_keras_layer
"""
if snk is None:
insert_pos = self.layer_list.index(src) + 1
else:
insert_pos = self.layer_list.index(snk) # insert position
self.layer_list.insert(insert_pos, new_layer)
self.keras_layer_map[new_layer] = new_keras_layer
if src is None: # snk is an input layer
self._add_edge(new_layer, snk)
elif snk is None: # src is an output layer
self._add_edge(src, new_layer)
else:
self._add_edge(src, new_layer)
self._add_edge(new_layer, snk)
self._remove_edge(src, snk) | python | def _insert_layer_between(self, src, snk, new_layer, new_keras_layer):
"""
Insert the new_layer before layer, whose position is layer_idx. The new layer's
parameter is stored in a Keras layer called new_keras_layer
"""
if snk is None:
insert_pos = self.layer_list.index(src) + 1
else:
insert_pos = self.layer_list.index(snk) # insert position
self.layer_list.insert(insert_pos, new_layer)
self.keras_layer_map[new_layer] = new_keras_layer
if src is None: # snk is an input layer
self._add_edge(new_layer, snk)
elif snk is None: # src is an output layer
self._add_edge(src, new_layer)
else:
self._add_edge(src, new_layer)
self._add_edge(new_layer, snk)
self._remove_edge(src, snk) | [
"def",
"_insert_layer_between",
"(",
"self",
",",
"src",
",",
"snk",
",",
"new_layer",
",",
"new_keras_layer",
")",
":",
"if",
"snk",
"is",
"None",
":",
"insert_pos",
"=",
"self",
".",
"layer_list",
".",
"index",
"(",
"src",
")",
"+",
"1",
"else",
":",
"insert_pos",
"=",
"self",
".",
"layer_list",
".",
"index",
"(",
"snk",
")",
"# insert position",
"self",
".",
"layer_list",
".",
"insert",
"(",
"insert_pos",
",",
"new_layer",
")",
"self",
".",
"keras_layer_map",
"[",
"new_layer",
"]",
"=",
"new_keras_layer",
"if",
"src",
"is",
"None",
":",
"# snk is an input layer",
"self",
".",
"_add_edge",
"(",
"new_layer",
",",
"snk",
")",
"elif",
"snk",
"is",
"None",
":",
"# src is an output layer",
"self",
".",
"_add_edge",
"(",
"src",
",",
"new_layer",
")",
"else",
":",
"self",
".",
"_add_edge",
"(",
"src",
",",
"new_layer",
")",
"self",
".",
"_add_edge",
"(",
"new_layer",
",",
"snk",
")",
"self",
".",
"_remove_edge",
"(",
"src",
",",
"snk",
")"
] | Insert the new_layer before layer, whose position is layer_idx. The new layer's
parameter is stored in a Keras layer called new_keras_layer | [
"Insert",
"the",
"new_layer",
"before",
"layer",
"whose",
"position",
"is",
"layer_idx",
".",
"The",
"new",
"layer",
"s",
"parameter",
"is",
"stored",
"in",
"a",
"Keras",
"layer",
"called",
"new_keras_layer"
] | 74514c3f99e25b46f22c6e02977fe3da69221c2e | https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/external/coremltools_wrap/coremltools/coremltools/converters/keras/_topology.py#L380-L398 |
28,904 | apple/turicreate | src/external/coremltools_wrap/coremltools/coremltools/converters/keras/_topology.py | NetGraph.insert_1d_permute_layers | def insert_1d_permute_layers(self):
"""
Insert permutation layers before a 1D start point or after 1D end point
"""
idx, nb_layers = 0, len(self.layer_list)
in_edges, out_edges = self._get_1d_interface_edges()
# Hacky Warning: (1) use a 4-D permute, which is not likely to happen in Keras,
# to represent actual permutation needed for (seq, c, h, w) in CoreML
# (2) Assume 2-D input shape has meaning (seq, c), and during CoreML runtime,
# it is represented as 4D blob, (seq, c, h, w)
for in_edge in in_edges:
src, snk = in_edge
if src is None:
permute_layer = '_permute_' + snk
else:
permute_layer = src + '_permute_' + snk
keras_permute = _keras.layers.Permute(dims=(3,1,2,0)) # assume w = 1, switch seq and w
self._insert_layer_between(src, snk, permute_layer, keras_permute)
for out_edge in out_edges:
src, snk = out_edge
if snk is None:
permute_layer = src + '_permute_'
else:
permute_layer = src + '_permute_' + snk
keras_permute = _keras.layers.Permute(dims=(3,1,2,0)) # assume w = 1, switch seq and w back
self._insert_layer_between(src, snk, permute_layer, keras_permute) | python | def insert_1d_permute_layers(self):
"""
Insert permutation layers before a 1D start point or after 1D end point
"""
idx, nb_layers = 0, len(self.layer_list)
in_edges, out_edges = self._get_1d_interface_edges()
# Hacky Warning: (1) use a 4-D permute, which is not likely to happen in Keras,
# to represent actual permutation needed for (seq, c, h, w) in CoreML
# (2) Assume 2-D input shape has meaning (seq, c), and during CoreML runtime,
# it is represented as 4D blob, (seq, c, h, w)
for in_edge in in_edges:
src, snk = in_edge
if src is None:
permute_layer = '_permute_' + snk
else:
permute_layer = src + '_permute_' + snk
keras_permute = _keras.layers.Permute(dims=(3,1,2,0)) # assume w = 1, switch seq and w
self._insert_layer_between(src, snk, permute_layer, keras_permute)
for out_edge in out_edges:
src, snk = out_edge
if snk is None:
permute_layer = src + '_permute_'
else:
permute_layer = src + '_permute_' + snk
keras_permute = _keras.layers.Permute(dims=(3,1,2,0)) # assume w = 1, switch seq and w back
self._insert_layer_between(src, snk, permute_layer, keras_permute) | [
"def",
"insert_1d_permute_layers",
"(",
"self",
")",
":",
"idx",
",",
"nb_layers",
"=",
"0",
",",
"len",
"(",
"self",
".",
"layer_list",
")",
"in_edges",
",",
"out_edges",
"=",
"self",
".",
"_get_1d_interface_edges",
"(",
")",
"# Hacky Warning: (1) use a 4-D permute, which is not likely to happen in Keras,",
"# to represent actual permutation needed for (seq, c, h, w) in CoreML",
"# (2) Assume 2-D input shape has meaning (seq, c), and during CoreML runtime,",
"# it is represented as 4D blob, (seq, c, h, w)",
"for",
"in_edge",
"in",
"in_edges",
":",
"src",
",",
"snk",
"=",
"in_edge",
"if",
"src",
"is",
"None",
":",
"permute_layer",
"=",
"'_permute_'",
"+",
"snk",
"else",
":",
"permute_layer",
"=",
"src",
"+",
"'_permute_'",
"+",
"snk",
"keras_permute",
"=",
"_keras",
".",
"layers",
".",
"Permute",
"(",
"dims",
"=",
"(",
"3",
",",
"1",
",",
"2",
",",
"0",
")",
")",
"# assume w = 1, switch seq and w",
"self",
".",
"_insert_layer_between",
"(",
"src",
",",
"snk",
",",
"permute_layer",
",",
"keras_permute",
")",
"for",
"out_edge",
"in",
"out_edges",
":",
"src",
",",
"snk",
"=",
"out_edge",
"if",
"snk",
"is",
"None",
":",
"permute_layer",
"=",
"src",
"+",
"'_permute_'",
"else",
":",
"permute_layer",
"=",
"src",
"+",
"'_permute_'",
"+",
"snk",
"keras_permute",
"=",
"_keras",
".",
"layers",
".",
"Permute",
"(",
"dims",
"=",
"(",
"3",
",",
"1",
",",
"2",
",",
"0",
")",
")",
"# assume w = 1, switch seq and w back",
"self",
".",
"_insert_layer_between",
"(",
"src",
",",
"snk",
",",
"permute_layer",
",",
"keras_permute",
")"
] | Insert permutation layers before a 1D start point or after 1D end point | [
"Insert",
"permutation",
"layers",
"before",
"a",
"1D",
"start",
"point",
"or",
"after",
"1D",
"end",
"point"
] | 74514c3f99e25b46f22c6e02977fe3da69221c2e | https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/external/coremltools_wrap/coremltools/coremltools/converters/keras/_topology.py#L492-L518 |
28,905 | apple/turicreate | deps/src/boost_1_68_0/tools/build/src/build/configure.py | log_component_configuration | def log_component_configuration(component, message):
"""Report something about component configuration that the user should better know."""
assert isinstance(component, basestring)
assert isinstance(message, basestring)
__component_logs.setdefault(component, []).append(message) | python | def log_component_configuration(component, message):
"""Report something about component configuration that the user should better know."""
assert isinstance(component, basestring)
assert isinstance(message, basestring)
__component_logs.setdefault(component, []).append(message) | [
"def",
"log_component_configuration",
"(",
"component",
",",
"message",
")",
":",
"assert",
"isinstance",
"(",
"component",
",",
"basestring",
")",
"assert",
"isinstance",
"(",
"message",
",",
"basestring",
")",
"__component_logs",
".",
"setdefault",
"(",
"component",
",",
"[",
"]",
")",
".",
"append",
"(",
"message",
")"
] | Report something about component configuration that the user should better know. | [
"Report",
"something",
"about",
"component",
"configuration",
"that",
"the",
"user",
"should",
"better",
"know",
"."
] | 74514c3f99e25b46f22c6e02977fe3da69221c2e | https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/deps/src/boost_1_68_0/tools/build/src/build/configure.py#L52-L56 |
28,906 | apple/turicreate | src/unity/python/turicreate/toolkits/_feature_engineering/__init__.py | create | def create(dataset, transformers):
"""
Create a Transformer object to transform data for feature engineering.
Parameters
----------
dataset : SFrame
The dataset to use for training the model.
transformers: Transformer | list[Transformer]
An Transformer or a list of Transformers.
See Also
--------
turicreate.toolkits.feature_engineering._feature_engineering._TransformerBase
Examples
--------
.. sourcecode:: python
# Create data.
>>> sf = turicreate.SFrame({'a': [1,2,3], 'b' : [2,3,4]})
>>> from turicreate.feature_engineering import FeatureHasher, \
QuadraticFeatures, OneHotEncoder
# Create a single transformer.
>>> encoder = turicreate.feature_engineering.create(sf,
OneHotEncoder(max_categories = 10))
# Create a chain of transformers.
>>> chain = turicreate.feature_engineering.create(sf, [
QuadraticFeatures(),
FeatureHasher()
])
# Create a chain of transformers with names for each of the steps.
>>> chain = turicreate.feature_engineering.create(sf, [
('quadratic', QuadraticFeatures()),
('hasher', FeatureHasher())
])
"""
err_msg = "The parameters 'transformers' must be a valid Transformer object."
cls = transformers.__class__
_raise_error_if_not_sframe(dataset, "dataset")
# List of transformers.
if (cls == list):
transformers = TransformerChain(transformers)
# Transformer.
else:
if not issubclass(cls, TransformerBase):
raise TypeError(err_msg)
# Fit and return
transformers.fit(dataset)
return transformers | python | def create(dataset, transformers):
"""
Create a Transformer object to transform data for feature engineering.
Parameters
----------
dataset : SFrame
The dataset to use for training the model.
transformers: Transformer | list[Transformer]
An Transformer or a list of Transformers.
See Also
--------
turicreate.toolkits.feature_engineering._feature_engineering._TransformerBase
Examples
--------
.. sourcecode:: python
# Create data.
>>> sf = turicreate.SFrame({'a': [1,2,3], 'b' : [2,3,4]})
>>> from turicreate.feature_engineering import FeatureHasher, \
QuadraticFeatures, OneHotEncoder
# Create a single transformer.
>>> encoder = turicreate.feature_engineering.create(sf,
OneHotEncoder(max_categories = 10))
# Create a chain of transformers.
>>> chain = turicreate.feature_engineering.create(sf, [
QuadraticFeatures(),
FeatureHasher()
])
# Create a chain of transformers with names for each of the steps.
>>> chain = turicreate.feature_engineering.create(sf, [
('quadratic', QuadraticFeatures()),
('hasher', FeatureHasher())
])
"""
err_msg = "The parameters 'transformers' must be a valid Transformer object."
cls = transformers.__class__
_raise_error_if_not_sframe(dataset, "dataset")
# List of transformers.
if (cls == list):
transformers = TransformerChain(transformers)
# Transformer.
else:
if not issubclass(cls, TransformerBase):
raise TypeError(err_msg)
# Fit and return
transformers.fit(dataset)
return transformers | [
"def",
"create",
"(",
"dataset",
",",
"transformers",
")",
":",
"err_msg",
"=",
"\"The parameters 'transformers' must be a valid Transformer object.\"",
"cls",
"=",
"transformers",
".",
"__class__",
"_raise_error_if_not_sframe",
"(",
"dataset",
",",
"\"dataset\"",
")",
"# List of transformers.",
"if",
"(",
"cls",
"==",
"list",
")",
":",
"transformers",
"=",
"TransformerChain",
"(",
"transformers",
")",
"# Transformer.",
"else",
":",
"if",
"not",
"issubclass",
"(",
"cls",
",",
"TransformerBase",
")",
":",
"raise",
"TypeError",
"(",
"err_msg",
")",
"# Fit and return",
"transformers",
".",
"fit",
"(",
"dataset",
")",
"return",
"transformers"
] | Create a Transformer object to transform data for feature engineering.
Parameters
----------
dataset : SFrame
The dataset to use for training the model.
transformers: Transformer | list[Transformer]
An Transformer or a list of Transformers.
See Also
--------
turicreate.toolkits.feature_engineering._feature_engineering._TransformerBase
Examples
--------
.. sourcecode:: python
# Create data.
>>> sf = turicreate.SFrame({'a': [1,2,3], 'b' : [2,3,4]})
>>> from turicreate.feature_engineering import FeatureHasher, \
QuadraticFeatures, OneHotEncoder
# Create a single transformer.
>>> encoder = turicreate.feature_engineering.create(sf,
OneHotEncoder(max_categories = 10))
# Create a chain of transformers.
>>> chain = turicreate.feature_engineering.create(sf, [
QuadraticFeatures(),
FeatureHasher()
])
# Create a chain of transformers with names for each of the steps.
>>> chain = turicreate.feature_engineering.create(sf, [
('quadratic', QuadraticFeatures()),
('hasher', FeatureHasher())
]) | [
"Create",
"a",
"Transformer",
"object",
"to",
"transform",
"data",
"for",
"feature",
"engineering",
"."
] | 74514c3f99e25b46f22c6e02977fe3da69221c2e | https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/unity/python/turicreate/toolkits/_feature_engineering/__init__.py#L47-L106 |
28,907 | apple/turicreate | src/unity/python/turicreate/toolkits/sound_classifier/_audio_feature_extractor.py | VGGishFeatureExtractor._preprocess_data | def _preprocess_data(audio_data, verbose=True):
'''
Preprocess each example, breaking it up into frames.
Returns two numpy arrays: preprocessed frame and their indexes
'''
from .vggish_input import waveform_to_examples
last_progress_update = _time.time()
progress_header_printed = False
# Can't run as a ".apply(...)" due to numba.jit decorator issue:
# https://github.com/apple/turicreate/issues/1216
preprocessed_data, audio_data_index = [], []
for i, audio_dict in enumerate(audio_data):
scaled_data = audio_dict['data'] / 32768.0
data = waveform_to_examples(scaled_data, audio_dict['sample_rate'])
for j in data:
preprocessed_data.append([j])
audio_data_index.append(i)
# If `verbose` is set, print an progress update about every 20s
if verbose and _time.time() - last_progress_update >= 20:
if not progress_header_printed:
print("Preprocessing audio data -")
progress_header_printed = True
print("Preprocessed {} of {} examples".format(i, len(audio_data)))
last_progress_update = _time.time()
if progress_header_printed:
print("Preprocessed {} of {} examples\n".format(len(audio_data), len(audio_data)))
return _np.asarray(preprocessed_data), audio_data_index | python | def _preprocess_data(audio_data, verbose=True):
'''
Preprocess each example, breaking it up into frames.
Returns two numpy arrays: preprocessed frame and their indexes
'''
from .vggish_input import waveform_to_examples
last_progress_update = _time.time()
progress_header_printed = False
# Can't run as a ".apply(...)" due to numba.jit decorator issue:
# https://github.com/apple/turicreate/issues/1216
preprocessed_data, audio_data_index = [], []
for i, audio_dict in enumerate(audio_data):
scaled_data = audio_dict['data'] / 32768.0
data = waveform_to_examples(scaled_data, audio_dict['sample_rate'])
for j in data:
preprocessed_data.append([j])
audio_data_index.append(i)
# If `verbose` is set, print an progress update about every 20s
if verbose and _time.time() - last_progress_update >= 20:
if not progress_header_printed:
print("Preprocessing audio data -")
progress_header_printed = True
print("Preprocessed {} of {} examples".format(i, len(audio_data)))
last_progress_update = _time.time()
if progress_header_printed:
print("Preprocessed {} of {} examples\n".format(len(audio_data), len(audio_data)))
return _np.asarray(preprocessed_data), audio_data_index | [
"def",
"_preprocess_data",
"(",
"audio_data",
",",
"verbose",
"=",
"True",
")",
":",
"from",
".",
"vggish_input",
"import",
"waveform_to_examples",
"last_progress_update",
"=",
"_time",
".",
"time",
"(",
")",
"progress_header_printed",
"=",
"False",
"# Can't run as a \".apply(...)\" due to numba.jit decorator issue:",
"# https://github.com/apple/turicreate/issues/1216",
"preprocessed_data",
",",
"audio_data_index",
"=",
"[",
"]",
",",
"[",
"]",
"for",
"i",
",",
"audio_dict",
"in",
"enumerate",
"(",
"audio_data",
")",
":",
"scaled_data",
"=",
"audio_dict",
"[",
"'data'",
"]",
"/",
"32768.0",
"data",
"=",
"waveform_to_examples",
"(",
"scaled_data",
",",
"audio_dict",
"[",
"'sample_rate'",
"]",
")",
"for",
"j",
"in",
"data",
":",
"preprocessed_data",
".",
"append",
"(",
"[",
"j",
"]",
")",
"audio_data_index",
".",
"append",
"(",
"i",
")",
"# If `verbose` is set, print an progress update about every 20s",
"if",
"verbose",
"and",
"_time",
".",
"time",
"(",
")",
"-",
"last_progress_update",
">=",
"20",
":",
"if",
"not",
"progress_header_printed",
":",
"print",
"(",
"\"Preprocessing audio data -\"",
")",
"progress_header_printed",
"=",
"True",
"print",
"(",
"\"Preprocessed {} of {} examples\"",
".",
"format",
"(",
"i",
",",
"len",
"(",
"audio_data",
")",
")",
")",
"last_progress_update",
"=",
"_time",
".",
"time",
"(",
")",
"if",
"progress_header_printed",
":",
"print",
"(",
"\"Preprocessed {} of {} examples\\n\"",
".",
"format",
"(",
"len",
"(",
"audio_data",
")",
",",
"len",
"(",
"audio_data",
")",
")",
")",
"return",
"_np",
".",
"asarray",
"(",
"preprocessed_data",
")",
",",
"audio_data_index"
] | Preprocess each example, breaking it up into frames.
Returns two numpy arrays: preprocessed frame and their indexes | [
"Preprocess",
"each",
"example",
"breaking",
"it",
"up",
"into",
"frames",
"."
] | 74514c3f99e25b46f22c6e02977fe3da69221c2e | https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/unity/python/turicreate/toolkits/sound_classifier/_audio_feature_extractor.py#L40-L72 |
28,908 | apple/turicreate | src/unity/python/turicreate/toolkits/sound_classifier/_audio_feature_extractor.py | VGGishFeatureExtractor.get_deep_features | def get_deep_features(self, audio_data, verbose):
'''
Performs both audio preprocessing and VGGish deep feature extraction.
'''
preprocessed_data, row_ids = self._preprocess_data(audio_data, verbose)
deep_features = self._extract_features(preprocessed_data, verbose)
output = _tc.SFrame({'deep features': deep_features, 'row id': row_ids})
output = output.unstack('deep features')
max_row_id = len(audio_data)
missing_ids = set(range(max_row_id)) - set(output['row id'].unique())
if len(missing_ids) != 0:
empty_rows = _tc.SFrame({'List of deep features': [ [] for _ in range(len(missing_ids)) ],
'row id': missing_ids})
output = output.append(empty_rows)
output = output.sort('row id')
return output['List of deep features'] | python | def get_deep_features(self, audio_data, verbose):
'''
Performs both audio preprocessing and VGGish deep feature extraction.
'''
preprocessed_data, row_ids = self._preprocess_data(audio_data, verbose)
deep_features = self._extract_features(preprocessed_data, verbose)
output = _tc.SFrame({'deep features': deep_features, 'row id': row_ids})
output = output.unstack('deep features')
max_row_id = len(audio_data)
missing_ids = set(range(max_row_id)) - set(output['row id'].unique())
if len(missing_ids) != 0:
empty_rows = _tc.SFrame({'List of deep features': [ [] for _ in range(len(missing_ids)) ],
'row id': missing_ids})
output = output.append(empty_rows)
output = output.sort('row id')
return output['List of deep features'] | [
"def",
"get_deep_features",
"(",
"self",
",",
"audio_data",
",",
"verbose",
")",
":",
"preprocessed_data",
",",
"row_ids",
"=",
"self",
".",
"_preprocess_data",
"(",
"audio_data",
",",
"verbose",
")",
"deep_features",
"=",
"self",
".",
"_extract_features",
"(",
"preprocessed_data",
",",
"verbose",
")",
"output",
"=",
"_tc",
".",
"SFrame",
"(",
"{",
"'deep features'",
":",
"deep_features",
",",
"'row id'",
":",
"row_ids",
"}",
")",
"output",
"=",
"output",
".",
"unstack",
"(",
"'deep features'",
")",
"max_row_id",
"=",
"len",
"(",
"audio_data",
")",
"missing_ids",
"=",
"set",
"(",
"range",
"(",
"max_row_id",
")",
")",
"-",
"set",
"(",
"output",
"[",
"'row id'",
"]",
".",
"unique",
"(",
")",
")",
"if",
"len",
"(",
"missing_ids",
")",
"!=",
"0",
":",
"empty_rows",
"=",
"_tc",
".",
"SFrame",
"(",
"{",
"'List of deep features'",
":",
"[",
"[",
"]",
"for",
"_",
"in",
"range",
"(",
"len",
"(",
"missing_ids",
")",
")",
"]",
",",
"'row id'",
":",
"missing_ids",
"}",
")",
"output",
"=",
"output",
".",
"append",
"(",
"empty_rows",
")",
"output",
"=",
"output",
".",
"sort",
"(",
"'row id'",
")",
"return",
"output",
"[",
"'List of deep features'",
"]"
] | Performs both audio preprocessing and VGGish deep feature extraction. | [
"Performs",
"both",
"audio",
"preprocessing",
"and",
"VGGish",
"deep",
"feature",
"extraction",
"."
] | 74514c3f99e25b46f22c6e02977fe3da69221c2e | https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/unity/python/turicreate/toolkits/sound_classifier/_audio_feature_extractor.py#L172-L190 |
28,909 | apple/turicreate | src/unity/python/turicreate/toolkits/sound_classifier/_audio_feature_extractor.py | VGGishFeatureExtractor.get_spec | def get_spec(self):
"""
Return the Core ML spec
"""
if _mac_ver() >= (10, 14):
return self.vggish_model.get_spec()
else:
vggish_model_file = VGGish()
coreml_model_path = vggish_model_file.get_model_path(format='coreml')
return MLModel(coreml_model_path).get_spec() | python | def get_spec(self):
"""
Return the Core ML spec
"""
if _mac_ver() >= (10, 14):
return self.vggish_model.get_spec()
else:
vggish_model_file = VGGish()
coreml_model_path = vggish_model_file.get_model_path(format='coreml')
return MLModel(coreml_model_path).get_spec() | [
"def",
"get_spec",
"(",
"self",
")",
":",
"if",
"_mac_ver",
"(",
")",
">=",
"(",
"10",
",",
"14",
")",
":",
"return",
"self",
".",
"vggish_model",
".",
"get_spec",
"(",
")",
"else",
":",
"vggish_model_file",
"=",
"VGGish",
"(",
")",
"coreml_model_path",
"=",
"vggish_model_file",
".",
"get_model_path",
"(",
"format",
"=",
"'coreml'",
")",
"return",
"MLModel",
"(",
"coreml_model_path",
")",
".",
"get_spec",
"(",
")"
] | Return the Core ML spec | [
"Return",
"the",
"Core",
"ML",
"spec"
] | 74514c3f99e25b46f22c6e02977fe3da69221c2e | https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/unity/python/turicreate/toolkits/sound_classifier/_audio_feature_extractor.py#L192-L201 |
28,910 | apple/turicreate | deps/src/boost_1_68_0/tools/build/src/util/__init__.py | value_to_jam | def value_to_jam(value, methods=False):
"""Makes a token to refer to a Python value inside Jam language code.
The token is merely a string that can be passed around in Jam code and
eventually passed back. For example, we might want to pass PropertySet
instance to a tag function and it might eventually call back
to virtual_target.add_suffix_and_prefix, passing the same instance.
For values that are classes, we'll also make class methods callable
from Jam.
Note that this is necessary to make a bit more of existing Jamfiles work.
This trick should not be used to much, or else the performance benefits of
Python port will be eaten.
"""
global __value_id
r = __python_to_jam.get(value, None)
if r:
return r
exported_name = '###_' + str(__value_id)
__value_id = __value_id + 1
__python_to_jam[value] = exported_name
__jam_to_python[exported_name] = value
if methods and type(value) == types.InstanceType:
for field_name in dir(value):
field = getattr(value, field_name)
if callable(field) and not field_name.startswith("__"):
bjam.import_rule("", exported_name + "." + field_name, field)
return exported_name | python | def value_to_jam(value, methods=False):
"""Makes a token to refer to a Python value inside Jam language code.
The token is merely a string that can be passed around in Jam code and
eventually passed back. For example, we might want to pass PropertySet
instance to a tag function and it might eventually call back
to virtual_target.add_suffix_and_prefix, passing the same instance.
For values that are classes, we'll also make class methods callable
from Jam.
Note that this is necessary to make a bit more of existing Jamfiles work.
This trick should not be used to much, or else the performance benefits of
Python port will be eaten.
"""
global __value_id
r = __python_to_jam.get(value, None)
if r:
return r
exported_name = '###_' + str(__value_id)
__value_id = __value_id + 1
__python_to_jam[value] = exported_name
__jam_to_python[exported_name] = value
if methods and type(value) == types.InstanceType:
for field_name in dir(value):
field = getattr(value, field_name)
if callable(field) and not field_name.startswith("__"):
bjam.import_rule("", exported_name + "." + field_name, field)
return exported_name | [
"def",
"value_to_jam",
"(",
"value",
",",
"methods",
"=",
"False",
")",
":",
"global",
"__value_id",
"r",
"=",
"__python_to_jam",
".",
"get",
"(",
"value",
",",
"None",
")",
"if",
"r",
":",
"return",
"r",
"exported_name",
"=",
"'###_'",
"+",
"str",
"(",
"__value_id",
")",
"__value_id",
"=",
"__value_id",
"+",
"1",
"__python_to_jam",
"[",
"value",
"]",
"=",
"exported_name",
"__jam_to_python",
"[",
"exported_name",
"]",
"=",
"value",
"if",
"methods",
"and",
"type",
"(",
"value",
")",
"==",
"types",
".",
"InstanceType",
":",
"for",
"field_name",
"in",
"dir",
"(",
"value",
")",
":",
"field",
"=",
"getattr",
"(",
"value",
",",
"field_name",
")",
"if",
"callable",
"(",
"field",
")",
"and",
"not",
"field_name",
".",
"startswith",
"(",
"\"__\"",
")",
":",
"bjam",
".",
"import_rule",
"(",
"\"\"",
",",
"exported_name",
"+",
"\".\"",
"+",
"field_name",
",",
"field",
")",
"return",
"exported_name"
] | Makes a token to refer to a Python value inside Jam language code.
The token is merely a string that can be passed around in Jam code and
eventually passed back. For example, we might want to pass PropertySet
instance to a tag function and it might eventually call back
to virtual_target.add_suffix_and_prefix, passing the same instance.
For values that are classes, we'll also make class methods callable
from Jam.
Note that this is necessary to make a bit more of existing Jamfiles work.
This trick should not be used to much, or else the performance benefits of
Python port will be eaten. | [
"Makes",
"a",
"token",
"to",
"refer",
"to",
"a",
"Python",
"value",
"inside",
"Jam",
"language",
"code",
"."
] | 74514c3f99e25b46f22c6e02977fe3da69221c2e | https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/deps/src/boost_1_68_0/tools/build/src/util/__init__.py#L228-L261 |
28,911 | apple/turicreate | deps/src/boost_1_68_0/tools/build/src/util/__init__.py | abbreviate_dashed | def abbreviate_dashed(s):
"""Abbreviates each part of string that is delimited by a '-'."""
r = []
for part in s.split('-'):
r.append(abbreviate(part))
return '-'.join(r) | python | def abbreviate_dashed(s):
"""Abbreviates each part of string that is delimited by a '-'."""
r = []
for part in s.split('-'):
r.append(abbreviate(part))
return '-'.join(r) | [
"def",
"abbreviate_dashed",
"(",
"s",
")",
":",
"r",
"=",
"[",
"]",
"for",
"part",
"in",
"s",
".",
"split",
"(",
"'-'",
")",
":",
"r",
".",
"append",
"(",
"abbreviate",
"(",
"part",
")",
")",
"return",
"'-'",
".",
"join",
"(",
"r",
")"
] | Abbreviates each part of string that is delimited by a '-'. | [
"Abbreviates",
"each",
"part",
"of",
"string",
"that",
"is",
"delimited",
"by",
"a",
"-",
"."
] | 74514c3f99e25b46f22c6e02977fe3da69221c2e | https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/deps/src/boost_1_68_0/tools/build/src/util/__init__.py#L281-L286 |
28,912 | apple/turicreate | deps/src/boost_1_68_0/tools/build/src/util/__init__.py | abbreviate | def abbreviate(s):
"""Apply a set of standard transformations to string to produce an
abbreviation no more than 4 characters long.
"""
if not s:
return ''
# check the cache
if s in abbreviate.abbreviations:
return abbreviate.abbreviations[s]
# anything less than 4 characters doesn't need
# an abbreviation
if len(s) < 4:
# update cache
abbreviate.abbreviations[s] = s
return s
# save the first character in case it's a vowel
s1 = s[0]
s2 = s[1:]
if s.endswith('ing'):
# strip off the 'ing'
s2 = s2[:-3]
# reduce all doubled characters to one
s2 = ''.join(c for c, _ in groupby(s2))
# remove all vowels
s2 = s2.translate(None, "AEIOUaeiou")
# shorten remaining consonants to 4 characters
# and add the first char back to the front
s2 = s1 + s2[:4]
# update cache
abbreviate.abbreviations[s] = s2
return s2 | python | def abbreviate(s):
"""Apply a set of standard transformations to string to produce an
abbreviation no more than 4 characters long.
"""
if not s:
return ''
# check the cache
if s in abbreviate.abbreviations:
return abbreviate.abbreviations[s]
# anything less than 4 characters doesn't need
# an abbreviation
if len(s) < 4:
# update cache
abbreviate.abbreviations[s] = s
return s
# save the first character in case it's a vowel
s1 = s[0]
s2 = s[1:]
if s.endswith('ing'):
# strip off the 'ing'
s2 = s2[:-3]
# reduce all doubled characters to one
s2 = ''.join(c for c, _ in groupby(s2))
# remove all vowels
s2 = s2.translate(None, "AEIOUaeiou")
# shorten remaining consonants to 4 characters
# and add the first char back to the front
s2 = s1 + s2[:4]
# update cache
abbreviate.abbreviations[s] = s2
return s2 | [
"def",
"abbreviate",
"(",
"s",
")",
":",
"if",
"not",
"s",
":",
"return",
"''",
"# check the cache",
"if",
"s",
"in",
"abbreviate",
".",
"abbreviations",
":",
"return",
"abbreviate",
".",
"abbreviations",
"[",
"s",
"]",
"# anything less than 4 characters doesn't need",
"# an abbreviation",
"if",
"len",
"(",
"s",
")",
"<",
"4",
":",
"# update cache",
"abbreviate",
".",
"abbreviations",
"[",
"s",
"]",
"=",
"s",
"return",
"s",
"# save the first character in case it's a vowel",
"s1",
"=",
"s",
"[",
"0",
"]",
"s2",
"=",
"s",
"[",
"1",
":",
"]",
"if",
"s",
".",
"endswith",
"(",
"'ing'",
")",
":",
"# strip off the 'ing'",
"s2",
"=",
"s2",
"[",
":",
"-",
"3",
"]",
"# reduce all doubled characters to one",
"s2",
"=",
"''",
".",
"join",
"(",
"c",
"for",
"c",
",",
"_",
"in",
"groupby",
"(",
"s2",
")",
")",
"# remove all vowels",
"s2",
"=",
"s2",
".",
"translate",
"(",
"None",
",",
"\"AEIOUaeiou\"",
")",
"# shorten remaining consonants to 4 characters",
"# and add the first char back to the front",
"s2",
"=",
"s1",
"+",
"s2",
"[",
":",
"4",
"]",
"# update cache",
"abbreviate",
".",
"abbreviations",
"[",
"s",
"]",
"=",
"s2",
"return",
"s2"
] | Apply a set of standard transformations to string to produce an
abbreviation no more than 4 characters long. | [
"Apply",
"a",
"set",
"of",
"standard",
"transformations",
"to",
"string",
"to",
"produce",
"an",
"abbreviation",
"no",
"more",
"than",
"4",
"characters",
"long",
"."
] | 74514c3f99e25b46f22c6e02977fe3da69221c2e | https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/deps/src/boost_1_68_0/tools/build/src/util/__init__.py#L289-L319 |
28,913 | apple/turicreate | src/unity/python/turicreate/toolkits/_decision_tree.py | Node.get_decision | def get_decision(self, child, is_missing = False):
"""
Get the decision from this node to a child node.
Parameters
----------
child: Node
A child node of this node.
Returns
-------
dict: A dictionary that describes how to get from this node to the
child node.
"""
# Child does exist and there is a path to the child.
value = self.value
feature = self.split_feature_column
index = self.split_feature_index
if not is_missing:
if self.left_id == child.node_id:
if self.node_type in ["float", "integer"]:
sign = "<"
else:
sign = "="
else:
if self.node_type in ["float", "integer"]:
sign = ">="
else:
sign = "!="
else:
sign = "missing"
value = None
return {
"node_id" : self.node_id,
"node_type" : self.node_type,
"feature" : feature,
"index" : index,
"sign" : sign,
"value" : value,
"child_id" : child.node_id,
"is_missing" : is_missing
} | python | def get_decision(self, child, is_missing = False):
"""
Get the decision from this node to a child node.
Parameters
----------
child: Node
A child node of this node.
Returns
-------
dict: A dictionary that describes how to get from this node to the
child node.
"""
# Child does exist and there is a path to the child.
value = self.value
feature = self.split_feature_column
index = self.split_feature_index
if not is_missing:
if self.left_id == child.node_id:
if self.node_type in ["float", "integer"]:
sign = "<"
else:
sign = "="
else:
if self.node_type in ["float", "integer"]:
sign = ">="
else:
sign = "!="
else:
sign = "missing"
value = None
return {
"node_id" : self.node_id,
"node_type" : self.node_type,
"feature" : feature,
"index" : index,
"sign" : sign,
"value" : value,
"child_id" : child.node_id,
"is_missing" : is_missing
} | [
"def",
"get_decision",
"(",
"self",
",",
"child",
",",
"is_missing",
"=",
"False",
")",
":",
"# Child does exist and there is a path to the child.",
"value",
"=",
"self",
".",
"value",
"feature",
"=",
"self",
".",
"split_feature_column",
"index",
"=",
"self",
".",
"split_feature_index",
"if",
"not",
"is_missing",
":",
"if",
"self",
".",
"left_id",
"==",
"child",
".",
"node_id",
":",
"if",
"self",
".",
"node_type",
"in",
"[",
"\"float\"",
",",
"\"integer\"",
"]",
":",
"sign",
"=",
"\"<\"",
"else",
":",
"sign",
"=",
"\"=\"",
"else",
":",
"if",
"self",
".",
"node_type",
"in",
"[",
"\"float\"",
",",
"\"integer\"",
"]",
":",
"sign",
"=",
"\">=\"",
"else",
":",
"sign",
"=",
"\"!=\"",
"else",
":",
"sign",
"=",
"\"missing\"",
"value",
"=",
"None",
"return",
"{",
"\"node_id\"",
":",
"self",
".",
"node_id",
",",
"\"node_type\"",
":",
"self",
".",
"node_type",
",",
"\"feature\"",
":",
"feature",
",",
"\"index\"",
":",
"index",
",",
"\"sign\"",
":",
"sign",
",",
"\"value\"",
":",
"value",
",",
"\"child_id\"",
":",
"child",
".",
"node_id",
",",
"\"is_missing\"",
":",
"is_missing",
"}"
] | Get the decision from this node to a child node.
Parameters
----------
child: Node
A child node of this node.
Returns
-------
dict: A dictionary that describes how to get from this node to the
child node. | [
"Get",
"the",
"decision",
"from",
"this",
"node",
"to",
"a",
"child",
"node",
"."
] | 74514c3f99e25b46f22c6e02977fe3da69221c2e | https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/unity/python/turicreate/toolkits/_decision_tree.py#L80-L123 |
28,914 | apple/turicreate | src/unity/python/turicreate/toolkits/_decision_tree.py | Node.to_dict | def to_dict(self):
"""
Return the node as a dictionary.
Returns
-------
dict: All the attributes of this node as a dictionary (minus the left
and right).
"""
out = {}
for key in self.__dict__.keys():
if key not in ['left', 'right', 'missing', 'parent']:
out[key] = self.__dict__[key]
return out | python | def to_dict(self):
"""
Return the node as a dictionary.
Returns
-------
dict: All the attributes of this node as a dictionary (minus the left
and right).
"""
out = {}
for key in self.__dict__.keys():
if key not in ['left', 'right', 'missing', 'parent']:
out[key] = self.__dict__[key]
return out | [
"def",
"to_dict",
"(",
"self",
")",
":",
"out",
"=",
"{",
"}",
"for",
"key",
"in",
"self",
".",
"__dict__",
".",
"keys",
"(",
")",
":",
"if",
"key",
"not",
"in",
"[",
"'left'",
",",
"'right'",
",",
"'missing'",
",",
"'parent'",
"]",
":",
"out",
"[",
"key",
"]",
"=",
"self",
".",
"__dict__",
"[",
"key",
"]",
"return",
"out"
] | Return the node as a dictionary.
Returns
-------
dict: All the attributes of this node as a dictionary (minus the left
and right). | [
"Return",
"the",
"node",
"as",
"a",
"dictionary",
"."
] | 74514c3f99e25b46f22c6e02977fe3da69221c2e | https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/unity/python/turicreate/toolkits/_decision_tree.py#L125-L138 |
28,915 | apple/turicreate | src/unity/python/turicreate/toolkits/_decision_tree.py | DecisionTree.to_json | def to_json(self, root_id = 0, output = {}):
"""
Recursive function to dump this tree as a json blob.
Parameters
----------
root_id: Root id of the sub-tree
output: Carry over output from the previous sub-trees.
Returns
-------
dict: A tree in JSON format. Starts at the root node and recursively
represents each node in JSON.
- node_id : ID of the node.
- left_id : ID of left child (None if it doesn't exist).
- right_id : ID of right child (None if it doesn't exist).
- split_feature_column : Feature column on which a decision is made.
- split_feature_index : Feature index (within that column) on which the
decision is made.
- is_leaf : Is this node a leaf node?
- node_type : Node type (categorical, numerical, leaf etc.)
- value : Prediction (if leaf), decision split point
(if not leaf).
- left : JSON representation of the left node.
- right : JSON representation of the right node.
Examples
--------
.. sourcecode:: python
>>> tree.to_json() # Leaf node
{'is_leaf': False,
'left': {'is_leaf': True,
'left_id': None,
'node_id': 115,
'node_type': u'leaf',
'parent_id': 60,
'right_id': None,
'split_feature_column': None,
'split_feature_index': None,
'value': 0.436364},
'left_id': 115,
'node_id': 60,
'node_type': u'float',
'parent_id': 29,
'right': {'is_leaf': True,
'left_id': None,
'node_id': 116,
'node_type': u'leaf',
'parent_id': 60,
'right_id': None,
'split_feature_column': None,
'split_feature_index': None,
'value': -0.105882},
'right_id': 116,
'split_feature_column': 'Quantity_features_14',
'split_feature_index': 'count_sum',
'value': 22.5}
"""
_raise_error_if_not_of_type(root_id, [int,long], "root_id")
_numeric_param_check_range("root_id", root_id, 0, self.num_nodes - 1)
node = self.nodes[root_id]
output = node.to_dict()
if node.left_id is not None:
j = node.left_id
output['left'] = self.to_json(j, output)
if node.right_id is not None:
j = node.right_id
output['right'] = self.to_json(j, output)
return output | python | def to_json(self, root_id = 0, output = {}):
"""
Recursive function to dump this tree as a json blob.
Parameters
----------
root_id: Root id of the sub-tree
output: Carry over output from the previous sub-trees.
Returns
-------
dict: A tree in JSON format. Starts at the root node and recursively
represents each node in JSON.
- node_id : ID of the node.
- left_id : ID of left child (None if it doesn't exist).
- right_id : ID of right child (None if it doesn't exist).
- split_feature_column : Feature column on which a decision is made.
- split_feature_index : Feature index (within that column) on which the
decision is made.
- is_leaf : Is this node a leaf node?
- node_type : Node type (categorical, numerical, leaf etc.)
- value : Prediction (if leaf), decision split point
(if not leaf).
- left : JSON representation of the left node.
- right : JSON representation of the right node.
Examples
--------
.. sourcecode:: python
>>> tree.to_json() # Leaf node
{'is_leaf': False,
'left': {'is_leaf': True,
'left_id': None,
'node_id': 115,
'node_type': u'leaf',
'parent_id': 60,
'right_id': None,
'split_feature_column': None,
'split_feature_index': None,
'value': 0.436364},
'left_id': 115,
'node_id': 60,
'node_type': u'float',
'parent_id': 29,
'right': {'is_leaf': True,
'left_id': None,
'node_id': 116,
'node_type': u'leaf',
'parent_id': 60,
'right_id': None,
'split_feature_column': None,
'split_feature_index': None,
'value': -0.105882},
'right_id': 116,
'split_feature_column': 'Quantity_features_14',
'split_feature_index': 'count_sum',
'value': 22.5}
"""
_raise_error_if_not_of_type(root_id, [int,long], "root_id")
_numeric_param_check_range("root_id", root_id, 0, self.num_nodes - 1)
node = self.nodes[root_id]
output = node.to_dict()
if node.left_id is not None:
j = node.left_id
output['left'] = self.to_json(j, output)
if node.right_id is not None:
j = node.right_id
output['right'] = self.to_json(j, output)
return output | [
"def",
"to_json",
"(",
"self",
",",
"root_id",
"=",
"0",
",",
"output",
"=",
"{",
"}",
")",
":",
"_raise_error_if_not_of_type",
"(",
"root_id",
",",
"[",
"int",
",",
"long",
"]",
",",
"\"root_id\"",
")",
"_numeric_param_check_range",
"(",
"\"root_id\"",
",",
"root_id",
",",
"0",
",",
"self",
".",
"num_nodes",
"-",
"1",
")",
"node",
"=",
"self",
".",
"nodes",
"[",
"root_id",
"]",
"output",
"=",
"node",
".",
"to_dict",
"(",
")",
"if",
"node",
".",
"left_id",
"is",
"not",
"None",
":",
"j",
"=",
"node",
".",
"left_id",
"output",
"[",
"'left'",
"]",
"=",
"self",
".",
"to_json",
"(",
"j",
",",
"output",
")",
"if",
"node",
".",
"right_id",
"is",
"not",
"None",
":",
"j",
"=",
"node",
".",
"right_id",
"output",
"[",
"'right'",
"]",
"=",
"self",
".",
"to_json",
"(",
"j",
",",
"output",
")",
"return",
"output"
] | Recursive function to dump this tree as a json blob.
Parameters
----------
root_id: Root id of the sub-tree
output: Carry over output from the previous sub-trees.
Returns
-------
dict: A tree in JSON format. Starts at the root node and recursively
represents each node in JSON.
- node_id : ID of the node.
- left_id : ID of left child (None if it doesn't exist).
- right_id : ID of right child (None if it doesn't exist).
- split_feature_column : Feature column on which a decision is made.
- split_feature_index : Feature index (within that column) on which the
decision is made.
- is_leaf : Is this node a leaf node?
- node_type : Node type (categorical, numerical, leaf etc.)
- value : Prediction (if leaf), decision split point
(if not leaf).
- left : JSON representation of the left node.
- right : JSON representation of the right node.
Examples
--------
.. sourcecode:: python
>>> tree.to_json() # Leaf node
{'is_leaf': False,
'left': {'is_leaf': True,
'left_id': None,
'node_id': 115,
'node_type': u'leaf',
'parent_id': 60,
'right_id': None,
'split_feature_column': None,
'split_feature_index': None,
'value': 0.436364},
'left_id': 115,
'node_id': 60,
'node_type': u'float',
'parent_id': 29,
'right': {'is_leaf': True,
'left_id': None,
'node_id': 116,
'node_type': u'leaf',
'parent_id': 60,
'right_id': None,
'split_feature_column': None,
'split_feature_index': None,
'value': -0.105882},
'right_id': 116,
'split_feature_column': 'Quantity_features_14',
'split_feature_index': 'count_sum',
'value': 22.5} | [
"Recursive",
"function",
"to",
"dump",
"this",
"tree",
"as",
"a",
"json",
"blob",
"."
] | 74514c3f99e25b46f22c6e02977fe3da69221c2e | https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/unity/python/turicreate/toolkits/_decision_tree.py#L300-L371 |
28,916 | apple/turicreate | src/unity/python/turicreate/toolkits/_decision_tree.py | DecisionTree.get_prediction_path | def get_prediction_path(self, node_id, missing_id = []):
"""
Return the prediction path from this node to the parent node.
Parameters
----------
node_id : id of the node to get the prediction path.
missing_id : Additional info that contains nodes with missing features.
Returns
-------
list: The list of decisions (top to bottom) from the root to this node.
Examples
--------
.. sourcecode:: python
>>> tree.get_prediction_score(5) # Any node
[{'child_id': 2,
'feature': 'Quantity_features_90',
'index': 'sum_timegaplast_gap',
'node_id': 0,
'sign': '>',
'value': 53.5},
{'child_id': 5,
'feature': 'Quantity_features_90',
'index': 'sum_sum',
'node_id': 2,
'sign': '<=',
'value': 146.5}]
"""
_raise_error_if_not_of_type(node_id, [int,long], "node_id")
_numeric_param_check_range("node_id", node_id, 0, self.num_nodes - 1)
def _deduplicate_path(path):
s_nodes = {} # super_nodes
s_path = [] # paths of super nodes.
for node in path:
feature = node['feature']
index = node['index']
if (feature, index) not in s_nodes:
s_nodes[feature, index] = node
s_path.append(node)
else:
s_node = s_nodes[feature, index]
s_sign = s_node['sign']
sign = node['sign']
value = node['value']
# Supernode has no range.
if s_sign == "<":
if sign == ">=":
s_node["value"] = [value, s_node["value"]]
s_node["sign"] = "in"
elif sign == "<":
s_node["value"] = value
elif s_sign == ">=":
if sign == ">=":
s_node["value"] = value
elif sign == "<":
s_node["value"] = [s_node["value"], value]
s_node["sign"] = "in"
# Supernode has a range.
elif s_sign == "in":
if sign == ">=":
s_node["value"][0] = value
elif sign == "<":
s_node["value"][1] = value
# Return super node path.
return s_path
path = []
node = self.nodes[node_id]
while node.parent is not None:
parent = node.parent
is_missing = node.node_id in missing_id
path.insert(0, parent.get_decision(node, is_missing))
node = node.parent
return _deduplicate_path(path) | python | def get_prediction_path(self, node_id, missing_id = []):
"""
Return the prediction path from this node to the parent node.
Parameters
----------
node_id : id of the node to get the prediction path.
missing_id : Additional info that contains nodes with missing features.
Returns
-------
list: The list of decisions (top to bottom) from the root to this node.
Examples
--------
.. sourcecode:: python
>>> tree.get_prediction_score(5) # Any node
[{'child_id': 2,
'feature': 'Quantity_features_90',
'index': 'sum_timegaplast_gap',
'node_id': 0,
'sign': '>',
'value': 53.5},
{'child_id': 5,
'feature': 'Quantity_features_90',
'index': 'sum_sum',
'node_id': 2,
'sign': '<=',
'value': 146.5}]
"""
_raise_error_if_not_of_type(node_id, [int,long], "node_id")
_numeric_param_check_range("node_id", node_id, 0, self.num_nodes - 1)
def _deduplicate_path(path):
s_nodes = {} # super_nodes
s_path = [] # paths of super nodes.
for node in path:
feature = node['feature']
index = node['index']
if (feature, index) not in s_nodes:
s_nodes[feature, index] = node
s_path.append(node)
else:
s_node = s_nodes[feature, index]
s_sign = s_node['sign']
sign = node['sign']
value = node['value']
# Supernode has no range.
if s_sign == "<":
if sign == ">=":
s_node["value"] = [value, s_node["value"]]
s_node["sign"] = "in"
elif sign == "<":
s_node["value"] = value
elif s_sign == ">=":
if sign == ">=":
s_node["value"] = value
elif sign == "<":
s_node["value"] = [s_node["value"], value]
s_node["sign"] = "in"
# Supernode has a range.
elif s_sign == "in":
if sign == ">=":
s_node["value"][0] = value
elif sign == "<":
s_node["value"][1] = value
# Return super node path.
return s_path
path = []
node = self.nodes[node_id]
while node.parent is not None:
parent = node.parent
is_missing = node.node_id in missing_id
path.insert(0, parent.get_decision(node, is_missing))
node = node.parent
return _deduplicate_path(path) | [
"def",
"get_prediction_path",
"(",
"self",
",",
"node_id",
",",
"missing_id",
"=",
"[",
"]",
")",
":",
"_raise_error_if_not_of_type",
"(",
"node_id",
",",
"[",
"int",
",",
"long",
"]",
",",
"\"node_id\"",
")",
"_numeric_param_check_range",
"(",
"\"node_id\"",
",",
"node_id",
",",
"0",
",",
"self",
".",
"num_nodes",
"-",
"1",
")",
"def",
"_deduplicate_path",
"(",
"path",
")",
":",
"s_nodes",
"=",
"{",
"}",
"# super_nodes",
"s_path",
"=",
"[",
"]",
"# paths of super nodes.",
"for",
"node",
"in",
"path",
":",
"feature",
"=",
"node",
"[",
"'feature'",
"]",
"index",
"=",
"node",
"[",
"'index'",
"]",
"if",
"(",
"feature",
",",
"index",
")",
"not",
"in",
"s_nodes",
":",
"s_nodes",
"[",
"feature",
",",
"index",
"]",
"=",
"node",
"s_path",
".",
"append",
"(",
"node",
")",
"else",
":",
"s_node",
"=",
"s_nodes",
"[",
"feature",
",",
"index",
"]",
"s_sign",
"=",
"s_node",
"[",
"'sign'",
"]",
"sign",
"=",
"node",
"[",
"'sign'",
"]",
"value",
"=",
"node",
"[",
"'value'",
"]",
"# Supernode has no range.",
"if",
"s_sign",
"==",
"\"<\"",
":",
"if",
"sign",
"==",
"\">=\"",
":",
"s_node",
"[",
"\"value\"",
"]",
"=",
"[",
"value",
",",
"s_node",
"[",
"\"value\"",
"]",
"]",
"s_node",
"[",
"\"sign\"",
"]",
"=",
"\"in\"",
"elif",
"sign",
"==",
"\"<\"",
":",
"s_node",
"[",
"\"value\"",
"]",
"=",
"value",
"elif",
"s_sign",
"==",
"\">=\"",
":",
"if",
"sign",
"==",
"\">=\"",
":",
"s_node",
"[",
"\"value\"",
"]",
"=",
"value",
"elif",
"sign",
"==",
"\"<\"",
":",
"s_node",
"[",
"\"value\"",
"]",
"=",
"[",
"s_node",
"[",
"\"value\"",
"]",
",",
"value",
"]",
"s_node",
"[",
"\"sign\"",
"]",
"=",
"\"in\"",
"# Supernode has a range.",
"elif",
"s_sign",
"==",
"\"in\"",
":",
"if",
"sign",
"==",
"\">=\"",
":",
"s_node",
"[",
"\"value\"",
"]",
"[",
"0",
"]",
"=",
"value",
"elif",
"sign",
"==",
"\"<\"",
":",
"s_node",
"[",
"\"value\"",
"]",
"[",
"1",
"]",
"=",
"value",
"# Return super node path.",
"return",
"s_path",
"path",
"=",
"[",
"]",
"node",
"=",
"self",
".",
"nodes",
"[",
"node_id",
"]",
"while",
"node",
".",
"parent",
"is",
"not",
"None",
":",
"parent",
"=",
"node",
".",
"parent",
"is_missing",
"=",
"node",
".",
"node_id",
"in",
"missing_id",
"path",
".",
"insert",
"(",
"0",
",",
"parent",
".",
"get_decision",
"(",
"node",
",",
"is_missing",
")",
")",
"node",
"=",
"node",
".",
"parent",
"return",
"_deduplicate_path",
"(",
"path",
")"
] | Return the prediction path from this node to the parent node.
Parameters
----------
node_id : id of the node to get the prediction path.
missing_id : Additional info that contains nodes with missing features.
Returns
-------
list: The list of decisions (top to bottom) from the root to this node.
Examples
--------
.. sourcecode:: python
>>> tree.get_prediction_score(5) # Any node
[{'child_id': 2,
'feature': 'Quantity_features_90',
'index': 'sum_timegaplast_gap',
'node_id': 0,
'sign': '>',
'value': 53.5},
{'child_id': 5,
'feature': 'Quantity_features_90',
'index': 'sum_sum',
'node_id': 2,
'sign': '<=',
'value': 146.5}] | [
"Return",
"the",
"prediction",
"path",
"from",
"this",
"node",
"to",
"the",
"parent",
"node",
"."
] | 74514c3f99e25b46f22c6e02977fe3da69221c2e | https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/unity/python/turicreate/toolkits/_decision_tree.py#L403-L484 |
28,917 | apple/turicreate | src/unity/python/turicreate/toolkits/graph_analytics/label_propagation.py | create | def create(graph, label_field,
threshold=1e-3,
weight_field='',
self_weight=1.0,
undirected=False,
max_iterations=None,
_single_precision=False,
_distributed='auto',
verbose=True):
"""
Given a weighted graph with observed class labels of a subset of vertices,
infer the label probability for the unobserved vertices using the
"label propagation" algorithm.
The algorithm iteratively updates the label probability of current vertex
as a weighted sum of label probability of self and the neighboring vertices
until converge. See
:class:`turicreate.label_propagation.LabelPropagationModel` for the details
of the algorithm.
Notes: label propagation works well with small number of labels, i.e. binary
labels, or less than 1000 classes. The toolkit will throw error
if the number of classes exceeds the maximum value (1000).
Parameters
----------
graph : SGraph
The graph on which to compute the label propagation.
label_field: str
Vertex field storing the initial vertex labels. The values in
must be [0, num_classes). None values indicate unobserved vertex labels.
threshold : float, optional
Threshold for convergence, measured in the average L2 norm
(the sum of squared values) of the delta of each vertex's
label probability vector.
max_iterations: int, optional
The max number of iterations to run. Default is unlimited.
If set, the algorithm terminates when either max_iterations
or convergence threshold is reached.
weight_field: str, optional
Vertex field for edge weight. If empty, all edges are assumed
to have unit weight.
self_weight: float, optional
The weight for self edge.
undirected: bool, optional
If true, treat each edge as undirected, and propagates label in
both directions.
_single_precision : bool, optional
If true, running label propagation in single precision. The resulting
probability values may less accurate, but should run faster
and use less memory.
_distributed : distributed environment, internal
verbose : bool, optional
If True, print progress updates.
Returns
-------
out : LabelPropagationModel
References
----------
- Zhu, X., & Ghahramani, Z. (2002). `Learning from labeled and unlabeled data
with label propagation <http://www.cs.cmu.edu/~zhuxj/pub/CMU-CALD-02-107.pdf>`_.
Examples
--------
If given an :class:`~turicreate.SGraph` ``g``, we can create
a :class:`~turicreate.label_propagation.LabelPropagationModel` as follows:
>>> g = turicreate.load_sgraph('http://snap.stanford.edu/data/email-Enron.txt.gz',
... format='snap')
# Initialize random classes for a subset of vertices
# Leave the unobserved vertices with None label.
>>> import random
>>> def init_label(vid):
... x = random.random()
... if x < 0.2:
... return 0
... elif x > 0.9:
... return 1
... else:
... return None
>>> g.vertices['label'] = g.vertices['__id'].apply(init_label, int)
>>> m = turicreate.label_propagation.create(g, label_field='label')
We can obtain for each vertex the predicted label and the probability of
each label in the graph ``g`` using:
>>> labels = m['labels'] # SFrame
>>> labels
+------+-------+-----------------+-------------------+----------------+
| __id | label | predicted_label | P0 | P1 |
+------+-------+-----------------+-------------------+----------------+
| 5 | 1 | 1 | 0.0 | 1.0 |
| 7 | None | 0 | 0.8213214997 | 0.1786785003 |
| 8 | None | 1 | 5.96046447754e-08 | 0.999999940395 |
| 10 | None | 0 | 0.534984718273 | 0.465015281727 |
| 27 | None | 0 | 0.752801638549 | 0.247198361451 |
| 29 | None | 1 | 5.96046447754e-08 | 0.999999940395 |
| 33 | None | 1 | 5.96046447754e-08 | 0.999999940395 |
| 47 | 0 | 0 | 1.0 | 0.0 |
| 50 | None | 0 | 0.788279032657 | 0.211720967343 |
| 52 | None | 0 | 0.666666666667 | 0.333333333333 |
+------+-------+-----------------+-------------------+----------------+
[36692 rows x 5 columns]
See Also
--------
LabelPropagationModel
"""
from turicreate._cython.cy_server import QuietProgress
_raise_error_if_not_of_type(label_field, str)
_raise_error_if_not_of_type(weight_field, str)
if not isinstance(graph, _SGraph):
raise TypeError('graph input must be a SGraph object.')
if graph.vertices[label_field].dtype != int:
raise TypeError('label_field %s must be integer typed.' % label_field)
opts = {'label_field': label_field,
'threshold': threshold,
'weight_field': weight_field,
'self_weight': self_weight,
'undirected': undirected,
'max_iterations': max_iterations,
'single_precision': _single_precision,
'graph': graph.__proxy__}
with QuietProgress(verbose):
params = _tc.extensions._toolkits.graph.label_propagation.create(opts)
model = params['model']
def create(graph, label_field,
           threshold=1e-3,
           weight_field='',
           self_weight=1.0,
           undirected=False,
           max_iterations=None,
           _single_precision=False,
           _distributed='auto',
           verbose=True):
    """
    Infer label probabilities for the unobserved vertices of a weighted
    graph using the "label propagation" algorithm.

    Starting from the observed class labels on a subset of vertices, the
    algorithm iteratively updates each vertex's label probability as a
    weighted sum of its own and its neighbors' label probabilities until
    convergence. See
    :class:`turicreate.label_propagation.LabelPropagationModel` for the
    details of the algorithm.

    Notes: label propagation works well with a small number of labels,
    i.e. binary labels, or fewer than 1000 classes. The toolkit will throw
    an error if the number of classes exceeds the maximum value (1000).

    Parameters
    ----------
    graph : SGraph
        The graph on which to compute the label propagation.

    label_field : str
        Vertex field storing the initial vertex labels. The values must be
        in [0, num_classes). None values indicate unobserved vertex labels.

    threshold : float, optional
        Threshold for convergence, measured in the average L2 norm
        (the sum of squared values) of the delta of each vertex's
        label probability vector.

    max_iterations : int, optional
        The max number of iterations to run. Default is unlimited.
        If set, the algorithm terminates when either max_iterations
        or the convergence threshold is reached.

    weight_field : str, optional
        Vertex field for edge weight. If empty, all edges are assumed
        to have unit weight.

    self_weight : float, optional
        The weight for the self edge.

    undirected : bool, optional
        If true, treat each edge as undirected, and propagate labels in
        both directions.

    _single_precision : bool, optional
        If true, run label propagation in single precision. The resulting
        probability values may be less accurate, but should run faster
        and use less memory.

    _distributed : distributed environment, internal

    verbose : bool, optional
        If True, print progress updates.

    Returns
    -------
    out : LabelPropagationModel

    References
    ----------
    - Zhu, X., & Ghahramani, Z. (2002). `Learning from labeled and
      unlabeled data with label propagation
      <http://www.cs.cmu.edu/~zhuxj/pub/CMU-CALD-02-107.pdf>`_.

    Examples
    --------
    If given an :class:`~turicreate.SGraph` ``g``, we can create a
    :class:`~turicreate.label_propagation.LabelPropagationModel` as follows:

    >>> g = turicreate.load_sgraph(
    ...     'http://snap.stanford.edu/data/email-Enron.txt.gz', format='snap')

    Initialize random classes for a subset of vertices, leaving the
    unobserved vertices with a None label:

    >>> import random
    >>> def init_label(vid):
    ...     x = random.random()
    ...     if x < 0.2:
    ...         return 0
    ...     elif x > 0.9:
    ...         return 1
    ...     else:
    ...         return None
    >>> g.vertices['label'] = g.vertices['__id'].apply(init_label, int)
    >>> m = turicreate.label_propagation.create(g, label_field='label')

    The returned model's ``labels`` SFrame holds, per vertex, the
    predicted label and the probability of each class.

    See Also
    --------
    LabelPropagationModel
    """
    from turicreate._cython.cy_server import QuietProgress

    # Validate argument types before handing the work to the toolkit.
    _raise_error_if_not_of_type(label_field, str)
    _raise_error_if_not_of_type(weight_field, str)
    if not isinstance(graph, _SGraph):
        raise TypeError('graph input must be a SGraph object.')
    if graph.vertices[label_field].dtype != int:
        raise TypeError('label_field %s must be integer typed.' % label_field)

    # Bundle all options for the C++ implementation.
    opts = {'label_field': label_field,
            'threshold': threshold,
            'weight_field': weight_field,
            'self_weight': self_weight,
            'undirected': undirected,
            'max_iterations': max_iterations,
            'single_precision': _single_precision,
            'graph': graph.__proxy__}

    with QuietProgress(verbose):
        params = _tc.extensions._toolkits.graph.label_propagation.create(opts)
    model = params['model']
    return LabelPropagationModel(model)
"def",
"create",
"(",
"graph",
",",
"label_field",
",",
"threshold",
"=",
"1e-3",
",",
"weight_field",
"=",
"''",
",",
"self_weight",
"=",
"1.0",
",",
"undirected",
"=",
"False",
",",
"max_iterations",
"=",
"None",
",",
"_single_precision",
"=",
"False",
",",
"_distributed",
"=",
"'auto'",
",",
"verbose",
"=",
"True",
")",
":",
"from",
"turicreate",
".",
"_cython",
".",
"cy_server",
"import",
"QuietProgress",
"_raise_error_if_not_of_type",
"(",
"label_field",
",",
"str",
")",
"_raise_error_if_not_of_type",
"(",
"weight_field",
",",
"str",
")",
"if",
"not",
"isinstance",
"(",
"graph",
",",
"_SGraph",
")",
":",
"raise",
"TypeError",
"(",
"'graph input must be a SGraph object.'",
")",
"if",
"graph",
".",
"vertices",
"[",
"label_field",
"]",
".",
"dtype",
"!=",
"int",
":",
"raise",
"TypeError",
"(",
"'label_field %s must be integer typed.'",
"%",
"label_field",
")",
"opts",
"=",
"{",
"'label_field'",
":",
"label_field",
",",
"'threshold'",
":",
"threshold",
",",
"'weight_field'",
":",
"weight_field",
",",
"'self_weight'",
":",
"self_weight",
",",
"'undirected'",
":",
"undirected",
",",
"'max_iterations'",
":",
"max_iterations",
",",
"'single_precision'",
":",
"_single_precision",
",",
"'graph'",
":",
"graph",
".",
"__proxy__",
"}",
"with",
"QuietProgress",
"(",
"verbose",
")",
":",
"params",
"=",
"_tc",
".",
"extensions",
".",
"_toolkits",
".",
"graph",
".",
"label_propagation",
".",
"create",
"(",
"opts",
")",
"model",
"=",
"params",
"[",
"'model'",
"]",
"return",
"LabelPropagationModel",
"(",
"model",
")"
] | Given a weighted graph with observed class labels of a subset of vertices,
infer the label probability for the unobserved vertices using the
"label propagation" algorithm.
The algorithm iteratively updates the label probability of current vertex
as a weighted sum of label probability of self and the neighboring vertices
until converge. See
:class:`turicreate.label_propagation.LabelPropagationModel` for the details
of the algorithm.
Notes: label propagation works well with small number of labels, i.e. binary
labels, or less than 1000 classes. The toolkit will throw error
if the number of classes exceeds the maximum value (1000).
Parameters
----------
graph : SGraph
The graph on which to compute the label propagation.
label_field: str
Vertex field storing the initial vertex labels. The values in
must be [0, num_classes). None values indicate unobserved vertex labels.
threshold : float, optional
Threshold for convergence, measured in the average L2 norm
(the sum of squared values) of the delta of each vertex's
label probability vector.
max_iterations: int, optional
The max number of iterations to run. Default is unlimited.
If set, the algorithm terminates when either max_iterations
or convergence threshold is reached.
weight_field: str, optional
Vertex field for edge weight. If empty, all edges are assumed
to have unit weight.
self_weight: float, optional
The weight for self edge.
undirected: bool, optional
If true, treat each edge as undirected, and propagates label in
both directions.
_single_precision : bool, optional
If true, running label propagation in single precision. The resulting
probability values may be less accurate, but should run faster
and use less memory.
_distributed : distributed environment, internal
verbose : bool, optional
If True, print progress updates.
Returns
-------
out : LabelPropagationModel
References
----------
- Zhu, X., & Ghahramani, Z. (2002). `Learning from labeled and unlabeled data
with label propagation <http://www.cs.cmu.edu/~zhuxj/pub/CMU-CALD-02-107.pdf>`_.
Examples
--------
If given an :class:`~turicreate.SGraph` ``g``, we can create
a :class:`~turicreate.label_propagation.LabelPropagationModel` as follows:
>>> g = turicreate.load_sgraph('http://snap.stanford.edu/data/email-Enron.txt.gz',
... format='snap')
# Initialize random classes for a subset of vertices
# Leave the unobserved vertices with None label.
>>> import random
>>> def init_label(vid):
... x = random.random()
... if x < 0.2:
... return 0
... elif x > 0.9:
... return 1
... else:
... return None
>>> g.vertices['label'] = g.vertices['__id'].apply(init_label, int)
>>> m = turicreate.label_propagation.create(g, label_field='label')
We can obtain for each vertex the predicted label and the probability of
each label in the graph ``g`` using:
>>> labels = m['labels'] # SFrame
>>> labels
+------+-------+-----------------+-------------------+----------------+
| __id | label | predicted_label | P0 | P1 |
+------+-------+-----------------+-------------------+----------------+
| 5 | 1 | 1 | 0.0 | 1.0 |
| 7 | None | 0 | 0.8213214997 | 0.1786785003 |
| 8 | None | 1 | 5.96046447754e-08 | 0.999999940395 |
| 10 | None | 0 | 0.534984718273 | 0.465015281727 |
| 27 | None | 0 | 0.752801638549 | 0.247198361451 |
| 29 | None | 1 | 5.96046447754e-08 | 0.999999940395 |
| 33 | None | 1 | 5.96046447754e-08 | 0.999999940395 |
| 47 | 0 | 0 | 1.0 | 0.0 |
| 50 | None | 0 | 0.788279032657 | 0.211720967343 |
| 52 | None | 0 | 0.666666666667 | 0.333333333333 |
+------+-------+-----------------+-------------------+----------------+
[36692 rows x 5 columns]
See Also
--------
LabelPropagationModel | [
"Given",
"a",
"weighted",
"graph",
"with",
"observed",
"class",
"labels",
"of",
"a",
"subset",
"of",
"vertices",
"infer",
"the",
"label",
"probability",
"for",
"the",
"unobserved",
"vertices",
"using",
"the",
"label",
"propagation",
"algorithm",
"."
] | 74514c3f99e25b46f22c6e02977fe3da69221c2e | https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/unity/python/turicreate/toolkits/graph_analytics/label_propagation.py#L131-L274 |
28,918 | apple/turicreate | src/unity/python/turicreate/_gl_pickle.py | _is_not_pickle_safe_gl_model_class | def _is_not_pickle_safe_gl_model_class(obj_class):
"""
Check if a Turi create model is pickle safe.
The function does it by checking that _CustomModel is the base class.
Parameters
----------
obj_class : Class to be checked.
Returns
----------
True if the GLC class is a model and is pickle safe.
"""
if issubclass(obj_class, _toolkits._model.CustomModel):
return not obj_class._is_gl_pickle_safe()
def _is_not_pickle_safe_gl_model_class(obj_class):
    """
    Check whether a Turi Create model class is unsafe for plain pickling.

    A class counts as a GLC model when it derives from
    ``_toolkits._model.CustomModel``; for those classes the class's own
    ``_is_gl_pickle_safe`` flag decides the answer.

    Parameters
    ----------
    obj_class : type
        Class to be checked.

    Returns
    -------
    bool
        True when the class is a GLC model that is NOT pickle safe.
    """
    # Anything that is not a CustomModel subclass is handled by plain pickle.
    if not issubclass(obj_class, _toolkits._model.CustomModel):
        return False
    return not obj_class._is_gl_pickle_safe()
"def",
"_is_not_pickle_safe_gl_model_class",
"(",
"obj_class",
")",
":",
"if",
"issubclass",
"(",
"obj_class",
",",
"_toolkits",
".",
"_model",
".",
"CustomModel",
")",
":",
"return",
"not",
"obj_class",
".",
"_is_gl_pickle_safe",
"(",
")",
"return",
"False"
] | Check if a Turi create model is pickle safe.
The function does it by checking that _CustomModel is the base class.
Parameters
----------
obj_class : Class to be checked.
Returns
----------
True if the GLC class is a model and is pickle safe. | [
"Check",
"if",
"a",
"Turi",
"create",
"model",
"is",
"pickle",
"safe",
"."
] | 74514c3f99e25b46f22c6e02977fe3da69221c2e | https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/unity/python/turicreate/_gl_pickle.py#L33-L50 |
28,919 | apple/turicreate | src/unity/python/turicreate/_gl_pickle.py | _is_not_pickle_safe_gl_class | def _is_not_pickle_safe_gl_class(obj_class):
"""
Check if class is a Turi create model.
The function does it by checking the method resolution order (MRO) of the
class and verifies that _Model is the base class.
Parameters
----------
obj_class : Class to be checked.
Returns
----------
True if the class is a GLC Model.
"""
gl_ds = [_SFrame, _SArray, _SGraph]
# Object is GLC-DS or GLC-Model
def _is_not_pickle_safe_gl_class(obj_class):
    """
    Check whether *obj_class* is a GLC type that needs special pickling.

    True for the GLC data structures (SFrame, SArray, SGraph) and for GLC
    model classes that are not pickle safe.

    Parameters
    ----------
    obj_class : type
        Class to be checked.

    Returns
    -------
    bool
        True when instances of the class must be serialized through the
        GLC archive mechanism instead of plain pickle.
    """
    # GLC data structures always go through the archive path.
    if obj_class in (_SFrame, _SArray, _SGraph):
        return True
    # Otherwise, only unsafe GLC model classes do.
    return _is_not_pickle_safe_gl_model_class(obj_class)
"def",
"_is_not_pickle_safe_gl_class",
"(",
"obj_class",
")",
":",
"gl_ds",
"=",
"[",
"_SFrame",
",",
"_SArray",
",",
"_SGraph",
"]",
"# Object is GLC-DS or GLC-Model",
"return",
"(",
"obj_class",
"in",
"gl_ds",
")",
"or",
"_is_not_pickle_safe_gl_model_class",
"(",
"obj_class",
")"
] | Check if class is a Turi create model.
The function does it by checking the method resolution order (MRO) of the
class and verifies that _Model is the base class.
Parameters
----------
obj_class : Class to be checked.
Returns
----------
True if the class is a GLC Model. | [
"Check",
"if",
"class",
"is",
"a",
"Turi",
"create",
"model",
"."
] | 74514c3f99e25b46f22c6e02977fe3da69221c2e | https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/unity/python/turicreate/_gl_pickle.py#L52-L71 |
28,920 | apple/turicreate | src/unity/python/turicreate/_gl_pickle.py | _get_gl_class_type | def _get_gl_class_type(obj_class):
"""
Internal util to get the type of the GLC class. The pickle file stores
this name so that it knows how to construct the object on unpickling.
Parameters
----------
obj_class : Class which has to be categorized.
Returns
----------
A class type for the pickle file to save.
"""
if obj_class == _SFrame:
return "SFrame"
elif obj_class == _SGraph:
return "SGraph"
elif obj_class == _SArray:
return "SArray"
elif _is_not_pickle_safe_gl_model_class(obj_class):
return "Model"
else:
def _get_gl_class_type(obj_class):
    """
    Map a GLC class to the type tag stored in the pickle archive.

    The pickle file stores this tag so the unpickler knows how to
    reconstruct the object later.

    Parameters
    ----------
    obj_class : type
        Class to be categorized.

    Returns
    -------
    str or None
        One of "SFrame", "SGraph", "SArray", or "Model"; None when the
        class is not a GLC type handled here.
    """
    tag_by_class = {
        _SFrame: "SFrame",
        _SGraph: "SGraph",
        _SArray: "SArray",
    }
    if obj_class in tag_by_class:
        return tag_by_class[obj_class]
    if _is_not_pickle_safe_gl_model_class(obj_class):
        return "Model"
    return None
"def",
"_get_gl_class_type",
"(",
"obj_class",
")",
":",
"if",
"obj_class",
"==",
"_SFrame",
":",
"return",
"\"SFrame\"",
"elif",
"obj_class",
"==",
"_SGraph",
":",
"return",
"\"SGraph\"",
"elif",
"obj_class",
"==",
"_SArray",
":",
"return",
"\"SArray\"",
"elif",
"_is_not_pickle_safe_gl_model_class",
"(",
"obj_class",
")",
":",
"return",
"\"Model\"",
"else",
":",
"return",
"None"
] | Internal util to get the type of the GLC class. The pickle file stores
this name so that it knows how to construct the object on unpickling.
Parameters
----------
obj_class : Class which has to be categorized.
Returns
----------
A class type for the pickle file to save. | [
"Internal",
"util",
"to",
"get",
"the",
"type",
"of",
"the",
"GLC",
"class",
".",
"The",
"pickle",
"file",
"stores",
"this",
"name",
"so",
"that",
"it",
"knows",
"how",
"to",
"construct",
"the",
"object",
"on",
"unpickling",
"."
] | 74514c3f99e25b46f22c6e02977fe3da69221c2e | https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/unity/python/turicreate/_gl_pickle.py#L73-L97 |
28,921 | apple/turicreate | src/unity/python/turicreate/_gl_pickle.py | _get_gl_object_from_persistent_id | def _get_gl_object_from_persistent_id(type_tag, gl_archive_abs_path):
"""
Internal util to get a GLC object from a persistent ID in the pickle file.
Parameters
----------
type_tag : The name of the glc class as saved in the GLC pickler.
gl_archive_abs_path: An absolute path to the GLC archive where the
object was saved.
Returns
----------
The GLC object.
"""
if type_tag == "SFrame":
obj = _SFrame(gl_archive_abs_path)
elif type_tag == "SGraph":
obj = _load_graph(gl_archive_abs_path)
elif type_tag == "SArray":
obj = _SArray(gl_archive_abs_path)
elif type_tag == "Model":
from . import load_model as _load_model
obj = _load_model(gl_archive_abs_path)
else:
raise _pickle.UnpicklingError("Turi pickling Error: Unsupported object."
" Only SFrames, SGraphs, SArrays, and Models are supported.")
def _get_gl_object_from_persistent_id(type_tag, gl_archive_abs_path):
    """
    Reconstruct a GLC object from a persistent ID found in a pickle file.

    Parameters
    ----------
    type_tag : str
        The GLC class tag written by the GLC pickler ("SFrame", "SGraph",
        "SArray", or "Model").
    gl_archive_abs_path : str
        Absolute path to the GLC archive where the object was saved.

    Returns
    -------
    The reconstructed GLC object.

    Raises
    ------
    _pickle.UnpicklingError
        If *type_tag* is not one of the supported tags.
    """
    if type_tag == "SFrame":
        return _SFrame(gl_archive_abs_path)
    if type_tag == "SGraph":
        return _load_graph(gl_archive_abs_path)
    if type_tag == "SArray":
        return _SArray(gl_archive_abs_path)
    if type_tag == "Model":
        # Imported lazily to avoid a circular import at module load time.
        from . import load_model as _load_model
        return _load_model(gl_archive_abs_path)
    raise _pickle.UnpicklingError("Turi pickling Error: Unsupported object."
            " Only SFrames, SGraphs, SArrays, and Models are supported.")
"def",
"_get_gl_object_from_persistent_id",
"(",
"type_tag",
",",
"gl_archive_abs_path",
")",
":",
"if",
"type_tag",
"==",
"\"SFrame\"",
":",
"obj",
"=",
"_SFrame",
"(",
"gl_archive_abs_path",
")",
"elif",
"type_tag",
"==",
"\"SGraph\"",
":",
"obj",
"=",
"_load_graph",
"(",
"gl_archive_abs_path",
")",
"elif",
"type_tag",
"==",
"\"SArray\"",
":",
"obj",
"=",
"_SArray",
"(",
"gl_archive_abs_path",
")",
"elif",
"type_tag",
"==",
"\"Model\"",
":",
"from",
".",
"import",
"load_model",
"as",
"_load_model",
"obj",
"=",
"_load_model",
"(",
"gl_archive_abs_path",
")",
"else",
":",
"raise",
"_pickle",
".",
"UnpicklingError",
"(",
"\"Turi pickling Error: Unsupported object.\"",
"\" Only SFrames, SGraphs, SArrays, and Models are supported.\"",
")",
"return",
"obj"
] | Internal util to get a GLC object from a persistent ID in the pickle file.
Parameters
----------
type_tag : The name of the glc class as saved in the GLC pickler.
gl_archive_abs_path: An absolute path to the GLC archive where the
object was saved.
Returns
----------
The GLC object. | [
"Internal",
"util",
"to",
"get",
"a",
"GLC",
"object",
"from",
"a",
"persistent",
"ID",
"in",
"the",
"pickle",
"file",
"."
] | 74514c3f99e25b46f22c6e02977fe3da69221c2e | https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/unity/python/turicreate/_gl_pickle.py#L99-L127 |
28,922 | apple/turicreate | src/unity/python/turicreate/_gl_pickle.py | GLPickler.persistent_id | def persistent_id(self, obj):
"""
Provide a persistent ID for "saving" GLC objects by reference. Return
None for all non GLC objects.
Parameters
----------
obj: Name of the object whose persistent ID is extracted.
Returns
--------
None if the object is not a GLC object. (ClassName, relative path)
if the object is a GLC object.
Notes
-----
Borrowed from pickle docs (https://docs.python.org/2/library/_pickle.html)
For the benefit of object persistence, the pickle module supports the
notion of a reference to an object outside the pickled data stream.
To pickle objects that have an external persistent id, the pickler must
have a custom persistent_id() method that takes an object as an argument and
returns either None or the persistent id for that object.
For GLC objects, the persistent_id is merely a relative file path (within
the ZIP archive) to the GLC archive where the GLC object is saved. For
example:
(SFrame, 'sframe-save-path')
(SGraph, 'sgraph-save-path')
(Model, 'model-save-path')
"""
# Get the class of the object (if it can be done)
obj_class = None if not hasattr(obj, '__class__') else obj.__class__
if obj_class is None:
return None
# If the object is a GLC class.
if _is_not_pickle_safe_gl_class(obj_class):
if (id(obj) in self.gl_object_memo):
# has already been pickled
return (None, None, id(obj))
else:
# Save the location of the GLC object's archive to the pickle file.
relative_filename = str(_uuid.uuid4())
filename = _os.path.join(self.gl_temp_storage_path, relative_filename)
self.mark_for_delete -= set([filename])
# Save the GLC object
obj.save(filename)
# Memoize.
self.gl_object_memo.add(id(obj))
# Return the tuple (class_name, relative_filename) in archive.
return (_get_gl_class_type(obj.__class__), relative_filename, id(obj))
# Not a GLC object. Default to cloud pickle
else:
def persistent_id(self, obj):
    """
    Provide a persistent ID for "saving" GLC objects by reference.

    Returns None for every non-GLC object so the default pickling path
    handles it. For a GLC object the persistent ID is the tuple
    ``(class_tag, relative_filename, id(obj))`` locating the GLC archive
    (relative to this pickler's temp storage) where the object was saved,
    for example ``("SFrame", 'sframe-save-path', ...)``. Objects already
    pickled in this session are memoized by ``id`` and referenced as
    ``(None, None, id(obj))``.

    Parameters
    ----------
    obj : object
        Object whose persistent ID is extracted.

    Returns
    -------
    tuple or None
        None if the object is not a GLC object; otherwise the persistent
        ID tuple described above.
    """
    obj_class = getattr(obj, '__class__', None)
    if obj_class is None:
        return None

    if not _is_not_pickle_safe_gl_class(obj_class):
        # Not a GLC object. Default to cloud pickle.
        return None

    if id(obj) in self.gl_object_memo:
        # Already pickled in this session: reference it by id only.
        return (None, None, id(obj))

    # Save the GLC object into a fresh archive inside temp storage, and
    # record its relative location in the pickle file.
    relative_filename = str(_uuid.uuid4())
    filename = _os.path.join(self.gl_temp_storage_path, relative_filename)
    self.mark_for_delete -= set([filename])
    obj.save(filename)
    # Memoize so a repeated reference is not saved twice.
    self.gl_object_memo.add(id(obj))
    return (_get_gl_class_type(obj.__class__), relative_filename, id(obj))
"def",
"persistent_id",
"(",
"self",
",",
"obj",
")",
":",
"# Get the class of the object (if it can be done)",
"obj_class",
"=",
"None",
"if",
"not",
"hasattr",
"(",
"obj",
",",
"'__class__'",
")",
"else",
"obj",
".",
"__class__",
"if",
"obj_class",
"is",
"None",
":",
"return",
"None",
"# If the object is a GLC class.",
"if",
"_is_not_pickle_safe_gl_class",
"(",
"obj_class",
")",
":",
"if",
"(",
"id",
"(",
"obj",
")",
"in",
"self",
".",
"gl_object_memo",
")",
":",
"# has already been pickled",
"return",
"(",
"None",
",",
"None",
",",
"id",
"(",
"obj",
")",
")",
"else",
":",
"# Save the location of the GLC object's archive to the pickle file.",
"relative_filename",
"=",
"str",
"(",
"_uuid",
".",
"uuid4",
"(",
")",
")",
"filename",
"=",
"_os",
".",
"path",
".",
"join",
"(",
"self",
".",
"gl_temp_storage_path",
",",
"relative_filename",
")",
"self",
".",
"mark_for_delete",
"-=",
"set",
"(",
"[",
"filename",
"]",
")",
"# Save the GLC object",
"obj",
".",
"save",
"(",
"filename",
")",
"# Memoize.",
"self",
".",
"gl_object_memo",
".",
"add",
"(",
"id",
"(",
"obj",
")",
")",
"# Return the tuple (class_name, relative_filename) in archive.",
"return",
"(",
"_get_gl_class_type",
"(",
"obj",
".",
"__class__",
")",
",",
"relative_filename",
",",
"id",
"(",
"obj",
")",
")",
"# Not a GLC object. Default to cloud pickle",
"else",
":",
"return",
"None"
] | Provide a persistent ID for "saving" GLC objects by reference. Return
None for all non GLC objects.
Parameters
----------
obj: Name of the object whose persistent ID is extracted.
Returns
--------
None if the object is not a GLC object. (ClassName, relative path)
if the object is a GLC object.
Notes
-----
Borrowed from pickle docs (https://docs.python.org/2/library/_pickle.html)
For the benefit of object persistence, the pickle module supports the
notion of a reference to an object outside the pickled data stream.
To pickle objects that have an external persistent id, the pickler must
have a custom persistent_id() method that takes an object as an argument and
returns either None or the persistent id for that object.
For GLC objects, the persistent_id is merely a relative file path (within
the ZIP archive) to the GLC archive where the GLC object is saved. For
example:
(SFrame, 'sframe-save-path')
(SGraph, 'sgraph-save-path')
(Model, 'model-save-path') | [
"Provide",
"a",
"persistent",
"ID",
"for",
"saving",
"GLC",
"objects",
"by",
"reference",
".",
"Return",
"None",
"for",
"all",
"non",
"GLC",
"objects",
"."
] | 74514c3f99e25b46f22c6e02977fe3da69221c2e | https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/unity/python/turicreate/_gl_pickle.py#L287-L351 |
28,923 | apple/turicreate | src/unity/python/turicreate/_gl_pickle.py | GLPickler.close | def close(self):
"""
Close the pickle file, and the zip archive file. The single zip archive
file can now be shipped around to be loaded by the unpickler.
"""
if self.file is None:
return
# Close the pickle file.
self.file.close()
self.file = None
for f in self.mark_for_delete:
error = [False]
def register_error(*args):
error[0] = True
_shutil.rmtree(f, onerror = register_error)
if error[0]:
def close(self):
    """
    Close the pickle file and clean up directories marked for deletion.

    After closing, the resulting archive can be shipped around and loaded
    by the unpickler. Directories in ``mark_for_delete`` are removed now;
    any that cannot be removed are scheduled for removal at interpreter
    exit. Calling close() more than once is safe.
    """
    if self.file is None:
        # Already closed: nothing to do.
        return

    self.file.close()
    self.file = None

    for path in self.mark_for_delete:
        had_error = [False]

        def note_error(*args):
            had_error[0] = True

        _shutil.rmtree(path, onerror=note_error)
        if had_error[0]:
            # Could not delete right now; retry best-effort at exit.
            _atexit.register(_shutil.rmtree, path, ignore_errors=True)
"def",
"close",
"(",
"self",
")",
":",
"if",
"self",
".",
"file",
"is",
"None",
":",
"return",
"# Close the pickle file.",
"self",
".",
"file",
".",
"close",
"(",
")",
"self",
".",
"file",
"=",
"None",
"for",
"f",
"in",
"self",
".",
"mark_for_delete",
":",
"error",
"=",
"[",
"False",
"]",
"def",
"register_error",
"(",
"*",
"args",
")",
":",
"error",
"[",
"0",
"]",
"=",
"True",
"_shutil",
".",
"rmtree",
"(",
"f",
",",
"onerror",
"=",
"register_error",
")",
"if",
"error",
"[",
"0",
"]",
":",
"_atexit",
".",
"register",
"(",
"_shutil",
".",
"rmtree",
",",
"f",
",",
"ignore_errors",
"=",
"True",
")"
] | Close the pickle file, and the zip archive file. The single zip archive
file can now be shipped around to be loaded by the unpickler. | [
"Close",
"the",
"pickle",
"file",
"and",
"the",
"zip",
"archive",
"file",
".",
"The",
"single",
"zip",
"archive",
"file",
"can",
"now",
"be",
"shipped",
"around",
"to",
"be",
"loaded",
"by",
"the",
"unpickler",
"."
] | 74514c3f99e25b46f22c6e02977fe3da69221c2e | https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/unity/python/turicreate/_gl_pickle.py#L353-L374 |
28,924 | apple/turicreate | src/unity/python/turicreate/_gl_pickle.py | GLUnpickler.persistent_load | def persistent_load(self, pid):
"""
Reconstruct a GLC object using the persistent ID.
This method should not be used externally. It is required by the unpickler super class.
Parameters
----------
pid : The persistent ID used in pickle file to save the GLC object.
Returns
----------
The GLC object.
"""
if len(pid) == 2:
# Pre GLC-1.3 release behavior, without memorization
type_tag, filename = pid
abs_path = _os.path.join(self.gl_temp_storage_path, filename)
return _get_gl_object_from_persistent_id(type_tag, abs_path)
else:
# Post GLC-1.3 release behavior, with memorization
type_tag, filename, object_id = pid
if object_id in self.gl_object_memo:
return self.gl_object_memo[object_id]
else:
abs_path = _os.path.join(self.gl_temp_storage_path, filename)
obj = _get_gl_object_from_persistent_id(type_tag, abs_path)
self.gl_object_memo[object_id] = obj
def persistent_load(self, pid):
    """
    Reconstruct a GLC object from a persistent ID in the pickle file.

    Required by the Unpickler super class; not meant to be called
    externally.

    Parameters
    ----------
    pid : tuple
        The persistent ID written by the GLC pickler. A 2-tuple
        ``(type_tag, filename)`` comes from pre GLC-1.3 pickles (no
        memoization); a 3-tuple ``(type_tag, filename, object_id)``
        additionally carries an id used to memoize objects referenced
        more than once.

    Returns
    -------
    The reconstructed GLC object.
    """
    if len(pid) == 2:
        # Pre GLC-1.3 release behavior: no memoization information.
        type_tag, filename = pid
        abs_path = _os.path.join(self.gl_temp_storage_path, filename)
        return _get_gl_object_from_persistent_id(type_tag, abs_path)

    # Post GLC-1.3 release behavior, with memoization.
    type_tag, filename, object_id = pid
    if object_id in self.gl_object_memo:
        # Same object referenced earlier in this pickle: reuse it.
        return self.gl_object_memo[object_id]

    abs_path = _os.path.join(self.gl_temp_storage_path, filename)
    obj = _get_gl_object_from_persistent_id(type_tag, abs_path)
    self.gl_object_memo[object_id] = obj
    return obj
"def",
"persistent_load",
"(",
"self",
",",
"pid",
")",
":",
"if",
"len",
"(",
"pid",
")",
"==",
"2",
":",
"# Pre GLC-1.3 release behavior, without memorization",
"type_tag",
",",
"filename",
"=",
"pid",
"abs_path",
"=",
"_os",
".",
"path",
".",
"join",
"(",
"self",
".",
"gl_temp_storage_path",
",",
"filename",
")",
"return",
"_get_gl_object_from_persistent_id",
"(",
"type_tag",
",",
"abs_path",
")",
"else",
":",
"# Post GLC-1.3 release behavior, with memorization",
"type_tag",
",",
"filename",
",",
"object_id",
"=",
"pid",
"if",
"object_id",
"in",
"self",
".",
"gl_object_memo",
":",
"return",
"self",
".",
"gl_object_memo",
"[",
"object_id",
"]",
"else",
":",
"abs_path",
"=",
"_os",
".",
"path",
".",
"join",
"(",
"self",
".",
"gl_temp_storage_path",
",",
"filename",
")",
"obj",
"=",
"_get_gl_object_from_persistent_id",
"(",
"type_tag",
",",
"abs_path",
")",
"self",
".",
"gl_object_memo",
"[",
"object_id",
"]",
"=",
"obj",
"return",
"obj"
] | Reconstruct a GLC object using the persistent ID.
This method should not be used externally. It is required by the unpickler super class.
Parameters
----------
pid : The persistent ID used in pickle file to save the GLC object.
Returns
----------
The GLC object. | [
"Reconstruct",
"a",
"GLC",
"object",
"using",
"the",
"persistent",
"ID",
"."
] | 74514c3f99e25b46f22c6e02977fe3da69221c2e | https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/unity/python/turicreate/_gl_pickle.py#L472-L500 |
28,925 | apple/turicreate | src/unity/python/turicreate/_gl_pickle.py | GLUnpickler.close | def close(self):
"""
Clean up files that were created.
"""
if self.file:
self.file.close()
self.file = None
# If temp_file is a folder, we do not remove it because we may
# still need it after the unpickler is disposed
if self.tmp_file and _os.path.isfile(self.tmp_file):
_os.remove(self.tmp_file)
self.tmp_file = None | python | def close(self):
"""
Clean up files that were created.
"""
if self.file:
self.file.close()
self.file = None
# If temp_file is a folder, we do not remove it because we may
# still need it after the unpickler is disposed
if self.tmp_file and _os.path.isfile(self.tmp_file):
_os.remove(self.tmp_file)
self.tmp_file = None | [
"def",
"close",
"(",
"self",
")",
":",
"if",
"self",
".",
"file",
":",
"self",
".",
"file",
".",
"close",
"(",
")",
"self",
".",
"file",
"=",
"None",
"# If temp_file is a folder, we do not remove it because we may",
"# still need it after the unpickler is disposed",
"if",
"self",
".",
"tmp_file",
"and",
"_os",
".",
"path",
".",
"isfile",
"(",
"self",
".",
"tmp_file",
")",
":",
"_os",
".",
"remove",
"(",
"self",
".",
"tmp_file",
")",
"self",
".",
"tmp_file",
"=",
"None"
] | Clean up files that were created. | [
"Clean",
"up",
"files",
"that",
"were",
"created",
"."
] | 74514c3f99e25b46f22c6e02977fe3da69221c2e | https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/unity/python/turicreate/_gl_pickle.py#L502-L514 |
28,926 | apple/turicreate | src/external/coremltools_wrap/coremltools/coremltools/converters/sklearn/_converter.py | convert | def convert(sk_obj, input_features = None,
output_feature_names = None):
"""
Convert scikit-learn pipeline, classifier, or regressor to Core ML format.
Parameters
----------
sk_obj: model | [model] of scikit-learn format.
Scikit learn model(s) to convert to a Core ML format.
The input model may be a single scikit learn model, a scikit learn
pipeline model, or a list of scikit learn models.
Currently supported scikit learn models are:
- Linear and Logistic Regression
- LinearSVC and LinearSVR
- SVC and SVR
- NuSVC and NuSVR
- Gradient Boosting Classifier and Regressor
- Decision Tree Classifier and Regressor
- Random Forest Classifier and Regressor
- Normalizer
- Imputer
- Standard Scaler
- DictVectorizer
- One Hot Encoder
The input model, or the last model in a pipeline or list of models,
determines whether this is exposed as a Transformer, Regressor,
or Classifier.
Note that there may not be a one-to-one correspondence between scikit
learn models and which Core ML models are used to represent them. For
example, many scikit learn models are embedded in a pipeline to handle
processing of input features.
input_features: str | dict | list
Optional name(s) that can be given to the inputs of the scikit-learn
model. Defaults to 'input'.
Input features can be specified in a number of forms.
- Single string: In this case, the input is assumed to be a single
array, with the number of dimensions set using num_dimensions.
- List of strings: In this case, the overall input dimensions to the
scikit-learn model is assumed to be the length of the list. If
neighboring names are identical, they are assumed to be an input
array of that length. For example:
["a", "b", "c"]
resolves to
[("a", Double), ("b", Double), ("c", Double)].
And:
["a", "a", "b"]
resolves to
[("a", Array(2)), ("b", Double)].
- Dictionary: Where the keys are the names and the indices or ranges of
feature indices.
In this case, it's presented as a mapping from keys to indices or
ranges of contiguous indices. For example,
{"a" : 0, "b" : [2,3], "c" : 1}
Resolves to
[("a", Double), ("c", Double), ("b", Array(2))].
Note that the ordering is determined by the indices.
- List of tuples of the form `(name, datatype)`. Here, `name` is the
name of the exposed feature, and `datatype` is an instance of
`String`, `Double`, `Int64`, `Array`, or `Dictionary`.
output_feature_names: string or list of strings
Optional name(s) that can be given to the inputs of the scikit-learn
model.
The output_feature_names is interpreted according to the model type:
- If the scikit-learn model is a transformer, it is the name of the
array feature output by the final sequence of the transformer
(defaults to "output").
- If it is a classifier, it should be a 2-tuple of names giving the top
class prediction and the array of scores for each class (defaults to
"classLabel" and "classScores").
- If it is a regressor, it should give the name of the prediction value
(defaults to "prediction").
Returns
-------
model:MLModel
Returns an MLModel instance representing a Core ML model.
Examples
--------
.. sourcecode:: python
>>> from sklearn.linear_model import LinearRegression
>>> import pandas as pd
# Load data
>>> data = pd.read_csv('houses.csv')
# Train a model
>>> model = LinearRegression()
>>> model.fit(data[["bedroom", "bath", "size"]], data["price"])
# Convert and save the scikit-learn model
>>> import coremltools
>>> coreml_model = coremltools.converters.sklearn.convert(model,
["bedroom", "bath", "size"],
"price")
>>> coreml_model.save('HousePricer.mlmodel')
"""
# This function is just a thin wrapper around the internal converter so
# that sklearn isn't actually imported unless this function is called
from ...models import MLModel
# NOTE: Providing user-defined class labels will be enabled when
# several issues with the ordering of the classes are worked out. For now,
# to use custom class labels, directly import the internal function below.
from ._converter_internal import _convert_sklearn_model
spec = _convert_sklearn_model(
sk_obj, input_features, output_feature_names, class_labels = None)
return MLModel(spec) | python | def convert(sk_obj, input_features = None,
output_feature_names = None):
"""
Convert scikit-learn pipeline, classifier, or regressor to Core ML format.
Parameters
----------
sk_obj: model | [model] of scikit-learn format.
Scikit learn model(s) to convert to a Core ML format.
The input model may be a single scikit learn model, a scikit learn
pipeline model, or a list of scikit learn models.
Currently supported scikit learn models are:
- Linear and Logistic Regression
- LinearSVC and LinearSVR
- SVC and SVR
- NuSVC and NuSVR
- Gradient Boosting Classifier and Regressor
- Decision Tree Classifier and Regressor
- Random Forest Classifier and Regressor
- Normalizer
- Imputer
- Standard Scaler
- DictVectorizer
- One Hot Encoder
The input model, or the last model in a pipeline or list of models,
determines whether this is exposed as a Transformer, Regressor,
or Classifier.
Note that there may not be a one-to-one correspondence between scikit
learn models and which Core ML models are used to represent them. For
example, many scikit learn models are embedded in a pipeline to handle
processing of input features.
input_features: str | dict | list
Optional name(s) that can be given to the inputs of the scikit-learn
model. Defaults to 'input'.
Input features can be specified in a number of forms.
- Single string: In this case, the input is assumed to be a single
array, with the number of dimensions set using num_dimensions.
- List of strings: In this case, the overall input dimensions to the
scikit-learn model is assumed to be the length of the list. If
neighboring names are identical, they are assumed to be an input
array of that length. For example:
["a", "b", "c"]
resolves to
[("a", Double), ("b", Double), ("c", Double)].
And:
["a", "a", "b"]
resolves to
[("a", Array(2)), ("b", Double)].
- Dictionary: Where the keys are the names and the indices or ranges of
feature indices.
In this case, it's presented as a mapping from keys to indices or
ranges of contiguous indices. For example,
{"a" : 0, "b" : [2,3], "c" : 1}
Resolves to
[("a", Double), ("c", Double), ("b", Array(2))].
Note that the ordering is determined by the indices.
- List of tuples of the form `(name, datatype)`. Here, `name` is the
name of the exposed feature, and `datatype` is an instance of
`String`, `Double`, `Int64`, `Array`, or `Dictionary`.
output_feature_names: string or list of strings
Optional name(s) that can be given to the inputs of the scikit-learn
model.
The output_feature_names is interpreted according to the model type:
- If the scikit-learn model is a transformer, it is the name of the
array feature output by the final sequence of the transformer
(defaults to "output").
- If it is a classifier, it should be a 2-tuple of names giving the top
class prediction and the array of scores for each class (defaults to
"classLabel" and "classScores").
- If it is a regressor, it should give the name of the prediction value
(defaults to "prediction").
Returns
-------
model:MLModel
Returns an MLModel instance representing a Core ML model.
Examples
--------
.. sourcecode:: python
>>> from sklearn.linear_model import LinearRegression
>>> import pandas as pd
# Load data
>>> data = pd.read_csv('houses.csv')
# Train a model
>>> model = LinearRegression()
>>> model.fit(data[["bedroom", "bath", "size"]], data["price"])
# Convert and save the scikit-learn model
>>> import coremltools
>>> coreml_model = coremltools.converters.sklearn.convert(model,
["bedroom", "bath", "size"],
"price")
>>> coreml_model.save('HousePricer.mlmodel')
"""
# This function is just a thin wrapper around the internal converter so
# that sklearn isn't actually imported unless this function is called
from ...models import MLModel
# NOTE: Providing user-defined class labels will be enabled when
# several issues with the ordering of the classes are worked out. For now,
# to use custom class labels, directly import the internal function below.
from ._converter_internal import _convert_sklearn_model
spec = _convert_sklearn_model(
sk_obj, input_features, output_feature_names, class_labels = None)
return MLModel(spec) | [
"def",
"convert",
"(",
"sk_obj",
",",
"input_features",
"=",
"None",
",",
"output_feature_names",
"=",
"None",
")",
":",
"# This function is just a thin wrapper around the internal converter so",
"# that sklearn isn't actually imported unless this function is called",
"from",
".",
".",
".",
"models",
"import",
"MLModel",
"# NOTE: Providing user-defined class labels will be enabled when",
"# several issues with the ordering of the classes are worked out. For now,",
"# to use custom class labels, directly import the internal function below.",
"from",
".",
"_converter_internal",
"import",
"_convert_sklearn_model",
"spec",
"=",
"_convert_sklearn_model",
"(",
"sk_obj",
",",
"input_features",
",",
"output_feature_names",
",",
"class_labels",
"=",
"None",
")",
"return",
"MLModel",
"(",
"spec",
")"
] | Convert scikit-learn pipeline, classifier, or regressor to Core ML format.
Parameters
----------
sk_obj: model | [model] of scikit-learn format.
Scikit learn model(s) to convert to a Core ML format.
The input model may be a single scikit learn model, a scikit learn
pipeline model, or a list of scikit learn models.
Currently supported scikit learn models are:
- Linear and Logistic Regression
- LinearSVC and LinearSVR
- SVC and SVR
- NuSVC and NuSVR
- Gradient Boosting Classifier and Regressor
- Decision Tree Classifier and Regressor
- Random Forest Classifier and Regressor
- Normalizer
- Imputer
- Standard Scaler
- DictVectorizer
- One Hot Encoder
The input model, or the last model in a pipeline or list of models,
determines whether this is exposed as a Transformer, Regressor,
or Classifier.
Note that there may not be a one-to-one correspondence between scikit
learn models and which Core ML models are used to represent them. For
example, many scikit learn models are embedded in a pipeline to handle
processing of input features.
input_features: str | dict | list
Optional name(s) that can be given to the inputs of the scikit-learn
model. Defaults to 'input'.
Input features can be specified in a number of forms.
- Single string: In this case, the input is assumed to be a single
array, with the number of dimensions set using num_dimensions.
- List of strings: In this case, the overall input dimensions to the
scikit-learn model is assumed to be the length of the list. If
neighboring names are identical, they are assumed to be an input
array of that length. For example:
["a", "b", "c"]
resolves to
[("a", Double), ("b", Double), ("c", Double)].
And:
["a", "a", "b"]
resolves to
[("a", Array(2)), ("b", Double)].
- Dictionary: Where the keys are the names and the indices or ranges of
feature indices.
In this case, it's presented as a mapping from keys to indices or
ranges of contiguous indices. For example,
{"a" : 0, "b" : [2,3], "c" : 1}
Resolves to
[("a", Double), ("c", Double), ("b", Array(2))].
Note that the ordering is determined by the indices.
- List of tuples of the form `(name, datatype)`. Here, `name` is the
name of the exposed feature, and `datatype` is an instance of
`String`, `Double`, `Int64`, `Array`, or `Dictionary`.
output_feature_names: string or list of strings
Optional name(s) that can be given to the inputs of the scikit-learn
model.
The output_feature_names is interpreted according to the model type:
- If the scikit-learn model is a transformer, it is the name of the
array feature output by the final sequence of the transformer
(defaults to "output").
- If it is a classifier, it should be a 2-tuple of names giving the top
class prediction and the array of scores for each class (defaults to
"classLabel" and "classScores").
- If it is a regressor, it should give the name of the prediction value
(defaults to "prediction").
Returns
-------
model:MLModel
Returns an MLModel instance representing a Core ML model.
Examples
--------
.. sourcecode:: python
>>> from sklearn.linear_model import LinearRegression
>>> import pandas as pd
# Load data
>>> data = pd.read_csv('houses.csv')
# Train a model
>>> model = LinearRegression()
>>> model.fit(data[["bedroom", "bath", "size"]], data["price"])
# Convert and save the scikit-learn model
>>> import coremltools
>>> coreml_model = coremltools.converters.sklearn.convert(model,
["bedroom", "bath", "size"],
"price")
>>> coreml_model.save('HousePricer.mlmodel') | [
"Convert",
"scikit",
"-",
"learn",
"pipeline",
"classifier",
"or",
"regressor",
"to",
"Core",
"ML",
"format",
"."
] | 74514c3f99e25b46f22c6e02977fe3da69221c2e | https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/external/coremltools_wrap/coremltools/coremltools/converters/sklearn/_converter.py#L10-L148 |
28,927 | apple/turicreate | src/external/coremltools_wrap/coremltools/deps/protobuf/python/google/protobuf/reflection.py | ParseMessage | def ParseMessage(descriptor, byte_str):
"""Generate a new Message instance from this Descriptor and a byte string.
Args:
descriptor: Protobuf Descriptor object
byte_str: Serialized protocol buffer byte string
Returns:
Newly created protobuf Message object.
"""
result_class = MakeClass(descriptor)
new_msg = result_class()
new_msg.ParseFromString(byte_str)
return new_msg | python | def ParseMessage(descriptor, byte_str):
"""Generate a new Message instance from this Descriptor and a byte string.
Args:
descriptor: Protobuf Descriptor object
byte_str: Serialized protocol buffer byte string
Returns:
Newly created protobuf Message object.
"""
result_class = MakeClass(descriptor)
new_msg = result_class()
new_msg.ParseFromString(byte_str)
return new_msg | [
"def",
"ParseMessage",
"(",
"descriptor",
",",
"byte_str",
")",
":",
"result_class",
"=",
"MakeClass",
"(",
"descriptor",
")",
"new_msg",
"=",
"result_class",
"(",
")",
"new_msg",
".",
"ParseFromString",
"(",
"byte_str",
")",
"return",
"new_msg"
] | Generate a new Message instance from this Descriptor and a byte string.
Args:
descriptor: Protobuf Descriptor object
byte_str: Serialized protocol buffer byte string
Returns:
Newly created protobuf Message object. | [
"Generate",
"a",
"new",
"Message",
"instance",
"from",
"this",
"Descriptor",
"and",
"a",
"byte",
"string",
"."
] | 74514c3f99e25b46f22c6e02977fe3da69221c2e | https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/external/coremltools_wrap/coremltools/deps/protobuf/python/google/protobuf/reflection.py#L67-L80 |
28,928 | apple/turicreate | src/external/coremltools_wrap/coremltools/deps/protobuf/python/google/protobuf/reflection.py | MakeClass | def MakeClass(descriptor):
"""Construct a class object for a protobuf described by descriptor.
Composite descriptors are handled by defining the new class as a member of the
parent class, recursing as deep as necessary.
This is the dynamic equivalent to:
class Parent(message.Message):
__metaclass__ = GeneratedProtocolMessageType
DESCRIPTOR = descriptor
class Child(message.Message):
__metaclass__ = GeneratedProtocolMessageType
DESCRIPTOR = descriptor.nested_types[0]
Sample usage:
file_descriptor = descriptor_pb2.FileDescriptorProto()
file_descriptor.ParseFromString(proto2_string)
msg_descriptor = descriptor.MakeDescriptor(file_descriptor.message_type[0])
msg_class = reflection.MakeClass(msg_descriptor)
msg = msg_class()
Args:
descriptor: A descriptor.Descriptor object describing the protobuf.
Returns:
The Message class object described by the descriptor.
"""
if descriptor in MESSAGE_CLASS_CACHE:
return MESSAGE_CLASS_CACHE[descriptor]
attributes = {}
for name, nested_type in descriptor.nested_types_by_name.items():
attributes[name] = MakeClass(nested_type)
attributes[GeneratedProtocolMessageType._DESCRIPTOR_KEY] = descriptor
result = GeneratedProtocolMessageType(
str(descriptor.name), (message.Message,), attributes)
MESSAGE_CLASS_CACHE[descriptor] = result
return result | python | def MakeClass(descriptor):
"""Construct a class object for a protobuf described by descriptor.
Composite descriptors are handled by defining the new class as a member of the
parent class, recursing as deep as necessary.
This is the dynamic equivalent to:
class Parent(message.Message):
__metaclass__ = GeneratedProtocolMessageType
DESCRIPTOR = descriptor
class Child(message.Message):
__metaclass__ = GeneratedProtocolMessageType
DESCRIPTOR = descriptor.nested_types[0]
Sample usage:
file_descriptor = descriptor_pb2.FileDescriptorProto()
file_descriptor.ParseFromString(proto2_string)
msg_descriptor = descriptor.MakeDescriptor(file_descriptor.message_type[0])
msg_class = reflection.MakeClass(msg_descriptor)
msg = msg_class()
Args:
descriptor: A descriptor.Descriptor object describing the protobuf.
Returns:
The Message class object described by the descriptor.
"""
if descriptor in MESSAGE_CLASS_CACHE:
return MESSAGE_CLASS_CACHE[descriptor]
attributes = {}
for name, nested_type in descriptor.nested_types_by_name.items():
attributes[name] = MakeClass(nested_type)
attributes[GeneratedProtocolMessageType._DESCRIPTOR_KEY] = descriptor
result = GeneratedProtocolMessageType(
str(descriptor.name), (message.Message,), attributes)
MESSAGE_CLASS_CACHE[descriptor] = result
return result | [
"def",
"MakeClass",
"(",
"descriptor",
")",
":",
"if",
"descriptor",
"in",
"MESSAGE_CLASS_CACHE",
":",
"return",
"MESSAGE_CLASS_CACHE",
"[",
"descriptor",
"]",
"attributes",
"=",
"{",
"}",
"for",
"name",
",",
"nested_type",
"in",
"descriptor",
".",
"nested_types_by_name",
".",
"items",
"(",
")",
":",
"attributes",
"[",
"name",
"]",
"=",
"MakeClass",
"(",
"nested_type",
")",
"attributes",
"[",
"GeneratedProtocolMessageType",
".",
"_DESCRIPTOR_KEY",
"]",
"=",
"descriptor",
"result",
"=",
"GeneratedProtocolMessageType",
"(",
"str",
"(",
"descriptor",
".",
"name",
")",
",",
"(",
"message",
".",
"Message",
",",
")",
",",
"attributes",
")",
"MESSAGE_CLASS_CACHE",
"[",
"descriptor",
"]",
"=",
"result",
"return",
"result"
] | Construct a class object for a protobuf described by descriptor.
Composite descriptors are handled by defining the new class as a member of the
parent class, recursing as deep as necessary.
This is the dynamic equivalent to:
class Parent(message.Message):
__metaclass__ = GeneratedProtocolMessageType
DESCRIPTOR = descriptor
class Child(message.Message):
__metaclass__ = GeneratedProtocolMessageType
DESCRIPTOR = descriptor.nested_types[0]
Sample usage:
file_descriptor = descriptor_pb2.FileDescriptorProto()
file_descriptor.ParseFromString(proto2_string)
msg_descriptor = descriptor.MakeDescriptor(file_descriptor.message_type[0])
msg_class = reflection.MakeClass(msg_descriptor)
msg = msg_class()
Args:
descriptor: A descriptor.Descriptor object describing the protobuf.
Returns:
The Message class object described by the descriptor. | [
"Construct",
"a",
"class",
"object",
"for",
"a",
"protobuf",
"described",
"by",
"descriptor",
"."
] | 74514c3f99e25b46f22c6e02977fe3da69221c2e | https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/external/coremltools_wrap/coremltools/deps/protobuf/python/google/protobuf/reflection.py#L83-L121 |
28,929 | apple/turicreate | src/unity/python/turicreate/toolkits/image_analysis/image_analysis.py | load_images | def load_images(url, format='auto', with_path=True, recursive=True, ignore_failure=True, random_order=False):
"""
Loads images from a directory. JPEG and PNG images are supported.
Parameters
----------
url : str
The string of the path where all the images are stored.
format : {'PNG' | 'JPG' | 'auto'}, optional
The format of the images in the directory. The default 'auto' parameter
value tries to infer the image type from the file extension. If a
format is specified, all images must be of that format.
with_path : bool, optional
Indicates whether a path column is added to the SFrame. If 'with_path'
is set to True, the returned SFrame contains a 'path' column, which
holds a path string for each Image object.
recursive : bool, optional
Indicates whether 'load_images' should do recursive directory traversal,
or a flat directory traversal.
ignore_failure : bool, optional
If true, prints warning for failed images and keep loading the rest of
the images.
random_order : bool, optional
Load images in random order.
Returns
-------
out : SFrame
Returns an SFrame with either an 'image' column or both an 'image' and
a 'path' column. The 'image' column is a column of Image objects. If
with_path is True, there is also a 'path' column which contains the image
path for each of each corresponding Image object.
Examples
--------
>>> url ='https://static.turi.com/datasets/images/nested'
>>> image_sframe = turicreate.image_analysis.load_images(url, "auto", with_path=False,
... recursive=True)
"""
from ... import extensions as _extensions
from ...util import _make_internal_url
return _extensions.load_images(url, format, with_path,
recursive, ignore_failure, random_order) | python | def load_images(url, format='auto', with_path=True, recursive=True, ignore_failure=True, random_order=False):
"""
Loads images from a directory. JPEG and PNG images are supported.
Parameters
----------
url : str
The string of the path where all the images are stored.
format : {'PNG' | 'JPG' | 'auto'}, optional
The format of the images in the directory. The default 'auto' parameter
value tries to infer the image type from the file extension. If a
format is specified, all images must be of that format.
with_path : bool, optional
Indicates whether a path column is added to the SFrame. If 'with_path'
is set to True, the returned SFrame contains a 'path' column, which
holds a path string for each Image object.
recursive : bool, optional
Indicates whether 'load_images' should do recursive directory traversal,
or a flat directory traversal.
ignore_failure : bool, optional
If true, prints warning for failed images and keep loading the rest of
the images.
random_order : bool, optional
Load images in random order.
Returns
-------
out : SFrame
Returns an SFrame with either an 'image' column or both an 'image' and
a 'path' column. The 'image' column is a column of Image objects. If
with_path is True, there is also a 'path' column which contains the image
path for each of each corresponding Image object.
Examples
--------
>>> url ='https://static.turi.com/datasets/images/nested'
>>> image_sframe = turicreate.image_analysis.load_images(url, "auto", with_path=False,
... recursive=True)
"""
from ... import extensions as _extensions
from ...util import _make_internal_url
return _extensions.load_images(url, format, with_path,
recursive, ignore_failure, random_order) | [
"def",
"load_images",
"(",
"url",
",",
"format",
"=",
"'auto'",
",",
"with_path",
"=",
"True",
",",
"recursive",
"=",
"True",
",",
"ignore_failure",
"=",
"True",
",",
"random_order",
"=",
"False",
")",
":",
"from",
".",
".",
".",
"import",
"extensions",
"as",
"_extensions",
"from",
".",
".",
".",
"util",
"import",
"_make_internal_url",
"return",
"_extensions",
".",
"load_images",
"(",
"url",
",",
"format",
",",
"with_path",
",",
"recursive",
",",
"ignore_failure",
",",
"random_order",
")"
] | Loads images from a directory. JPEG and PNG images are supported.
Parameters
----------
url : str
The string of the path where all the images are stored.
format : {'PNG' | 'JPG' | 'auto'}, optional
The format of the images in the directory. The default 'auto' parameter
value tries to infer the image type from the file extension. If a
format is specified, all images must be of that format.
with_path : bool, optional
Indicates whether a path column is added to the SFrame. If 'with_path'
is set to True, the returned SFrame contains a 'path' column, which
holds a path string for each Image object.
recursive : bool, optional
Indicates whether 'load_images' should do recursive directory traversal,
or a flat directory traversal.
ignore_failure : bool, optional
If true, prints warning for failed images and keep loading the rest of
the images.
random_order : bool, optional
Load images in random order.
Returns
-------
out : SFrame
Returns an SFrame with either an 'image' column or both an 'image' and
a 'path' column. The 'image' column is a column of Image objects. If
with_path is True, there is also a 'path' column which contains the image
path for each of each corresponding Image object.
Examples
--------
>>> url ='https://static.turi.com/datasets/images/nested'
>>> image_sframe = turicreate.image_analysis.load_images(url, "auto", with_path=False,
... recursive=True) | [
"Loads",
"images",
"from",
"a",
"directory",
".",
"JPEG",
"and",
"PNG",
"images",
"are",
"supported",
"."
] | 74514c3f99e25b46f22c6e02977fe3da69221c2e | https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/unity/python/turicreate/toolkits/image_analysis/image_analysis.py#L12-L60 |
28,930 | apple/turicreate | src/unity/python/turicreate/toolkits/image_analysis/image_analysis.py | _decode | def _decode(image_data):
"""
Internal helper function for decoding a single Image or an SArray of Images
"""
from ...data_structures.sarray import SArray as _SArray
from ... import extensions as _extensions
if type(image_data) is _SArray:
return _extensions.decode_image_sarray(image_data)
elif type(image_data) is _Image:
return _extensions.decode_image(image_data) | python | def _decode(image_data):
"""
Internal helper function for decoding a single Image or an SArray of Images
"""
from ...data_structures.sarray import SArray as _SArray
from ... import extensions as _extensions
if type(image_data) is _SArray:
return _extensions.decode_image_sarray(image_data)
elif type(image_data) is _Image:
return _extensions.decode_image(image_data) | [
"def",
"_decode",
"(",
"image_data",
")",
":",
"from",
".",
".",
".",
"data_structures",
".",
"sarray",
"import",
"SArray",
"as",
"_SArray",
"from",
".",
".",
".",
"import",
"extensions",
"as",
"_extensions",
"if",
"type",
"(",
"image_data",
")",
"is",
"_SArray",
":",
"return",
"_extensions",
".",
"decode_image_sarray",
"(",
"image_data",
")",
"elif",
"type",
"(",
"image_data",
")",
"is",
"_Image",
":",
"return",
"_extensions",
".",
"decode_image",
"(",
"image_data",
")"
] | Internal helper function for decoding a single Image or an SArray of Images | [
"Internal",
"helper",
"function",
"for",
"decoding",
"a",
"single",
"Image",
"or",
"an",
"SArray",
"of",
"Images"
] | 74514c3f99e25b46f22c6e02977fe3da69221c2e | https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/unity/python/turicreate/toolkits/image_analysis/image_analysis.py#L63-L72 |
28,931 | apple/turicreate | src/unity/python/turicreate/toolkits/image_analysis/image_analysis.py | resize | def resize(image, width, height, channels=None, decode=False,
resample='nearest'):
"""
Resizes the image or SArray of Images to a specific width, height, and
number of channels.
Parameters
----------
image : turicreate.Image | SArray
The image or SArray of images to be resized.
width : int
The width the image is resized to.
height : int
The height the image is resized to.
channels : int, optional
The number of channels the image is resized to. 1 channel
corresponds to grayscale, 3 channels corresponds to RGB, and 4
channels corresponds to RGBA images.
decode : bool, optional
Whether to store the resized image in decoded format. Decoded takes
more space, but makes the resize and future operations on the image faster.
resample : 'nearest' or 'bilinear'
Specify the resampling filter:
- ``'nearest'``: Nearest neigbhor, extremely fast
- ``'bilinear'``: Bilinear, fast and with less aliasing artifacts
Returns
-------
out : turicreate.Image
Returns a resized Image object.
Notes
-----
Grayscale Images -> Images with one channel, representing a scale from
white to black
RGB Images -> Images with 3 channels, with each pixel having Green, Red,
and Blue values.
RGBA Images -> An RGB image with an opacity channel.
Examples
--------
Resize a single image
>>> img = turicreate.Image('https://static.turi.com/datasets/images/sample.jpg')
>>> resized_img = turicreate.image_analysis.resize(img,100,100,1)
Resize an SArray of images
>>> url ='https://static.turi.com/datasets/images/nested'
>>> image_sframe = turicreate.image_analysis.load_images(url, "auto", with_path=False,
... recursive=True)
>>> image_sarray = image_sframe["image"]
>>> resized_images = turicreate.image_analysis.resize(image_sarray, 100, 100, 1)
"""
if height < 0 or width < 0:
raise ValueError("Cannot resize to negative sizes")
if resample == 'nearest':
resample_method = 0
elif resample == 'bilinear':
resample_method = 1
else:
raise ValueError("Unknown resample option: '%s'" % resample)
from ...data_structures.sarray import SArray as _SArray
from ... import extensions as _extensions
if type(image) is _Image:
if channels is None:
channels = image.channels
if channels <= 0:
raise ValueError("cannot resize images to 0 or fewer channels")
return _extensions.resize_image(image, width, height, channels, decode, resample_method)
elif type(image) is _SArray:
if channels is None:
channels = 3
if channels <= 0:
raise ValueError("cannot resize images to 0 or fewer channels")
return image.apply(lambda x: _extensions.resize_image(x, width, height, channels, decode, resample_method))
else:
raise ValueError("Cannot call 'resize' on objects that are not either an Image or SArray of Images") | python | def resize(image, width, height, channels=None, decode=False,
resample='nearest'):
"""
Resizes the image or SArray of Images to a specific width, height, and
number of channels.
Parameters
----------
image : turicreate.Image | SArray
The image or SArray of images to be resized.
width : int
The width the image is resized to.
height : int
The height the image is resized to.
channels : int, optional
The number of channels the image is resized to. 1 channel
corresponds to grayscale, 3 channels corresponds to RGB, and 4
channels corresponds to RGBA images.
decode : bool, optional
Whether to store the resized image in decoded format. Decoded takes
more space, but makes the resize and future operations on the image faster.
resample : 'nearest' or 'bilinear'
Specify the resampling filter:
- ``'nearest'``: Nearest neigbhor, extremely fast
- ``'bilinear'``: Bilinear, fast and with less aliasing artifacts
Returns
-------
out : turicreate.Image
Returns a resized Image object.
Notes
-----
Grayscale Images -> Images with one channel, representing a scale from
white to black
RGB Images -> Images with 3 channels, with each pixel having Green, Red,
and Blue values.
RGBA Images -> An RGB image with an opacity channel.
Examples
--------
Resize a single image
>>> img = turicreate.Image('https://static.turi.com/datasets/images/sample.jpg')
>>> resized_img = turicreate.image_analysis.resize(img,100,100,1)
Resize an SArray of images
>>> url ='https://static.turi.com/datasets/images/nested'
>>> image_sframe = turicreate.image_analysis.load_images(url, "auto", with_path=False,
... recursive=True)
>>> image_sarray = image_sframe["image"]
>>> resized_images = turicreate.image_analysis.resize(image_sarray, 100, 100, 1)
"""
if height < 0 or width < 0:
raise ValueError("Cannot resize to negative sizes")
if resample == 'nearest':
resample_method = 0
elif resample == 'bilinear':
resample_method = 1
else:
raise ValueError("Unknown resample option: '%s'" % resample)
from ...data_structures.sarray import SArray as _SArray
from ... import extensions as _extensions
if type(image) is _Image:
if channels is None:
channels = image.channels
if channels <= 0:
raise ValueError("cannot resize images to 0 or fewer channels")
return _extensions.resize_image(image, width, height, channels, decode, resample_method)
elif type(image) is _SArray:
if channels is None:
channels = 3
if channels <= 0:
raise ValueError("cannot resize images to 0 or fewer channels")
return image.apply(lambda x: _extensions.resize_image(x, width, height, channels, decode, resample_method))
else:
raise ValueError("Cannot call 'resize' on objects that are not either an Image or SArray of Images") | [
"def",
"resize",
"(",
"image",
",",
"width",
",",
"height",
",",
"channels",
"=",
"None",
",",
"decode",
"=",
"False",
",",
"resample",
"=",
"'nearest'",
")",
":",
"if",
"height",
"<",
"0",
"or",
"width",
"<",
"0",
":",
"raise",
"ValueError",
"(",
"\"Cannot resize to negative sizes\"",
")",
"if",
"resample",
"==",
"'nearest'",
":",
"resample_method",
"=",
"0",
"elif",
"resample",
"==",
"'bilinear'",
":",
"resample_method",
"=",
"1",
"else",
":",
"raise",
"ValueError",
"(",
"\"Unknown resample option: '%s'\"",
"%",
"resample",
")",
"from",
".",
".",
".",
"data_structures",
".",
"sarray",
"import",
"SArray",
"as",
"_SArray",
"from",
".",
".",
".",
"import",
"extensions",
"as",
"_extensions",
"if",
"type",
"(",
"image",
")",
"is",
"_Image",
":",
"if",
"channels",
"is",
"None",
":",
"channels",
"=",
"image",
".",
"channels",
"if",
"channels",
"<=",
"0",
":",
"raise",
"ValueError",
"(",
"\"cannot resize images to 0 or fewer channels\"",
")",
"return",
"_extensions",
".",
"resize_image",
"(",
"image",
",",
"width",
",",
"height",
",",
"channels",
",",
"decode",
",",
"resample_method",
")",
"elif",
"type",
"(",
"image",
")",
"is",
"_SArray",
":",
"if",
"channels",
"is",
"None",
":",
"channels",
"=",
"3",
"if",
"channels",
"<=",
"0",
":",
"raise",
"ValueError",
"(",
"\"cannot resize images to 0 or fewer channels\"",
")",
"return",
"image",
".",
"apply",
"(",
"lambda",
"x",
":",
"_extensions",
".",
"resize_image",
"(",
"x",
",",
"width",
",",
"height",
",",
"channels",
",",
"decode",
",",
"resample_method",
")",
")",
"else",
":",
"raise",
"ValueError",
"(",
"\"Cannot call 'resize' on objects that are not either an Image or SArray of Images\"",
")"
] | Resizes the image or SArray of Images to a specific width, height, and
number of channels.
Parameters
----------
image : turicreate.Image | SArray
The image or SArray of images to be resized.
width : int
The width the image is resized to.
height : int
The height the image is resized to.
channels : int, optional
The number of channels the image is resized to. 1 channel
corresponds to grayscale, 3 channels corresponds to RGB, and 4
channels corresponds to RGBA images.
decode : bool, optional
Whether to store the resized image in decoded format. Decoded takes
more space, but makes the resize and future operations on the image faster.
resample : 'nearest' or 'bilinear'
Specify the resampling filter:
- ``'nearest'``: Nearest neigbhor, extremely fast
- ``'bilinear'``: Bilinear, fast and with less aliasing artifacts
Returns
-------
out : turicreate.Image
Returns a resized Image object.
Notes
-----
Grayscale Images -> Images with one channel, representing a scale from
white to black
RGB Images -> Images with 3 channels, with each pixel having Green, Red,
and Blue values.
RGBA Images -> An RGB image with an opacity channel.
Examples
--------
Resize a single image
>>> img = turicreate.Image('https://static.turi.com/datasets/images/sample.jpg')
>>> resized_img = turicreate.image_analysis.resize(img,100,100,1)
Resize an SArray of images
>>> url ='https://static.turi.com/datasets/images/nested'
>>> image_sframe = turicreate.image_analysis.load_images(url, "auto", with_path=False,
... recursive=True)
>>> image_sarray = image_sframe["image"]
>>> resized_images = turicreate.image_analysis.resize(image_sarray, 100, 100, 1) | [
"Resizes",
"the",
"image",
"or",
"SArray",
"of",
"Images",
"to",
"a",
"specific",
"width",
"height",
"and",
"number",
"of",
"channels",
"."
] | 74514c3f99e25b46f22c6e02977fe3da69221c2e | https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/unity/python/turicreate/toolkits/image_analysis/image_analysis.py#L76-L161 |
28,932 | apple/turicreate | src/external/coremltools_wrap/coremltools/coremltools/models/neural_network/quantization_utils.py | _convert_1bit_array_to_byte_array | def _convert_1bit_array_to_byte_array(arr):
"""
Convert bit array to byte array.
:param arr: list
Bits as a list where each element is an integer of 0 or 1
Returns
-------
numpy.array
1D numpy array of type uint8
"""
# Padding if necessary
while len(arr) < 8 or len(arr) % 8:
arr.append(0)
arr = _np.array(arr, dtype='uint8')
bit_arr = []
idx = 0
# Iterate and combine 8-bits into a uint8
for arr_idx in range(int(len(arr) / 8)):
bit_arr.append(((arr[idx] << 7) & (1 << 7)) |
((arr[idx+1] << 6) & (1 << 6)) |
((arr[idx+2] << 5) & (1 << 5)) |
((arr[idx+3] << 4) & (1 << 4)) |
((arr[idx+4] << 3) & (1 << 3)) |
((arr[idx+5] << 2) & (1 << 2)) |
((arr[idx+6] << 1) & (1 << 1)) |
((arr[idx+7] << 0) & (1 << 0))
)
idx += 8
return _np.array(bit_arr, dtype='uint8') | python | def _convert_1bit_array_to_byte_array(arr):
"""
Convert bit array to byte array.
:param arr: list
Bits as a list where each element is an integer of 0 or 1
Returns
-------
numpy.array
1D numpy array of type uint8
"""
# Padding if necessary
while len(arr) < 8 or len(arr) % 8:
arr.append(0)
arr = _np.array(arr, dtype='uint8')
bit_arr = []
idx = 0
# Iterate and combine 8-bits into a uint8
for arr_idx in range(int(len(arr) / 8)):
bit_arr.append(((arr[idx] << 7) & (1 << 7)) |
((arr[idx+1] << 6) & (1 << 6)) |
((arr[idx+2] << 5) & (1 << 5)) |
((arr[idx+3] << 4) & (1 << 4)) |
((arr[idx+4] << 3) & (1 << 3)) |
((arr[idx+5] << 2) & (1 << 2)) |
((arr[idx+6] << 1) & (1 << 1)) |
((arr[idx+7] << 0) & (1 << 0))
)
idx += 8
return _np.array(bit_arr, dtype='uint8') | [
"def",
"_convert_1bit_array_to_byte_array",
"(",
"arr",
")",
":",
"# Padding if necessary",
"while",
"len",
"(",
"arr",
")",
"<",
"8",
"or",
"len",
"(",
"arr",
")",
"%",
"8",
":",
"arr",
".",
"append",
"(",
"0",
")",
"arr",
"=",
"_np",
".",
"array",
"(",
"arr",
",",
"dtype",
"=",
"'uint8'",
")",
"bit_arr",
"=",
"[",
"]",
"idx",
"=",
"0",
"# Iterate and combine 8-bits into a uint8",
"for",
"arr_idx",
"in",
"range",
"(",
"int",
"(",
"len",
"(",
"arr",
")",
"/",
"8",
")",
")",
":",
"bit_arr",
".",
"append",
"(",
"(",
"(",
"arr",
"[",
"idx",
"]",
"<<",
"7",
")",
"&",
"(",
"1",
"<<",
"7",
")",
")",
"|",
"(",
"(",
"arr",
"[",
"idx",
"+",
"1",
"]",
"<<",
"6",
")",
"&",
"(",
"1",
"<<",
"6",
")",
")",
"|",
"(",
"(",
"arr",
"[",
"idx",
"+",
"2",
"]",
"<<",
"5",
")",
"&",
"(",
"1",
"<<",
"5",
")",
")",
"|",
"(",
"(",
"arr",
"[",
"idx",
"+",
"3",
"]",
"<<",
"4",
")",
"&",
"(",
"1",
"<<",
"4",
")",
")",
"|",
"(",
"(",
"arr",
"[",
"idx",
"+",
"4",
"]",
"<<",
"3",
")",
"&",
"(",
"1",
"<<",
"3",
")",
")",
"|",
"(",
"(",
"arr",
"[",
"idx",
"+",
"5",
"]",
"<<",
"2",
")",
"&",
"(",
"1",
"<<",
"2",
")",
")",
"|",
"(",
"(",
"arr",
"[",
"idx",
"+",
"6",
"]",
"<<",
"1",
")",
"&",
"(",
"1",
"<<",
"1",
")",
")",
"|",
"(",
"(",
"arr",
"[",
"idx",
"+",
"7",
"]",
"<<",
"0",
")",
"&",
"(",
"1",
"<<",
"0",
")",
")",
")",
"idx",
"+=",
"8",
"return",
"_np",
".",
"array",
"(",
"bit_arr",
",",
"dtype",
"=",
"'uint8'",
")"
] | Convert bit array to byte array.
:param arr: list
Bits as a list where each element is an integer of 0 or 1
Returns
-------
numpy.array
1D numpy array of type uint8 | [
"Convert",
"bit",
"array",
"to",
"byte",
"array",
"."
] | 74514c3f99e25b46f22c6e02977fe3da69221c2e | https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/external/coremltools_wrap/coremltools/coremltools/models/neural_network/quantization_utils.py#L34-L65 |
28,933 | apple/turicreate | src/external/coremltools_wrap/coremltools/coremltools/models/neural_network/quantization_utils.py | _decompose_bytes_to_bit_arr | def _decompose_bytes_to_bit_arr(arr):
"""
Unpack bytes to bits
:param arr: list
Byte Stream, as a list of uint8 values
Returns
-------
bit_arr: list
Decomposed bit stream as a list of 0/1s of length (len(arr) * 8)
"""
bit_arr = []
for idx in range(len(arr)):
for i in reversed(range(8)):
bit_arr.append((arr[idx] >> i) & (1 << 0))
return bit_arr | python | def _decompose_bytes_to_bit_arr(arr):
"""
Unpack bytes to bits
:param arr: list
Byte Stream, as a list of uint8 values
Returns
-------
bit_arr: list
Decomposed bit stream as a list of 0/1s of length (len(arr) * 8)
"""
bit_arr = []
for idx in range(len(arr)):
for i in reversed(range(8)):
bit_arr.append((arr[idx] >> i) & (1 << 0))
return bit_arr | [
"def",
"_decompose_bytes_to_bit_arr",
"(",
"arr",
")",
":",
"bit_arr",
"=",
"[",
"]",
"for",
"idx",
"in",
"range",
"(",
"len",
"(",
"arr",
")",
")",
":",
"for",
"i",
"in",
"reversed",
"(",
"range",
"(",
"8",
")",
")",
":",
"bit_arr",
".",
"append",
"(",
"(",
"arr",
"[",
"idx",
"]",
">>",
"i",
")",
"&",
"(",
"1",
"<<",
"0",
")",
")",
"return",
"bit_arr"
] | Unpack bytes to bits
:param arr: list
Byte Stream, as a list of uint8 values
Returns
-------
bit_arr: list
Decomposed bit stream as a list of 0/1s of length (len(arr) * 8) | [
"Unpack",
"bytes",
"to",
"bits"
] | 74514c3f99e25b46f22c6e02977fe3da69221c2e | https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/external/coremltools_wrap/coremltools/coremltools/models/neural_network/quantization_utils.py#L77-L93 |
28,934 | apple/turicreate | src/external/coremltools_wrap/coremltools/coremltools/models/neural_network/quantization_utils.py | _get_linear_lookup_table_and_weight | def _get_linear_lookup_table_and_weight(nbits, wp):
"""
Generate a linear lookup table.
:param nbits: int
Number of bits to represent a quantized weight value
:param wp: numpy.array
Weight blob to be quantized
Returns
-------
lookup_table: numpy.array
Lookup table of shape (2^nbits, )
qw: numpy.array
Decomposed bit stream as a list of 0/1s of length (len(arr) * 8)
"""
w = wp.reshape(1, -1)
qw, scales, biases = _quantize_channelwise_linear(w, nbits, axis=0)
indices = _np.array(range(0, 2**nbits))
lookup_table = indices * scales[0] + biases[0]
return lookup_table, qw | python | def _get_linear_lookup_table_and_weight(nbits, wp):
"""
Generate a linear lookup table.
:param nbits: int
Number of bits to represent a quantized weight value
:param wp: numpy.array
Weight blob to be quantized
Returns
-------
lookup_table: numpy.array
Lookup table of shape (2^nbits, )
qw: numpy.array
Decomposed bit stream as a list of 0/1s of length (len(arr) * 8)
"""
w = wp.reshape(1, -1)
qw, scales, biases = _quantize_channelwise_linear(w, nbits, axis=0)
indices = _np.array(range(0, 2**nbits))
lookup_table = indices * scales[0] + biases[0]
return lookup_table, qw | [
"def",
"_get_linear_lookup_table_and_weight",
"(",
"nbits",
",",
"wp",
")",
":",
"w",
"=",
"wp",
".",
"reshape",
"(",
"1",
",",
"-",
"1",
")",
"qw",
",",
"scales",
",",
"biases",
"=",
"_quantize_channelwise_linear",
"(",
"w",
",",
"nbits",
",",
"axis",
"=",
"0",
")",
"indices",
"=",
"_np",
".",
"array",
"(",
"range",
"(",
"0",
",",
"2",
"**",
"nbits",
")",
")",
"lookup_table",
"=",
"indices",
"*",
"scales",
"[",
"0",
"]",
"+",
"biases",
"[",
"0",
"]",
"return",
"lookup_table",
",",
"qw"
] | Generate a linear lookup table.
:param nbits: int
Number of bits to represent a quantized weight value
:param wp: numpy.array
Weight blob to be quantized
Returns
-------
lookup_table: numpy.array
Lookup table of shape (2^nbits, )
qw: numpy.array
Decomposed bit stream as a list of 0/1s of length (len(arr) * 8) | [
"Generate",
"a",
"linear",
"lookup",
"table",
"."
] | 74514c3f99e25b46f22c6e02977fe3da69221c2e | https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/external/coremltools_wrap/coremltools/coremltools/models/neural_network/quantization_utils.py#L96-L117 |
28,935 | apple/turicreate | src/external/coremltools_wrap/coremltools/coremltools/models/neural_network/quantization_utils.py | _get_kmeans_lookup_table_and_weight | def _get_kmeans_lookup_table_and_weight(nbits, w, init='k-means++', tol=1e-2, n_init=1, rand_seed=0):
"""
Generate K-Means lookup table given a weight parameter field
:param nbits:
Number of bits for quantization
:param w:
Weight as numpy array
Returns
-------
lut: numpy.array
Lookup table, numpy array of shape (1 << nbits, );
wq: numpy.array
Quantized weight of type numpy.uint8
"""
if _HAS_SKLEARN:
from sklearn.cluster import KMeans
else:
raise Exception('sklearn package required for k-means quantization')
units = _np.prod(w.shape)
lut_len = 1 << nbits
n_clusters = units if (units < lut_len) else lut_len
wf = w.reshape(-1, 1)
kmeans = KMeans(n_clusters=n_clusters, init=init, tol=tol, n_init=n_init, random_state=rand_seed).fit(wf)
wq = kmeans.labels_[:units]
lut = _np.zeros(lut_len)
lut[:n_clusters] = kmeans.cluster_centers_.flatten()
return lut, wq | python | def _get_kmeans_lookup_table_and_weight(nbits, w, init='k-means++', tol=1e-2, n_init=1, rand_seed=0):
"""
Generate K-Means lookup table given a weight parameter field
:param nbits:
Number of bits for quantization
:param w:
Weight as numpy array
Returns
-------
lut: numpy.array
Lookup table, numpy array of shape (1 << nbits, );
wq: numpy.array
Quantized weight of type numpy.uint8
"""
if _HAS_SKLEARN:
from sklearn.cluster import KMeans
else:
raise Exception('sklearn package required for k-means quantization')
units = _np.prod(w.shape)
lut_len = 1 << nbits
n_clusters = units if (units < lut_len) else lut_len
wf = w.reshape(-1, 1)
kmeans = KMeans(n_clusters=n_clusters, init=init, tol=tol, n_init=n_init, random_state=rand_seed).fit(wf)
wq = kmeans.labels_[:units]
lut = _np.zeros(lut_len)
lut[:n_clusters] = kmeans.cluster_centers_.flatten()
return lut, wq | [
"def",
"_get_kmeans_lookup_table_and_weight",
"(",
"nbits",
",",
"w",
",",
"init",
"=",
"'k-means++'",
",",
"tol",
"=",
"1e-2",
",",
"n_init",
"=",
"1",
",",
"rand_seed",
"=",
"0",
")",
":",
"if",
"_HAS_SKLEARN",
":",
"from",
"sklearn",
".",
"cluster",
"import",
"KMeans",
"else",
":",
"raise",
"Exception",
"(",
"'sklearn package required for k-means quantization'",
")",
"units",
"=",
"_np",
".",
"prod",
"(",
"w",
".",
"shape",
")",
"lut_len",
"=",
"1",
"<<",
"nbits",
"n_clusters",
"=",
"units",
"if",
"(",
"units",
"<",
"lut_len",
")",
"else",
"lut_len",
"wf",
"=",
"w",
".",
"reshape",
"(",
"-",
"1",
",",
"1",
")",
"kmeans",
"=",
"KMeans",
"(",
"n_clusters",
"=",
"n_clusters",
",",
"init",
"=",
"init",
",",
"tol",
"=",
"tol",
",",
"n_init",
"=",
"n_init",
",",
"random_state",
"=",
"rand_seed",
")",
".",
"fit",
"(",
"wf",
")",
"wq",
"=",
"kmeans",
".",
"labels_",
"[",
":",
"units",
"]",
"lut",
"=",
"_np",
".",
"zeros",
"(",
"lut_len",
")",
"lut",
"[",
":",
"n_clusters",
"]",
"=",
"kmeans",
".",
"cluster_centers_",
".",
"flatten",
"(",
")",
"return",
"lut",
",",
"wq"
] | Generate K-Means lookup table given a weight parameter field
:param nbits:
Number of bits for quantization
:param w:
Weight as numpy array
Returns
-------
lut: numpy.array
Lookup table, numpy array of shape (1 << nbits, );
wq: numpy.array
Quantized weight of type numpy.uint8 | [
"Generate",
"K",
"-",
"Means",
"lookup",
"table",
"given",
"a",
"weight",
"parameter",
"field"
] | 74514c3f99e25b46f22c6e02977fe3da69221c2e | https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/external/coremltools_wrap/coremltools/coremltools/models/neural_network/quantization_utils.py#L120-L149 |
28,936 | apple/turicreate | src/external/coremltools_wrap/coremltools/coremltools/models/neural_network/quantization_utils.py | _quantize_channelwise_linear | def _quantize_channelwise_linear(weight, nbits, axis=0):
"""
Linearly quantize weight blob.
:param weight: numpy.array
Weight to be quantized.
:param nbits: int
Number of bits per weight element
:param axis: int
Axis of the weight blob to compute channel-wise quantization, can be 0 or 1
Returns
-------
quantized_weight: numpy.array
quantized weight as float numpy array, with the same shape as weight
scale: numpy.array
per channel scale
bias: numpy.array
per channel bias
"""
if len(weight.shape) == 1: # vector situation, treat as 1 channel
weight = weight.reshape((1, weight.shape[0]))
rank = len(weight.shape)
if axis == 1:
transposed_axis_order = (1,0) + tuple(range(2,rank))
weight = _np.transpose(weight, transposed_axis_order)
num_channels = weight.shape[0]
shape = weight.shape
weight = weight.reshape((num_channels, -1)) # [C, L]
a = _np.amin(weight, axis=-1) # [C,]
b = _np.amax(weight, axis=-1) # [C,]
# Quantize weights to full range [0, (1 << nbits) - 1]
qa = 0
qb = (1 << nbits) - 1
# Use a mask to filter out channels with very close weight values
mask = (b - a) > 1e-5 # [C,1] (normal channels)
r_mask = ~mask # (all-same-value) channels
qw = _np.zeros_like(weight) # [C, L]
scale = _np.ones((num_channels,))
bias = _np.zeros((num_channels,))
if _np.any(mask): # normal channels
qw[mask] = (weight[mask] - a[mask][:,None]) / (b[mask] - a[mask])[:,None] * (qb - qa) + qa
scale[mask] = (b[mask] - a[mask]) / (qb - qa)
bias[mask] = - scale[mask] * qa + a[mask]
if _np.any(r_mask): # singular channels
qw[r_mask] = qa
scale[r_mask] = 0
bias[r_mask] = a[r_mask]
# Reshape
quantized_weight = qw.reshape(shape)
if axis == 1:
quantized_weight = _np.transpose(quantized_weight, transposed_axis_order)
return (quantized_weight, scale, bias) | python | def _quantize_channelwise_linear(weight, nbits, axis=0):
"""
Linearly quantize weight blob.
:param weight: numpy.array
Weight to be quantized.
:param nbits: int
Number of bits per weight element
:param axis: int
Axis of the weight blob to compute channel-wise quantization, can be 0 or 1
Returns
-------
quantized_weight: numpy.array
quantized weight as float numpy array, with the same shape as weight
scale: numpy.array
per channel scale
bias: numpy.array
per channel bias
"""
if len(weight.shape) == 1: # vector situation, treat as 1 channel
weight = weight.reshape((1, weight.shape[0]))
rank = len(weight.shape)
if axis == 1:
transposed_axis_order = (1,0) + tuple(range(2,rank))
weight = _np.transpose(weight, transposed_axis_order)
num_channels = weight.shape[0]
shape = weight.shape
weight = weight.reshape((num_channels, -1)) # [C, L]
a = _np.amin(weight, axis=-1) # [C,]
b = _np.amax(weight, axis=-1) # [C,]
# Quantize weights to full range [0, (1 << nbits) - 1]
qa = 0
qb = (1 << nbits) - 1
# Use a mask to filter out channels with very close weight values
mask = (b - a) > 1e-5 # [C,1] (normal channels)
r_mask = ~mask # (all-same-value) channels
qw = _np.zeros_like(weight) # [C, L]
scale = _np.ones((num_channels,))
bias = _np.zeros((num_channels,))
if _np.any(mask): # normal channels
qw[mask] = (weight[mask] - a[mask][:,None]) / (b[mask] - a[mask])[:,None] * (qb - qa) + qa
scale[mask] = (b[mask] - a[mask]) / (qb - qa)
bias[mask] = - scale[mask] * qa + a[mask]
if _np.any(r_mask): # singular channels
qw[r_mask] = qa
scale[r_mask] = 0
bias[r_mask] = a[r_mask]
# Reshape
quantized_weight = qw.reshape(shape)
if axis == 1:
quantized_weight = _np.transpose(quantized_weight, transposed_axis_order)
return (quantized_weight, scale, bias) | [
"def",
"_quantize_channelwise_linear",
"(",
"weight",
",",
"nbits",
",",
"axis",
"=",
"0",
")",
":",
"if",
"len",
"(",
"weight",
".",
"shape",
")",
"==",
"1",
":",
"# vector situation, treat as 1 channel",
"weight",
"=",
"weight",
".",
"reshape",
"(",
"(",
"1",
",",
"weight",
".",
"shape",
"[",
"0",
"]",
")",
")",
"rank",
"=",
"len",
"(",
"weight",
".",
"shape",
")",
"if",
"axis",
"==",
"1",
":",
"transposed_axis_order",
"=",
"(",
"1",
",",
"0",
")",
"+",
"tuple",
"(",
"range",
"(",
"2",
",",
"rank",
")",
")",
"weight",
"=",
"_np",
".",
"transpose",
"(",
"weight",
",",
"transposed_axis_order",
")",
"num_channels",
"=",
"weight",
".",
"shape",
"[",
"0",
"]",
"shape",
"=",
"weight",
".",
"shape",
"weight",
"=",
"weight",
".",
"reshape",
"(",
"(",
"num_channels",
",",
"-",
"1",
")",
")",
"# [C, L]",
"a",
"=",
"_np",
".",
"amin",
"(",
"weight",
",",
"axis",
"=",
"-",
"1",
")",
"# [C,]",
"b",
"=",
"_np",
".",
"amax",
"(",
"weight",
",",
"axis",
"=",
"-",
"1",
")",
"# [C,]",
"# Quantize weights to full range [0, (1 << nbits) - 1]",
"qa",
"=",
"0",
"qb",
"=",
"(",
"1",
"<<",
"nbits",
")",
"-",
"1",
"# Use a mask to filter out channels with very close weight values",
"mask",
"=",
"(",
"b",
"-",
"a",
")",
">",
"1e-5",
"# [C,1] (normal channels)",
"r_mask",
"=",
"~",
"mask",
"# (all-same-value) channels",
"qw",
"=",
"_np",
".",
"zeros_like",
"(",
"weight",
")",
"# [C, L]",
"scale",
"=",
"_np",
".",
"ones",
"(",
"(",
"num_channels",
",",
")",
")",
"bias",
"=",
"_np",
".",
"zeros",
"(",
"(",
"num_channels",
",",
")",
")",
"if",
"_np",
".",
"any",
"(",
"mask",
")",
":",
"# normal channels",
"qw",
"[",
"mask",
"]",
"=",
"(",
"weight",
"[",
"mask",
"]",
"-",
"a",
"[",
"mask",
"]",
"[",
":",
",",
"None",
"]",
")",
"/",
"(",
"b",
"[",
"mask",
"]",
"-",
"a",
"[",
"mask",
"]",
")",
"[",
":",
",",
"None",
"]",
"*",
"(",
"qb",
"-",
"qa",
")",
"+",
"qa",
"scale",
"[",
"mask",
"]",
"=",
"(",
"b",
"[",
"mask",
"]",
"-",
"a",
"[",
"mask",
"]",
")",
"/",
"(",
"qb",
"-",
"qa",
")",
"bias",
"[",
"mask",
"]",
"=",
"-",
"scale",
"[",
"mask",
"]",
"*",
"qa",
"+",
"a",
"[",
"mask",
"]",
"if",
"_np",
".",
"any",
"(",
"r_mask",
")",
":",
"# singular channels",
"qw",
"[",
"r_mask",
"]",
"=",
"qa",
"scale",
"[",
"r_mask",
"]",
"=",
"0",
"bias",
"[",
"r_mask",
"]",
"=",
"a",
"[",
"r_mask",
"]",
"# Reshape",
"quantized_weight",
"=",
"qw",
".",
"reshape",
"(",
"shape",
")",
"if",
"axis",
"==",
"1",
":",
"quantized_weight",
"=",
"_np",
".",
"transpose",
"(",
"quantized_weight",
",",
"transposed_axis_order",
")",
"return",
"(",
"quantized_weight",
",",
"scale",
",",
"bias",
")"
] | Linearly quantize weight blob.
:param weight: numpy.array
Weight to be quantized.
:param nbits: int
Number of bits per weight element
:param axis: int
Axis of the weight blob to compute channel-wise quantization, can be 0 or 1
Returns
-------
quantized_weight: numpy.array
quantized weight as float numpy array, with the same shape as weight
scale: numpy.array
per channel scale
bias: numpy.array
per channel bias | [
"Linearly",
"quantize",
"weight",
"blob",
"."
] | 74514c3f99e25b46f22c6e02977fe3da69221c2e | https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/external/coremltools_wrap/coremltools/coremltools/models/neural_network/quantization_utils.py#L151-L212 |
28,937 | apple/turicreate | src/external/coremltools_wrap/coremltools/coremltools/models/neural_network/quantization_utils.py | _quantize_wp | def _quantize_wp(wp, nbits, qm, axis=0, **kwargs):
"""
Quantize the weight blob
:param wp: numpy.array
Weight parameters
:param nbits: int
Number of bits
:param qm:
Quantization mode
:param lut_function: (``callable function``)
Python callable representing a look-up table
Returns
-------
scale: numpy.array
Per-channel scale
bias: numpy.array
Per-channel bias
lut: numpy.array
Lookup table
quantized_wp: numpy.array
Quantized weight of same shape as wp, with dtype numpy.uint8
"""
scale = bias = lut = None
# Linear Quantization
if qm == _QUANTIZATION_MODE_LINEAR_QUANTIZATION:
qw, scale, bias = _quantize_channelwise_linear(wp, nbits, axis)
# Lookup tables
elif qm == _QUANTIZATION_MODE_LOOKUP_TABLE_KMEANS:
lut, qw = _get_kmeans_lookup_table_and_weight(nbits, wp)
elif qm == _QUANTIZATION_MODE_CUSTOM_LOOKUP_TABLE:
if 'lut_function' not in kwargs.keys():
raise Exception('Custom lookup table quantization mode '
'selected but no lookup table function passed')
lut_function = kwargs['lut_function']
if not callable(lut_function):
raise Exception('Argument for Lookup Table passed in but is '
'not callable')
try:
lut, qw = lut_function(nbits, wp)
except Exception as e:
raise Exception('{}\nCall to Lookup Table function failed'
.format(e.message))
elif qm == _QUANTIZATION_MODE_LOOKUP_TABLE_LINEAR:
lut, qw = _get_linear_lookup_table_and_weight(nbits, wp)
else:
raise NotImplementedError('Quantization method "{}" not supported'.format(qm))
quantized_wp = _np.uint8(qw)
return scale, bias, lut, quantized_wp | python | def _quantize_wp(wp, nbits, qm, axis=0, **kwargs):
"""
Quantize the weight blob
:param wp: numpy.array
Weight parameters
:param nbits: int
Number of bits
:param qm:
Quantization mode
:param lut_function: (``callable function``)
Python callable representing a look-up table
Returns
-------
scale: numpy.array
Per-channel scale
bias: numpy.array
Per-channel bias
lut: numpy.array
Lookup table
quantized_wp: numpy.array
Quantized weight of same shape as wp, with dtype numpy.uint8
"""
scale = bias = lut = None
# Linear Quantization
if qm == _QUANTIZATION_MODE_LINEAR_QUANTIZATION:
qw, scale, bias = _quantize_channelwise_linear(wp, nbits, axis)
# Lookup tables
elif qm == _QUANTIZATION_MODE_LOOKUP_TABLE_KMEANS:
lut, qw = _get_kmeans_lookup_table_and_weight(nbits, wp)
elif qm == _QUANTIZATION_MODE_CUSTOM_LOOKUP_TABLE:
if 'lut_function' not in kwargs.keys():
raise Exception('Custom lookup table quantization mode '
'selected but no lookup table function passed')
lut_function = kwargs['lut_function']
if not callable(lut_function):
raise Exception('Argument for Lookup Table passed in but is '
'not callable')
try:
lut, qw = lut_function(nbits, wp)
except Exception as e:
raise Exception('{}\nCall to Lookup Table function failed'
.format(e.message))
elif qm == _QUANTIZATION_MODE_LOOKUP_TABLE_LINEAR:
lut, qw = _get_linear_lookup_table_and_weight(nbits, wp)
else:
raise NotImplementedError('Quantization method "{}" not supported'.format(qm))
quantized_wp = _np.uint8(qw)
return scale, bias, lut, quantized_wp | [
"def",
"_quantize_wp",
"(",
"wp",
",",
"nbits",
",",
"qm",
",",
"axis",
"=",
"0",
",",
"*",
"*",
"kwargs",
")",
":",
"scale",
"=",
"bias",
"=",
"lut",
"=",
"None",
"# Linear Quantization",
"if",
"qm",
"==",
"_QUANTIZATION_MODE_LINEAR_QUANTIZATION",
":",
"qw",
",",
"scale",
",",
"bias",
"=",
"_quantize_channelwise_linear",
"(",
"wp",
",",
"nbits",
",",
"axis",
")",
"# Lookup tables",
"elif",
"qm",
"==",
"_QUANTIZATION_MODE_LOOKUP_TABLE_KMEANS",
":",
"lut",
",",
"qw",
"=",
"_get_kmeans_lookup_table_and_weight",
"(",
"nbits",
",",
"wp",
")",
"elif",
"qm",
"==",
"_QUANTIZATION_MODE_CUSTOM_LOOKUP_TABLE",
":",
"if",
"'lut_function'",
"not",
"in",
"kwargs",
".",
"keys",
"(",
")",
":",
"raise",
"Exception",
"(",
"'Custom lookup table quantization mode '",
"'selected but no lookup table function passed'",
")",
"lut_function",
"=",
"kwargs",
"[",
"'lut_function'",
"]",
"if",
"not",
"callable",
"(",
"lut_function",
")",
":",
"raise",
"Exception",
"(",
"'Argument for Lookup Table passed in but is '",
"'not callable'",
")",
"try",
":",
"lut",
",",
"qw",
"=",
"lut_function",
"(",
"nbits",
",",
"wp",
")",
"except",
"Exception",
"as",
"e",
":",
"raise",
"Exception",
"(",
"'{}\\nCall to Lookup Table function failed'",
".",
"format",
"(",
"e",
".",
"message",
")",
")",
"elif",
"qm",
"==",
"_QUANTIZATION_MODE_LOOKUP_TABLE_LINEAR",
":",
"lut",
",",
"qw",
"=",
"_get_linear_lookup_table_and_weight",
"(",
"nbits",
",",
"wp",
")",
"else",
":",
"raise",
"NotImplementedError",
"(",
"'Quantization method \"{}\" not supported'",
".",
"format",
"(",
"qm",
")",
")",
"quantized_wp",
"=",
"_np",
".",
"uint8",
"(",
"qw",
")",
"return",
"scale",
",",
"bias",
",",
"lut",
",",
"quantized_wp"
] | Quantize the weight blob
:param wp: numpy.array
Weight parameters
:param nbits: int
Number of bits
:param qm:
Quantization mode
:param lut_function: (``callable function``)
Python callable representing a look-up table
Returns
-------
scale: numpy.array
Per-channel scale
bias: numpy.array
Per-channel bias
lut: numpy.array
Lookup table
quantized_wp: numpy.array
Quantized weight of same shape as wp, with dtype numpy.uint8 | [
"Quantize",
"the",
"weight",
"blob"
] | 74514c3f99e25b46f22c6e02977fe3da69221c2e | https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/external/coremltools_wrap/coremltools/coremltools/models/neural_network/quantization_utils.py#L215-L266 |
28,938 | apple/turicreate | src/external/coremltools_wrap/coremltools/coremltools/models/neural_network/quantization_utils.py | _quantize_wp_field | def _quantize_wp_field(wp, nbits, qm, shape, axis=0, **kwargs):
"""
Quantize WeightParam field in Neural Network Protobuf
:param wp: MLModel.NeuralNetwork.WeightParam
WeightParam field
:param nbits: int
Number of bits to be quantized
:param qm: str
Quantization mode
:param shape: tuple
Tensor shape held by wp
:param axis: int
Axis over which quantization is performed on, can be either 0 or 1
:param lut_function: (``callable function``)
Python callable representing a LUT table function
"""
# De-quantization
if qm == _QUANTIZATION_MODE_DEQUANTIZE:
return _dequantize_wp(wp, shape, axis)
# If the float32 field is empty do nothing and return
if len(wp.floatValue) == 0:
return
# Half precision (16-bit) quantization
if nbits == 16:
return _wp_to_fp16wp(wp)
if nbits > 8:
raise Exception('Only 8-bit and lower quantization is supported')
if qm not in _SUPPORTED_QUANTIZATION_MODES:
raise Exception('Quantization mode {} not supported'.format(qm))
# axis parameter check
if axis == 1 and len(shape) != 4:
raise Exception('Quantization on second axis is only supported '
'for rank-4 weight blob.')
if axis != 0 and axis != 1:
raise Exception('Invalid quantization axis {} passed in. Allowed'
'values are 0 (first axis) and 1 (second axis)'.format(axis))
# WeightParam size check - non-linear quantizations are applied on layer level
num_channels = shape[axis] if qm == _QUANTIZATION_MODE_LINEAR_QUANTIZATION else 1
if len(wp.floatValue) % num_channels:
raise Exception('Number of quantization channels does not divide evenly into weights')
qparams = wp.quantization
qparams.numberOfBits = nbits
weights = _np.array(wp.floatValue).reshape(shape)
scale, bias, lut, uint8_weights = _quantize_wp(weights, nbits, qm, axis, **kwargs)
uint8_weights = uint8_weights.flatten()
if qm == _QUANTIZATION_MODE_LINEAR_QUANTIZATION:
qparams.linearQuantization.scale.extend(scale)
qparams.linearQuantization.bias.extend(bias)
else:
qparams.lookupTableQuantization.floatValue.extend(lut)
wp.rawValue = bytes()
if nbits == 8:
wp.rawValue += uint8_weights.tobytes()
else:
wp.rawValue += _convert_array_to_nbit_quantized_bytes(uint8_weights, nbits).tobytes()
del wp.floatValue[:] | python | def _quantize_wp_field(wp, nbits, qm, shape, axis=0, **kwargs):
"""
Quantize WeightParam field in Neural Network Protobuf
:param wp: MLModel.NeuralNetwork.WeightParam
WeightParam field
:param nbits: int
Number of bits to be quantized
:param qm: str
Quantization mode
:param shape: tuple
Tensor shape held by wp
:param axis: int
Axis over which quantization is performed on, can be either 0 or 1
:param lut_function: (``callable function``)
Python callable representing a LUT table function
"""
# De-quantization
if qm == _QUANTIZATION_MODE_DEQUANTIZE:
return _dequantize_wp(wp, shape, axis)
# If the float32 field is empty do nothing and return
if len(wp.floatValue) == 0:
return
# Half precision (16-bit) quantization
if nbits == 16:
return _wp_to_fp16wp(wp)
if nbits > 8:
raise Exception('Only 8-bit and lower quantization is supported')
if qm not in _SUPPORTED_QUANTIZATION_MODES:
raise Exception('Quantization mode {} not supported'.format(qm))
# axis parameter check
if axis == 1 and len(shape) != 4:
raise Exception('Quantization on second axis is only supported '
'for rank-4 weight blob.')
if axis != 0 and axis != 1:
raise Exception('Invalid quantization axis {} passed in. Allowed'
'values are 0 (first axis) and 1 (second axis)'.format(axis))
# WeightParam size check - non-linear quantizations are applied on layer level
num_channels = shape[axis] if qm == _QUANTIZATION_MODE_LINEAR_QUANTIZATION else 1
if len(wp.floatValue) % num_channels:
raise Exception('Number of quantization channels does not divide evenly into weights')
qparams = wp.quantization
qparams.numberOfBits = nbits
weights = _np.array(wp.floatValue).reshape(shape)
scale, bias, lut, uint8_weights = _quantize_wp(weights, nbits, qm, axis, **kwargs)
uint8_weights = uint8_weights.flatten()
if qm == _QUANTIZATION_MODE_LINEAR_QUANTIZATION:
qparams.linearQuantization.scale.extend(scale)
qparams.linearQuantization.bias.extend(bias)
else:
qparams.lookupTableQuantization.floatValue.extend(lut)
wp.rawValue = bytes()
if nbits == 8:
wp.rawValue += uint8_weights.tobytes()
else:
wp.rawValue += _convert_array_to_nbit_quantized_bytes(uint8_weights, nbits).tobytes()
del wp.floatValue[:] | [
"def",
"_quantize_wp_field",
"(",
"wp",
",",
"nbits",
",",
"qm",
",",
"shape",
",",
"axis",
"=",
"0",
",",
"*",
"*",
"kwargs",
")",
":",
"# De-quantization",
"if",
"qm",
"==",
"_QUANTIZATION_MODE_DEQUANTIZE",
":",
"return",
"_dequantize_wp",
"(",
"wp",
",",
"shape",
",",
"axis",
")",
"# If the float32 field is empty do nothing and return",
"if",
"len",
"(",
"wp",
".",
"floatValue",
")",
"==",
"0",
":",
"return",
"# Half precision (16-bit) quantization",
"if",
"nbits",
"==",
"16",
":",
"return",
"_wp_to_fp16wp",
"(",
"wp",
")",
"if",
"nbits",
">",
"8",
":",
"raise",
"Exception",
"(",
"'Only 8-bit and lower quantization is supported'",
")",
"if",
"qm",
"not",
"in",
"_SUPPORTED_QUANTIZATION_MODES",
":",
"raise",
"Exception",
"(",
"'Quantization mode {} not supported'",
".",
"format",
"(",
"qm",
")",
")",
"# axis parameter check",
"if",
"axis",
"==",
"1",
"and",
"len",
"(",
"shape",
")",
"!=",
"4",
":",
"raise",
"Exception",
"(",
"'Quantization on second axis is only supported '",
"'for rank-4 weight blob.'",
")",
"if",
"axis",
"!=",
"0",
"and",
"axis",
"!=",
"1",
":",
"raise",
"Exception",
"(",
"'Invalid quantization axis {} passed in. Allowed'",
"'values are 0 (first axis) and 1 (second axis)'",
".",
"format",
"(",
"axis",
")",
")",
"# WeightParam size check - non-linear quantizations are applied on layer level",
"num_channels",
"=",
"shape",
"[",
"axis",
"]",
"if",
"qm",
"==",
"_QUANTIZATION_MODE_LINEAR_QUANTIZATION",
"else",
"1",
"if",
"len",
"(",
"wp",
".",
"floatValue",
")",
"%",
"num_channels",
":",
"raise",
"Exception",
"(",
"'Number of quantization channels does not divide evenly into weights'",
")",
"qparams",
"=",
"wp",
".",
"quantization",
"qparams",
".",
"numberOfBits",
"=",
"nbits",
"weights",
"=",
"_np",
".",
"array",
"(",
"wp",
".",
"floatValue",
")",
".",
"reshape",
"(",
"shape",
")",
"scale",
",",
"bias",
",",
"lut",
",",
"uint8_weights",
"=",
"_quantize_wp",
"(",
"weights",
",",
"nbits",
",",
"qm",
",",
"axis",
",",
"*",
"*",
"kwargs",
")",
"uint8_weights",
"=",
"uint8_weights",
".",
"flatten",
"(",
")",
"if",
"qm",
"==",
"_QUANTIZATION_MODE_LINEAR_QUANTIZATION",
":",
"qparams",
".",
"linearQuantization",
".",
"scale",
".",
"extend",
"(",
"scale",
")",
"qparams",
".",
"linearQuantization",
".",
"bias",
".",
"extend",
"(",
"bias",
")",
"else",
":",
"qparams",
".",
"lookupTableQuantization",
".",
"floatValue",
".",
"extend",
"(",
"lut",
")",
"wp",
".",
"rawValue",
"=",
"bytes",
"(",
")",
"if",
"nbits",
"==",
"8",
":",
"wp",
".",
"rawValue",
"+=",
"uint8_weights",
".",
"tobytes",
"(",
")",
"else",
":",
"wp",
".",
"rawValue",
"+=",
"_convert_array_to_nbit_quantized_bytes",
"(",
"uint8_weights",
",",
"nbits",
")",
".",
"tobytes",
"(",
")",
"del",
"wp",
".",
"floatValue",
"[",
":",
"]"
] | Quantize WeightParam field in Neural Network Protobuf
:param wp: MLModel.NeuralNetwork.WeightParam
WeightParam field
:param nbits: int
Number of bits to be quantized
:param qm: str
Quantization mode
:param shape: tuple
Tensor shape held by wp
:param axis: int
Axis over which quantization is performed on, can be either 0 or 1
:param lut_function: (``callable function``)
Python callable representing a LUT table function | [
"Quantize",
"WeightParam",
"field",
"in",
"Neural",
"Network",
"Protobuf"
] | 74514c3f99e25b46f22c6e02977fe3da69221c2e | https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/external/coremltools_wrap/coremltools/coremltools/models/neural_network/quantization_utils.py#L269-L336 |
28,939 | apple/turicreate | src/external/coremltools_wrap/coremltools/coremltools/models/neural_network/quantization_utils.py | compare_models | def compare_models(full_precision_model, quantized_model,
sample_data):
"""
Utility function to compare the performance of a full precision vs quantized model
:param full_precision_model: MLModel
The full precision model with float32 weights
:param quantized_model: MLModel
Quantized version of the model with quantized weights
:param sample_data: str | [dict]
Data used to characterize performance of the quantized model in
comparison to the full precision model. Either a list of sample input
dictionaries or an absolute path to a directory containing images.
Path to a directory containing images is only valid for models with
one image input. For all other models a list of sample inputs must be
provided.
:return:
None. Performance metrics are printed out
"""
emessage = ("""
Invalid sample data provided. Only a list of dictionaries
containing sample data or path to a folder containing images is
supported""")
spec = full_precision_model.get_spec()
num_inputs = len(spec.description.input)
if isinstance(sample_data, str):
input_type = spec.description.input[0].type.WhichOneof('Type')
if num_inputs != 1 or input_type != 'imageType':
raise Exception("""Unable to analyze quantized models. Sample data
was a path to a directory which is only supported with models with
one image type input. Please try passing in a list of sample inputs
as sample data.
""")
_characterize_qmodel_perf_with_data_dir(full_precision_model, quantized_model.get_spec(), sample_data)
elif isinstance(sample_data, list):
if not all(type(d) is dict for d in sample_data):
raise Exception(emessage)
_characterize_quantized_model_perf(full_precision_model, quantized_model.get_spec(), sample_data)
else:
raise Exception(emessage) | python | def compare_models(full_precision_model, quantized_model,
sample_data):
"""
Utility function to compare the performance of a full precision vs quantized model
:param full_precision_model: MLModel
The full precision model with float32 weights
:param quantized_model: MLModel
Quantized version of the model with quantized weights
:param sample_data: str | [dict]
Data used to characterize performance of the quantized model in
comparison to the full precision model. Either a list of sample input
dictionaries or an absolute path to a directory containing images.
Path to a directory containing images is only valid for models with
one image input. For all other models a list of sample inputs must be
provided.
:return:
None. Performance metrics are printed out
"""
emessage = ("""
Invalid sample data provided. Only a list of dictionaries
containing sample data or path to a folder containing images is
supported""")
spec = full_precision_model.get_spec()
num_inputs = len(spec.description.input)
if isinstance(sample_data, str):
input_type = spec.description.input[0].type.WhichOneof('Type')
if num_inputs != 1 or input_type != 'imageType':
raise Exception("""Unable to analyze quantized models. Sample data
was a path to a directory which is only supported with models with
one image type input. Please try passing in a list of sample inputs
as sample data.
""")
_characterize_qmodel_perf_with_data_dir(full_precision_model, quantized_model.get_spec(), sample_data)
elif isinstance(sample_data, list):
if not all(type(d) is dict for d in sample_data):
raise Exception(emessage)
_characterize_quantized_model_perf(full_precision_model, quantized_model.get_spec(), sample_data)
else:
raise Exception(emessage) | [
"def",
"compare_models",
"(",
"full_precision_model",
",",
"quantized_model",
",",
"sample_data",
")",
":",
"emessage",
"=",
"(",
"\"\"\"\n Invalid sample data provided. Only a list of dictionaries\n containing sample data or path to a folder containing images is\n supported\"\"\"",
")",
"spec",
"=",
"full_precision_model",
".",
"get_spec",
"(",
")",
"num_inputs",
"=",
"len",
"(",
"spec",
".",
"description",
".",
"input",
")",
"if",
"isinstance",
"(",
"sample_data",
",",
"str",
")",
":",
"input_type",
"=",
"spec",
".",
"description",
".",
"input",
"[",
"0",
"]",
".",
"type",
".",
"WhichOneof",
"(",
"'Type'",
")",
"if",
"num_inputs",
"!=",
"1",
"or",
"input_type",
"!=",
"'imageType'",
":",
"raise",
"Exception",
"(",
"\"\"\"Unable to analyze quantized models. Sample data\n was a path to a directory which is only supported with models with\n one image type input. Please try passing in a list of sample inputs\n as sample data.\n \"\"\"",
")",
"_characterize_qmodel_perf_with_data_dir",
"(",
"full_precision_model",
",",
"quantized_model",
".",
"get_spec",
"(",
")",
",",
"sample_data",
")",
"elif",
"isinstance",
"(",
"sample_data",
",",
"list",
")",
":",
"if",
"not",
"all",
"(",
"type",
"(",
"d",
")",
"is",
"dict",
"for",
"d",
"in",
"sample_data",
")",
":",
"raise",
"Exception",
"(",
"emessage",
")",
"_characterize_quantized_model_perf",
"(",
"full_precision_model",
",",
"quantized_model",
".",
"get_spec",
"(",
")",
",",
"sample_data",
")",
"else",
":",
"raise",
"Exception",
"(",
"emessage",
")"
] | Utility function to compare the performance of a full precision vs quantized model
:param full_precision_model: MLModel
The full precision model with float32 weights
:param quantized_model: MLModel
Quantized version of the model with quantized weights
:param sample_data: str | [dict]
Data used to characterize performance of the quantized model in
comparison to the full precision model. Either a list of sample input
dictionaries or an absolute path to a directory containing images.
Path to a directory containing images is only valid for models with
one image input. For all other models a list of sample inputs must be
provided.
:return:
None. Performance metrics are printed out | [
"Utility",
"function",
"to",
"compare",
"the",
"performance",
"of",
"a",
"full",
"precision",
"vs",
"quantized",
"model"
] | 74514c3f99e25b46f22c6e02977fe3da69221c2e | https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/external/coremltools_wrap/coremltools/coremltools/models/neural_network/quantization_utils.py#L829-L874 |
28,940 | apple/turicreate | src/unity/python/turicreate/toolkits/recommender/item_similarity_recommender.py | create | def create(observation_data,
user_id='user_id', item_id='item_id', target=None,
user_data=None, item_data=None,
nearest_items=None,
similarity_type='jaccard',
threshold=0.001,
only_top_k=64,
verbose=True,
target_memory_usage = 8*1024*1024*1024,
**kwargs):
"""
Create a recommender that uses item-item similarities based on
users in common.
Parameters
----------
observation_data : SFrame
The dataset to use for training the model. It must contain a column of
user ids and a column of item ids. Each row represents an observed
interaction between the user and the item. The (user, item) pairs
are stored with the model so that they can later be excluded from
recommendations if desired. It can optionally contain a target ratings
column. All other columns are interpreted by the underlying model as
side features for the observations.
The user id and item id columns must be of type 'int' or 'str'. The
target column must be of type 'int' or 'float'.
user_id : string, optional
The name of the column in `observation_data` that corresponds to the
user id.
item_id : string, optional
The name of the column in `observation_data` that corresponds to the
item id.
target : string, optional
The `observation_data` can optionally contain a column of scores
representing ratings given by the users. If present, the name of this
column may be specified variables `target`.
user_data : SFrame, optional
Side information for the users. This SFrame must have a column with
the same name as what is specified by the `user_id` input parameter.
`user_data` can provide any amount of additional user-specific
information. (NB: This argument is currently ignored by this model.)
item_data : SFrame, optional
Side information for the items. This SFrame must have a column with
the same name as what is specified by the `item_id` input parameter.
`item_data` can provide any amount of additional item-specific
information. (NB: This argument is currently ignored by this model.)
similarity_type : {'jaccard', 'cosine', 'pearson'}, optional
Similarity metric to use. See ItemSimilarityRecommender for details.
Default: 'jaccard'.
threshold : float, optional
Predictions ignore items below this similarity value.
Default: 0.001.
only_top_k : int, optional
Number of similar items to store for each item. Default value is
64. Decreasing this decreases the amount of memory required for the
model, but may also decrease the accuracy.
nearest_items : SFrame, optional
A set of each item's nearest items. When provided, this overrides
the similarity computed above.
See Notes in the documentation for ItemSimilarityRecommender.
Default: None.
target_memory_usage : int, optional
The target memory usage for the processing buffers and lookup
tables. The actual memory usage may be higher or lower than this,
but decreasing this decreases memory usage at the expense of
training time, and increasing this can dramatically speed up the
training time. Default is 8GB = 8589934592.
seed_item_set_size : int, optional
For users that have not yet rated any items, or have only
rated uniquely occurring items with no similar item info,
the model seeds the user's item set with the average
ratings of the seed_item_set_size most popular items when
making predictions and recommendations. If set to 0, then
recommendations based on either popularity (no target present)
or average item score (target present) are made in this case.
training_method : (advanced), optional.
The internal processing is done with a combination of nearest
neighbor searching, dense tables for tracking item-item
similarities, and sparse item-item tables. If 'auto' is chosen
(default), then the estimated computation time is estimated for
each, and the computation balanced between the methods in order to
minimize training time given the target memory usage. This allows
the user to force the use of one of these methods. All should give
equivalent results; the only difference would be training time.
Possible values are {'auto', 'dense', 'sparse', 'nn', 'nn:dense',
'nn:sparse'}. 'dense' uses a dense matrix to store item-item
interactions as a lookup, and may do multiple passes to control
memory requirements. 'sparse' does the same but with a sparse lookup
table; this is better if the data has many infrequent items. "nn"
uses a brute-force nearest neighbors search. "nn:dense" and
"nn:sparse" use nearest neighbors for the most frequent items
(see nearest_neighbors_interaction_proportion_threshold below),
and either sparse or dense matrices for the remainder. "auto"
chooses the method predicted to be the fastest based on the
properties of the data.
nearest_neighbors_interaction_proportion_threshold : (advanced) float
Any item that has was rated by more than this proportion of
users is treated by doing a nearest neighbors search. For
frequent items, this is almost always faster, but it is slower
for infrequent items. Furthermore, decreasing this causes more
items to be processed using the nearest neighbor path, which may
decrease memory requirements.
degree_approximation_threshold : (advanced) int, optional
Users with more than this many item interactions may be
approximated. The approximation is done by a combination of
sampling and choosing the interactions likely to have the most
impact on the model. Increasing this can increase the training time
and may or may not increase the quality of the model. Default = 4096.
max_data_passes : (advanced) int, optional
The maximum number of passes through the data allowed in
building the similarity lookup tables. If it is not possible to
build the recommender in this many passes (calculated before
that stage of training), then additional approximations are
applied; namely decreasing degree_approximation_threshold. If
this is not possible, an error is raised. To decrease the
number of passes required, increase target_memory_usage or
decrease nearest_neighbors_interaction_proportion_threshold.
Default = 1024.
Examples
--------
Given basic user-item observation data, an
:class:`~turicreate.recommender.item_similarity_recommender.ItemSimilarityRecommender` is created:
>>> sf = turicreate.SFrame({'user_id': ['0', '0', '0', '1', '1', '2', '2', '2'],
... 'item_id': ['a', 'b', 'c', 'a', 'b', 'b', 'c', 'd']})
>>> m = turicreate.item_similarity_recommender.create(sf)
>>> recs = m.recommend()
When a target is available, one can specify the desired similarity. For
example we may choose to use a cosine similarity, and use it to make
predictions or recommendations.
>>> sf2 = turicreate.SFrame({'user_id': ['0', '0', '0', '1', '1', '2', '2', '2'],
... 'item_id': ['a', 'b', 'c', 'a', 'b', 'b', 'c', 'd'],
... 'rating': [1, 3, 2, 5, 4, 1, 4, 3]})
>>> m2 = turicreate.item_similarity_recommender.create(sf2, target="rating",
... similarity_type='cosine')
>>> m2.predict(sf)
>>> m2.recommend()
Notes
-----
Currently, :class:`~turicreate.recommender.item_similarity_recommender.ItemSimilarityRecommender`
does not leverage the use of side features `user_data` and `item_data`.
**Incorporating pre-defined similar items**
For item similarity models, one may choose to provide user-specified
nearest neighbors graph using the keyword argument `nearest_items`. This is
an SFrame containing, for each item, the nearest items and the similarity
score between them. If provided, these item similarity scores are used for
recommendations. The SFrame must contain (at least) three columns:
* 'item_id': a column with the same name as that provided to the `item_id`
argument (which defaults to the string "item_id").
* 'similar': a column containing the nearest items for the given item id.
This should have the same type as the `item_id` column.
* 'score': a numeric score measuring how similar these two items are.
For example, suppose you first create an ItemSimilarityRecommender and use
:class:`~turicreate.recommender.ItemSimilarityRecommender.get_similar_items`:
>>> sf = turicreate.SFrame({'user_id': ["0", "0", "0", "1", "1", "2", "2", "2"],
... 'item_id': ["a", "b", "c", "a", "b", "b", "c", "d"]})
>>> m = turicreate.item_similarity_recommender.create(sf)
>>> nn = m.get_similar_items()
>>> m2 = turicreate.item_similarity_recommender.create(sf, nearest_items=nn)
With the above code, the item similarities computed for model `m` can be
used to create a new recommender object, `m2`. Note that we could have
created `nn` from some other means, but now use `m2` to make
recommendations via `m2.recommend()`.
See Also
--------
ItemSimilarityRecommender
"""
from turicreate._cython.cy_server import QuietProgress
opts = {}
model_proxy = _turicreate.extensions.item_similarity()
model_proxy.init_options(opts)
if user_data is None:
user_data = _turicreate.SFrame()
if item_data is None:
item_data = _turicreate.SFrame()
if nearest_items is None:
nearest_items = _turicreate.SFrame()
if "training_method" in kwargs and kwargs["training_method"] in ["in_memory", "sgraph"]:
print("WARNING: training_method = " + str(kwargs["training_method"]) + " deprecated; see documentation.")
kwargs["training_method"] = "auto"
opts = {'user_id': user_id,
'item_id': item_id,
'target': target,
'similarity_type': similarity_type,
'threshold': threshold,
'target_memory_usage' : float(target_memory_usage),
'max_item_neighborhood_size': only_top_k}
extra_data = {"nearest_items" : nearest_items}
if kwargs:
try:
possible_args = set(_get_default_options()["name"])
except (RuntimeError, KeyError):
possible_args = set()
bad_arguments = set(kwargs.keys()).difference(possible_args)
if bad_arguments:
raise TypeError("Bad Keyword Arguments: " + ', '.join(bad_arguments))
opts.update(kwargs)
extra_data = {"nearest_items" : nearest_items}
opts.update(kwargs)
with QuietProgress(verbose):
model_proxy.train(observation_data, user_data, item_data, opts, extra_data)
return ItemSimilarityRecommender(model_proxy) | python | def create(observation_data,
user_id='user_id', item_id='item_id', target=None,
user_data=None, item_data=None,
nearest_items=None,
similarity_type='jaccard',
threshold=0.001,
only_top_k=64,
verbose=True,
target_memory_usage = 8*1024*1024*1024,
**kwargs):
"""
Create a recommender that uses item-item similarities based on
users in common.
Parameters
----------
observation_data : SFrame
The dataset to use for training the model. It must contain a column of
user ids and a column of item ids. Each row represents an observed
interaction between the user and the item. The (user, item) pairs
are stored with the model so that they can later be excluded from
recommendations if desired. It can optionally contain a target ratings
column. All other columns are interpreted by the underlying model as
side features for the observations.
The user id and item id columns must be of type 'int' or 'str'. The
target column must be of type 'int' or 'float'.
user_id : string, optional
The name of the column in `observation_data` that corresponds to the
user id.
item_id : string, optional
The name of the column in `observation_data` that corresponds to the
item id.
target : string, optional
The `observation_data` can optionally contain a column of scores
representing ratings given by the users. If present, the name of this
column may be specified variables `target`.
user_data : SFrame, optional
Side information for the users. This SFrame must have a column with
the same name as what is specified by the `user_id` input parameter.
`user_data` can provide any amount of additional user-specific
information. (NB: This argument is currently ignored by this model.)
item_data : SFrame, optional
Side information for the items. This SFrame must have a column with
the same name as what is specified by the `item_id` input parameter.
`item_data` can provide any amount of additional item-specific
information. (NB: This argument is currently ignored by this model.)
similarity_type : {'jaccard', 'cosine', 'pearson'}, optional
Similarity metric to use. See ItemSimilarityRecommender for details.
Default: 'jaccard'.
threshold : float, optional
Predictions ignore items below this similarity value.
Default: 0.001.
only_top_k : int, optional
Number of similar items to store for each item. Default value is
64. Decreasing this decreases the amount of memory required for the
model, but may also decrease the accuracy.
nearest_items : SFrame, optional
A set of each item's nearest items. When provided, this overrides
the similarity computed above.
See Notes in the documentation for ItemSimilarityRecommender.
Default: None.
target_memory_usage : int, optional
The target memory usage for the processing buffers and lookup
tables. The actual memory usage may be higher or lower than this,
but decreasing this decreases memory usage at the expense of
training time, and increasing this can dramatically speed up the
training time. Default is 8GB = 8589934592.
seed_item_set_size : int, optional
For users that have not yet rated any items, or have only
rated uniquely occurring items with no similar item info,
the model seeds the user's item set with the average
ratings of the seed_item_set_size most popular items when
making predictions and recommendations. If set to 0, then
recommendations based on either popularity (no target present)
or average item score (target present) are made in this case.
training_method : (advanced), optional.
The internal processing is done with a combination of nearest
neighbor searching, dense tables for tracking item-item
similarities, and sparse item-item tables. If 'auto' is chosen
(default), then the estimated computation time is estimated for
each, and the computation balanced between the methods in order to
minimize training time given the target memory usage. This allows
the user to force the use of one of these methods. All should give
equivalent results; the only difference would be training time.
Possible values are {'auto', 'dense', 'sparse', 'nn', 'nn:dense',
'nn:sparse'}. 'dense' uses a dense matrix to store item-item
interactions as a lookup, and may do multiple passes to control
memory requirements. 'sparse' does the same but with a sparse lookup
table; this is better if the data has many infrequent items. "nn"
uses a brute-force nearest neighbors search. "nn:dense" and
"nn:sparse" use nearest neighbors for the most frequent items
(see nearest_neighbors_interaction_proportion_threshold below),
and either sparse or dense matrices for the remainder. "auto"
chooses the method predicted to be the fastest based on the
properties of the data.
nearest_neighbors_interaction_proportion_threshold : (advanced) float
Any item that has was rated by more than this proportion of
users is treated by doing a nearest neighbors search. For
frequent items, this is almost always faster, but it is slower
for infrequent items. Furthermore, decreasing this causes more
items to be processed using the nearest neighbor path, which may
decrease memory requirements.
degree_approximation_threshold : (advanced) int, optional
Users with more than this many item interactions may be
approximated. The approximation is done by a combination of
sampling and choosing the interactions likely to have the most
impact on the model. Increasing this can increase the training time
and may or may not increase the quality of the model. Default = 4096.
max_data_passes : (advanced) int, optional
The maximum number of passes through the data allowed in
building the similarity lookup tables. If it is not possible to
build the recommender in this many passes (calculated before
that stage of training), then additional approximations are
applied; namely decreasing degree_approximation_threshold. If
this is not possible, an error is raised. To decrease the
number of passes required, increase target_memory_usage or
decrease nearest_neighbors_interaction_proportion_threshold.
Default = 1024.
Examples
--------
Given basic user-item observation data, an
:class:`~turicreate.recommender.item_similarity_recommender.ItemSimilarityRecommender` is created:
>>> sf = turicreate.SFrame({'user_id': ['0', '0', '0', '1', '1', '2', '2', '2'],
... 'item_id': ['a', 'b', 'c', 'a', 'b', 'b', 'c', 'd']})
>>> m = turicreate.item_similarity_recommender.create(sf)
>>> recs = m.recommend()
When a target is available, one can specify the desired similarity. For
example we may choose to use a cosine similarity, and use it to make
predictions or recommendations.
>>> sf2 = turicreate.SFrame({'user_id': ['0', '0', '0', '1', '1', '2', '2', '2'],
... 'item_id': ['a', 'b', 'c', 'a', 'b', 'b', 'c', 'd'],
... 'rating': [1, 3, 2, 5, 4, 1, 4, 3]})
>>> m2 = turicreate.item_similarity_recommender.create(sf2, target="rating",
... similarity_type='cosine')
>>> m2.predict(sf)
>>> m2.recommend()
Notes
-----
Currently, :class:`~turicreate.recommender.item_similarity_recommender.ItemSimilarityRecommender`
does not leverage the use of side features `user_data` and `item_data`.
**Incorporating pre-defined similar items**
For item similarity models, one may choose to provide user-specified
nearest neighbors graph using the keyword argument `nearest_items`. This is
an SFrame containing, for each item, the nearest items and the similarity
score between them. If provided, these item similarity scores are used for
recommendations. The SFrame must contain (at least) three columns:
* 'item_id': a column with the same name as that provided to the `item_id`
argument (which defaults to the string "item_id").
* 'similar': a column containing the nearest items for the given item id.
This should have the same type as the `item_id` column.
* 'score': a numeric score measuring how similar these two items are.
For example, suppose you first create an ItemSimilarityRecommender and use
:class:`~turicreate.recommender.ItemSimilarityRecommender.get_similar_items`:
>>> sf = turicreate.SFrame({'user_id': ["0", "0", "0", "1", "1", "2", "2", "2"],
... 'item_id': ["a", "b", "c", "a", "b", "b", "c", "d"]})
>>> m = turicreate.item_similarity_recommender.create(sf)
>>> nn = m.get_similar_items()
>>> m2 = turicreate.item_similarity_recommender.create(sf, nearest_items=nn)
With the above code, the item similarities computed for model `m` can be
used to create a new recommender object, `m2`. Note that we could have
created `nn` from some other means, but now use `m2` to make
recommendations via `m2.recommend()`.
See Also
--------
ItemSimilarityRecommender
"""
from turicreate._cython.cy_server import QuietProgress
opts = {}
model_proxy = _turicreate.extensions.item_similarity()
model_proxy.init_options(opts)
if user_data is None:
user_data = _turicreate.SFrame()
if item_data is None:
item_data = _turicreate.SFrame()
if nearest_items is None:
nearest_items = _turicreate.SFrame()
if "training_method" in kwargs and kwargs["training_method"] in ["in_memory", "sgraph"]:
print("WARNING: training_method = " + str(kwargs["training_method"]) + " deprecated; see documentation.")
kwargs["training_method"] = "auto"
opts = {'user_id': user_id,
'item_id': item_id,
'target': target,
'similarity_type': similarity_type,
'threshold': threshold,
'target_memory_usage' : float(target_memory_usage),
'max_item_neighborhood_size': only_top_k}
extra_data = {"nearest_items" : nearest_items}
if kwargs:
try:
possible_args = set(_get_default_options()["name"])
except (RuntimeError, KeyError):
possible_args = set()
bad_arguments = set(kwargs.keys()).difference(possible_args)
if bad_arguments:
raise TypeError("Bad Keyword Arguments: " + ', '.join(bad_arguments))
opts.update(kwargs)
extra_data = {"nearest_items" : nearest_items}
opts.update(kwargs)
with QuietProgress(verbose):
model_proxy.train(observation_data, user_data, item_data, opts, extra_data)
return ItemSimilarityRecommender(model_proxy) | [
"def",
"create",
"(",
"observation_data",
",",
"user_id",
"=",
"'user_id'",
",",
"item_id",
"=",
"'item_id'",
",",
"target",
"=",
"None",
",",
"user_data",
"=",
"None",
",",
"item_data",
"=",
"None",
",",
"nearest_items",
"=",
"None",
",",
"similarity_type",
"=",
"'jaccard'",
",",
"threshold",
"=",
"0.001",
",",
"only_top_k",
"=",
"64",
",",
"verbose",
"=",
"True",
",",
"target_memory_usage",
"=",
"8",
"*",
"1024",
"*",
"1024",
"*",
"1024",
",",
"*",
"*",
"kwargs",
")",
":",
"from",
"turicreate",
".",
"_cython",
".",
"cy_server",
"import",
"QuietProgress",
"opts",
"=",
"{",
"}",
"model_proxy",
"=",
"_turicreate",
".",
"extensions",
".",
"item_similarity",
"(",
")",
"model_proxy",
".",
"init_options",
"(",
"opts",
")",
"if",
"user_data",
"is",
"None",
":",
"user_data",
"=",
"_turicreate",
".",
"SFrame",
"(",
")",
"if",
"item_data",
"is",
"None",
":",
"item_data",
"=",
"_turicreate",
".",
"SFrame",
"(",
")",
"if",
"nearest_items",
"is",
"None",
":",
"nearest_items",
"=",
"_turicreate",
".",
"SFrame",
"(",
")",
"if",
"\"training_method\"",
"in",
"kwargs",
"and",
"kwargs",
"[",
"\"training_method\"",
"]",
"in",
"[",
"\"in_memory\"",
",",
"\"sgraph\"",
"]",
":",
"print",
"(",
"\"WARNING: training_method = \"",
"+",
"str",
"(",
"kwargs",
"[",
"\"training_method\"",
"]",
")",
"+",
"\" deprecated; see documentation.\"",
")",
"kwargs",
"[",
"\"training_method\"",
"]",
"=",
"\"auto\"",
"opts",
"=",
"{",
"'user_id'",
":",
"user_id",
",",
"'item_id'",
":",
"item_id",
",",
"'target'",
":",
"target",
",",
"'similarity_type'",
":",
"similarity_type",
",",
"'threshold'",
":",
"threshold",
",",
"'target_memory_usage'",
":",
"float",
"(",
"target_memory_usage",
")",
",",
"'max_item_neighborhood_size'",
":",
"only_top_k",
"}",
"extra_data",
"=",
"{",
"\"nearest_items\"",
":",
"nearest_items",
"}",
"if",
"kwargs",
":",
"try",
":",
"possible_args",
"=",
"set",
"(",
"_get_default_options",
"(",
")",
"[",
"\"name\"",
"]",
")",
"except",
"(",
"RuntimeError",
",",
"KeyError",
")",
":",
"possible_args",
"=",
"set",
"(",
")",
"bad_arguments",
"=",
"set",
"(",
"kwargs",
".",
"keys",
"(",
")",
")",
".",
"difference",
"(",
"possible_args",
")",
"if",
"bad_arguments",
":",
"raise",
"TypeError",
"(",
"\"Bad Keyword Arguments: \"",
"+",
"', '",
".",
"join",
"(",
"bad_arguments",
")",
")",
"opts",
".",
"update",
"(",
"kwargs",
")",
"extra_data",
"=",
"{",
"\"nearest_items\"",
":",
"nearest_items",
"}",
"opts",
".",
"update",
"(",
"kwargs",
")",
"with",
"QuietProgress",
"(",
"verbose",
")",
":",
"model_proxy",
".",
"train",
"(",
"observation_data",
",",
"user_data",
",",
"item_data",
",",
"opts",
",",
"extra_data",
")",
"return",
"ItemSimilarityRecommender",
"(",
"model_proxy",
")"
] | Create a recommender that uses item-item similarities based on
users in common.
Parameters
----------
observation_data : SFrame
The dataset to use for training the model. It must contain a column of
user ids and a column of item ids. Each row represents an observed
interaction between the user and the item. The (user, item) pairs
are stored with the model so that they can later be excluded from
recommendations if desired. It can optionally contain a target ratings
column. All other columns are interpreted by the underlying model as
side features for the observations.
The user id and item id columns must be of type 'int' or 'str'. The
target column must be of type 'int' or 'float'.
user_id : string, optional
The name of the column in `observation_data` that corresponds to the
user id.
item_id : string, optional
The name of the column in `observation_data` that corresponds to the
item id.
target : string, optional
The `observation_data` can optionally contain a column of scores
representing ratings given by the users. If present, the name of this
column may be specified variables `target`.
user_data : SFrame, optional
Side information for the users. This SFrame must have a column with
the same name as what is specified by the `user_id` input parameter.
`user_data` can provide any amount of additional user-specific
information. (NB: This argument is currently ignored by this model.)
item_data : SFrame, optional
Side information for the items. This SFrame must have a column with
the same name as what is specified by the `item_id` input parameter.
`item_data` can provide any amount of additional item-specific
information. (NB: This argument is currently ignored by this model.)
similarity_type : {'jaccard', 'cosine', 'pearson'}, optional
Similarity metric to use. See ItemSimilarityRecommender for details.
Default: 'jaccard'.
threshold : float, optional
Predictions ignore items below this similarity value.
Default: 0.001.
only_top_k : int, optional
Number of similar items to store for each item. Default value is
64. Decreasing this decreases the amount of memory required for the
model, but may also decrease the accuracy.
nearest_items : SFrame, optional
A set of each item's nearest items. When provided, this overrides
the similarity computed above.
See Notes in the documentation for ItemSimilarityRecommender.
Default: None.
target_memory_usage : int, optional
The target memory usage for the processing buffers and lookup
tables. The actual memory usage may be higher or lower than this,
but decreasing this decreases memory usage at the expense of
training time, and increasing this can dramatically speed up the
training time. Default is 8GB = 8589934592.
seed_item_set_size : int, optional
For users that have not yet rated any items, or have only
rated uniquely occurring items with no similar item info,
the model seeds the user's item set with the average
ratings of the seed_item_set_size most popular items when
making predictions and recommendations. If set to 0, then
recommendations based on either popularity (no target present)
or average item score (target present) are made in this case.
training_method : (advanced), optional.
The internal processing is done with a combination of nearest
neighbor searching, dense tables for tracking item-item
similarities, and sparse item-item tables. If 'auto' is chosen
(default), then the estimated computation time is estimated for
each, and the computation balanced between the methods in order to
minimize training time given the target memory usage. This allows
the user to force the use of one of these methods. All should give
equivalent results; the only difference would be training time.
Possible values are {'auto', 'dense', 'sparse', 'nn', 'nn:dense',
'nn:sparse'}. 'dense' uses a dense matrix to store item-item
interactions as a lookup, and may do multiple passes to control
memory requirements. 'sparse' does the same but with a sparse lookup
table; this is better if the data has many infrequent items. "nn"
uses a brute-force nearest neighbors search. "nn:dense" and
"nn:sparse" use nearest neighbors for the most frequent items
(see nearest_neighbors_interaction_proportion_threshold below),
and either sparse or dense matrices for the remainder. "auto"
chooses the method predicted to be the fastest based on the
properties of the data.
nearest_neighbors_interaction_proportion_threshold : (advanced) float
Any item that has was rated by more than this proportion of
users is treated by doing a nearest neighbors search. For
frequent items, this is almost always faster, but it is slower
for infrequent items. Furthermore, decreasing this causes more
items to be processed using the nearest neighbor path, which may
decrease memory requirements.
degree_approximation_threshold : (advanced) int, optional
Users with more than this many item interactions may be
approximated. The approximation is done by a combination of
sampling and choosing the interactions likely to have the most
impact on the model. Increasing this can increase the training time
and may or may not increase the quality of the model. Default = 4096.
max_data_passes : (advanced) int, optional
The maximum number of passes through the data allowed in
building the similarity lookup tables. If it is not possible to
build the recommender in this many passes (calculated before
that stage of training), then additional approximations are
applied; namely decreasing degree_approximation_threshold. If
this is not possible, an error is raised. To decrease the
number of passes required, increase target_memory_usage or
decrease nearest_neighbors_interaction_proportion_threshold.
Default = 1024.
Examples
--------
Given basic user-item observation data, an
:class:`~turicreate.recommender.item_similarity_recommender.ItemSimilarityRecommender` is created:
>>> sf = turicreate.SFrame({'user_id': ['0', '0', '0', '1', '1', '2', '2', '2'],
... 'item_id': ['a', 'b', 'c', 'a', 'b', 'b', 'c', 'd']})
>>> m = turicreate.item_similarity_recommender.create(sf)
>>> recs = m.recommend()
When a target is available, one can specify the desired similarity. For
example we may choose to use a cosine similarity, and use it to make
predictions or recommendations.
>>> sf2 = turicreate.SFrame({'user_id': ['0', '0', '0', '1', '1', '2', '2', '2'],
... 'item_id': ['a', 'b', 'c', 'a', 'b', 'b', 'c', 'd'],
... 'rating': [1, 3, 2, 5, 4, 1, 4, 3]})
>>> m2 = turicreate.item_similarity_recommender.create(sf2, target="rating",
... similarity_type='cosine')
>>> m2.predict(sf)
>>> m2.recommend()
Notes
-----
Currently, :class:`~turicreate.recommender.item_similarity_recommender.ItemSimilarityRecommender`
does not leverage the use of side features `user_data` and `item_data`.
**Incorporating pre-defined similar items**
For item similarity models, one may choose to provide user-specified
nearest neighbors graph using the keyword argument `nearest_items`. This is
an SFrame containing, for each item, the nearest items and the similarity
score between them. If provided, these item similarity scores are used for
recommendations. The SFrame must contain (at least) three columns:
* 'item_id': a column with the same name as that provided to the `item_id`
argument (which defaults to the string "item_id").
* 'similar': a column containing the nearest items for the given item id.
This should have the same type as the `item_id` column.
* 'score': a numeric score measuring how similar these two items are.
For example, suppose you first create an ItemSimilarityRecommender and use
:class:`~turicreate.recommender.ItemSimilarityRecommender.get_similar_items`:
>>> sf = turicreate.SFrame({'user_id': ["0", "0", "0", "1", "1", "2", "2", "2"],
... 'item_id': ["a", "b", "c", "a", "b", "b", "c", "d"]})
>>> m = turicreate.item_similarity_recommender.create(sf)
>>> nn = m.get_similar_items()
>>> m2 = turicreate.item_similarity_recommender.create(sf, nearest_items=nn)
With the above code, the item similarities computed for model `m` can be
used to create a new recommender object, `m2`. Note that we could have
created `nn` from some other means, but now use `m2` to make
recommendations via `m2.recommend()`.
See Also
--------
ItemSimilarityRecommender | [
"Create",
"a",
"recommender",
"that",
"uses",
"item",
"-",
"item",
"similarities",
"based",
"on",
"users",
"in",
"common",
"."
] | 74514c3f99e25b46f22c6e02977fe3da69221c2e | https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/unity/python/turicreate/toolkits/recommender/item_similarity_recommender.py#L17-L259 |
28,941 | apple/turicreate | src/external/coremltools_wrap/coremltools/coremltools/converters/keras/_layers2.py | convert_advanced_relu | def convert_advanced_relu(builder, layer, input_names, output_names, keras_layer):
"""
Convert an ReLU layer with maximum value from keras to coreml.
Parameters
----------
keras_layer: layer
A keras layer object.
builder: NeuralNetworkBuilder
A neural network builder object.
"""
# Get input and output names
input_name, output_name = (input_names[0], output_names[0])
if keras_layer.max_value is None:
builder.add_activation(layer, 'RELU', input_name, output_name)
return
# No direct support of RELU with max-activation value - use negate and
# clip layers
relu_output_name = output_name + '_relu'
builder.add_activation(layer, 'RELU', input_name, relu_output_name)
# negate it
neg_output_name = relu_output_name + '_neg'
builder.add_activation(layer+'__neg__', 'LINEAR', relu_output_name,
neg_output_name,[-1.0, 0])
# apply threshold
clip_output_name = relu_output_name + '_clip'
builder.add_unary(layer+'__clip__', neg_output_name, clip_output_name,
'threshold', alpha = -keras_layer.max_value)
# negate it back
builder.add_activation(layer+'_neg2', 'LINEAR', clip_output_name,
output_name,[-1.0, 0]) | python | def convert_advanced_relu(builder, layer, input_names, output_names, keras_layer):
"""
Convert an ReLU layer with maximum value from keras to coreml.
Parameters
----------
keras_layer: layer
A keras layer object.
builder: NeuralNetworkBuilder
A neural network builder object.
"""
# Get input and output names
input_name, output_name = (input_names[0], output_names[0])
if keras_layer.max_value is None:
builder.add_activation(layer, 'RELU', input_name, output_name)
return
# No direct support of RELU with max-activation value - use negate and
# clip layers
relu_output_name = output_name + '_relu'
builder.add_activation(layer, 'RELU', input_name, relu_output_name)
# negate it
neg_output_name = relu_output_name + '_neg'
builder.add_activation(layer+'__neg__', 'LINEAR', relu_output_name,
neg_output_name,[-1.0, 0])
# apply threshold
clip_output_name = relu_output_name + '_clip'
builder.add_unary(layer+'__clip__', neg_output_name, clip_output_name,
'threshold', alpha = -keras_layer.max_value)
# negate it back
builder.add_activation(layer+'_neg2', 'LINEAR', clip_output_name,
output_name,[-1.0, 0]) | [
"def",
"convert_advanced_relu",
"(",
"builder",
",",
"layer",
",",
"input_names",
",",
"output_names",
",",
"keras_layer",
")",
":",
"# Get input and output names",
"input_name",
",",
"output_name",
"=",
"(",
"input_names",
"[",
"0",
"]",
",",
"output_names",
"[",
"0",
"]",
")",
"if",
"keras_layer",
".",
"max_value",
"is",
"None",
":",
"builder",
".",
"add_activation",
"(",
"layer",
",",
"'RELU'",
",",
"input_name",
",",
"output_name",
")",
"return",
"# No direct support of RELU with max-activation value - use negate and",
"# clip layers",
"relu_output_name",
"=",
"output_name",
"+",
"'_relu'",
"builder",
".",
"add_activation",
"(",
"layer",
",",
"'RELU'",
",",
"input_name",
",",
"relu_output_name",
")",
"# negate it",
"neg_output_name",
"=",
"relu_output_name",
"+",
"'_neg'",
"builder",
".",
"add_activation",
"(",
"layer",
"+",
"'__neg__'",
",",
"'LINEAR'",
",",
"relu_output_name",
",",
"neg_output_name",
",",
"[",
"-",
"1.0",
",",
"0",
"]",
")",
"# apply threshold",
"clip_output_name",
"=",
"relu_output_name",
"+",
"'_clip'",
"builder",
".",
"add_unary",
"(",
"layer",
"+",
"'__clip__'",
",",
"neg_output_name",
",",
"clip_output_name",
",",
"'threshold'",
",",
"alpha",
"=",
"-",
"keras_layer",
".",
"max_value",
")",
"# negate it back",
"builder",
".",
"add_activation",
"(",
"layer",
"+",
"'_neg2'",
",",
"'LINEAR'",
",",
"clip_output_name",
",",
"output_name",
",",
"[",
"-",
"1.0",
",",
"0",
"]",
")"
] | Convert an ReLU layer with maximum value from keras to coreml.
Parameters
----------
keras_layer: layer
A keras layer object.
builder: NeuralNetworkBuilder
A neural network builder object. | [
"Convert",
"an",
"ReLU",
"layer",
"with",
"maximum",
"value",
"from",
"keras",
"to",
"coreml",
"."
] | 74514c3f99e25b46f22c6e02977fe3da69221c2e | https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/external/coremltools_wrap/coremltools/coremltools/converters/keras/_layers2.py#L269-L302 |
28,942 | apple/turicreate | src/external/coremltools_wrap/coremltools/coremltools/converters/keras/_layers2.py | convert_separable_convolution | def convert_separable_convolution(builder, layer, input_names, output_names, keras_layer):
"""
Convert separable convolution layer from keras to coreml.
Parameters
----------
keras_layer: layer
A keras layer object.
builder: NeuralNetworkBuilder
A neural network builder object.
"""
_check_data_format(keras_layer)
# Get input and output names
input_name, output_name = (input_names[0], output_names[0])
has_bias = keras_layer.use_bias
# Get the weights from _keras.
weight_list = keras_layer.get_weights()
output_blob_shape = list(filter(None, keras_layer.output_shape))
output_channels = output_blob_shape[-1]
# D: depth mutliplier
# w[0] is (H,W,Cin,D)
# w[1] is (1,1,Cin * D, Cout)
W0 = weight_list[0]
W1 = weight_list[1]
height, width, input_channels, depth_mult = W0.shape
b = weight_list[2] if has_bias else None
W0 = _np.reshape(W0, (height, width, 1, input_channels * depth_mult))
stride_height, stride_width = keras_layer.strides
# Dilations
if (type(keras_layer.dilation_rate) is list) or (type(keras_layer.dilation_rate) is tuple):
dilations = [keras_layer.dilation_rate[0], keras_layer.dilation_rate[1]]
else:
dilations = [keras_layer.dilation_rate, keras_layer.dilation_rate]
intermediate_name = output_name + '_intermin_'
builder.add_convolution(name = layer + '_step_1',
kernel_channels = 1,
output_channels = input_channels * depth_mult,
height = height,
width = width,
stride_height = stride_height,
stride_width = stride_width,
border_mode = keras_layer.padding,
groups = input_channels,
W = W0,
b = None,
has_bias = False,
is_deconv = False,
output_shape = None,
input_name = input_name,
output_name = intermediate_name,
dilation_factors = dilations)
builder.add_convolution(name = layer + '_step_2',
kernel_channels = input_channels * depth_mult,
output_channels = output_channels,
height = 1,
width = 1,
stride_height = 1,
stride_width = 1,
border_mode = keras_layer.padding,
groups = 1,
W = W1,
b = b,
has_bias = has_bias,
is_deconv = False,
output_shape = None,
input_name = intermediate_name,
output_name = output_name,
dilation_factors = [1,1]) | python | def convert_separable_convolution(builder, layer, input_names, output_names, keras_layer):
"""
Convert separable convolution layer from keras to coreml.
Parameters
----------
keras_layer: layer
A keras layer object.
builder: NeuralNetworkBuilder
A neural network builder object.
"""
_check_data_format(keras_layer)
# Get input and output names
input_name, output_name = (input_names[0], output_names[0])
has_bias = keras_layer.use_bias
# Get the weights from _keras.
weight_list = keras_layer.get_weights()
output_blob_shape = list(filter(None, keras_layer.output_shape))
output_channels = output_blob_shape[-1]
# D: depth mutliplier
# w[0] is (H,W,Cin,D)
# w[1] is (1,1,Cin * D, Cout)
W0 = weight_list[0]
W1 = weight_list[1]
height, width, input_channels, depth_mult = W0.shape
b = weight_list[2] if has_bias else None
W0 = _np.reshape(W0, (height, width, 1, input_channels * depth_mult))
stride_height, stride_width = keras_layer.strides
# Dilations
if (type(keras_layer.dilation_rate) is list) or (type(keras_layer.dilation_rate) is tuple):
dilations = [keras_layer.dilation_rate[0], keras_layer.dilation_rate[1]]
else:
dilations = [keras_layer.dilation_rate, keras_layer.dilation_rate]
intermediate_name = output_name + '_intermin_'
builder.add_convolution(name = layer + '_step_1',
kernel_channels = 1,
output_channels = input_channels * depth_mult,
height = height,
width = width,
stride_height = stride_height,
stride_width = stride_width,
border_mode = keras_layer.padding,
groups = input_channels,
W = W0,
b = None,
has_bias = False,
is_deconv = False,
output_shape = None,
input_name = input_name,
output_name = intermediate_name,
dilation_factors = dilations)
builder.add_convolution(name = layer + '_step_2',
kernel_channels = input_channels * depth_mult,
output_channels = output_channels,
height = 1,
width = 1,
stride_height = 1,
stride_width = 1,
border_mode = keras_layer.padding,
groups = 1,
W = W1,
b = b,
has_bias = has_bias,
is_deconv = False,
output_shape = None,
input_name = intermediate_name,
output_name = output_name,
dilation_factors = [1,1]) | [
"def",
"convert_separable_convolution",
"(",
"builder",
",",
"layer",
",",
"input_names",
",",
"output_names",
",",
"keras_layer",
")",
":",
"_check_data_format",
"(",
"keras_layer",
")",
"# Get input and output names",
"input_name",
",",
"output_name",
"=",
"(",
"input_names",
"[",
"0",
"]",
",",
"output_names",
"[",
"0",
"]",
")",
"has_bias",
"=",
"keras_layer",
".",
"use_bias",
"# Get the weights from _keras.",
"weight_list",
"=",
"keras_layer",
".",
"get_weights",
"(",
")",
"output_blob_shape",
"=",
"list",
"(",
"filter",
"(",
"None",
",",
"keras_layer",
".",
"output_shape",
")",
")",
"output_channels",
"=",
"output_blob_shape",
"[",
"-",
"1",
"]",
"# D: depth mutliplier",
"# w[0] is (H,W,Cin,D)",
"# w[1] is (1,1,Cin * D, Cout)",
"W0",
"=",
"weight_list",
"[",
"0",
"]",
"W1",
"=",
"weight_list",
"[",
"1",
"]",
"height",
",",
"width",
",",
"input_channels",
",",
"depth_mult",
"=",
"W0",
".",
"shape",
"b",
"=",
"weight_list",
"[",
"2",
"]",
"if",
"has_bias",
"else",
"None",
"W0",
"=",
"_np",
".",
"reshape",
"(",
"W0",
",",
"(",
"height",
",",
"width",
",",
"1",
",",
"input_channels",
"*",
"depth_mult",
")",
")",
"stride_height",
",",
"stride_width",
"=",
"keras_layer",
".",
"strides",
"# Dilations",
"if",
"(",
"type",
"(",
"keras_layer",
".",
"dilation_rate",
")",
"is",
"list",
")",
"or",
"(",
"type",
"(",
"keras_layer",
".",
"dilation_rate",
")",
"is",
"tuple",
")",
":",
"dilations",
"=",
"[",
"keras_layer",
".",
"dilation_rate",
"[",
"0",
"]",
",",
"keras_layer",
".",
"dilation_rate",
"[",
"1",
"]",
"]",
"else",
":",
"dilations",
"=",
"[",
"keras_layer",
".",
"dilation_rate",
",",
"keras_layer",
".",
"dilation_rate",
"]",
"intermediate_name",
"=",
"output_name",
"+",
"'_intermin_'",
"builder",
".",
"add_convolution",
"(",
"name",
"=",
"layer",
"+",
"'_step_1'",
",",
"kernel_channels",
"=",
"1",
",",
"output_channels",
"=",
"input_channels",
"*",
"depth_mult",
",",
"height",
"=",
"height",
",",
"width",
"=",
"width",
",",
"stride_height",
"=",
"stride_height",
",",
"stride_width",
"=",
"stride_width",
",",
"border_mode",
"=",
"keras_layer",
".",
"padding",
",",
"groups",
"=",
"input_channels",
",",
"W",
"=",
"W0",
",",
"b",
"=",
"None",
",",
"has_bias",
"=",
"False",
",",
"is_deconv",
"=",
"False",
",",
"output_shape",
"=",
"None",
",",
"input_name",
"=",
"input_name",
",",
"output_name",
"=",
"intermediate_name",
",",
"dilation_factors",
"=",
"dilations",
")",
"builder",
".",
"add_convolution",
"(",
"name",
"=",
"layer",
"+",
"'_step_2'",
",",
"kernel_channels",
"=",
"input_channels",
"*",
"depth_mult",
",",
"output_channels",
"=",
"output_channels",
",",
"height",
"=",
"1",
",",
"width",
"=",
"1",
",",
"stride_height",
"=",
"1",
",",
"stride_width",
"=",
"1",
",",
"border_mode",
"=",
"keras_layer",
".",
"padding",
",",
"groups",
"=",
"1",
",",
"W",
"=",
"W1",
",",
"b",
"=",
"b",
",",
"has_bias",
"=",
"has_bias",
",",
"is_deconv",
"=",
"False",
",",
"output_shape",
"=",
"None",
",",
"input_name",
"=",
"intermediate_name",
",",
"output_name",
"=",
"output_name",
",",
"dilation_factors",
"=",
"[",
"1",
",",
"1",
"]",
")"
] | Convert separable convolution layer from keras to coreml.
Parameters
----------
keras_layer: layer
A keras layer object.
builder: NeuralNetworkBuilder
A neural network builder object. | [
"Convert",
"separable",
"convolution",
"layer",
"from",
"keras",
"to",
"coreml",
"."
] | 74514c3f99e25b46f22c6e02977fe3da69221c2e | https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/external/coremltools_wrap/coremltools/coremltools/converters/keras/_layers2.py#L451-L529 |
28,943 | apple/turicreate | src/external/coremltools_wrap/coremltools/coremltools/converters/keras/_layers2.py | convert_batchnorm | def convert_batchnorm(builder, layer, input_names, output_names, keras_layer):
"""
Convert a Batch Normalization layer.
Parameters
keras_layer: layer
A keras layer object.
builder: NeuralNetworkBuilder
A neural network builder object.
"""
# Get input and output names
input_name, output_name = (input_names[0], output_names[0])
axis = keras_layer.axis
nb_channels = keras_layer.input_shape[axis]
# Set parameters
# Parameter arrangement in Keras: gamma, beta, mean, variance
idx = 0
gamma, beta = None, None
if keras_layer.scale:
gamma = keras_layer.get_weights()[idx]
idx += 1
if keras_layer.center:
beta = keras_layer.get_weights()[idx]
idx += 1
mean = keras_layer.get_weights()[idx]
std = keras_layer.get_weights()[idx+1]
gamma = _np.ones(mean.shape) if gamma is None else gamma
beta = _np.zeros(mean.shape) if beta is None else beta
# compute adjusted parameters
variance = std * std
f = 1.0 / _np.sqrt(std + keras_layer.epsilon)
gamma1 = gamma*f
beta1 = beta - gamma*mean*f
mean[:] = 0.0 #mean
variance[:] = 1.0 - .00001 #stddev
builder.add_batchnorm(
name = layer,
channels = nb_channels,
gamma = gamma1,
beta = beta1,
mean = mean,
variance = variance,
input_name = input_name,
output_name = output_name) | python | def convert_batchnorm(builder, layer, input_names, output_names, keras_layer):
"""
Convert a Batch Normalization layer.
Parameters
keras_layer: layer
A keras layer object.
builder: NeuralNetworkBuilder
A neural network builder object.
"""
# Get input and output names
input_name, output_name = (input_names[0], output_names[0])
axis = keras_layer.axis
nb_channels = keras_layer.input_shape[axis]
# Set parameters
# Parameter arrangement in Keras: gamma, beta, mean, variance
idx = 0
gamma, beta = None, None
if keras_layer.scale:
gamma = keras_layer.get_weights()[idx]
idx += 1
if keras_layer.center:
beta = keras_layer.get_weights()[idx]
idx += 1
mean = keras_layer.get_weights()[idx]
std = keras_layer.get_weights()[idx+1]
gamma = _np.ones(mean.shape) if gamma is None else gamma
beta = _np.zeros(mean.shape) if beta is None else beta
# compute adjusted parameters
variance = std * std
f = 1.0 / _np.sqrt(std + keras_layer.epsilon)
gamma1 = gamma*f
beta1 = beta - gamma*mean*f
mean[:] = 0.0 #mean
variance[:] = 1.0 - .00001 #stddev
builder.add_batchnorm(
name = layer,
channels = nb_channels,
gamma = gamma1,
beta = beta1,
mean = mean,
variance = variance,
input_name = input_name,
output_name = output_name) | [
"def",
"convert_batchnorm",
"(",
"builder",
",",
"layer",
",",
"input_names",
",",
"output_names",
",",
"keras_layer",
")",
":",
"# Get input and output names",
"input_name",
",",
"output_name",
"=",
"(",
"input_names",
"[",
"0",
"]",
",",
"output_names",
"[",
"0",
"]",
")",
"axis",
"=",
"keras_layer",
".",
"axis",
"nb_channels",
"=",
"keras_layer",
".",
"input_shape",
"[",
"axis",
"]",
"# Set parameters",
"# Parameter arrangement in Keras: gamma, beta, mean, variance",
"idx",
"=",
"0",
"gamma",
",",
"beta",
"=",
"None",
",",
"None",
"if",
"keras_layer",
".",
"scale",
":",
"gamma",
"=",
"keras_layer",
".",
"get_weights",
"(",
")",
"[",
"idx",
"]",
"idx",
"+=",
"1",
"if",
"keras_layer",
".",
"center",
":",
"beta",
"=",
"keras_layer",
".",
"get_weights",
"(",
")",
"[",
"idx",
"]",
"idx",
"+=",
"1",
"mean",
"=",
"keras_layer",
".",
"get_weights",
"(",
")",
"[",
"idx",
"]",
"std",
"=",
"keras_layer",
".",
"get_weights",
"(",
")",
"[",
"idx",
"+",
"1",
"]",
"gamma",
"=",
"_np",
".",
"ones",
"(",
"mean",
".",
"shape",
")",
"if",
"gamma",
"is",
"None",
"else",
"gamma",
"beta",
"=",
"_np",
".",
"zeros",
"(",
"mean",
".",
"shape",
")",
"if",
"beta",
"is",
"None",
"else",
"beta",
"# compute adjusted parameters",
"variance",
"=",
"std",
"*",
"std",
"f",
"=",
"1.0",
"/",
"_np",
".",
"sqrt",
"(",
"std",
"+",
"keras_layer",
".",
"epsilon",
")",
"gamma1",
"=",
"gamma",
"*",
"f",
"beta1",
"=",
"beta",
"-",
"gamma",
"*",
"mean",
"*",
"f",
"mean",
"[",
":",
"]",
"=",
"0.0",
"#mean",
"variance",
"[",
":",
"]",
"=",
"1.0",
"-",
".00001",
"#stddev",
"builder",
".",
"add_batchnorm",
"(",
"name",
"=",
"layer",
",",
"channels",
"=",
"nb_channels",
",",
"gamma",
"=",
"gamma1",
",",
"beta",
"=",
"beta1",
",",
"mean",
"=",
"mean",
",",
"variance",
"=",
"variance",
",",
"input_name",
"=",
"input_name",
",",
"output_name",
"=",
"output_name",
")"
] | Convert a Batch Normalization layer.
Parameters
keras_layer: layer
A keras layer object.
builder: NeuralNetworkBuilder
A neural network builder object. | [
"Convert",
"a",
"Batch",
"Normalization",
"layer",
"."
] | 74514c3f99e25b46f22c6e02977fe3da69221c2e | https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/external/coremltools_wrap/coremltools/coremltools/converters/keras/_layers2.py#L531-L581 |
28,944 | apple/turicreate | src/external/coremltools_wrap/coremltools/coremltools/converters/keras/_layers2.py | convert_merge | def convert_merge(builder, layer, input_names, output_names, keras_layer):
"""
Convert concat layer from keras to coreml.
Parameters
----------
keras_layer: layer
A keras layer object.
builder: NeuralNetworkBuilder
A neural network builder object.
"""
# Get input and output names
output_name = output_names[0]
mode = _get_elementwise_name_from_keras_layer(keras_layer)
builder.add_elementwise(name = layer, input_names = input_names,
output_name = output_name, mode = mode) | python | def convert_merge(builder, layer, input_names, output_names, keras_layer):
"""
Convert concat layer from keras to coreml.
Parameters
----------
keras_layer: layer
A keras layer object.
builder: NeuralNetworkBuilder
A neural network builder object.
"""
# Get input and output names
output_name = output_names[0]
mode = _get_elementwise_name_from_keras_layer(keras_layer)
builder.add_elementwise(name = layer, input_names = input_names,
output_name = output_name, mode = mode) | [
"def",
"convert_merge",
"(",
"builder",
",",
"layer",
",",
"input_names",
",",
"output_names",
",",
"keras_layer",
")",
":",
"# Get input and output names",
"output_name",
"=",
"output_names",
"[",
"0",
"]",
"mode",
"=",
"_get_elementwise_name_from_keras_layer",
"(",
"keras_layer",
")",
"builder",
".",
"add_elementwise",
"(",
"name",
"=",
"layer",
",",
"input_names",
"=",
"input_names",
",",
"output_name",
"=",
"output_name",
",",
"mode",
"=",
"mode",
")"
] | Convert concat layer from keras to coreml.
Parameters
----------
keras_layer: layer
A keras layer object.
builder: NeuralNetworkBuilder
A neural network builder object. | [
"Convert",
"concat",
"layer",
"from",
"keras",
"to",
"coreml",
"."
] | 74514c3f99e25b46f22c6e02977fe3da69221c2e | https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/external/coremltools_wrap/coremltools/coremltools/converters/keras/_layers2.py#L621-L638 |
28,945 | apple/turicreate | src/external/coremltools_wrap/coremltools/coremltools/converters/keras/_layers2.py | convert_pooling | def convert_pooling(builder, layer, input_names, output_names, keras_layer):
"""
Convert pooling layer from keras to coreml.
Parameters
----------
keras_layer: layer
A keras layer object.
builder: NeuralNetworkBuilder
A neural network builder object.
"""
_check_data_format(keras_layer)
# Get input and output names
input_name, output_name = (input_names[0], output_names[0])
# Pooling layer type
if isinstance(keras_layer, _keras.layers.convolutional.MaxPooling2D) or \
isinstance(keras_layer, _keras.layers.convolutional.MaxPooling1D) or \
isinstance(keras_layer, _keras.layers.pooling.GlobalMaxPooling2D) or \
isinstance(keras_layer, _keras.layers.pooling.GlobalMaxPooling1D):
layer_type_str = 'MAX'
elif isinstance(keras_layer, _keras.layers.convolutional.AveragePooling2D) or \
isinstance(keras_layer, _keras.layers.convolutional.AveragePooling1D) or \
isinstance(keras_layer, _keras.layers.pooling.GlobalAveragePooling2D) or \
isinstance(keras_layer, _keras.layers.pooling.GlobalAveragePooling1D):
layer_type_str = 'AVERAGE'
else:
raise TypeError("Pooling type %s not supported" % keras_layer)
# if it's global, set the global flag
if isinstance(keras_layer, _keras.layers.pooling.GlobalMaxPooling2D) or \
isinstance(keras_layer, _keras.layers.pooling.GlobalAveragePooling2D):
# 2D global pooling
global_pooling = True
height, width = (0, 0)
stride_height, stride_width = (0,0)
padding_type = 'VALID'
elif isinstance(keras_layer, _keras.layers.pooling.GlobalMaxPooling1D) or \
isinstance(keras_layer, _keras.layers.pooling.GlobalAveragePooling1D):
# 1D global pooling: 1D global pooling seems problematic in the backend,
# use this work-around
global_pooling = False
_, width, channels = keras_layer.input_shape
height = 1
stride_height, stride_width = height, width
padding_type = 'VALID'
else:
global_pooling = False
# Set pool sizes and strides
# 1D cases:
if isinstance(keras_layer, _keras.layers.convolutional.MaxPooling1D) or \
isinstance(keras_layer, _keras.layers.pooling.GlobalMaxPooling1D) or \
isinstance(keras_layer, _keras.layers.convolutional.AveragePooling1D) or \
isinstance(keras_layer, _keras.layers.pooling.GlobalAveragePooling1D):
pool_size = keras_layer.pool_size if type(keras_layer.pool_size) is int else keras_layer.pool_size[0]
height, width = 1, pool_size
if keras_layer.strides is not None:
strides = keras_layer.strides if type(keras_layer.strides) is int else keras_layer.strides[0]
stride_height, stride_width = 1, strides
else:
stride_height, stride_width = 1, pool_size
# 2D cases:
else:
height, width = keras_layer.pool_size
if keras_layer.strides is None:
stride_height, stride_width = height, width
else:
stride_height, stride_width = keras_layer.strides
# Padding
padding = keras_layer.padding
if keras_layer.padding == 'valid':
padding_type = 'VALID'
elif keras_layer.padding == 'same':
padding_type = 'SAME'
else:
raise TypeError("Border mode %s not supported" % padding)
builder.add_pooling(name = layer,
height = height,
width = width,
stride_height = stride_height,
stride_width = stride_width,
layer_type = layer_type_str,
padding_type = padding_type,
input_name = input_name,
output_name = output_name,
exclude_pad_area = True,
is_global = global_pooling) | python | def convert_pooling(builder, layer, input_names, output_names, keras_layer):
"""
Convert pooling layer from keras to coreml.
Parameters
----------
keras_layer: layer
A keras layer object.
builder: NeuralNetworkBuilder
A neural network builder object.
"""
_check_data_format(keras_layer)
# Get input and output names
input_name, output_name = (input_names[0], output_names[0])
# Pooling layer type
if isinstance(keras_layer, _keras.layers.convolutional.MaxPooling2D) or \
isinstance(keras_layer, _keras.layers.convolutional.MaxPooling1D) or \
isinstance(keras_layer, _keras.layers.pooling.GlobalMaxPooling2D) or \
isinstance(keras_layer, _keras.layers.pooling.GlobalMaxPooling1D):
layer_type_str = 'MAX'
elif isinstance(keras_layer, _keras.layers.convolutional.AveragePooling2D) or \
isinstance(keras_layer, _keras.layers.convolutional.AveragePooling1D) or \
isinstance(keras_layer, _keras.layers.pooling.GlobalAveragePooling2D) or \
isinstance(keras_layer, _keras.layers.pooling.GlobalAveragePooling1D):
layer_type_str = 'AVERAGE'
else:
raise TypeError("Pooling type %s not supported" % keras_layer)
# if it's global, set the global flag
if isinstance(keras_layer, _keras.layers.pooling.GlobalMaxPooling2D) or \
isinstance(keras_layer, _keras.layers.pooling.GlobalAveragePooling2D):
# 2D global pooling
global_pooling = True
height, width = (0, 0)
stride_height, stride_width = (0,0)
padding_type = 'VALID'
elif isinstance(keras_layer, _keras.layers.pooling.GlobalMaxPooling1D) or \
isinstance(keras_layer, _keras.layers.pooling.GlobalAveragePooling1D):
# 1D global pooling: 1D global pooling seems problematic in the backend,
# use this work-around
global_pooling = False
_, width, channels = keras_layer.input_shape
height = 1
stride_height, stride_width = height, width
padding_type = 'VALID'
else:
global_pooling = False
# Set pool sizes and strides
# 1D cases:
if isinstance(keras_layer, _keras.layers.convolutional.MaxPooling1D) or \
isinstance(keras_layer, _keras.layers.pooling.GlobalMaxPooling1D) or \
isinstance(keras_layer, _keras.layers.convolutional.AveragePooling1D) or \
isinstance(keras_layer, _keras.layers.pooling.GlobalAveragePooling1D):
pool_size = keras_layer.pool_size if type(keras_layer.pool_size) is int else keras_layer.pool_size[0]
height, width = 1, pool_size
if keras_layer.strides is not None:
strides = keras_layer.strides if type(keras_layer.strides) is int else keras_layer.strides[0]
stride_height, stride_width = 1, strides
else:
stride_height, stride_width = 1, pool_size
# 2D cases:
else:
height, width = keras_layer.pool_size
if keras_layer.strides is None:
stride_height, stride_width = height, width
else:
stride_height, stride_width = keras_layer.strides
# Padding
padding = keras_layer.padding
if keras_layer.padding == 'valid':
padding_type = 'VALID'
elif keras_layer.padding == 'same':
padding_type = 'SAME'
else:
raise TypeError("Border mode %s not supported" % padding)
builder.add_pooling(name = layer,
height = height,
width = width,
stride_height = stride_height,
stride_width = stride_width,
layer_type = layer_type_str,
padding_type = padding_type,
input_name = input_name,
output_name = output_name,
exclude_pad_area = True,
is_global = global_pooling) | [
"def",
"convert_pooling",
"(",
"builder",
",",
"layer",
",",
"input_names",
",",
"output_names",
",",
"keras_layer",
")",
":",
"_check_data_format",
"(",
"keras_layer",
")",
"# Get input and output names",
"input_name",
",",
"output_name",
"=",
"(",
"input_names",
"[",
"0",
"]",
",",
"output_names",
"[",
"0",
"]",
")",
"# Pooling layer type",
"if",
"isinstance",
"(",
"keras_layer",
",",
"_keras",
".",
"layers",
".",
"convolutional",
".",
"MaxPooling2D",
")",
"or",
"isinstance",
"(",
"keras_layer",
",",
"_keras",
".",
"layers",
".",
"convolutional",
".",
"MaxPooling1D",
")",
"or",
"isinstance",
"(",
"keras_layer",
",",
"_keras",
".",
"layers",
".",
"pooling",
".",
"GlobalMaxPooling2D",
")",
"or",
"isinstance",
"(",
"keras_layer",
",",
"_keras",
".",
"layers",
".",
"pooling",
".",
"GlobalMaxPooling1D",
")",
":",
"layer_type_str",
"=",
"'MAX'",
"elif",
"isinstance",
"(",
"keras_layer",
",",
"_keras",
".",
"layers",
".",
"convolutional",
".",
"AveragePooling2D",
")",
"or",
"isinstance",
"(",
"keras_layer",
",",
"_keras",
".",
"layers",
".",
"convolutional",
".",
"AveragePooling1D",
")",
"or",
"isinstance",
"(",
"keras_layer",
",",
"_keras",
".",
"layers",
".",
"pooling",
".",
"GlobalAveragePooling2D",
")",
"or",
"isinstance",
"(",
"keras_layer",
",",
"_keras",
".",
"layers",
".",
"pooling",
".",
"GlobalAveragePooling1D",
")",
":",
"layer_type_str",
"=",
"'AVERAGE'",
"else",
":",
"raise",
"TypeError",
"(",
"\"Pooling type %s not supported\"",
"%",
"keras_layer",
")",
"# if it's global, set the global flag",
"if",
"isinstance",
"(",
"keras_layer",
",",
"_keras",
".",
"layers",
".",
"pooling",
".",
"GlobalMaxPooling2D",
")",
"or",
"isinstance",
"(",
"keras_layer",
",",
"_keras",
".",
"layers",
".",
"pooling",
".",
"GlobalAveragePooling2D",
")",
":",
"# 2D global pooling",
"global_pooling",
"=",
"True",
"height",
",",
"width",
"=",
"(",
"0",
",",
"0",
")",
"stride_height",
",",
"stride_width",
"=",
"(",
"0",
",",
"0",
")",
"padding_type",
"=",
"'VALID'",
"elif",
"isinstance",
"(",
"keras_layer",
",",
"_keras",
".",
"layers",
".",
"pooling",
".",
"GlobalMaxPooling1D",
")",
"or",
"isinstance",
"(",
"keras_layer",
",",
"_keras",
".",
"layers",
".",
"pooling",
".",
"GlobalAveragePooling1D",
")",
":",
"# 1D global pooling: 1D global pooling seems problematic in the backend,",
"# use this work-around",
"global_pooling",
"=",
"False",
"_",
",",
"width",
",",
"channels",
"=",
"keras_layer",
".",
"input_shape",
"height",
"=",
"1",
"stride_height",
",",
"stride_width",
"=",
"height",
",",
"width",
"padding_type",
"=",
"'VALID'",
"else",
":",
"global_pooling",
"=",
"False",
"# Set pool sizes and strides",
"# 1D cases:",
"if",
"isinstance",
"(",
"keras_layer",
",",
"_keras",
".",
"layers",
".",
"convolutional",
".",
"MaxPooling1D",
")",
"or",
"isinstance",
"(",
"keras_layer",
",",
"_keras",
".",
"layers",
".",
"pooling",
".",
"GlobalMaxPooling1D",
")",
"or",
"isinstance",
"(",
"keras_layer",
",",
"_keras",
".",
"layers",
".",
"convolutional",
".",
"AveragePooling1D",
")",
"or",
"isinstance",
"(",
"keras_layer",
",",
"_keras",
".",
"layers",
".",
"pooling",
".",
"GlobalAveragePooling1D",
")",
":",
"pool_size",
"=",
"keras_layer",
".",
"pool_size",
"if",
"type",
"(",
"keras_layer",
".",
"pool_size",
")",
"is",
"int",
"else",
"keras_layer",
".",
"pool_size",
"[",
"0",
"]",
"height",
",",
"width",
"=",
"1",
",",
"pool_size",
"if",
"keras_layer",
".",
"strides",
"is",
"not",
"None",
":",
"strides",
"=",
"keras_layer",
".",
"strides",
"if",
"type",
"(",
"keras_layer",
".",
"strides",
")",
"is",
"int",
"else",
"keras_layer",
".",
"strides",
"[",
"0",
"]",
"stride_height",
",",
"stride_width",
"=",
"1",
",",
"strides",
"else",
":",
"stride_height",
",",
"stride_width",
"=",
"1",
",",
"pool_size",
"# 2D cases:",
"else",
":",
"height",
",",
"width",
"=",
"keras_layer",
".",
"pool_size",
"if",
"keras_layer",
".",
"strides",
"is",
"None",
":",
"stride_height",
",",
"stride_width",
"=",
"height",
",",
"width",
"else",
":",
"stride_height",
",",
"stride_width",
"=",
"keras_layer",
".",
"strides",
"# Padding",
"padding",
"=",
"keras_layer",
".",
"padding",
"if",
"keras_layer",
".",
"padding",
"==",
"'valid'",
":",
"padding_type",
"=",
"'VALID'",
"elif",
"keras_layer",
".",
"padding",
"==",
"'same'",
":",
"padding_type",
"=",
"'SAME'",
"else",
":",
"raise",
"TypeError",
"(",
"\"Border mode %s not supported\"",
"%",
"padding",
")",
"builder",
".",
"add_pooling",
"(",
"name",
"=",
"layer",
",",
"height",
"=",
"height",
",",
"width",
"=",
"width",
",",
"stride_height",
"=",
"stride_height",
",",
"stride_width",
"=",
"stride_width",
",",
"layer_type",
"=",
"layer_type_str",
",",
"padding_type",
"=",
"padding_type",
",",
"input_name",
"=",
"input_name",
",",
"output_name",
"=",
"output_name",
",",
"exclude_pad_area",
"=",
"True",
",",
"is_global",
"=",
"global_pooling",
")"
] | Convert pooling layer from keras to coreml.
Parameters
----------
keras_layer: layer
A keras layer object.
builder: NeuralNetworkBuilder
A neural network builder object. | [
"Convert",
"pooling",
"layer",
"from",
"keras",
"to",
"coreml",
"."
] | 74514c3f99e25b46f22c6e02977fe3da69221c2e | https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/external/coremltools_wrap/coremltools/coremltools/converters/keras/_layers2.py#L640-L729 |
28,946 | apple/turicreate | src/external/xgboost/subtree/rabit/wrapper/rabit.py | _loadlib | def _loadlib(lib='standard'):
"""Load rabit library."""
global _LIB
if _LIB is not None:
warnings.warn('rabit.int call was ignored because it has'\
' already been initialized', level=2)
return
if lib == 'standard':
_LIB = ctypes.cdll.LoadLibrary(WRAPPER_PATH % '')
elif lib == 'mock':
_LIB = ctypes.cdll.LoadLibrary(WRAPPER_PATH % '_mock')
elif lib == 'mpi':
_LIB = ctypes.cdll.LoadLibrary(WRAPPER_PATH % '_mpi')
else:
raise Exception('unknown rabit lib %s, can be standard, mock, mpi' % lib)
_LIB.RabitGetRank.restype = ctypes.c_int
_LIB.RabitGetWorldSize.restype = ctypes.c_int
_LIB.RabitVersionNumber.restype = ctypes.c_int | python | def _loadlib(lib='standard'):
"""Load rabit library."""
global _LIB
if _LIB is not None:
warnings.warn('rabit.int call was ignored because it has'\
' already been initialized', level=2)
return
if lib == 'standard':
_LIB = ctypes.cdll.LoadLibrary(WRAPPER_PATH % '')
elif lib == 'mock':
_LIB = ctypes.cdll.LoadLibrary(WRAPPER_PATH % '_mock')
elif lib == 'mpi':
_LIB = ctypes.cdll.LoadLibrary(WRAPPER_PATH % '_mpi')
else:
raise Exception('unknown rabit lib %s, can be standard, mock, mpi' % lib)
_LIB.RabitGetRank.restype = ctypes.c_int
_LIB.RabitGetWorldSize.restype = ctypes.c_int
_LIB.RabitVersionNumber.restype = ctypes.c_int | [
"def",
"_loadlib",
"(",
"lib",
"=",
"'standard'",
")",
":",
"global",
"_LIB",
"if",
"_LIB",
"is",
"not",
"None",
":",
"warnings",
".",
"warn",
"(",
"'rabit.int call was ignored because it has'",
"' already been initialized'",
",",
"level",
"=",
"2",
")",
"return",
"if",
"lib",
"==",
"'standard'",
":",
"_LIB",
"=",
"ctypes",
".",
"cdll",
".",
"LoadLibrary",
"(",
"WRAPPER_PATH",
"%",
"''",
")",
"elif",
"lib",
"==",
"'mock'",
":",
"_LIB",
"=",
"ctypes",
".",
"cdll",
".",
"LoadLibrary",
"(",
"WRAPPER_PATH",
"%",
"'_mock'",
")",
"elif",
"lib",
"==",
"'mpi'",
":",
"_LIB",
"=",
"ctypes",
".",
"cdll",
".",
"LoadLibrary",
"(",
"WRAPPER_PATH",
"%",
"'_mpi'",
")",
"else",
":",
"raise",
"Exception",
"(",
"'unknown rabit lib %s, can be standard, mock, mpi'",
"%",
"lib",
")",
"_LIB",
".",
"RabitGetRank",
".",
"restype",
"=",
"ctypes",
".",
"c_int",
"_LIB",
".",
"RabitGetWorldSize",
".",
"restype",
"=",
"ctypes",
".",
"c_int",
"_LIB",
".",
"RabitVersionNumber",
".",
"restype",
"=",
"ctypes",
".",
"c_int"
] | Load rabit library. | [
"Load",
"rabit",
"library",
"."
] | 74514c3f99e25b46f22c6e02977fe3da69221c2e | https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/external/xgboost/subtree/rabit/wrapper/rabit.py#L25-L42 |
28,947 | apple/turicreate | src/external/xgboost/subtree/rabit/wrapper/rabit.py | init | def init(args=None, lib='standard'):
"""Intialize the rabit module, call this once before using anything.
Parameters
----------
args: list of str, optional
The list of arguments used to initialized the rabit
usually you need to pass in sys.argv.
Defaults to sys.argv when it is None.
lib: {'standard', 'mock', 'mpi'}
Type of library we want to load
"""
if args is None:
args = sys.argv
_loadlib(lib)
arr = (ctypes.c_char_p * len(args))()
arr[:] = args
_LIB.RabitInit(len(args), arr) | python | def init(args=None, lib='standard'):
"""Intialize the rabit module, call this once before using anything.
Parameters
----------
args: list of str, optional
The list of arguments used to initialized the rabit
usually you need to pass in sys.argv.
Defaults to sys.argv when it is None.
lib: {'standard', 'mock', 'mpi'}
Type of library we want to load
"""
if args is None:
args = sys.argv
_loadlib(lib)
arr = (ctypes.c_char_p * len(args))()
arr[:] = args
_LIB.RabitInit(len(args), arr) | [
"def",
"init",
"(",
"args",
"=",
"None",
",",
"lib",
"=",
"'standard'",
")",
":",
"if",
"args",
"is",
"None",
":",
"args",
"=",
"sys",
".",
"argv",
"_loadlib",
"(",
"lib",
")",
"arr",
"=",
"(",
"ctypes",
".",
"c_char_p",
"*",
"len",
"(",
"args",
")",
")",
"(",
")",
"arr",
"[",
":",
"]",
"=",
"args",
"_LIB",
".",
"RabitInit",
"(",
"len",
"(",
"args",
")",
",",
"arr",
")"
] | Intialize the rabit module, call this once before using anything.
Parameters
----------
args: list of str, optional
The list of arguments used to initialized the rabit
usually you need to pass in sys.argv.
Defaults to sys.argv when it is None.
lib: {'standard', 'mock', 'mpi'}
Type of library we want to load | [
"Intialize",
"the",
"rabit",
"module",
"call",
"this",
"once",
"before",
"using",
"anything",
"."
] | 74514c3f99e25b46f22c6e02977fe3da69221c2e | https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/external/xgboost/subtree/rabit/wrapper/rabit.py#L56-L73 |
28,948 | apple/turicreate | src/external/xgboost/subtree/rabit/wrapper/rabit.py | allreduce | def allreduce(data, op, prepare_fun=None):
"""Perform allreduce, return the result.
Parameters
----------
data: numpy array
Input data.
op: int
Reduction operators, can be MIN, MAX, SUM, BITOR
prepare_fun: function
Lazy preprocessing function, if it is not None, prepare_fun(data)
will be called by the function before performing allreduce, to intialize the data
If the result of Allreduce can be recovered directly,
then prepare_fun will NOT be called
Returns
-------
result : array_like
The result of allreduce, have same shape as data
Notes
-----
This function is not thread-safe.
"""
if not isinstance(data, np.ndarray):
raise Exception('allreduce only takes in numpy.ndarray')
buf = data.ravel()
if buf.base is data.base:
buf = buf.copy()
if buf.dtype not in DTYPE_ENUM__:
raise Exception('data type %s not supported' % str(buf.dtype))
if prepare_fun is None:
_LIB.RabitAllreduce(buf.ctypes.data_as(ctypes.c_void_p),
buf.size, DTYPE_ENUM__[buf.dtype],
op, None, None)
else:
func_ptr = ctypes.CFUNCTYPE(None, ctypes.c_void_p)
def pfunc(args):
"""prepare function."""
prepare_fun(data)
_LIB.RabitAllreduce(buf.ctypes.data_as(ctypes.c_void_p),
buf.size, DTYPE_ENUM__[buf.dtype],
op, func_ptr(pfunc), None)
return buf | python | def allreduce(data, op, prepare_fun=None):
"""Perform allreduce, return the result.
Parameters
----------
data: numpy array
Input data.
op: int
Reduction operators, can be MIN, MAX, SUM, BITOR
prepare_fun: function
Lazy preprocessing function, if it is not None, prepare_fun(data)
will be called by the function before performing allreduce, to intialize the data
If the result of Allreduce can be recovered directly,
then prepare_fun will NOT be called
Returns
-------
result : array_like
The result of allreduce, have same shape as data
Notes
-----
This function is not thread-safe.
"""
if not isinstance(data, np.ndarray):
raise Exception('allreduce only takes in numpy.ndarray')
buf = data.ravel()
if buf.base is data.base:
buf = buf.copy()
if buf.dtype not in DTYPE_ENUM__:
raise Exception('data type %s not supported' % str(buf.dtype))
if prepare_fun is None:
_LIB.RabitAllreduce(buf.ctypes.data_as(ctypes.c_void_p),
buf.size, DTYPE_ENUM__[buf.dtype],
op, None, None)
else:
func_ptr = ctypes.CFUNCTYPE(None, ctypes.c_void_p)
def pfunc(args):
"""prepare function."""
prepare_fun(data)
_LIB.RabitAllreduce(buf.ctypes.data_as(ctypes.c_void_p),
buf.size, DTYPE_ENUM__[buf.dtype],
op, func_ptr(pfunc), None)
return buf | [
"def",
"allreduce",
"(",
"data",
",",
"op",
",",
"prepare_fun",
"=",
"None",
")",
":",
"if",
"not",
"isinstance",
"(",
"data",
",",
"np",
".",
"ndarray",
")",
":",
"raise",
"Exception",
"(",
"'allreduce only takes in numpy.ndarray'",
")",
"buf",
"=",
"data",
".",
"ravel",
"(",
")",
"if",
"buf",
".",
"base",
"is",
"data",
".",
"base",
":",
"buf",
"=",
"buf",
".",
"copy",
"(",
")",
"if",
"buf",
".",
"dtype",
"not",
"in",
"DTYPE_ENUM__",
":",
"raise",
"Exception",
"(",
"'data type %s not supported'",
"%",
"str",
"(",
"buf",
".",
"dtype",
")",
")",
"if",
"prepare_fun",
"is",
"None",
":",
"_LIB",
".",
"RabitAllreduce",
"(",
"buf",
".",
"ctypes",
".",
"data_as",
"(",
"ctypes",
".",
"c_void_p",
")",
",",
"buf",
".",
"size",
",",
"DTYPE_ENUM__",
"[",
"buf",
".",
"dtype",
"]",
",",
"op",
",",
"None",
",",
"None",
")",
"else",
":",
"func_ptr",
"=",
"ctypes",
".",
"CFUNCTYPE",
"(",
"None",
",",
"ctypes",
".",
"c_void_p",
")",
"def",
"pfunc",
"(",
"args",
")",
":",
"\"\"\"prepare function.\"\"\"",
"prepare_fun",
"(",
"data",
")",
"_LIB",
".",
"RabitAllreduce",
"(",
"buf",
".",
"ctypes",
".",
"data_as",
"(",
"ctypes",
".",
"c_void_p",
")",
",",
"buf",
".",
"size",
",",
"DTYPE_ENUM__",
"[",
"buf",
".",
"dtype",
"]",
",",
"op",
",",
"func_ptr",
"(",
"pfunc",
")",
",",
"None",
")",
"return",
"buf"
] | Perform allreduce, return the result.
Parameters
----------
data: numpy array
Input data.
op: int
Reduction operators, can be MIN, MAX, SUM, BITOR
prepare_fun: function
Lazy preprocessing function, if it is not None, prepare_fun(data)
will be called by the function before performing allreduce, to intialize the data
If the result of Allreduce can be recovered directly,
then prepare_fun will NOT be called
Returns
-------
result : array_like
The result of allreduce, have same shape as data
Notes
-----
This function is not thread-safe. | [
"Perform",
"allreduce",
"return",
"the",
"result",
"."
] | 74514c3f99e25b46f22c6e02977fe3da69221c2e | https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/external/xgboost/subtree/rabit/wrapper/rabit.py#L183-L226 |
28,949 | apple/turicreate | src/external/xgboost/subtree/rabit/wrapper/rabit.py | load_checkpoint | def load_checkpoint(with_local=False):
"""Load latest check point.
Parameters
----------
with_local: bool, optional
whether the checkpoint contains local model
Returns
-------
tuple : tuple
if with_local: return (version, gobal_model, local_model)
else return (version, gobal_model)
if returned version == 0, this means no model has been CheckPointed
and global_model, local_model returned will be None
"""
gptr = ctypes.POINTER(ctypes.c_char)()
global_len = ctypes.c_ulong()
if with_local:
lptr = ctypes.POINTER(ctypes.c_char)()
local_len = ctypes.c_ulong()
version = _LIB.RabitLoadCheckPoint(
ctypes.byref(gptr),
ctypes.byref(global_len),
ctypes.byref(lptr),
ctypes.byref(local_len))
if version == 0:
return (version, None, None)
return (version,
_load_model(gptr, global_len.value),
_load_model(lptr, local_len.value))
else:
version = _LIB.RabitLoadCheckPoint(
ctypes.byref(gptr),
ctypes.byref(global_len),
None, None)
if version == 0:
return (version, None)
return (version,
_load_model(gptr, global_len.value)) | python | def load_checkpoint(with_local=False):
"""Load latest check point.
Parameters
----------
with_local: bool, optional
whether the checkpoint contains local model
Returns
-------
tuple : tuple
if with_local: return (version, gobal_model, local_model)
else return (version, gobal_model)
if returned version == 0, this means no model has been CheckPointed
and global_model, local_model returned will be None
"""
gptr = ctypes.POINTER(ctypes.c_char)()
global_len = ctypes.c_ulong()
if with_local:
lptr = ctypes.POINTER(ctypes.c_char)()
local_len = ctypes.c_ulong()
version = _LIB.RabitLoadCheckPoint(
ctypes.byref(gptr),
ctypes.byref(global_len),
ctypes.byref(lptr),
ctypes.byref(local_len))
if version == 0:
return (version, None, None)
return (version,
_load_model(gptr, global_len.value),
_load_model(lptr, local_len.value))
else:
version = _LIB.RabitLoadCheckPoint(
ctypes.byref(gptr),
ctypes.byref(global_len),
None, None)
if version == 0:
return (version, None)
return (version,
_load_model(gptr, global_len.value)) | [
"def",
"load_checkpoint",
"(",
"with_local",
"=",
"False",
")",
":",
"gptr",
"=",
"ctypes",
".",
"POINTER",
"(",
"ctypes",
".",
"c_char",
")",
"(",
")",
"global_len",
"=",
"ctypes",
".",
"c_ulong",
"(",
")",
"if",
"with_local",
":",
"lptr",
"=",
"ctypes",
".",
"POINTER",
"(",
"ctypes",
".",
"c_char",
")",
"(",
")",
"local_len",
"=",
"ctypes",
".",
"c_ulong",
"(",
")",
"version",
"=",
"_LIB",
".",
"RabitLoadCheckPoint",
"(",
"ctypes",
".",
"byref",
"(",
"gptr",
")",
",",
"ctypes",
".",
"byref",
"(",
"global_len",
")",
",",
"ctypes",
".",
"byref",
"(",
"lptr",
")",
",",
"ctypes",
".",
"byref",
"(",
"local_len",
")",
")",
"if",
"version",
"==",
"0",
":",
"return",
"(",
"version",
",",
"None",
",",
"None",
")",
"return",
"(",
"version",
",",
"_load_model",
"(",
"gptr",
",",
"global_len",
".",
"value",
")",
",",
"_load_model",
"(",
"lptr",
",",
"local_len",
".",
"value",
")",
")",
"else",
":",
"version",
"=",
"_LIB",
".",
"RabitLoadCheckPoint",
"(",
"ctypes",
".",
"byref",
"(",
"gptr",
")",
",",
"ctypes",
".",
"byref",
"(",
"global_len",
")",
",",
"None",
",",
"None",
")",
"if",
"version",
"==",
"0",
":",
"return",
"(",
"version",
",",
"None",
")",
"return",
"(",
"version",
",",
"_load_model",
"(",
"gptr",
",",
"global_len",
".",
"value",
")",
")"
] | Load latest check point.
Parameters
----------
with_local: bool, optional
whether the checkpoint contains local model
Returns
-------
tuple : tuple
if with_local: return (version, gobal_model, local_model)
else return (version, gobal_model)
if returned version == 0, this means no model has been CheckPointed
and global_model, local_model returned will be None | [
"Load",
"latest",
"check",
"point",
"."
] | 74514c3f99e25b46f22c6e02977fe3da69221c2e | https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/external/xgboost/subtree/rabit/wrapper/rabit.py#L242-L281 |
28,950 | apple/turicreate | src/external/xgboost/subtree/rabit/wrapper/rabit.py | checkpoint | def checkpoint(global_model, local_model=None):
"""Checkpoint the model.
This means we finished a stage of execution.
Every time we call check point, there is a version number which will increase by one.
Parameters
----------
global_model: anytype that can be pickled
globally shared model/state when calling this function,
the caller need to gauranttees that global_model is the same in all nodes
local_model: anytype that can be pickled
Local model, that is specific to current node/rank.
This can be None when no local state is needed.
Notes
-----
local_model requires explicit replication of the model for fault-tolerance.
This will bring replication cost in checkpoint function.
while global_model do not need explicit replication.
It is recommended to use global_model if possible.
"""
sglobal = pickle.dumps(global_model)
if local_model is None:
_LIB.RabitCheckPoint(sglobal, len(sglobal), None, 0)
del sglobal
else:
slocal = pickle.dumps(local_model)
_LIB.RabitCheckPoint(sglobal, len(sglobal), slocal, len(slocal))
del slocal
del sglobal | python | def checkpoint(global_model, local_model=None):
"""Checkpoint the model.
This means we finished a stage of execution.
Every time we call check point, there is a version number which will increase by one.
Parameters
----------
global_model: anytype that can be pickled
globally shared model/state when calling this function,
the caller need to gauranttees that global_model is the same in all nodes
local_model: anytype that can be pickled
Local model, that is specific to current node/rank.
This can be None when no local state is needed.
Notes
-----
local_model requires explicit replication of the model for fault-tolerance.
This will bring replication cost in checkpoint function.
while global_model do not need explicit replication.
It is recommended to use global_model if possible.
"""
sglobal = pickle.dumps(global_model)
if local_model is None:
_LIB.RabitCheckPoint(sglobal, len(sglobal), None, 0)
del sglobal
else:
slocal = pickle.dumps(local_model)
_LIB.RabitCheckPoint(sglobal, len(sglobal), slocal, len(slocal))
del slocal
del sglobal | [
"def",
"checkpoint",
"(",
"global_model",
",",
"local_model",
"=",
"None",
")",
":",
"sglobal",
"=",
"pickle",
".",
"dumps",
"(",
"global_model",
")",
"if",
"local_model",
"is",
"None",
":",
"_LIB",
".",
"RabitCheckPoint",
"(",
"sglobal",
",",
"len",
"(",
"sglobal",
")",
",",
"None",
",",
"0",
")",
"del",
"sglobal",
"else",
":",
"slocal",
"=",
"pickle",
".",
"dumps",
"(",
"local_model",
")",
"_LIB",
".",
"RabitCheckPoint",
"(",
"sglobal",
",",
"len",
"(",
"sglobal",
")",
",",
"slocal",
",",
"len",
"(",
"slocal",
")",
")",
"del",
"slocal",
"del",
"sglobal"
] | Checkpoint the model.
This means we finished a stage of execution.
Every time we call check point, there is a version number which will increase by one.
Parameters
----------
global_model: anytype that can be pickled
globally shared model/state when calling this function,
the caller need to gauranttees that global_model is the same in all nodes
local_model: anytype that can be pickled
Local model, that is specific to current node/rank.
This can be None when no local state is needed.
Notes
-----
local_model requires explicit replication of the model for fault-tolerance.
This will bring replication cost in checkpoint function.
while global_model do not need explicit replication.
It is recommended to use global_model if possible. | [
"Checkpoint",
"the",
"model",
"."
] | 74514c3f99e25b46f22c6e02977fe3da69221c2e | https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/external/xgboost/subtree/rabit/wrapper/rabit.py#L283-L314 |
28,951 | apple/turicreate | src/unity/python/turicreate/toolkits/recommender/ranking_factorization_recommender.py | create | def create(observation_data,
user_id='user_id', item_id='item_id', target=None,
user_data=None, item_data=None,
num_factors=32,
regularization=1e-9,
linear_regularization=1e-9,
side_data_factorization=True,
ranking_regularization=0.25,
unobserved_rating_value=None,
num_sampled_negative_examples=4,
max_iterations=25,
sgd_step_size=0,
random_seed=0,
binary_target = False,
solver = 'auto',
verbose=True,
**kwargs):
"""Create a RankingFactorizationRecommender that learns latent factors for each
user and item and uses them to make rating predictions.
Parameters
----------
observation_data : SFrame
The dataset to use for training the model. It must contain a column of
user ids and a column of item ids. Each row represents an observed
interaction between the user and the item. The (user, item) pairs
are stored with the model so that they can later be excluded from
recommendations if desired. It can optionally contain a target ratings
column. All other columns are interpreted by the underlying model as
side features for the observations.
The user id and item id columns must be of type 'int' or 'str'. The
target column must be of type 'int' or 'float'.
user_id : string, optional
The name of the column in `observation_data` that corresponds to the
user id.
item_id : string, optional
The name of the column in `observation_data` that corresponds to the
item id.
target : string, optional
The `observation_data` can optionally contain a column of scores
representing ratings given by the users. If present, the name of this
column may be specified variables `target`.
user_data : SFrame, optional
Side information for the users. This SFrame must have a column with
the same name as what is specified by the `user_id` input parameter.
`user_data` can provide any amount of additional user-specific
information.
item_data : SFrame, optional
Side information for the items. This SFrame must have a column with
the same name as what is specified by the `item_id` input parameter.
`item_data` can provide any amount of additional item-specific
information.
num_factors : int, optional
Number of latent factors.
regularization : float, optional
L2 regularization for interaction terms. Default: 1e-10; a typical range
for this parameter is between 1e-12 and 1. Setting this to 0 may cause
numerical issues.
linear_regularization : float, optional
L2 regularization for linear term. Default: 1e-10; a typical range for this
parameter is between 1e-12 and 1. Setting this to 0 may cause numerical issues.
side_data_factorization : boolean, optional
Use factorization for modeling any additional features beyond the user
and item columns. If True, and side features or any additional columns are
present, then a Factorization Machine model is trained. Otherwise, only
the linear terms are fit to these features. See
:class:`turicreate.recommender.ranking_factorization_recommender.RankingFactorizationRecommender`
for more information. Default: True.
ranking_regularization : float, optional
Penalize the predicted value of user-item pairs not in the
training set. Larger values increase this penalization.
Suggested values: 0, 0.1, 0.5, 1. NOTE: if no target column
is present, this parameter is ignored.
unobserved_rating_value : float, optional
Penalize unobserved items with a larger predicted score than this value.
By default, the estimated 5% quantile is used (mean - 1.96*std_dev).
num_sampled_negative_examples : integer, optional
For each (user, item) pair in the data, the ranking sgd solver evaluates
this many randomly chosen unseen items for the negative example step.
Increasing this can give better performance at the expense of speed,
particularly when the number of items is large. Default is 4.
binary_target : boolean, optional
Assume the target column is composed of 0's and 1's. If True, use
logistic loss to fit the model.
max_iterations : int, optional
The training algorithm will make at most this many iterations through
the observed data. Default: 50.
sgd_step_size : float, optional
Step size for stochastic gradient descent. Smaller values generally
lead to more accurate models that take more time to train. The
default setting of 0 means that the step size is chosen by trying
several options on a small subset of the data.
random_seed : int, optional
The random seed used to choose the initial starting point for
model training. Note that some randomness in the training is
unavoidable, so models trained with the same random seed may still
differ. Default: 0.
solver : string, optional
Name of the solver to be used to solve the regression. See the
references for more detail on each solver. The available solvers for
this model are:
- *auto (default)*: automatically chooses the best solver for the data
and model parameters.
- *ials*: Implicit Alternating Least Squares [1].
- *adagrad*: Adaptive Gradient Stochastic Gradient Descent.
- *sgd*: Stochastic Gradient Descent
verbose : bool, optional
Enables verbose output.
kwargs : optional
Optional advanced keyword arguments passed in to the model
optimization procedure. These parameters do not typically
need to be changed.
Examples
--------
**Basic usage**
When given just user and item pairs, one can create a RankingFactorizationRecommender
as follows.
>>> sf = turicreate.SFrame({'user_id': ["0", "0", "0", "1", "1", "2", "2", "2"],
... 'item_id': ["a", "b", "c", "a", "b", "b", "c", "d"])
>>> from turicreate.recommender import ranking_factorization_recommender
>>> m1 = ranking_factorization_recommender.create(sf)
When a target column is present, one can include this to try and recommend
items that are rated highly.
>>> sf = turicreate.SFrame({'user_id': ["0", "0", "0", "1", "1", "2", "2", "2"],
... 'item_id': ["a", "b", "c", "a", "b", "b", "c", "d"],
... 'rating': [1, 3, 2, 5, 4, 1, 4, 3]})
>>> m1 = ranking_factorization_recommender.create(sf, target='rating')
**Including side features**
>>> user_info = turicreate.SFrame({'user_id': ["0", "1", "2"],
... 'name': ["Alice", "Bob", "Charlie"],
... 'numeric_feature': [0.1, 12, 22]})
>>> item_info = turicreate.SFrame({'item_id': ["a", "b", "c", "d"],
... 'name': ["item1", "item2", "item3", "item4"],
... 'dict_feature': [{'a' : 23}, {'a' : 13},
... {'b' : 1},
... {'a' : 23, 'b' : 32}]})
>>> m2 = ranking_factorization_recommender.create(sf, target='rating',
... user_data=user_info,
... item_data=item_info)
**Customizing ranking regularization**
Create a model that pushes predicted ratings of unobserved user-item
pairs toward 1 or below.
>>> m3 = ranking_factorization_recommender.create(sf, target='rating',
... ranking_regularization = 0.1,
... unobserved_rating_value = 1)
**Using the implicit alternating least squares model**
Ranking factorization also implements implicit alternating least squares [1] as
an alternative solver. This is enable using ``solver = 'ials'``.
>>> m3 = ranking_factorization_recommender.create(sf, target='rating',
solver = 'ials')
See Also
--------
:class:`turicreate.recommender.factorization_recommender.FactorizationRecommender`,
:class:`turicreate.recommender.ranking_factorization_recommender.RankingFactorizationRecommender`
References
-----------
[1] Collaborative Filtering for Implicit Feedback Datasets Hu, Y.; Koren,
Y.; Volinsky, C. IEEE International Conference on Data Mining
(ICDM 2008), IEEE (2008).
"""
from turicreate._cython.cy_server import QuietProgress
opts = {}
model_proxy = _turicreate.extensions.ranking_factorization_recommender()
model_proxy.init_options(opts)
if user_data is None:
user_data = _turicreate.SFrame()
if item_data is None:
item_data = _turicreate.SFrame()
nearest_items = _turicreate.SFrame()
if target is None:
binary_target = True
opts = {'user_id' : user_id,
'item_id' : item_id,
'target' : target,
'random_seed' : random_seed,
'num_factors' : num_factors,
'regularization' : regularization,
'linear_regularization' : linear_regularization,
'ranking_regularization' : ranking_regularization,
'binary_target' : binary_target,
'max_iterations' : max_iterations,
'side_data_factorization' : side_data_factorization,
'num_sampled_negative_examples' : num_sampled_negative_examples,
'solver' : solver,
# Has no effect here.
'sgd_step_size' : sgd_step_size}
if unobserved_rating_value is not None:
opts["unobserved_rating_value"] = unobserved_rating_value
if kwargs:
try:
possible_args = set(_get_default_options()["name"])
except (RuntimeError, KeyError):
possible_args = set()
bad_arguments = set(kwargs.keys()).difference(possible_args)
if bad_arguments:
raise TypeError("Bad Keyword Arguments: " + ', '.join(bad_arguments))
opts.update(kwargs)
extra_data = {"nearest_items" : _turicreate.SFrame()}
with QuietProgress(verbose):
model_proxy.train(observation_data, user_data, item_data, opts, extra_data)
return RankingFactorizationRecommender(model_proxy) | python | def create(observation_data,
user_id='user_id', item_id='item_id', target=None,
user_data=None, item_data=None,
num_factors=32,
regularization=1e-9,
linear_regularization=1e-9,
side_data_factorization=True,
ranking_regularization=0.25,
unobserved_rating_value=None,
num_sampled_negative_examples=4,
max_iterations=25,
sgd_step_size=0,
random_seed=0,
binary_target = False,
solver = 'auto',
verbose=True,
**kwargs):
"""Create a RankingFactorizationRecommender that learns latent factors for each
user and item and uses them to make rating predictions.
Parameters
----------
observation_data : SFrame
The dataset to use for training the model. It must contain a column of
user ids and a column of item ids. Each row represents an observed
interaction between the user and the item. The (user, item) pairs
are stored with the model so that they can later be excluded from
recommendations if desired. It can optionally contain a target ratings
column. All other columns are interpreted by the underlying model as
side features for the observations.
The user id and item id columns must be of type 'int' or 'str'. The
target column must be of type 'int' or 'float'.
user_id : string, optional
The name of the column in `observation_data` that corresponds to the
user id.
item_id : string, optional
The name of the column in `observation_data` that corresponds to the
item id.
target : string, optional
The `observation_data` can optionally contain a column of scores
representing ratings given by the users. If present, the name of this
column may be specified variables `target`.
user_data : SFrame, optional
Side information for the users. This SFrame must have a column with
the same name as what is specified by the `user_id` input parameter.
`user_data` can provide any amount of additional user-specific
information.
item_data : SFrame, optional
Side information for the items. This SFrame must have a column with
the same name as what is specified by the `item_id` input parameter.
`item_data` can provide any amount of additional item-specific
information.
num_factors : int, optional
Number of latent factors.
regularization : float, optional
L2 regularization for interaction terms. Default: 1e-10; a typical range
for this parameter is between 1e-12 and 1. Setting this to 0 may cause
numerical issues.
linear_regularization : float, optional
L2 regularization for linear term. Default: 1e-10; a typical range for this
parameter is between 1e-12 and 1. Setting this to 0 may cause numerical issues.
side_data_factorization : boolean, optional
Use factorization for modeling any additional features beyond the user
and item columns. If True, and side features or any additional columns are
present, then a Factorization Machine model is trained. Otherwise, only
the linear terms are fit to these features. See
:class:`turicreate.recommender.ranking_factorization_recommender.RankingFactorizationRecommender`
for more information. Default: True.
ranking_regularization : float, optional
Penalize the predicted value of user-item pairs not in the
training set. Larger values increase this penalization.
Suggested values: 0, 0.1, 0.5, 1. NOTE: if no target column
is present, this parameter is ignored.
unobserved_rating_value : float, optional
Penalize unobserved items with a larger predicted score than this value.
By default, the estimated 5% quantile is used (mean - 1.96*std_dev).
num_sampled_negative_examples : integer, optional
For each (user, item) pair in the data, the ranking sgd solver evaluates
this many randomly chosen unseen items for the negative example step.
Increasing this can give better performance at the expense of speed,
particularly when the number of items is large. Default is 4.
binary_target : boolean, optional
Assume the target column is composed of 0's and 1's. If True, use
logistic loss to fit the model.
max_iterations : int, optional
The training algorithm will make at most this many iterations through
the observed data. Default: 50.
sgd_step_size : float, optional
Step size for stochastic gradient descent. Smaller values generally
lead to more accurate models that take more time to train. The
default setting of 0 means that the step size is chosen by trying
several options on a small subset of the data.
random_seed : int, optional
The random seed used to choose the initial starting point for
model training. Note that some randomness in the training is
unavoidable, so models trained with the same random seed may still
differ. Default: 0.
solver : string, optional
Name of the solver to be used to solve the regression. See the
references for more detail on each solver. The available solvers for
this model are:
- *auto (default)*: automatically chooses the best solver for the data
and model parameters.
- *ials*: Implicit Alternating Least Squares [1].
- *adagrad*: Adaptive Gradient Stochastic Gradient Descent.
- *sgd*: Stochastic Gradient Descent
verbose : bool, optional
Enables verbose output.
kwargs : optional
Optional advanced keyword arguments passed in to the model
optimization procedure. These parameters do not typically
need to be changed.
Examples
--------
**Basic usage**
When given just user and item pairs, one can create a RankingFactorizationRecommender
as follows.
>>> sf = turicreate.SFrame({'user_id': ["0", "0", "0", "1", "1", "2", "2", "2"],
... 'item_id': ["a", "b", "c", "a", "b", "b", "c", "d"])
>>> from turicreate.recommender import ranking_factorization_recommender
>>> m1 = ranking_factorization_recommender.create(sf)
When a target column is present, one can include this to try and recommend
items that are rated highly.
>>> sf = turicreate.SFrame({'user_id': ["0", "0", "0", "1", "1", "2", "2", "2"],
... 'item_id': ["a", "b", "c", "a", "b", "b", "c", "d"],
... 'rating': [1, 3, 2, 5, 4, 1, 4, 3]})
>>> m1 = ranking_factorization_recommender.create(sf, target='rating')
**Including side features**
>>> user_info = turicreate.SFrame({'user_id': ["0", "1", "2"],
... 'name': ["Alice", "Bob", "Charlie"],
... 'numeric_feature': [0.1, 12, 22]})
>>> item_info = turicreate.SFrame({'item_id': ["a", "b", "c", "d"],
... 'name': ["item1", "item2", "item3", "item4"],
... 'dict_feature': [{'a' : 23}, {'a' : 13},
... {'b' : 1},
... {'a' : 23, 'b' : 32}]})
>>> m2 = ranking_factorization_recommender.create(sf, target='rating',
... user_data=user_info,
... item_data=item_info)
**Customizing ranking regularization**
Create a model that pushes predicted ratings of unobserved user-item
pairs toward 1 or below.
>>> m3 = ranking_factorization_recommender.create(sf, target='rating',
... ranking_regularization = 0.1,
... unobserved_rating_value = 1)
**Using the implicit alternating least squares model**
Ranking factorization also implements implicit alternating least squares [1] as
an alternative solver. This is enable using ``solver = 'ials'``.
>>> m3 = ranking_factorization_recommender.create(sf, target='rating',
solver = 'ials')
See Also
--------
:class:`turicreate.recommender.factorization_recommender.FactorizationRecommender`,
:class:`turicreate.recommender.ranking_factorization_recommender.RankingFactorizationRecommender`
References
-----------
[1] Collaborative Filtering for Implicit Feedback Datasets Hu, Y.; Koren,
Y.; Volinsky, C. IEEE International Conference on Data Mining
(ICDM 2008), IEEE (2008).
"""
from turicreate._cython.cy_server import QuietProgress
opts = {}
model_proxy = _turicreate.extensions.ranking_factorization_recommender()
model_proxy.init_options(opts)
if user_data is None:
user_data = _turicreate.SFrame()
if item_data is None:
item_data = _turicreate.SFrame()
nearest_items = _turicreate.SFrame()
if target is None:
binary_target = True
opts = {'user_id' : user_id,
'item_id' : item_id,
'target' : target,
'random_seed' : random_seed,
'num_factors' : num_factors,
'regularization' : regularization,
'linear_regularization' : linear_regularization,
'ranking_regularization' : ranking_regularization,
'binary_target' : binary_target,
'max_iterations' : max_iterations,
'side_data_factorization' : side_data_factorization,
'num_sampled_negative_examples' : num_sampled_negative_examples,
'solver' : solver,
# Has no effect here.
'sgd_step_size' : sgd_step_size}
if unobserved_rating_value is not None:
opts["unobserved_rating_value"] = unobserved_rating_value
if kwargs:
try:
possible_args = set(_get_default_options()["name"])
except (RuntimeError, KeyError):
possible_args = set()
bad_arguments = set(kwargs.keys()).difference(possible_args)
if bad_arguments:
raise TypeError("Bad Keyword Arguments: " + ', '.join(bad_arguments))
opts.update(kwargs)
extra_data = {"nearest_items" : _turicreate.SFrame()}
with QuietProgress(verbose):
model_proxy.train(observation_data, user_data, item_data, opts, extra_data)
return RankingFactorizationRecommender(model_proxy) | [
"def",
"create",
"(",
"observation_data",
",",
"user_id",
"=",
"'user_id'",
",",
"item_id",
"=",
"'item_id'",
",",
"target",
"=",
"None",
",",
"user_data",
"=",
"None",
",",
"item_data",
"=",
"None",
",",
"num_factors",
"=",
"32",
",",
"regularization",
"=",
"1e-9",
",",
"linear_regularization",
"=",
"1e-9",
",",
"side_data_factorization",
"=",
"True",
",",
"ranking_regularization",
"=",
"0.25",
",",
"unobserved_rating_value",
"=",
"None",
",",
"num_sampled_negative_examples",
"=",
"4",
",",
"max_iterations",
"=",
"25",
",",
"sgd_step_size",
"=",
"0",
",",
"random_seed",
"=",
"0",
",",
"binary_target",
"=",
"False",
",",
"solver",
"=",
"'auto'",
",",
"verbose",
"=",
"True",
",",
"*",
"*",
"kwargs",
")",
":",
"from",
"turicreate",
".",
"_cython",
".",
"cy_server",
"import",
"QuietProgress",
"opts",
"=",
"{",
"}",
"model_proxy",
"=",
"_turicreate",
".",
"extensions",
".",
"ranking_factorization_recommender",
"(",
")",
"model_proxy",
".",
"init_options",
"(",
"opts",
")",
"if",
"user_data",
"is",
"None",
":",
"user_data",
"=",
"_turicreate",
".",
"SFrame",
"(",
")",
"if",
"item_data",
"is",
"None",
":",
"item_data",
"=",
"_turicreate",
".",
"SFrame",
"(",
")",
"nearest_items",
"=",
"_turicreate",
".",
"SFrame",
"(",
")",
"if",
"target",
"is",
"None",
":",
"binary_target",
"=",
"True",
"opts",
"=",
"{",
"'user_id'",
":",
"user_id",
",",
"'item_id'",
":",
"item_id",
",",
"'target'",
":",
"target",
",",
"'random_seed'",
":",
"random_seed",
",",
"'num_factors'",
":",
"num_factors",
",",
"'regularization'",
":",
"regularization",
",",
"'linear_regularization'",
":",
"linear_regularization",
",",
"'ranking_regularization'",
":",
"ranking_regularization",
",",
"'binary_target'",
":",
"binary_target",
",",
"'max_iterations'",
":",
"max_iterations",
",",
"'side_data_factorization'",
":",
"side_data_factorization",
",",
"'num_sampled_negative_examples'",
":",
"num_sampled_negative_examples",
",",
"'solver'",
":",
"solver",
",",
"# Has no effect here.",
"'sgd_step_size'",
":",
"sgd_step_size",
"}",
"if",
"unobserved_rating_value",
"is",
"not",
"None",
":",
"opts",
"[",
"\"unobserved_rating_value\"",
"]",
"=",
"unobserved_rating_value",
"if",
"kwargs",
":",
"try",
":",
"possible_args",
"=",
"set",
"(",
"_get_default_options",
"(",
")",
"[",
"\"name\"",
"]",
")",
"except",
"(",
"RuntimeError",
",",
"KeyError",
")",
":",
"possible_args",
"=",
"set",
"(",
")",
"bad_arguments",
"=",
"set",
"(",
"kwargs",
".",
"keys",
"(",
")",
")",
".",
"difference",
"(",
"possible_args",
")",
"if",
"bad_arguments",
":",
"raise",
"TypeError",
"(",
"\"Bad Keyword Arguments: \"",
"+",
"', '",
".",
"join",
"(",
"bad_arguments",
")",
")",
"opts",
".",
"update",
"(",
"kwargs",
")",
"extra_data",
"=",
"{",
"\"nearest_items\"",
":",
"_turicreate",
".",
"SFrame",
"(",
")",
"}",
"with",
"QuietProgress",
"(",
"verbose",
")",
":",
"model_proxy",
".",
"train",
"(",
"observation_data",
",",
"user_data",
",",
"item_data",
",",
"opts",
",",
"extra_data",
")",
"return",
"RankingFactorizationRecommender",
"(",
"model_proxy",
")"
] | Create a RankingFactorizationRecommender that learns latent factors for each
user and item and uses them to make rating predictions.
Parameters
----------
observation_data : SFrame
The dataset to use for training the model. It must contain a column of
user ids and a column of item ids. Each row represents an observed
interaction between the user and the item. The (user, item) pairs
are stored with the model so that they can later be excluded from
recommendations if desired. It can optionally contain a target ratings
column. All other columns are interpreted by the underlying model as
side features for the observations.
The user id and item id columns must be of type 'int' or 'str'. The
target column must be of type 'int' or 'float'.
user_id : string, optional
The name of the column in `observation_data` that corresponds to the
user id.
item_id : string, optional
The name of the column in `observation_data` that corresponds to the
item id.
target : string, optional
The `observation_data` can optionally contain a column of scores
representing ratings given by the users. If present, the name of this
column may be specified variables `target`.
user_data : SFrame, optional
Side information for the users. This SFrame must have a column with
the same name as what is specified by the `user_id` input parameter.
`user_data` can provide any amount of additional user-specific
information.
item_data : SFrame, optional
Side information for the items. This SFrame must have a column with
the same name as what is specified by the `item_id` input parameter.
`item_data` can provide any amount of additional item-specific
information.
num_factors : int, optional
Number of latent factors.
regularization : float, optional
L2 regularization for interaction terms. Default: 1e-10; a typical range
for this parameter is between 1e-12 and 1. Setting this to 0 may cause
numerical issues.
linear_regularization : float, optional
L2 regularization for linear term. Default: 1e-10; a typical range for this
parameter is between 1e-12 and 1. Setting this to 0 may cause numerical issues.
side_data_factorization : boolean, optional
Use factorization for modeling any additional features beyond the user
and item columns. If True, and side features or any additional columns are
present, then a Factorization Machine model is trained. Otherwise, only
the linear terms are fit to these features. See
:class:`turicreate.recommender.ranking_factorization_recommender.RankingFactorizationRecommender`
for more information. Default: True.
ranking_regularization : float, optional
Penalize the predicted value of user-item pairs not in the
training set. Larger values increase this penalization.
Suggested values: 0, 0.1, 0.5, 1. NOTE: if no target column
is present, this parameter is ignored.
unobserved_rating_value : float, optional
Penalize unobserved items with a larger predicted score than this value.
By default, the estimated 5% quantile is used (mean - 1.96*std_dev).
num_sampled_negative_examples : integer, optional
For each (user, item) pair in the data, the ranking sgd solver evaluates
this many randomly chosen unseen items for the negative example step.
Increasing this can give better performance at the expense of speed,
particularly when the number of items is large. Default is 4.
binary_target : boolean, optional
Assume the target column is composed of 0's and 1's. If True, use
logistic loss to fit the model.
max_iterations : int, optional
The training algorithm will make at most this many iterations through
the observed data. Default: 50.
sgd_step_size : float, optional
Step size for stochastic gradient descent. Smaller values generally
lead to more accurate models that take more time to train. The
default setting of 0 means that the step size is chosen by trying
several options on a small subset of the data.
random_seed : int, optional
The random seed used to choose the initial starting point for
model training. Note that some randomness in the training is
unavoidable, so models trained with the same random seed may still
differ. Default: 0.
solver : string, optional
Name of the solver to be used to solve the regression. See the
references for more detail on each solver. The available solvers for
this model are:
- *auto (default)*: automatically chooses the best solver for the data
and model parameters.
- *ials*: Implicit Alternating Least Squares [1].
- *adagrad*: Adaptive Gradient Stochastic Gradient Descent.
- *sgd*: Stochastic Gradient Descent
verbose : bool, optional
Enables verbose output.
kwargs : optional
Optional advanced keyword arguments passed in to the model
optimization procedure. These parameters do not typically
need to be changed.
Examples
--------
**Basic usage**
When given just user and item pairs, one can create a RankingFactorizationRecommender
as follows.
>>> sf = turicreate.SFrame({'user_id': ["0", "0", "0", "1", "1", "2", "2", "2"],
... 'item_id': ["a", "b", "c", "a", "b", "b", "c", "d"])
>>> from turicreate.recommender import ranking_factorization_recommender
>>> m1 = ranking_factorization_recommender.create(sf)
When a target column is present, one can include this to try and recommend
items that are rated highly.
>>> sf = turicreate.SFrame({'user_id': ["0", "0", "0", "1", "1", "2", "2", "2"],
... 'item_id': ["a", "b", "c", "a", "b", "b", "c", "d"],
... 'rating': [1, 3, 2, 5, 4, 1, 4, 3]})
>>> m1 = ranking_factorization_recommender.create(sf, target='rating')
**Including side features**
>>> user_info = turicreate.SFrame({'user_id': ["0", "1", "2"],
... 'name': ["Alice", "Bob", "Charlie"],
... 'numeric_feature': [0.1, 12, 22]})
>>> item_info = turicreate.SFrame({'item_id': ["a", "b", "c", "d"],
... 'name': ["item1", "item2", "item3", "item4"],
... 'dict_feature': [{'a' : 23}, {'a' : 13},
... {'b' : 1},
... {'a' : 23, 'b' : 32}]})
>>> m2 = ranking_factorization_recommender.create(sf, target='rating',
... user_data=user_info,
... item_data=item_info)
**Customizing ranking regularization**
Create a model that pushes predicted ratings of unobserved user-item
pairs toward 1 or below.
>>> m3 = ranking_factorization_recommender.create(sf, target='rating',
... ranking_regularization = 0.1,
... unobserved_rating_value = 1)
**Using the implicit alternating least squares model**
Ranking factorization also implements implicit alternating least squares [1] as
an alternative solver. This is enable using ``solver = 'ials'``.
>>> m3 = ranking_factorization_recommender.create(sf, target='rating',
solver = 'ials')
See Also
--------
:class:`turicreate.recommender.factorization_recommender.FactorizationRecommender`,
:class:`turicreate.recommender.ranking_factorization_recommender.RankingFactorizationRecommender`
References
-----------
[1] Collaborative Filtering for Implicit Feedback Datasets Hu, Y.; Koren,
Y.; Volinsky, C. IEEE International Conference on Data Mining
(ICDM 2008), IEEE (2008). | [
"Create",
"a",
"RankingFactorizationRecommender",
"that",
"learns",
"latent",
"factors",
"for",
"each",
"user",
"and",
"item",
"and",
"uses",
"them",
"to",
"make",
"rating",
"predictions",
"."
] | 74514c3f99e25b46f22c6e02977fe3da69221c2e | https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/unity/python/turicreate/toolkits/recommender/ranking_factorization_recommender.py#L19-L270 |
28,952 | apple/turicreate | src/external/coremltools_wrap/coremltools/coremltools/converters/sklearn/_converter_internal.py | _get_converter_module | def _get_converter_module(sk_obj):
"""
Returns the module holding the conversion functions for a
particular model).
"""
try:
cv_idx = _converter_lookup[sk_obj.__class__]
except KeyError:
raise ValueError(
"Transformer '%s' not supported; supported transformers are %s."
% (repr(sk_obj),
",".join(k.__name__ for k in _converter_module_list)))
return _converter_module_list[cv_idx] | python | def _get_converter_module(sk_obj):
"""
Returns the module holding the conversion functions for a
particular model).
"""
try:
cv_idx = _converter_lookup[sk_obj.__class__]
except KeyError:
raise ValueError(
"Transformer '%s' not supported; supported transformers are %s."
% (repr(sk_obj),
",".join(k.__name__ for k in _converter_module_list)))
return _converter_module_list[cv_idx] | [
"def",
"_get_converter_module",
"(",
"sk_obj",
")",
":",
"try",
":",
"cv_idx",
"=",
"_converter_lookup",
"[",
"sk_obj",
".",
"__class__",
"]",
"except",
"KeyError",
":",
"raise",
"ValueError",
"(",
"\"Transformer '%s' not supported; supported transformers are %s.\"",
"%",
"(",
"repr",
"(",
"sk_obj",
")",
",",
"\",\"",
".",
"join",
"(",
"k",
".",
"__name__",
"for",
"k",
"in",
"_converter_module_list",
")",
")",
")",
"return",
"_converter_module_list",
"[",
"cv_idx",
"]"
] | Returns the module holding the conversion functions for a
particular model). | [
"Returns",
"the",
"module",
"holding",
"the",
"conversion",
"functions",
"for",
"a",
"particular",
"model",
")",
"."
] | 74514c3f99e25b46f22c6e02977fe3da69221c2e | https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/external/coremltools_wrap/coremltools/coremltools/converters/sklearn/_converter_internal.py#L87-L100 |
28,953 | apple/turicreate | src/external/coremltools_wrap/coremltools/coremltools/models/tree_ensemble.py | TreeEnsembleBase.set_post_evaluation_transform | def set_post_evaluation_transform(self, value):
r"""
Set the post processing transform applied after the prediction value
from the tree ensemble.
Parameters
----------
value: str
A value denoting the transform applied. Possible values are:
- "NoTransform" (default). Do not apply a transform.
- "Classification_SoftMax".
Apply a softmax function to the outcome to produce normalized,
non-negative scores that sum to 1. The transformation applied to
dimension `i` is equivalent to:
.. math::
\frac{e^{x_i}}{\sum_j e^{x_j}}
Note: This is the output transformation applied by the XGBoost package
with multiclass classification.
- "Regression_Logistic".
Applies a logistic transform the predicted value, specifically:
.. math::
(1 + e^{-v})^{-1}
This is the transformation used in binary classification.
"""
self.tree_spec.postEvaluationTransform = \
_TreeEnsemble_pb2.TreeEnsemblePostEvaluationTransform.Value(value) | python | def set_post_evaluation_transform(self, value):
r"""
Set the post processing transform applied after the prediction value
from the tree ensemble.
Parameters
----------
value: str
A value denoting the transform applied. Possible values are:
- "NoTransform" (default). Do not apply a transform.
- "Classification_SoftMax".
Apply a softmax function to the outcome to produce normalized,
non-negative scores that sum to 1. The transformation applied to
dimension `i` is equivalent to:
.. math::
\frac{e^{x_i}}{\sum_j e^{x_j}}
Note: This is the output transformation applied by the XGBoost package
with multiclass classification.
- "Regression_Logistic".
Applies a logistic transform the predicted value, specifically:
.. math::
(1 + e^{-v})^{-1}
This is the transformation used in binary classification.
"""
self.tree_spec.postEvaluationTransform = \
_TreeEnsemble_pb2.TreeEnsemblePostEvaluationTransform.Value(value) | [
"def",
"set_post_evaluation_transform",
"(",
"self",
",",
"value",
")",
":",
"self",
".",
"tree_spec",
".",
"postEvaluationTransform",
"=",
"_TreeEnsemble_pb2",
".",
"TreeEnsemblePostEvaluationTransform",
".",
"Value",
"(",
"value",
")"
] | r"""
Set the post processing transform applied after the prediction value
from the tree ensemble.
Parameters
----------
value: str
A value denoting the transform applied. Possible values are:
- "NoTransform" (default). Do not apply a transform.
- "Classification_SoftMax".
Apply a softmax function to the outcome to produce normalized,
non-negative scores that sum to 1. The transformation applied to
dimension `i` is equivalent to:
.. math::
\frac{e^{x_i}}{\sum_j e^{x_j}}
Note: This is the output transformation applied by the XGBoost package
with multiclass classification.
- "Regression_Logistic".
Applies a logistic transform the predicted value, specifically:
.. math::
(1 + e^{-v})^{-1}
This is the transformation used in binary classification. | [
"r",
"Set",
"the",
"post",
"processing",
"transform",
"applied",
"after",
"the",
"prediction",
"value",
"from",
"the",
"tree",
"ensemble",
"."
] | 74514c3f99e25b46f22c6e02977fe3da69221c2e | https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/external/coremltools_wrap/coremltools/coremltools/models/tree_ensemble.py#L57-L97 |
def add_branch_node(self, tree_id, node_id, feature_index, feature_value,
        branch_mode, true_child_id, false_child_id, relative_hit_rate = None,
        missing_value_tracks_true_child = False):
    """
    Add a branch node to the tree ensemble.

    Parameters
    ----------
    tree_id: int
        ID of the tree to add the node to.

    node_id: int
        ID of the node within the tree.

    feature_index: int
        Index of the feature in the input being split on.

    feature_value: double or int
        The value used in the feature comparison determining the traversal
        direction from this node.

    branch_mode: str
        Branch mode of the node, specifying the condition under which the
        node referenced by `true_child_id` is called next.

        Must be one of the following:

          - `"BranchOnValueLessThanEqual"`. Traverse to node `true_child_id`
            if `input[feature_index] <= feature_value`, and `false_child_id`
            otherwise.

          - `"BranchOnValueLessThan"`. Traverse to node `true_child_id`
            if `input[feature_index] < feature_value`, and `false_child_id`
            otherwise.

          - `"BranchOnValueGreaterThanEqual"`. Traverse to node `true_child_id`
            if `input[feature_index] >= feature_value`, and `false_child_id`
            otherwise.

          - `"BranchOnValueGreaterThan"`. Traverse to node `true_child_id`
            if `input[feature_index] > feature_value`, and `false_child_id`
            otherwise.

          - `"BranchOnValueEqual"`. Traverse to node `true_child_id`
            if `input[feature_index] == feature_value`, and `false_child_id`
            otherwise.

          - `"BranchOnValueNotEqual"`. Traverse to node `true_child_id`
            if `input[feature_index] != feature_value`, and `false_child_id`
            otherwise.

    true_child_id: int
        ID of the child under the true condition of the split. An error will
        be raised at model validation if this does not match the `node_id`
        of a node instantiated by `add_branch_node` or `add_leaf_node` within
        this `tree_id`.

    false_child_id: int
        ID of the child under the false condition of the split. An error will
        be raised at model validation if this does not match the `node_id`
        of a node instantiated by `add_branch_node` or `add_leaf_node` within
        this `tree_id`.

    relative_hit_rate: float [optional]
        When the model is compiled by Core ML, this gives hints about which
        node is more likely to be hit on evaluation, allowing for additional
        optimizations. The values can be on any scale, with the values
        between child nodes being compared relative to each other.

    missing_value_tracks_true_child: bool [optional]
        If the training data contains NaN values or missing values, then
        this flag determines which direction a NaN value traverses.
    """
    node = self.tree_parameters.nodes.add()

    # Identity of the node within the ensemble.
    node.treeId = tree_id
    node.nodeId = node_id

    # The split itself: which feature is compared, against what value, and
    # the comparison mode that decides the traversal direction.  An
    # unknown branch_mode raises ValueError from the protobuf enum lookup.
    node.branchFeatureIndex = feature_index
    node.branchFeatureValue = feature_value
    behavior_enum = _TreeEnsemble_pb2.TreeEnsembleParameters.TreeNode.TreeNodeBehavior
    node.nodeBehavior = behavior_enum.Value(branch_mode)

    # Outgoing edges of the split.
    node.trueChildNodeId = true_child_id
    node.falseChildNodeId = false_child_id

    # Optional evaluation hints.
    if relative_hit_rate is not None:
        node.relativeHitRate = relative_hit_rate
    node.missingValueTracksTrueChild = missing_value_tracks_true_child
branch_mode, true_child_id, false_child_id, relative_hit_rate = None,
missing_value_tracks_true_child = False):
"""
Add a branch node to the tree ensemble.
Parameters
----------
tree_id: int
ID of the tree to add the node to.
node_id: int
ID of the node within the tree.
feature_index: int
Index of the feature in the input being split on.
feature_value: double or int
The value used in the feature comparison determining the traversal
direction from this node.
branch_mode: str
Branch mode of the node, specifying the condition under which the node
referenced by `true_child_id` is called next.
Must be one of the following:
- `"BranchOnValueLessThanEqual"`. Traverse to node `true_child_id`
if `input[feature_index] <= feature_value`, and `false_child_id`
otherwise.
- `"BranchOnValueLessThan"`. Traverse to node `true_child_id`
if `input[feature_index] < feature_value`, and `false_child_id`
otherwise.
- `"BranchOnValueGreaterThanEqual"`. Traverse to node `true_child_id`
if `input[feature_index] >= feature_value`, and `false_child_id`
otherwise.
- `"BranchOnValueGreaterThan"`. Traverse to node `true_child_id`
if `input[feature_index] > feature_value`, and `false_child_id`
otherwise.
- `"BranchOnValueEqual"`. Traverse to node `true_child_id`
if `input[feature_index] == feature_value`, and `false_child_id`
otherwise.
- `"BranchOnValueNotEqual"`. Traverse to node `true_child_id`
if `input[feature_index] != feature_value`, and `false_child_id`
otherwise.
true_child_id: int
ID of the child under the true condition of the split. An error will
be raised at model validation if this does not match the `node_id`
of a node instantiated by `add_branch_node` or `add_leaf_node` within
this `tree_id`.
false_child_id: int
ID of the child under the false condition of the split. An error will
be raised at model validation if this does not match the `node_id`
of a node instantiated by `add_branch_node` or `add_leaf_node` within
this `tree_id`.
relative_hit_rate: float [optional]
When the model is compiled by Core ML, this gives hints to
Core ML about which node is more likely to be hit on evaluation,
allowing for additional optimizations. The values can be on any scale,
with the values between child nodes being compared relative to each
other.
missing_value_tracks_true_child: bool [optional]
If the training data contains NaN values or missing values, then this
flag determines which direction a NaN value traverses.
"""
spec_node = self.tree_parameters.nodes.add()
spec_node.treeId = tree_id
spec_node.nodeId = node_id
spec_node.branchFeatureIndex = feature_index
spec_node.branchFeatureValue = feature_value
spec_node.trueChildNodeId = true_child_id
spec_node.falseChildNodeId = false_child_id
spec_node.nodeBehavior = \
_TreeEnsemble_pb2.TreeEnsembleParameters.TreeNode.TreeNodeBehavior.Value(branch_mode)
if relative_hit_rate is not None:
spec_node.relativeHitRate = relative_hit_rate
spec_node.missingValueTracksTrueChild = missing_value_tracks_true_child | [
"def",
"add_branch_node",
"(",
"self",
",",
"tree_id",
",",
"node_id",
",",
"feature_index",
",",
"feature_value",
",",
"branch_mode",
",",
"true_child_id",
",",
"false_child_id",
",",
"relative_hit_rate",
"=",
"None",
",",
"missing_value_tracks_true_child",
"=",
"False",
")",
":",
"spec_node",
"=",
"self",
".",
"tree_parameters",
".",
"nodes",
".",
"add",
"(",
")",
"spec_node",
".",
"treeId",
"=",
"tree_id",
"spec_node",
".",
"nodeId",
"=",
"node_id",
"spec_node",
".",
"branchFeatureIndex",
"=",
"feature_index",
"spec_node",
".",
"branchFeatureValue",
"=",
"feature_value",
"spec_node",
".",
"trueChildNodeId",
"=",
"true_child_id",
"spec_node",
".",
"falseChildNodeId",
"=",
"false_child_id",
"spec_node",
".",
"nodeBehavior",
"=",
"_TreeEnsemble_pb2",
".",
"TreeEnsembleParameters",
".",
"TreeNode",
".",
"TreeNodeBehavior",
".",
"Value",
"(",
"branch_mode",
")",
"if",
"relative_hit_rate",
"is",
"not",
"None",
":",
"spec_node",
".",
"relativeHitRate",
"=",
"relative_hit_rate",
"spec_node",
".",
"missingValueTracksTrueChild",
"=",
"missing_value_tracks_true_child"
] | Add a branch node to the tree ensemble.
Parameters
----------
tree_id: int
ID of the tree to add the node to.
node_id: int
ID of the node within the tree.
feature_index: int
Index of the feature in the input being split on.
feature_value: double or int
The value used in the feature comparison determining the traversal
direction from this node.
branch_mode: str
Branch mode of the node, specifying the condition under which the node
referenced by `true_child_id` is called next.
Must be one of the following:
- `"BranchOnValueLessThanEqual"`. Traverse to node `true_child_id`
if `input[feature_index] <= feature_value`, and `false_child_id`
otherwise.
- `"BranchOnValueLessThan"`. Traverse to node `true_child_id`
if `input[feature_index] < feature_value`, and `false_child_id`
otherwise.
- `"BranchOnValueGreaterThanEqual"`. Traverse to node `true_child_id`
if `input[feature_index] >= feature_value`, and `false_child_id`
otherwise.
- `"BranchOnValueGreaterThan"`. Traverse to node `true_child_id`
if `input[feature_index] > feature_value`, and `false_child_id`
otherwise.
- `"BranchOnValueEqual"`. Traverse to node `true_child_id`
if `input[feature_index] == feature_value`, and `false_child_id`
otherwise.
- `"BranchOnValueNotEqual"`. Traverse to node `true_child_id`
if `input[feature_index] != feature_value`, and `false_child_id`
otherwise.
true_child_id: int
ID of the child under the true condition of the split. An error will
be raised at model validation if this does not match the `node_id`
of a node instantiated by `add_branch_node` or `add_leaf_node` within
this `tree_id`.
false_child_id: int
ID of the child under the false condition of the split. An error will
be raised at model validation if this does not match the `node_id`
of a node instantiated by `add_branch_node` or `add_leaf_node` within
this `tree_id`.
relative_hit_rate: float [optional]
When the model is compiled by Core ML, this gives hints to
Core ML about which node is more likely to be hit on evaluation,
allowing for additional optimizations. The values can be on any scale,
with the values between child nodes being compared relative to each
other.
missing_value_tracks_true_child: bool [optional]
If the training data contains NaN values or missing values, then this
flag determines which direction a NaN value traverses. | [
"Add",
"a",
"branch",
"node",
"to",
"the",
"tree",
"ensemble",
"."
] | 74514c3f99e25b46f22c6e02977fe3da69221c2e | https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/external/coremltools_wrap/coremltools/coremltools/models/tree_ensemble.py#L99-L186 |
def add_leaf_node(self, tree_id, node_id, values, relative_hit_rate = None):
    """
    Add a leaf node to the tree ensemble.

    Parameters
    ----------
    tree_id: int
        ID of the tree to add the node to.

    node_id: int
        ID of the node within the tree.

    values: [float | int | list | dict]
        Value(s) at the leaf node to add to the prediction when this node is
        activated. If the prediction dimension of the tree is 1, then the
        value is specified as a float or integer value.

        For multidimensional predictions, the values can be a list of numbers
        with length matching the dimension of the predictions or a dictionary
        mapping index to value added to that dimension.

        Note that the dimension of any tree must match the dimension given
        when :py:meth:`set_default_prediction_value` is called.

    relative_hit_rate: float [optional]
        Hint about how likely this node is to be reached on evaluation,
        enabling additional optimizations when the model is compiled.
    """
    spec_node = self.tree_parameters.nodes.add()
    spec_node.treeId = tree_id
    spec_node.nodeId = node_id
    spec_node.nodeBehavior = \
        _TreeEnsemble_pb2.TreeEnsembleParameters.TreeNode.TreeNodeBehavior.Value('LeafNode')

    # `collections.Iterable` was removed in Python 3.10; prefer the
    # `collections.abc` location and fall back for older interpreters.
    try:
        _iterable_type = _collections.abc.Iterable
    except AttributeError:  # pragma: no cover - legacy Python
        _iterable_type = _collections.Iterable

    # Normalize a scalar leaf value to a one-element list so the
    # evaluation info can be populated uniformly below.
    if not isinstance(values, _iterable_type):
        values = [values]

    if relative_hit_rate is not None:
        spec_node.relativeHitRate = relative_hit_rate

    # Dictionaries map prediction-dimension index -> value; sequences are
    # implicitly indexed by position.  (Local renamed from `iter` to avoid
    # shadowing the builtin.)
    if type(values) == dict:
        items = values.items()
    else:
        items = enumerate(values)

    for index, value in items:
        ev_info = spec_node.evaluationInfo.add()
        ev_info.evaluationIndex = index
        ev_info.evaluationValue = float(value)
    # NOTE: the original implementation re-assigned nodeBehavior to
    # 'LeafNode' a second time here; the redundant assignment was removed.
"""
Add a leaf node to the tree ensemble.
Parameters
----------
tree_id: int
ID of the tree to add the node to.
node_id: int
ID of the node within the tree.
values: [float | int | list | dict]
Value(s) at the leaf node to add to the prediction when this node is
activated. If the prediction dimension of the tree is 1, then the
value is specified as a float or integer value.
For multidimensional predictions, the values can be a list of numbers
with length matching the dimension of the predictions or a dictionary
mapping index to value added to that dimension.
Note that the dimension of any tree must match the dimension given
when :py:meth:`set_default_prediction_value` is called.
"""
spec_node = self.tree_parameters.nodes.add()
spec_node.treeId = tree_id
spec_node.nodeId = node_id
spec_node.nodeBehavior = \
_TreeEnsemble_pb2.TreeEnsembleParameters.TreeNode.TreeNodeBehavior.Value('LeafNode')
if not isinstance(values, _collections.Iterable):
values = [values]
if relative_hit_rate is not None:
spec_node.relativeHitRate = relative_hit_rate
if type(values) == dict:
iter = values.items()
else:
iter = enumerate(values)
for index, value in iter:
ev_info = spec_node.evaluationInfo.add()
ev_info.evaluationIndex = index
ev_info.evaluationValue = float(value)
spec_node.nodeBehavior = \
_TreeEnsemble_pb2.TreeEnsembleParameters.TreeNode.TreeNodeBehavior.Value('LeafNode') | [
"def",
"add_leaf_node",
"(",
"self",
",",
"tree_id",
",",
"node_id",
",",
"values",
",",
"relative_hit_rate",
"=",
"None",
")",
":",
"spec_node",
"=",
"self",
".",
"tree_parameters",
".",
"nodes",
".",
"add",
"(",
")",
"spec_node",
".",
"treeId",
"=",
"tree_id",
"spec_node",
".",
"nodeId",
"=",
"node_id",
"spec_node",
".",
"nodeBehavior",
"=",
"_TreeEnsemble_pb2",
".",
"TreeEnsembleParameters",
".",
"TreeNode",
".",
"TreeNodeBehavior",
".",
"Value",
"(",
"'LeafNode'",
")",
"if",
"not",
"isinstance",
"(",
"values",
",",
"_collections",
".",
"Iterable",
")",
":",
"values",
"=",
"[",
"values",
"]",
"if",
"relative_hit_rate",
"is",
"not",
"None",
":",
"spec_node",
".",
"relativeHitRate",
"=",
"relative_hit_rate",
"if",
"type",
"(",
"values",
")",
"==",
"dict",
":",
"iter",
"=",
"values",
".",
"items",
"(",
")",
"else",
":",
"iter",
"=",
"enumerate",
"(",
"values",
")",
"for",
"index",
",",
"value",
"in",
"iter",
":",
"ev_info",
"=",
"spec_node",
".",
"evaluationInfo",
".",
"add",
"(",
")",
"ev_info",
".",
"evaluationIndex",
"=",
"index",
"ev_info",
".",
"evaluationValue",
"=",
"float",
"(",
"value",
")",
"spec_node",
".",
"nodeBehavior",
"=",
"_TreeEnsemble_pb2",
".",
"TreeEnsembleParameters",
".",
"TreeNode",
".",
"TreeNodeBehavior",
".",
"Value",
"(",
"'LeafNode'",
")"
] | Add a leaf node to the tree ensemble.
Parameters
----------
tree_id: int
ID of the tree to add the node to.
node_id: int
ID of the node within the tree.
values: [float | int | list | dict]
Value(s) at the leaf node to add to the prediction when this node is
activated. If the prediction dimension of the tree is 1, then the
value is specified as a float or integer value.
For multidimensional predictions, the values can be a list of numbers
with length matching the dimension of the predictions or a dictionary
mapping index to value added to that dimension.
Note that the dimension of any tree must match the dimension given
when :py:meth:`set_default_prediction_value` is called. | [
"Add",
"a",
"leaf",
"node",
"to",
"the",
"tree",
"ensemble",
"."
] | 74514c3f99e25b46f22c6e02977fe3da69221c2e | https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/external/coremltools_wrap/coremltools/coremltools/models/tree_ensemble.py#L188-L235 |
def create (raw_properties = None):
    """ Creates a new 'PropertySet' instance for the given raw properties,
    or returns an already existing one.

    'raw_properties' is either a list of property.Property instances or a
    list of strings parseable by property.create_from_string; it defaults
    to the empty list.  (The previous signature used the mutable default
    argument `[]`; `None` is used instead, per standard Python practice.
    The argument was never mutated, so behavior is unchanged.)
    """
    if raw_properties is None:
        raw_properties = []
    assert (is_iterable_typed(raw_properties, property.Property)
            or is_iterable_typed(raw_properties, basestring))
    # FIXME: propagate to callers.
    if len(raw_properties) > 0 and isinstance(raw_properties[0], property.Property):
        x = raw_properties
    else:
        x = [property.create_from_string(ps) for ps in raw_properties]

    # These two lines of code are optimized to the current state
    # of the Property class. Since this function acts as the caching
    # frontend to the PropertySet class modifying these two lines
    # could have a severe performance penalty. Be careful.
    # It would be faster to sort by p.id, but some projects may rely
    # on the fact that the properties are ordered alphabetically. So,
    # we maintain alphabetical sorting so as to maintain backward compatibility.
    x = sorted(set(x), key=lambda p: (p.feature.name, p.value, p.condition))
    key = tuple(p.id for p in x)

    if key not in __cache:
        __cache [key] = PropertySet(x)

    return __cache [key]
""" Creates a new 'PropertySet' instance for the given raw properties,
or returns an already existing one.
"""
assert (is_iterable_typed(raw_properties, property.Property)
or is_iterable_typed(raw_properties, basestring))
# FIXME: propagate to callers.
if len(raw_properties) > 0 and isinstance(raw_properties[0], property.Property):
x = raw_properties
else:
x = [property.create_from_string(ps) for ps in raw_properties]
# These two lines of code are optimized to the current state
# of the Property class. Since this function acts as the caching
# frontend to the PropertySet class modifying these two lines
# could have a severe performance penalty. Be careful.
# It would be faster to sort by p.id, but some projects may rely
# on the fact that the properties are ordered alphabetically. So,
# we maintain alphabetical sorting so as to maintain backward compatibility.
x = sorted(set(x), key=lambda p: (p.feature.name, p.value, p.condition))
key = tuple(p.id for p in x)
if key not in __cache:
__cache [key] = PropertySet(x)
return __cache [key] | [
"def",
"create",
"(",
"raw_properties",
"=",
"[",
"]",
")",
":",
"assert",
"(",
"is_iterable_typed",
"(",
"raw_properties",
",",
"property",
".",
"Property",
")",
"or",
"is_iterable_typed",
"(",
"raw_properties",
",",
"basestring",
")",
")",
"# FIXME: propagate to callers.",
"if",
"len",
"(",
"raw_properties",
")",
">",
"0",
"and",
"isinstance",
"(",
"raw_properties",
"[",
"0",
"]",
",",
"property",
".",
"Property",
")",
":",
"x",
"=",
"raw_properties",
"else",
":",
"x",
"=",
"[",
"property",
".",
"create_from_string",
"(",
"ps",
")",
"for",
"ps",
"in",
"raw_properties",
"]",
"# These two lines of code are optimized to the current state",
"# of the Property class. Since this function acts as the caching",
"# frontend to the PropertySet class modifying these two lines",
"# could have a severe performance penalty. Be careful.",
"# It would be faster to sort by p.id, but some projects may rely",
"# on the fact that the properties are ordered alphabetically. So,",
"# we maintain alphabetical sorting so as to maintain backward compatibility.",
"x",
"=",
"sorted",
"(",
"set",
"(",
"x",
")",
",",
"key",
"=",
"lambda",
"p",
":",
"(",
"p",
".",
"feature",
".",
"name",
",",
"p",
".",
"value",
",",
"p",
".",
"condition",
")",
")",
"key",
"=",
"tuple",
"(",
"p",
".",
"id",
"for",
"p",
"in",
"x",
")",
"if",
"key",
"not",
"in",
"__cache",
":",
"__cache",
"[",
"key",
"]",
"=",
"PropertySet",
"(",
"x",
")",
"return",
"__cache",
"[",
"key",
"]"
] | Creates a new 'PropertySet' instance for the given raw properties,
or returns an already existing one. | [
"Creates",
"a",
"new",
"PropertySet",
"instance",
"for",
"the",
"given",
"raw",
"properties",
"or",
"returns",
"an",
"already",
"existing",
"one",
"."
] | 74514c3f99e25b46f22c6e02977fe3da69221c2e | https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/deps/src/boost_1_68_0/tools/build/src/build/property_set.py#L36-L61 |
def create_with_validation (raw_properties):
    """ Creates new 'PropertySet' instances after checking
    that all properties are valid and converting implicit
    properties into gristed form.
    """
    assert is_iterable_typed(raw_properties, basestring)
    # Parse every raw string first, validate the whole batch, and only
    # then hand the properties to the caching 'create' frontend.
    parsed = [property.create_from_string(raw) for raw in raw_properties]
    property.validate(parsed)
    return create(parsed)
""" Creates new 'PropertySet' instances after checking
that all properties are valid and converting implicit
properties into gristed form.
"""
assert is_iterable_typed(raw_properties, basestring)
properties = [property.create_from_string(s) for s in raw_properties]
property.validate(properties)
return create(properties) | [
"def",
"create_with_validation",
"(",
"raw_properties",
")",
":",
"assert",
"is_iterable_typed",
"(",
"raw_properties",
",",
"basestring",
")",
"properties",
"=",
"[",
"property",
".",
"create_from_string",
"(",
"s",
")",
"for",
"s",
"in",
"raw_properties",
"]",
"property",
".",
"validate",
"(",
"properties",
")",
"return",
"create",
"(",
"properties",
")"
] | Creates new 'PropertySet' instances after checking
that all properties are valid and converting implicit
properties into gristed form. | [
"Creates",
"new",
"PropertySet",
"instances",
"after",
"checking",
"that",
"all",
"properties",
"are",
"valid",
"and",
"converting",
"implicit",
"properties",
"into",
"gristed",
"form",
"."
] | 74514c3f99e25b46f22c6e02977fe3da69221c2e | https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/deps/src/boost_1_68_0/tools/build/src/build/property_set.py#L63-L72 |
def create_from_user_input(raw_properties, jamfile_module, location):
    """Creates a property-set from the input given by the user, in the
    context of 'jamfile-module' at 'location'"""
    assert is_iterable_typed(raw_properties, basestring)
    assert isinstance(jamfile_module, basestring)
    assert isinstance(location, basestring)

    # Parse the raw strings, then run the sequence of translations that
    # resolve user-level notation into fully-qualified properties.
    props = property.create_from_strings(raw_properties, True)
    props = property.translate_paths(props, location)
    props = property.translate_indirect(props, jamfile_module)

    # Dependency translation needs a project id; fall back to the absolute
    # location when the Jamfile does not declare one.
    project_id = get_manager().projects().attributeDefault(jamfile_module, 'id', None)
    if not project_id:
        project_id = os.path.abspath(location)
    props = property.translate_dependencies(props, project_id, location)

    props = property.expand_subfeatures_in_conditions(props)
    return create(props)
"""Creates a property-set from the input given by the user, in the
context of 'jamfile-module' at 'location'"""
assert is_iterable_typed(raw_properties, basestring)
assert isinstance(jamfile_module, basestring)
assert isinstance(location, basestring)
properties = property.create_from_strings(raw_properties, True)
properties = property.translate_paths(properties, location)
properties = property.translate_indirect(properties, jamfile_module)
project_id = get_manager().projects().attributeDefault(jamfile_module, 'id', None)
if not project_id:
project_id = os.path.abspath(location)
properties = property.translate_dependencies(properties, project_id, location)
properties = property.expand_subfeatures_in_conditions(properties)
return create(properties) | [
"def",
"create_from_user_input",
"(",
"raw_properties",
",",
"jamfile_module",
",",
"location",
")",
":",
"assert",
"is_iterable_typed",
"(",
"raw_properties",
",",
"basestring",
")",
"assert",
"isinstance",
"(",
"jamfile_module",
",",
"basestring",
")",
"assert",
"isinstance",
"(",
"location",
",",
"basestring",
")",
"properties",
"=",
"property",
".",
"create_from_strings",
"(",
"raw_properties",
",",
"True",
")",
"properties",
"=",
"property",
".",
"translate_paths",
"(",
"properties",
",",
"location",
")",
"properties",
"=",
"property",
".",
"translate_indirect",
"(",
"properties",
",",
"jamfile_module",
")",
"project_id",
"=",
"get_manager",
"(",
")",
".",
"projects",
"(",
")",
".",
"attributeDefault",
"(",
"jamfile_module",
",",
"'id'",
",",
"None",
")",
"if",
"not",
"project_id",
":",
"project_id",
"=",
"os",
".",
"path",
".",
"abspath",
"(",
"location",
")",
"properties",
"=",
"property",
".",
"translate_dependencies",
"(",
"properties",
",",
"project_id",
",",
"location",
")",
"properties",
"=",
"property",
".",
"expand_subfeatures_in_conditions",
"(",
"properties",
")",
"return",
"create",
"(",
"properties",
")"
] | Creates a property-set from the input given by the user, in the
context of 'jamfile-module' at 'location | [
"Creates",
"a",
"property",
"-",
"set",
"from",
"the",
"input",
"given",
"by",
"the",
"user",
"in",
"the",
"context",
"of",
"jamfile",
"-",
"module",
"at",
"location"
] | 74514c3f99e25b46f22c6e02977fe3da69221c2e | https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/deps/src/boost_1_68_0/tools/build/src/build/property_set.py#L79-L94 |
def base (self):
    """ Returns properties that are neither incidental nor free.
    """
    # Lazily-parsed properties are filtered on the fly; the eagerly
    # classified ones in self.base_ are appended afterwards.
    kept = []
    for prop in self.lazy_properties:
        if not (prop.feature.incidental or prop.feature.free):
            kept.append(prop)
    kept.extend(self.base_)
    return kept
""" Returns properties that are neither incidental nor free.
"""
result = [p for p in self.lazy_properties
if not(p.feature.incidental or p.feature.free)]
result.extend(self.base_)
return result | [
"def",
"base",
"(",
"self",
")",
":",
"result",
"=",
"[",
"p",
"for",
"p",
"in",
"self",
".",
"lazy_properties",
"if",
"not",
"(",
"p",
".",
"feature",
".",
"incidental",
"or",
"p",
".",
"feature",
".",
"free",
")",
"]",
"result",
".",
"extend",
"(",
"self",
".",
"base_",
")",
"return",
"result"
] | Returns properties that are neither incidental nor free. | [
"Returns",
"properties",
"that",
"are",
"neither",
"incidental",
"nor",
"free",
"."
] | 74514c3f99e25b46f22c6e02977fe3da69221c2e | https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/deps/src/boost_1_68_0/tools/build/src/build/property_set.py#L264-L270 |
def free (self):
    """ Returns free properties which are not dependency properties.
    """
    # Filter the lazily-parsed properties, then append the eagerly
    # classified free properties.
    matched = []
    for prop in self.lazy_properties:
        if not prop.feature.incidental and prop.feature.free:
            matched.append(prop)
    matched.extend(self.free_)
    return matched
""" Returns free properties which are not dependency properties.
"""
result = [p for p in self.lazy_properties
if not p.feature.incidental and p.feature.free]
result.extend(self.free_)
return result | [
"def",
"free",
"(",
"self",
")",
":",
"result",
"=",
"[",
"p",
"for",
"p",
"in",
"self",
".",
"lazy_properties",
"if",
"not",
"p",
".",
"feature",
".",
"incidental",
"and",
"p",
".",
"feature",
".",
"free",
"]",
"result",
".",
"extend",
"(",
"self",
".",
"free_",
")",
"return",
"result"
] | Returns free properties which are not dependency properties. | [
"Returns",
"free",
"properties",
"which",
"are",
"not",
"dependency",
"properties",
"."
] | 74514c3f99e25b46f22c6e02977fe3da69221c2e | https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/deps/src/boost_1_68_0/tools/build/src/build/property_set.py#L272-L278 |
def dependency (self):
    """ Returns dependency properties.
    """
    result = [p for p in self.lazy_properties if p.feature.dependency]
    result.extend(self.dependency_)
    # Bug fix: this previously returned self.dependency_ directly,
    # silently discarding any dependency properties found among
    # lazy_properties.  Return the combined list, consistent with the
    # sibling accessors base()/free()/non_dependency()/incidental().
    return result
""" Returns dependency properties.
"""
result = [p for p in self.lazy_properties if p.feature.dependency]
result.extend(self.dependency_)
return self.dependency_ | [
"def",
"dependency",
"(",
"self",
")",
":",
"result",
"=",
"[",
"p",
"for",
"p",
"in",
"self",
".",
"lazy_properties",
"if",
"p",
".",
"feature",
".",
"dependency",
"]",
"result",
".",
"extend",
"(",
"self",
".",
"dependency_",
")",
"return",
"self",
".",
"dependency_"
] | Returns dependency properties. | [
"Returns",
"dependency",
"properties",
"."
] | 74514c3f99e25b46f22c6e02977fe3da69221c2e | https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/deps/src/boost_1_68_0/tools/build/src/build/property_set.py#L283-L288 |
def non_dependency (self):
    """ Returns properties that are not dependencies.
    """
    # Everything in lazy_properties whose feature is not a dependency,
    # followed by the eagerly classified non-dependency properties.
    out = []
    for prop in self.lazy_properties:
        if not prop.feature.dependency:
            out.append(prop)
    out.extend(self.non_dependency_)
    return out
""" Returns properties that are not dependencies.
"""
result = [p for p in self.lazy_properties if not p.feature.dependency]
result.extend(self.non_dependency_)
return result | [
"def",
"non_dependency",
"(",
"self",
")",
":",
"result",
"=",
"[",
"p",
"for",
"p",
"in",
"self",
".",
"lazy_properties",
"if",
"not",
"p",
".",
"feature",
".",
"dependency",
"]",
"result",
".",
"extend",
"(",
"self",
".",
"non_dependency_",
")",
"return",
"result"
] | Returns properties that are not dependencies. | [
"Returns",
"properties",
"that",
"are",
"not",
"dependencies",
"."
] | 74514c3f99e25b46f22c6e02977fe3da69221c2e | https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/deps/src/boost_1_68_0/tools/build/src/build/property_set.py#L290-L295 |
def incidental (self):
    """ Returns incidental properties.
    """
    # Select incidental lazily-parsed properties, then append the
    # eagerly classified incidental ones.
    picked = list(filter(lambda prop: prop.feature.incidental, self.lazy_properties))
    picked.extend(self.incidental_)
    return picked
""" Returns incidental properties.
"""
result = [p for p in self.lazy_properties if p.feature.incidental]
result.extend(self.incidental_)
return result | [
"def",
"incidental",
"(",
"self",
")",
":",
"result",
"=",
"[",
"p",
"for",
"p",
"in",
"self",
".",
"lazy_properties",
"if",
"p",
".",
"feature",
".",
"incidental",
"]",
"result",
".",
"extend",
"(",
"self",
".",
"incidental_",
")",
"return",
"result"
] | Returns incidental properties. | [
"Returns",
"incidental",
"properties",
"."
] | 74514c3f99e25b46f22c6e02977fe3da69221c2e | https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/deps/src/boost_1_68_0/tools/build/src/build/property_set.py#L307-L312 |
def refine (self, requirements):
    """ Refines this set's properties using the requirements passed as an argument.
    """
    assert isinstance(requirements, PropertySet)
    # Memoize per-requirements refinement: property.refine is relatively
    # expensive, and the same requirements set is refined repeatedly.
    cached = self.refined_.get(requirements)
    if cached is None:
        cached = create(property.refine(self.all_, requirements.all_))
        self.refined_[requirements] = cached
    return cached
""" Refines this set's properties using the requirements passed as an argument.
"""
assert isinstance(requirements, PropertySet)
if requirements not in self.refined_:
r = property.refine(self.all_, requirements.all_)
self.refined_[requirements] = create(r)
return self.refined_[requirements] | [
"def",
"refine",
"(",
"self",
",",
"requirements",
")",
":",
"assert",
"isinstance",
"(",
"requirements",
",",
"PropertySet",
")",
"if",
"requirements",
"not",
"in",
"self",
".",
"refined_",
":",
"r",
"=",
"property",
".",
"refine",
"(",
"self",
".",
"all_",
",",
"requirements",
".",
"all_",
")",
"self",
".",
"refined_",
"[",
"requirements",
"]",
"=",
"create",
"(",
"r",
")",
"return",
"self",
".",
"refined_",
"[",
"requirements",
"]"
] | Refines this set's properties using the requirements passed as an argument. | [
"Refines",
"this",
"set",
"s",
"properties",
"using",
"the",
"requirements",
"passed",
"as",
"an",
"argument",
"."
] | 74514c3f99e25b46f22c6e02977fe3da69221c2e | https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/deps/src/boost_1_68_0/tools/build/src/build/property_set.py#L314-L323 |
28,965 | apple/turicreate | deps/src/boost_1_68_0/tools/build/src/build/property_set.py | PropertySet.target_path | def target_path (self):
""" Computes the target path that should be used for
target with these properties.
Returns a tuple of
- the computed path
- if the path is relative to build directory, a value of
'true'.
"""
if not self.target_path_:
# The <location> feature can be used to explicitly
# change the location of generated targets
l = self.get ('<location>')
if l:
computed = l[0]
is_relative = False
else:
p = self.as_path()
if hash_maybe:
p = hash_maybe(p)
# Really, an ugly hack. Boost regression test system requires
# specific target paths, and it seems that changing it to handle
# other directory layout is really hard. For that reason,
# we teach V2 to do the things regression system requires.
# The value o '<location-prefix>' is predended to the path.
prefix = self.get ('<location-prefix>')
if prefix:
if len (prefix) > 1:
raise AlreadyDefined ("Two <location-prefix> properties specified: '%s'" % prefix)
computed = os.path.join(prefix[0], p)
else:
computed = p
if not computed:
computed = "."
is_relative = True
self.target_path_ = (computed, is_relative)
return self.target_path_ | python | def target_path (self):
""" Computes the target path that should be used for
target with these properties.
Returns a tuple of
- the computed path
- if the path is relative to build directory, a value of
'true'.
"""
if not self.target_path_:
# The <location> feature can be used to explicitly
# change the location of generated targets
l = self.get ('<location>')
if l:
computed = l[0]
is_relative = False
else:
p = self.as_path()
if hash_maybe:
p = hash_maybe(p)
# Really, an ugly hack. Boost regression test system requires
# specific target paths, and it seems that changing it to handle
# other directory layout is really hard. For that reason,
# we teach V2 to do the things regression system requires.
# The value o '<location-prefix>' is predended to the path.
prefix = self.get ('<location-prefix>')
if prefix:
if len (prefix) > 1:
raise AlreadyDefined ("Two <location-prefix> properties specified: '%s'" % prefix)
computed = os.path.join(prefix[0], p)
else:
computed = p
if not computed:
computed = "."
is_relative = True
self.target_path_ = (computed, is_relative)
return self.target_path_ | [
"def",
"target_path",
"(",
"self",
")",
":",
"if",
"not",
"self",
".",
"target_path_",
":",
"# The <location> feature can be used to explicitly",
"# change the location of generated targets",
"l",
"=",
"self",
".",
"get",
"(",
"'<location>'",
")",
"if",
"l",
":",
"computed",
"=",
"l",
"[",
"0",
"]",
"is_relative",
"=",
"False",
"else",
":",
"p",
"=",
"self",
".",
"as_path",
"(",
")",
"if",
"hash_maybe",
":",
"p",
"=",
"hash_maybe",
"(",
"p",
")",
"# Really, an ugly hack. Boost regression test system requires",
"# specific target paths, and it seems that changing it to handle",
"# other directory layout is really hard. For that reason,",
"# we teach V2 to do the things regression system requires.",
"# The value o '<location-prefix>' is predended to the path.",
"prefix",
"=",
"self",
".",
"get",
"(",
"'<location-prefix>'",
")",
"if",
"prefix",
":",
"if",
"len",
"(",
"prefix",
")",
">",
"1",
":",
"raise",
"AlreadyDefined",
"(",
"\"Two <location-prefix> properties specified: '%s'\"",
"%",
"prefix",
")",
"computed",
"=",
"os",
".",
"path",
".",
"join",
"(",
"prefix",
"[",
"0",
"]",
",",
"p",
")",
"else",
":",
"computed",
"=",
"p",
"if",
"not",
"computed",
":",
"computed",
"=",
"\".\"",
"is_relative",
"=",
"True",
"self",
".",
"target_path_",
"=",
"(",
"computed",
",",
"is_relative",
")",
"return",
"self",
".",
"target_path_"
] | Computes the target path that should be used for
target with these properties.
Returns a tuple of
- the computed path
- if the path is relative to build directory, a value of
'true'. | [
"Computes",
"the",
"target",
"path",
"that",
"should",
"be",
"used",
"for",
"target",
"with",
"these",
"properties",
".",
"Returns",
"a",
"tuple",
"of",
"-",
"the",
"computed",
"path",
"-",
"if",
"the",
"path",
"is",
"relative",
"to",
"build",
"directory",
"a",
"value",
"of",
"true",
"."
] | 74514c3f99e25b46f22c6e02977fe3da69221c2e | https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/deps/src/boost_1_68_0/tools/build/src/build/property_set.py#L395-L439 |
28,966 | apple/turicreate | deps/src/boost_1_68_0/tools/build/src/build/property_set.py | PropertySet.add | def add (self, ps):
""" Creates a new property set containing the properties in this one,
plus the ones of the property set passed as argument.
"""
assert isinstance(ps, PropertySet)
if ps not in self.added_:
self.added_[ps] = create(self.all_ + ps.all())
return self.added_[ps] | python | def add (self, ps):
""" Creates a new property set containing the properties in this one,
plus the ones of the property set passed as argument.
"""
assert isinstance(ps, PropertySet)
if ps not in self.added_:
self.added_[ps] = create(self.all_ + ps.all())
return self.added_[ps] | [
"def",
"add",
"(",
"self",
",",
"ps",
")",
":",
"assert",
"isinstance",
"(",
"ps",
",",
"PropertySet",
")",
"if",
"ps",
"not",
"in",
"self",
".",
"added_",
":",
"self",
".",
"added_",
"[",
"ps",
"]",
"=",
"create",
"(",
"self",
".",
"all_",
"+",
"ps",
".",
"all",
"(",
")",
")",
"return",
"self",
".",
"added_",
"[",
"ps",
"]"
] | Creates a new property set containing the properties in this one,
plus the ones of the property set passed as argument. | [
"Creates",
"a",
"new",
"property",
"set",
"containing",
"the",
"properties",
"in",
"this",
"one",
"plus",
"the",
"ones",
"of",
"the",
"property",
"set",
"passed",
"as",
"argument",
"."
] | 74514c3f99e25b46f22c6e02977fe3da69221c2e | https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/deps/src/boost_1_68_0/tools/build/src/build/property_set.py#L441-L448 |
28,967 | apple/turicreate | deps/src/boost_1_68_0/tools/build/src/build/property_set.py | PropertySet.get | def get (self, feature):
""" Returns all values of 'feature'.
"""
if type(feature) == type([]):
feature = feature[0]
if not isinstance(feature, b2.build.feature.Feature):
feature = b2.build.feature.get(feature)
assert isinstance(feature, b2.build.feature.Feature)
if self.feature_map_ is None:
self.feature_map_ = {}
for v in self.all_:
if v.feature not in self.feature_map_:
self.feature_map_[v.feature] = []
self.feature_map_[v.feature].append(v.value)
return self.feature_map_.get(feature, []) | python | def get (self, feature):
""" Returns all values of 'feature'.
"""
if type(feature) == type([]):
feature = feature[0]
if not isinstance(feature, b2.build.feature.Feature):
feature = b2.build.feature.get(feature)
assert isinstance(feature, b2.build.feature.Feature)
if self.feature_map_ is None:
self.feature_map_ = {}
for v in self.all_:
if v.feature not in self.feature_map_:
self.feature_map_[v.feature] = []
self.feature_map_[v.feature].append(v.value)
return self.feature_map_.get(feature, []) | [
"def",
"get",
"(",
"self",
",",
"feature",
")",
":",
"if",
"type",
"(",
"feature",
")",
"==",
"type",
"(",
"[",
"]",
")",
":",
"feature",
"=",
"feature",
"[",
"0",
"]",
"if",
"not",
"isinstance",
"(",
"feature",
",",
"b2",
".",
"build",
".",
"feature",
".",
"Feature",
")",
":",
"feature",
"=",
"b2",
".",
"build",
".",
"feature",
".",
"get",
"(",
"feature",
")",
"assert",
"isinstance",
"(",
"feature",
",",
"b2",
".",
"build",
".",
"feature",
".",
"Feature",
")",
"if",
"self",
".",
"feature_map_",
"is",
"None",
":",
"self",
".",
"feature_map_",
"=",
"{",
"}",
"for",
"v",
"in",
"self",
".",
"all_",
":",
"if",
"v",
".",
"feature",
"not",
"in",
"self",
".",
"feature_map_",
":",
"self",
".",
"feature_map_",
"[",
"v",
".",
"feature",
"]",
"=",
"[",
"]",
"self",
".",
"feature_map_",
"[",
"v",
".",
"feature",
"]",
".",
"append",
"(",
"v",
".",
"value",
")",
"return",
"self",
".",
"feature_map_",
".",
"get",
"(",
"feature",
",",
"[",
"]",
")"
] | Returns all values of 'feature'. | [
"Returns",
"all",
"values",
"of",
"feature",
"."
] | 74514c3f99e25b46f22c6e02977fe3da69221c2e | https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/deps/src/boost_1_68_0/tools/build/src/build/property_set.py#L457-L474 |
28,968 | apple/turicreate | deps/src/boost_1_68_0/tools/build/src/build/property_set.py | PropertySet.get_properties | def get_properties(self, feature):
"""Returns all contained properties associated with 'feature'"""
if not isinstance(feature, b2.build.feature.Feature):
feature = b2.build.feature.get(feature)
assert isinstance(feature, b2.build.feature.Feature)
result = []
for p in self.all_:
if p.feature == feature:
result.append(p)
return result | python | def get_properties(self, feature):
"""Returns all contained properties associated with 'feature'"""
if not isinstance(feature, b2.build.feature.Feature):
feature = b2.build.feature.get(feature)
assert isinstance(feature, b2.build.feature.Feature)
result = []
for p in self.all_:
if p.feature == feature:
result.append(p)
return result | [
"def",
"get_properties",
"(",
"self",
",",
"feature",
")",
":",
"if",
"not",
"isinstance",
"(",
"feature",
",",
"b2",
".",
"build",
".",
"feature",
".",
"Feature",
")",
":",
"feature",
"=",
"b2",
".",
"build",
".",
"feature",
".",
"get",
"(",
"feature",
")",
"assert",
"isinstance",
"(",
"feature",
",",
"b2",
".",
"build",
".",
"feature",
".",
"Feature",
")",
"result",
"=",
"[",
"]",
"for",
"p",
"in",
"self",
".",
"all_",
":",
"if",
"p",
".",
"feature",
"==",
"feature",
":",
"result",
".",
"append",
"(",
"p",
")",
"return",
"result"
] | Returns all contained properties associated with 'feature | [
"Returns",
"all",
"contained",
"properties",
"associated",
"with",
"feature"
] | 74514c3f99e25b46f22c6e02977fe3da69221c2e | https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/deps/src/boost_1_68_0/tools/build/src/build/property_set.py#L477-L487 |
28,969 | apple/turicreate | src/unity/python/turicreate/toolkits/recommender/util.py | _create | def _create(observation_data,
user_id='user_id', item_id='item_id', target=None,
user_data=None, item_data=None,
ranking=True,
verbose=True):
"""
A unified interface for training recommender models. Based on simple
characteristics of the data, a type of model is selected and trained. The
trained model can be used to predict ratings and make recommendations.
To use specific options of a desired model, use the ``create`` function
of the corresponding model.
Parameters
----------
observation_data : SFrame
The dataset to use for training the model. It must contain a column of
user ids and a column of item ids. Each row represents an observed
interaction between the user and the item. The (user, item) pairs
are stored with the model so that they can later be excluded from
recommendations if desired. It can optionally contain a target ratings
column. All other columns are interpreted by the underlying model as
side features for the observations.
The user id and item id columns must be of type 'int' or 'str'. The
target column must be of type 'int' or 'float'.
user_id : string, optional
The name of the column in `observation_data` that corresponds to the
user id.
item_id : string, optional
The name of the column in `observation_data` that corresponds to the
item id.
target : string, optional
Name of the column in `observation_data` containing ratings given by
users to items, if applicable.
user_data : SFrame, optional
Side information for the users. This SFrame must have a column with the
same name as what is specified by the `user_id` input parameter.
`user_data` can provide any amount of additional user-specific
information.
item_data : SFrame, optional
Side information for the items. This SFrame must have a column with the
same name as what is specified by the `item_id` input parameter.
`item_data` can provide any amount of additional item-specific
information.
ranking : bool, optional
Determine whether or not the goal is to rank items for each user.
verbose : bool, optional
Enables verbose output.
Returns
-------
out : A trained model.
- If a target column is given, then
:class:`turicreate.recommender.factorization_recommender.FactorizationRecommender`.
- If no target column is given, then
:class:`turicreate.recommender.item_similarity_recommender.ItemSimilarityRecommender`.
Examples
--------
**Basic usage**
Given basic user-item observation data, an
:class:`~turicreate.recommender.item_similarity_recommender.ItemSimilarityRecommender` is created:
>>> sf = turicreate.SFrame({'user_id': ['0', '0', '0', '1', '1', '2', '2', '2'],
... 'item_id': ['a', 'b', 'c', 'a', 'b', 'b', 'c', 'd']})
>>> m = turicreate.recommender.create(sf)
>>> recs = m.recommend()
**Creating a model for ratings data**
This trains a :class:`~turicreate.recommender.factorization_recommender.FactorizationRecommender` that
can predict target ratings:
>>> sf2 = turicreate.SFrame({'user_id': ['0', '0', '0', '1', '1', '2', '2', '2'],
... 'item_id': ['a', 'b', 'c', 'a', 'b', 'b', 'c', 'd'],
... 'rating': [1, 3, 2, 5, 4, 1, 4, 3]})
>>> m2 = turicreate.recommender.create(sf2, target="rating", ranking = False)
**Creating specific models**
Specific models allow for a number of additional options during create.
The available recommenders are all in the turicreate.recommender namespace.
For the complete list of acceptable options, please refer to the documentation
for individual models. Such options can be passed to the underlying model
just like any other parameter. For example, the following code creates
an :class:`~turicreate.recommender.ItemSimilarityRecommender` with a space-saving
option called `only_top_k`. The returned model stores only the 2 most
similar items for item:
>>> from turicreate.recommender import item_similarity_recommender
>>> item_similarity_recommender.create(sf, only_top_k=2)
"""
if not (isinstance(observation_data, _SFrame)):
raise TypeError('observation_data input must be a SFrame')
side_data = (user_data is not None) or (item_data is not None)
if user_data is not None:
if not isinstance(user_data, _SFrame):
raise TypeError('Provided user_data must be an SFrame.')
if item_data is not None:
if not isinstance(item_data, _SFrame):
raise TypeError('Provided item_data must be an SFrame.')
if target is None:
if ranking:
if side_data:
method = 'ranking_factorization_recommender'
else:
method = 'item_similarity'
else:
if side_data:
method = 'ranking_factorization_recommender'
else:
method = 'item_similarity'
else:
if ranking:
if side_data:
method = 'ranking_factorization_recommender'
else:
method = 'ranking_factorization_recommender'
else:
if side_data:
method = 'factorization_recommender'
else:
method = 'factorization_recommender'
opts = {'observation_data': observation_data,
'user_id': user_id,
'item_id': item_id,
'target': target,
'user_data': user_data,
'item_data': item_data}
if method == "item_similarity":
return _turicreate.recommender.item_similarity_recommender.create(**opts)
elif method == "factorization_recommender":
return _turicreate.recommender.factorization_recommender.create(**opts)
elif method == "ranking_factorization_recommender":
return _turicreate.recommender.ranking_factorization_recommender.create(**opts)
else:
raise RuntimeError("Provided method not recognized.") | python | def _create(observation_data,
user_id='user_id', item_id='item_id', target=None,
user_data=None, item_data=None,
ranking=True,
verbose=True):
"""
A unified interface for training recommender models. Based on simple
characteristics of the data, a type of model is selected and trained. The
trained model can be used to predict ratings and make recommendations.
To use specific options of a desired model, use the ``create`` function
of the corresponding model.
Parameters
----------
observation_data : SFrame
The dataset to use for training the model. It must contain a column of
user ids and a column of item ids. Each row represents an observed
interaction between the user and the item. The (user, item) pairs
are stored with the model so that they can later be excluded from
recommendations if desired. It can optionally contain a target ratings
column. All other columns are interpreted by the underlying model as
side features for the observations.
The user id and item id columns must be of type 'int' or 'str'. The
target column must be of type 'int' or 'float'.
user_id : string, optional
The name of the column in `observation_data` that corresponds to the
user id.
item_id : string, optional
The name of the column in `observation_data` that corresponds to the
item id.
target : string, optional
Name of the column in `observation_data` containing ratings given by
users to items, if applicable.
user_data : SFrame, optional
Side information for the users. This SFrame must have a column with the
same name as what is specified by the `user_id` input parameter.
`user_data` can provide any amount of additional user-specific
information.
item_data : SFrame, optional
Side information for the items. This SFrame must have a column with the
same name as what is specified by the `item_id` input parameter.
`item_data` can provide any amount of additional item-specific
information.
ranking : bool, optional
Determine whether or not the goal is to rank items for each user.
verbose : bool, optional
Enables verbose output.
Returns
-------
out : A trained model.
- If a target column is given, then
:class:`turicreate.recommender.factorization_recommender.FactorizationRecommender`.
- If no target column is given, then
:class:`turicreate.recommender.item_similarity_recommender.ItemSimilarityRecommender`.
Examples
--------
**Basic usage**
Given basic user-item observation data, an
:class:`~turicreate.recommender.item_similarity_recommender.ItemSimilarityRecommender` is created:
>>> sf = turicreate.SFrame({'user_id': ['0', '0', '0', '1', '1', '2', '2', '2'],
... 'item_id': ['a', 'b', 'c', 'a', 'b', 'b', 'c', 'd']})
>>> m = turicreate.recommender.create(sf)
>>> recs = m.recommend()
**Creating a model for ratings data**
This trains a :class:`~turicreate.recommender.factorization_recommender.FactorizationRecommender` that
can predict target ratings:
>>> sf2 = turicreate.SFrame({'user_id': ['0', '0', '0', '1', '1', '2', '2', '2'],
... 'item_id': ['a', 'b', 'c', 'a', 'b', 'b', 'c', 'd'],
... 'rating': [1, 3, 2, 5, 4, 1, 4, 3]})
>>> m2 = turicreate.recommender.create(sf2, target="rating", ranking = False)
**Creating specific models**
Specific models allow for a number of additional options during create.
The available recommenders are all in the turicreate.recommender namespace.
For the complete list of acceptable options, please refer to the documentation
for individual models. Such options can be passed to the underlying model
just like any other parameter. For example, the following code creates
an :class:`~turicreate.recommender.ItemSimilarityRecommender` with a space-saving
option called `only_top_k`. The returned model stores only the 2 most
similar items for item:
>>> from turicreate.recommender import item_similarity_recommender
>>> item_similarity_recommender.create(sf, only_top_k=2)
"""
if not (isinstance(observation_data, _SFrame)):
raise TypeError('observation_data input must be a SFrame')
side_data = (user_data is not None) or (item_data is not None)
if user_data is not None:
if not isinstance(user_data, _SFrame):
raise TypeError('Provided user_data must be an SFrame.')
if item_data is not None:
if not isinstance(item_data, _SFrame):
raise TypeError('Provided item_data must be an SFrame.')
if target is None:
if ranking:
if side_data:
method = 'ranking_factorization_recommender'
else:
method = 'item_similarity'
else:
if side_data:
method = 'ranking_factorization_recommender'
else:
method = 'item_similarity'
else:
if ranking:
if side_data:
method = 'ranking_factorization_recommender'
else:
method = 'ranking_factorization_recommender'
else:
if side_data:
method = 'factorization_recommender'
else:
method = 'factorization_recommender'
opts = {'observation_data': observation_data,
'user_id': user_id,
'item_id': item_id,
'target': target,
'user_data': user_data,
'item_data': item_data}
if method == "item_similarity":
return _turicreate.recommender.item_similarity_recommender.create(**opts)
elif method == "factorization_recommender":
return _turicreate.recommender.factorization_recommender.create(**opts)
elif method == "ranking_factorization_recommender":
return _turicreate.recommender.ranking_factorization_recommender.create(**opts)
else:
raise RuntimeError("Provided method not recognized.") | [
"def",
"_create",
"(",
"observation_data",
",",
"user_id",
"=",
"'user_id'",
",",
"item_id",
"=",
"'item_id'",
",",
"target",
"=",
"None",
",",
"user_data",
"=",
"None",
",",
"item_data",
"=",
"None",
",",
"ranking",
"=",
"True",
",",
"verbose",
"=",
"True",
")",
":",
"if",
"not",
"(",
"isinstance",
"(",
"observation_data",
",",
"_SFrame",
")",
")",
":",
"raise",
"TypeError",
"(",
"'observation_data input must be a SFrame'",
")",
"side_data",
"=",
"(",
"user_data",
"is",
"not",
"None",
")",
"or",
"(",
"item_data",
"is",
"not",
"None",
")",
"if",
"user_data",
"is",
"not",
"None",
":",
"if",
"not",
"isinstance",
"(",
"user_data",
",",
"_SFrame",
")",
":",
"raise",
"TypeError",
"(",
"'Provided user_data must be an SFrame.'",
")",
"if",
"item_data",
"is",
"not",
"None",
":",
"if",
"not",
"isinstance",
"(",
"item_data",
",",
"_SFrame",
")",
":",
"raise",
"TypeError",
"(",
"'Provided item_data must be an SFrame.'",
")",
"if",
"target",
"is",
"None",
":",
"if",
"ranking",
":",
"if",
"side_data",
":",
"method",
"=",
"'ranking_factorization_recommender'",
"else",
":",
"method",
"=",
"'item_similarity'",
"else",
":",
"if",
"side_data",
":",
"method",
"=",
"'ranking_factorization_recommender'",
"else",
":",
"method",
"=",
"'item_similarity'",
"else",
":",
"if",
"ranking",
":",
"if",
"side_data",
":",
"method",
"=",
"'ranking_factorization_recommender'",
"else",
":",
"method",
"=",
"'ranking_factorization_recommender'",
"else",
":",
"if",
"side_data",
":",
"method",
"=",
"'factorization_recommender'",
"else",
":",
"method",
"=",
"'factorization_recommender'",
"opts",
"=",
"{",
"'observation_data'",
":",
"observation_data",
",",
"'user_id'",
":",
"user_id",
",",
"'item_id'",
":",
"item_id",
",",
"'target'",
":",
"target",
",",
"'user_data'",
":",
"user_data",
",",
"'item_data'",
":",
"item_data",
"}",
"if",
"method",
"==",
"\"item_similarity\"",
":",
"return",
"_turicreate",
".",
"recommender",
".",
"item_similarity_recommender",
".",
"create",
"(",
"*",
"*",
"opts",
")",
"elif",
"method",
"==",
"\"factorization_recommender\"",
":",
"return",
"_turicreate",
".",
"recommender",
".",
"factorization_recommender",
".",
"create",
"(",
"*",
"*",
"opts",
")",
"elif",
"method",
"==",
"\"ranking_factorization_recommender\"",
":",
"return",
"_turicreate",
".",
"recommender",
".",
"ranking_factorization_recommender",
".",
"create",
"(",
"*",
"*",
"opts",
")",
"else",
":",
"raise",
"RuntimeError",
"(",
"\"Provided method not recognized.\"",
")"
] | A unified interface for training recommender models. Based on simple
characteristics of the data, a type of model is selected and trained. The
trained model can be used to predict ratings and make recommendations.
To use specific options of a desired model, use the ``create`` function
of the corresponding model.
Parameters
----------
observation_data : SFrame
The dataset to use for training the model. It must contain a column of
user ids and a column of item ids. Each row represents an observed
interaction between the user and the item. The (user, item) pairs
are stored with the model so that they can later be excluded from
recommendations if desired. It can optionally contain a target ratings
column. All other columns are interpreted by the underlying model as
side features for the observations.
The user id and item id columns must be of type 'int' or 'str'. The
target column must be of type 'int' or 'float'.
user_id : string, optional
The name of the column in `observation_data` that corresponds to the
user id.
item_id : string, optional
The name of the column in `observation_data` that corresponds to the
item id.
target : string, optional
Name of the column in `observation_data` containing ratings given by
users to items, if applicable.
user_data : SFrame, optional
Side information for the users. This SFrame must have a column with the
same name as what is specified by the `user_id` input parameter.
`user_data` can provide any amount of additional user-specific
information.
item_data : SFrame, optional
Side information for the items. This SFrame must have a column with the
same name as what is specified by the `item_id` input parameter.
`item_data` can provide any amount of additional item-specific
information.
ranking : bool, optional
Determine whether or not the goal is to rank items for each user.
verbose : bool, optional
Enables verbose output.
Returns
-------
out : A trained model.
- If a target column is given, then
:class:`turicreate.recommender.factorization_recommender.FactorizationRecommender`.
- If no target column is given, then
:class:`turicreate.recommender.item_similarity_recommender.ItemSimilarityRecommender`.
Examples
--------
**Basic usage**
Given basic user-item observation data, an
:class:`~turicreate.recommender.item_similarity_recommender.ItemSimilarityRecommender` is created:
>>> sf = turicreate.SFrame({'user_id': ['0', '0', '0', '1', '1', '2', '2', '2'],
... 'item_id': ['a', 'b', 'c', 'a', 'b', 'b', 'c', 'd']})
>>> m = turicreate.recommender.create(sf)
>>> recs = m.recommend()
**Creating a model for ratings data**
This trains a :class:`~turicreate.recommender.factorization_recommender.FactorizationRecommender` that
can predict target ratings:
>>> sf2 = turicreate.SFrame({'user_id': ['0', '0', '0', '1', '1', '2', '2', '2'],
... 'item_id': ['a', 'b', 'c', 'a', 'b', 'b', 'c', 'd'],
... 'rating': [1, 3, 2, 5, 4, 1, 4, 3]})
>>> m2 = turicreate.recommender.create(sf2, target="rating", ranking = False)
**Creating specific models**
Specific models allow for a number of additional options during create.
The available recommenders are all in the turicreate.recommender namespace.
For the complete list of acceptable options, please refer to the documentation
for individual models. Such options can be passed to the underlying model
just like any other parameter. For example, the following code creates
an :class:`~turicreate.recommender.ItemSimilarityRecommender` with a space-saving
option called `only_top_k`. The returned model stores only the 2 most
similar items for item:
>>> from turicreate.recommender import item_similarity_recommender
>>> item_similarity_recommender.create(sf, only_top_k=2) | [
"A",
"unified",
"interface",
"for",
"training",
"recommender",
"models",
".",
"Based",
"on",
"simple",
"characteristics",
"of",
"the",
"data",
"a",
"type",
"of",
"model",
"is",
"selected",
"and",
"trained",
".",
"The",
"trained",
"model",
"can",
"be",
"used",
"to",
"predict",
"ratings",
"and",
"make",
"recommendations",
"."
] | 74514c3f99e25b46f22c6e02977fe3da69221c2e | https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/unity/python/turicreate/toolkits/recommender/util.py#L24-L175 |
28,970 | apple/turicreate | src/unity/python/turicreate/toolkits/recommender/util.py | compare_models | def compare_models(dataset, models, model_names=None, user_sample=1.0,
metric='auto',
target=None,
exclude_known_for_precision_recall=True,
make_plot=False,
verbose=True,
**kwargs):
"""
Compare the prediction or recommendation performance of recommender models
on a common test dataset.
Models that are trained to predict ratings are compared separately from
models that are trained without target ratings. The ratings prediction
models are compared on root-mean-squared error, and the rest are compared on
precision-recall.
Parameters
----------
dataset : SFrame
The dataset to use for model evaluation.
models : list[recommender models]
List of trained recommender models.
model_names : list[str], optional
List of model name strings for display.
user_sample : float, optional
Sampling proportion of unique users to use in estimating model
performance. Defaults to 1.0, i.e. use all users in the dataset.
metric : str, {'auto', 'rmse', 'precision_recall'}, optional
Metric for the evaluation. The default automatically splits
models into two groups with their default evaluation metric respectively:
'rmse' for models trained with a target, and 'precision_recall'
otherwise.
target : str, optional
The name of the target column for evaluating rmse. If the model is
trained with a target column, the default is to using the same column.
If the model is trained without a target column and `metric='rmse'`,
then this option must be provided by user.
exclude_known_for_precision_recall : bool, optional
A useful option when `metric='precision_recall'`. Recommender models
automatically exclude items seen in the training data from the
final recommendation list. If the input evaluation `dataset` is the
same as the data used for training the models, set this option to False.
verbose : bool, optional
If true, print the progress.
Returns
-------
out : list[SFrame]
A list of results where each one is an sframe of evaluation results of
the respective model on the given dataset
Examples
--------
If you have created two ItemSimilarityRecommenders ``m1`` and ``m2`` and have
an :class:`~turicreate.SFrame` ``test_data``, then you may compare the
performance of the two models on test data using:
>>> import turicreate
>>> train_data = turicreate.SFrame({'user_id': ["0", "0", "0", "1", "1", "2", "2", "2"],
... 'item_id': ["a", "c", "e", "b", "f", "b", "c", "d"]})
>>> test_data = turicreate.SFrame({'user_id': ["0", "0", "1", "1", "1", "2", "2"],
... 'item_id': ["b", "d", "a", "c", "e", "a", "e"]})
>>> m1 = turicreate.item_similarity_recommender.create(train_data)
>>> m2 = turicreate.item_similarity_recommender.create(train_data, only_top_k=1)
>>> turicreate.recommender.util.compare_models(test_data, [m1, m2], model_names=["m1", "m2"])
The evaluation metric is automatically set to 'precision_recall', and the
evaluation will be based on recommendations that exclude items seen in the
training data.
If you want to evaluate on the original training set:
>>> turicreate.recommender.util.compare_models(train_data, [m1, m2],
... exclude_known_for_precision_recall=False)
Suppose you have four models, two trained with a target rating column, and
the other two trained without a target. By default, the models are put into
two different groups with "rmse", and "precision-recall" as the evaluation
metric respectively.
>>> train_data2 = turicreate.SFrame({'user_id': ["0", "0", "0", "1", "1", "2", "2", "2"],
... 'item_id': ["a", "c", "e", "b", "f", "b", "c", "d"],
... 'rating': [1, 3, 4, 5, 3, 4, 2, 5]})
>>> test_data2 = turicreate.SFrame({'user_id': ["0", "0", "1", "1", "1", "2", "2"],
... 'item_id': ["b", "d", "a", "c", "e", "a", "e"],
... 'rating': [3, 5, 4, 4, 3, 5, 2]})
>>> m3 = turicreate.factorization_recommender.create(train_data2, target='rating')
>>> m4 = turicreate.factorization_recommender.create(train_data2, target='rating')
>>> turicreate.recommender.util.compare_models(test_data2, [m3, m4])
To compare all four models using the same 'precision_recall' metric, you can
do:
>>> turicreate.recommender.util.compare_models(test_data2, [m1, m2, m3, m4],
... metric='precision_recall')
"""
num_models = len(models)
if model_names is None:
model_names = ['M' + str(i) for i in range(len(models))]
if num_models < 1:
raise ValueError("Must pass in at least one recommender model to \
evaluate")
if model_names is not None and len(model_names) != num_models:
raise ValueError("Must pass in the same number of model names as \
models")
# if we are asked to sample the users, come up with a list of unique users
if user_sample < 1.0:
user_id_name = models[0].user_id
if user_id_name is None:
raise ValueError("user_id not set in model(s)")
user_sa = dataset[user_id_name]
unique_users = list(user_sa.unique())
nusers = len(unique_users)
ntake = int(round(user_sample * nusers))
_random.shuffle(unique_users)
users = unique_users[:ntake]
print("compare_models: using", ntake, "users to estimate model performance")
users = frozenset(users)
ix = [u in users for u in dataset[user_id_name]]
dataset_subset = dataset[_SArray(ix) == True]
else:
dataset_subset = dataset
results = []
for (m, mname) in zip(models, model_names):
if verbose:
print('PROGRESS: Evaluate model %s' % mname)
r = m.evaluate(dataset_subset,
metric,
exclude_known_for_precision_recall,
target,
verbose=verbose,
cutoffs=list(range(1,11,1))+list(range(11,50,5)),
**kwargs)
results.append(r)
return results | python | def compare_models(dataset, models, model_names=None, user_sample=1.0,
metric='auto',
target=None,
exclude_known_for_precision_recall=True,
make_plot=False,
verbose=True,
**kwargs):
"""
Compare the prediction or recommendation performance of recommender models
on a common test dataset.
Models that are trained to predict ratings are compared separately from
models that are trained without target ratings. The ratings prediction
models are compared on root-mean-squared error, and the rest are compared on
precision-recall.
Parameters
----------
dataset : SFrame
The dataset to use for model evaluation.
models : list[recommender models]
List of trained recommender models.
model_names : list[str], optional
List of model name strings for display.
user_sample : float, optional
Sampling proportion of unique users to use in estimating model
performance. Defaults to 1.0, i.e. use all users in the dataset.
metric : str, {'auto', 'rmse', 'precision_recall'}, optional
Metric for the evaluation. The default automatically splits
models into two groups with their default evaluation metric respectively:
'rmse' for models trained with a target, and 'precision_recall'
otherwise.
target : str, optional
The name of the target column for evaluating rmse. If the model is
trained with a target column, the default is to using the same column.
If the model is trained without a target column and `metric='rmse'`,
then this option must be provided by user.
exclude_known_for_precision_recall : bool, optional
A useful option when `metric='precision_recall'`. Recommender models
automatically exclude items seen in the training data from the
final recommendation list. If the input evaluation `dataset` is the
same as the data used for training the models, set this option to False.
verbose : bool, optional
If true, print the progress.
Returns
-------
out : list[SFrame]
A list of results where each one is an sframe of evaluation results of
the respective model on the given dataset
Examples
--------
If you have created two ItemSimilarityRecommenders ``m1`` and ``m2`` and have
an :class:`~turicreate.SFrame` ``test_data``, then you may compare the
performance of the two models on test data using:
>>> import turicreate
>>> train_data = turicreate.SFrame({'user_id': ["0", "0", "0", "1", "1", "2", "2", "2"],
... 'item_id': ["a", "c", "e", "b", "f", "b", "c", "d"]})
>>> test_data = turicreate.SFrame({'user_id': ["0", "0", "1", "1", "1", "2", "2"],
... 'item_id': ["b", "d", "a", "c", "e", "a", "e"]})
>>> m1 = turicreate.item_similarity_recommender.create(train_data)
>>> m2 = turicreate.item_similarity_recommender.create(train_data, only_top_k=1)
>>> turicreate.recommender.util.compare_models(test_data, [m1, m2], model_names=["m1", "m2"])
The evaluation metric is automatically set to 'precision_recall', and the
evaluation will be based on recommendations that exclude items seen in the
training data.
If you want to evaluate on the original training set:
>>> turicreate.recommender.util.compare_models(train_data, [m1, m2],
... exclude_known_for_precision_recall=False)
Suppose you have four models, two trained with a target rating column, and
the other two trained without a target. By default, the models are put into
two different groups with "rmse", and "precision-recall" as the evaluation
metric respectively.
>>> train_data2 = turicreate.SFrame({'user_id': ["0", "0", "0", "1", "1", "2", "2", "2"],
... 'item_id': ["a", "c", "e", "b", "f", "b", "c", "d"],
... 'rating': [1, 3, 4, 5, 3, 4, 2, 5]})
>>> test_data2 = turicreate.SFrame({'user_id': ["0", "0", "1", "1", "1", "2", "2"],
... 'item_id': ["b", "d", "a", "c", "e", "a", "e"],
... 'rating': [3, 5, 4, 4, 3, 5, 2]})
>>> m3 = turicreate.factorization_recommender.create(train_data2, target='rating')
>>> m4 = turicreate.factorization_recommender.create(train_data2, target='rating')
>>> turicreate.recommender.util.compare_models(test_data2, [m3, m4])
To compare all four models using the same 'precision_recall' metric, you can
do:
>>> turicreate.recommender.util.compare_models(test_data2, [m1, m2, m3, m4],
... metric='precision_recall')
"""
num_models = len(models)
if model_names is None:
model_names = ['M' + str(i) for i in range(len(models))]
if num_models < 1:
raise ValueError("Must pass in at least one recommender model to \
evaluate")
if model_names is not None and len(model_names) != num_models:
raise ValueError("Must pass in the same number of model names as \
models")
# if we are asked to sample the users, come up with a list of unique users
if user_sample < 1.0:
user_id_name = models[0].user_id
if user_id_name is None:
raise ValueError("user_id not set in model(s)")
user_sa = dataset[user_id_name]
unique_users = list(user_sa.unique())
nusers = len(unique_users)
ntake = int(round(user_sample * nusers))
_random.shuffle(unique_users)
users = unique_users[:ntake]
print("compare_models: using", ntake, "users to estimate model performance")
users = frozenset(users)
ix = [u in users for u in dataset[user_id_name]]
dataset_subset = dataset[_SArray(ix) == True]
else:
dataset_subset = dataset
results = []
for (m, mname) in zip(models, model_names):
if verbose:
print('PROGRESS: Evaluate model %s' % mname)
r = m.evaluate(dataset_subset,
metric,
exclude_known_for_precision_recall,
target,
verbose=verbose,
cutoffs=list(range(1,11,1))+list(range(11,50,5)),
**kwargs)
results.append(r)
return results | [
"def",
"compare_models",
"(",
"dataset",
",",
"models",
",",
"model_names",
"=",
"None",
",",
"user_sample",
"=",
"1.0",
",",
"metric",
"=",
"'auto'",
",",
"target",
"=",
"None",
",",
"exclude_known_for_precision_recall",
"=",
"True",
",",
"make_plot",
"=",
"False",
",",
"verbose",
"=",
"True",
",",
"*",
"*",
"kwargs",
")",
":",
"num_models",
"=",
"len",
"(",
"models",
")",
"if",
"model_names",
"is",
"None",
":",
"model_names",
"=",
"[",
"'M'",
"+",
"str",
"(",
"i",
")",
"for",
"i",
"in",
"range",
"(",
"len",
"(",
"models",
")",
")",
"]",
"if",
"num_models",
"<",
"1",
":",
"raise",
"ValueError",
"(",
"\"Must pass in at least one recommender model to \\\n evaluate\"",
")",
"if",
"model_names",
"is",
"not",
"None",
"and",
"len",
"(",
"model_names",
")",
"!=",
"num_models",
":",
"raise",
"ValueError",
"(",
"\"Must pass in the same number of model names as \\\n models\"",
")",
"# if we are asked to sample the users, come up with a list of unique users",
"if",
"user_sample",
"<",
"1.0",
":",
"user_id_name",
"=",
"models",
"[",
"0",
"]",
".",
"user_id",
"if",
"user_id_name",
"is",
"None",
":",
"raise",
"ValueError",
"(",
"\"user_id not set in model(s)\"",
")",
"user_sa",
"=",
"dataset",
"[",
"user_id_name",
"]",
"unique_users",
"=",
"list",
"(",
"user_sa",
".",
"unique",
"(",
")",
")",
"nusers",
"=",
"len",
"(",
"unique_users",
")",
"ntake",
"=",
"int",
"(",
"round",
"(",
"user_sample",
"*",
"nusers",
")",
")",
"_random",
".",
"shuffle",
"(",
"unique_users",
")",
"users",
"=",
"unique_users",
"[",
":",
"ntake",
"]",
"print",
"(",
"\"compare_models: using\"",
",",
"ntake",
",",
"\"users to estimate model performance\"",
")",
"users",
"=",
"frozenset",
"(",
"users",
")",
"ix",
"=",
"[",
"u",
"in",
"users",
"for",
"u",
"in",
"dataset",
"[",
"user_id_name",
"]",
"]",
"dataset_subset",
"=",
"dataset",
"[",
"_SArray",
"(",
"ix",
")",
"==",
"True",
"]",
"else",
":",
"dataset_subset",
"=",
"dataset",
"results",
"=",
"[",
"]",
"for",
"(",
"m",
",",
"mname",
")",
"in",
"zip",
"(",
"models",
",",
"model_names",
")",
":",
"if",
"verbose",
":",
"print",
"(",
"'PROGRESS: Evaluate model %s'",
"%",
"mname",
")",
"r",
"=",
"m",
".",
"evaluate",
"(",
"dataset_subset",
",",
"metric",
",",
"exclude_known_for_precision_recall",
",",
"target",
",",
"verbose",
"=",
"verbose",
",",
"cutoffs",
"=",
"list",
"(",
"range",
"(",
"1",
",",
"11",
",",
"1",
")",
")",
"+",
"list",
"(",
"range",
"(",
"11",
",",
"50",
",",
"5",
")",
")",
",",
"*",
"*",
"kwargs",
")",
"results",
".",
"append",
"(",
"r",
")",
"return",
"results"
] | Compare the prediction or recommendation performance of recommender models
on a common test dataset.
Models that are trained to predict ratings are compared separately from
models that are trained without target ratings. The ratings prediction
models are compared on root-mean-squared error, and the rest are compared on
precision-recall.
Parameters
----------
dataset : SFrame
The dataset to use for model evaluation.
models : list[recommender models]
List of trained recommender models.
model_names : list[str], optional
List of model name strings for display.
user_sample : float, optional
Sampling proportion of unique users to use in estimating model
performance. Defaults to 1.0, i.e. use all users in the dataset.
metric : str, {'auto', 'rmse', 'precision_recall'}, optional
Metric for the evaluation. The default automatically splits
models into two groups with their default evaluation metric respectively:
'rmse' for models trained with a target, and 'precision_recall'
otherwise.
target : str, optional
The name of the target column for evaluating rmse. If the model is
trained with a target column, the default is to using the same column.
If the model is trained without a target column and `metric='rmse'`,
then this option must be provided by user.
exclude_known_for_precision_recall : bool, optional
A useful option when `metric='precision_recall'`. Recommender models
automatically exclude items seen in the training data from the
final recommendation list. If the input evaluation `dataset` is the
same as the data used for training the models, set this option to False.
verbose : bool, optional
If true, print the progress.
Returns
-------
out : list[SFrame]
A list of results where each one is an sframe of evaluation results of
the respective model on the given dataset
Examples
--------
If you have created two ItemSimilarityRecommenders ``m1`` and ``m2`` and have
an :class:`~turicreate.SFrame` ``test_data``, then you may compare the
performance of the two models on test data using:
>>> import turicreate
>>> train_data = turicreate.SFrame({'user_id': ["0", "0", "0", "1", "1", "2", "2", "2"],
... 'item_id': ["a", "c", "e", "b", "f", "b", "c", "d"]})
>>> test_data = turicreate.SFrame({'user_id': ["0", "0", "1", "1", "1", "2", "2"],
... 'item_id': ["b", "d", "a", "c", "e", "a", "e"]})
>>> m1 = turicreate.item_similarity_recommender.create(train_data)
>>> m2 = turicreate.item_similarity_recommender.create(train_data, only_top_k=1)
>>> turicreate.recommender.util.compare_models(test_data, [m1, m2], model_names=["m1", "m2"])
The evaluation metric is automatically set to 'precision_recall', and the
evaluation will be based on recommendations that exclude items seen in the
training data.
If you want to evaluate on the original training set:
>>> turicreate.recommender.util.compare_models(train_data, [m1, m2],
... exclude_known_for_precision_recall=False)
Suppose you have four models, two trained with a target rating column, and
the other two trained without a target. By default, the models are put into
two different groups with "rmse", and "precision-recall" as the evaluation
metric respectively.
>>> train_data2 = turicreate.SFrame({'user_id': ["0", "0", "0", "1", "1", "2", "2", "2"],
... 'item_id': ["a", "c", "e", "b", "f", "b", "c", "d"],
... 'rating': [1, 3, 4, 5, 3, 4, 2, 5]})
>>> test_data2 = turicreate.SFrame({'user_id': ["0", "0", "1", "1", "1", "2", "2"],
... 'item_id': ["b", "d", "a", "c", "e", "a", "e"],
... 'rating': [3, 5, 4, 4, 3, 5, 2]})
>>> m3 = turicreate.factorization_recommender.create(train_data2, target='rating')
>>> m4 = turicreate.factorization_recommender.create(train_data2, target='rating')
>>> turicreate.recommender.util.compare_models(test_data2, [m3, m4])
To compare all four models using the same 'precision_recall' metric, you can
do:
>>> turicreate.recommender.util.compare_models(test_data2, [m1, m2, m3, m4],
... metric='precision_recall') | [
"Compare",
"the",
"prediction",
"or",
"recommendation",
"performance",
"of",
"recommender",
"models",
"on",
"a",
"common",
"test",
"dataset",
"."
] | 74514c3f99e25b46f22c6e02977fe3da69221c2e | https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/unity/python/turicreate/toolkits/recommender/util.py#L177-L328 |
28,971 | apple/turicreate | src/unity/python/turicreate/toolkits/recommender/util.py | precision_recall_by_user | def precision_recall_by_user(observed_user_items,
recommendations,
cutoffs=[10]):
"""
Compute precision and recall at a given cutoff for each user. In information
retrieval terms, precision represents the ratio of relevant, retrieved items
to the number of relevant items. Recall represents the ratio of relevant,
retrieved items to the number of relevant items.
Let :math:`p_k` be a vector of the first :math:`k` elements in the
recommendations for a particular user, and let :math:`a` be the set of items
in ``observed_user_items`` for that user. The "precision at cutoff k" for
this user is defined as
.. math::
P(k) = \\frac{ | a \cap p_k | }{k},
while "recall at cutoff k" is defined as
.. math::
R(k) = \\frac{ | a \cap p_k | }{|a|}
The order of the elements in the recommendations affects the returned
precision and recall scores.
Parameters
----------
observed_user_items : SFrame
An SFrame containing observed user item pairs, where the first
column contains user ids and the second column contains item ids.
recommendations : SFrame
An SFrame containing columns pertaining to the user id, the item id,
the score given to that pair, and the rank of that item among the
recommendations made for user id. For example, see the output of
recommend() produced by any turicreate.recommender model.
cutoffs : list[int], optional
The cutoffs to use when computing precision and recall.
Returns
-------
out : SFrame
An SFrame containing columns user id, cutoff, precision, recall, and
count where the precision and recall are reported for each user at
each requested cutoff, and count is the number of observations for
that user id.
Notes
-----
The corner cases that involve empty lists were chosen to be consistent
with the feasible set of precision-recall curves, which start at
(precision, recall) = (1,0) and end at (0,1). However, we do not believe
there is a well-known consensus on this choice.
Examples
--------
Given SFrames ``train_data`` and ``test_data`` with columns user_id
and item_id:
>>> from turicreate.toolkits.recommender.util import precision_recall_by_user
>>> m = turicreate.recommender.create(train_data)
>>> recs = m.recommend()
>>> precision_recall_by_user(test_data, recs, cutoffs=[5, 10])
"""
assert type(observed_user_items) == _SFrame
assert type(recommendations) == _SFrame
assert type(cutoffs) == list
assert min(cutoffs) > 0, "All cutoffs must be positive integers."
assert recommendations.num_columns() >= 2
user_id = recommendations.column_names()[0]
item_id = recommendations.column_names()[1]
assert observed_user_items.num_rows() > 0, \
"Evaluating precision and recall requires a non-empty " + \
"observed_user_items."
assert user_id in observed_user_items.column_names(), \
"User column required in observed_user_items."
assert item_id in observed_user_items.column_names(), \
"Item column required in observed_user_items."
assert observed_user_items[user_id].dtype == \
recommendations[user_id].dtype, \
"The user column in the two provided SFrames must have the same type."
assert observed_user_items[item_id].dtype == \
recommendations[item_id].dtype, \
"The user column in the two provided SFrames must have the same type."
cutoffs = _array.array('f', cutoffs)
opts = {'data': observed_user_items,
'recommendations': recommendations,
'cutoffs': cutoffs}
response = _turicreate.toolkits._main.run('evaluation_precision_recall_by_user', opts)
sf = _SFrame(None, _proxy=response['pr'])
return sf.sort([user_id, 'cutoff']) | python | def precision_recall_by_user(observed_user_items,
recommendations,
cutoffs=[10]):
"""
Compute precision and recall at a given cutoff for each user. In information
retrieval terms, precision represents the ratio of relevant, retrieved items
to the number of relevant items. Recall represents the ratio of relevant,
retrieved items to the number of relevant items.
Let :math:`p_k` be a vector of the first :math:`k` elements in the
recommendations for a particular user, and let :math:`a` be the set of items
in ``observed_user_items`` for that user. The "precision at cutoff k" for
this user is defined as
.. math::
P(k) = \\frac{ | a \cap p_k | }{k},
while "recall at cutoff k" is defined as
.. math::
R(k) = \\frac{ | a \cap p_k | }{|a|}
The order of the elements in the recommendations affects the returned
precision and recall scores.
Parameters
----------
observed_user_items : SFrame
An SFrame containing observed user item pairs, where the first
column contains user ids and the second column contains item ids.
recommendations : SFrame
An SFrame containing columns pertaining to the user id, the item id,
the score given to that pair, and the rank of that item among the
recommendations made for user id. For example, see the output of
recommend() produced by any turicreate.recommender model.
cutoffs : list[int], optional
The cutoffs to use when computing precision and recall.
Returns
-------
out : SFrame
An SFrame containing columns user id, cutoff, precision, recall, and
count where the precision and recall are reported for each user at
each requested cutoff, and count is the number of observations for
that user id.
Notes
-----
The corner cases that involve empty lists were chosen to be consistent
with the feasible set of precision-recall curves, which start at
(precision, recall) = (1,0) and end at (0,1). However, we do not believe
there is a well-known consensus on this choice.
Examples
--------
Given SFrames ``train_data`` and ``test_data`` with columns user_id
and item_id:
>>> from turicreate.toolkits.recommender.util import precision_recall_by_user
>>> m = turicreate.recommender.create(train_data)
>>> recs = m.recommend()
>>> precision_recall_by_user(test_data, recs, cutoffs=[5, 10])
"""
assert type(observed_user_items) == _SFrame
assert type(recommendations) == _SFrame
assert type(cutoffs) == list
assert min(cutoffs) > 0, "All cutoffs must be positive integers."
assert recommendations.num_columns() >= 2
user_id = recommendations.column_names()[0]
item_id = recommendations.column_names()[1]
assert observed_user_items.num_rows() > 0, \
"Evaluating precision and recall requires a non-empty " + \
"observed_user_items."
assert user_id in observed_user_items.column_names(), \
"User column required in observed_user_items."
assert item_id in observed_user_items.column_names(), \
"Item column required in observed_user_items."
assert observed_user_items[user_id].dtype == \
recommendations[user_id].dtype, \
"The user column in the two provided SFrames must have the same type."
assert observed_user_items[item_id].dtype == \
recommendations[item_id].dtype, \
"The user column in the two provided SFrames must have the same type."
cutoffs = _array.array('f', cutoffs)
opts = {'data': observed_user_items,
'recommendations': recommendations,
'cutoffs': cutoffs}
response = _turicreate.toolkits._main.run('evaluation_precision_recall_by_user', opts)
sf = _SFrame(None, _proxy=response['pr'])
return sf.sort([user_id, 'cutoff']) | [
"def",
"precision_recall_by_user",
"(",
"observed_user_items",
",",
"recommendations",
",",
"cutoffs",
"=",
"[",
"10",
"]",
")",
":",
"assert",
"type",
"(",
"observed_user_items",
")",
"==",
"_SFrame",
"assert",
"type",
"(",
"recommendations",
")",
"==",
"_SFrame",
"assert",
"type",
"(",
"cutoffs",
")",
"==",
"list",
"assert",
"min",
"(",
"cutoffs",
")",
">",
"0",
",",
"\"All cutoffs must be positive integers.\"",
"assert",
"recommendations",
".",
"num_columns",
"(",
")",
">=",
"2",
"user_id",
"=",
"recommendations",
".",
"column_names",
"(",
")",
"[",
"0",
"]",
"item_id",
"=",
"recommendations",
".",
"column_names",
"(",
")",
"[",
"1",
"]",
"assert",
"observed_user_items",
".",
"num_rows",
"(",
")",
">",
"0",
",",
"\"Evaluating precision and recall requires a non-empty \"",
"+",
"\"observed_user_items.\"",
"assert",
"user_id",
"in",
"observed_user_items",
".",
"column_names",
"(",
")",
",",
"\"User column required in observed_user_items.\"",
"assert",
"item_id",
"in",
"observed_user_items",
".",
"column_names",
"(",
")",
",",
"\"Item column required in observed_user_items.\"",
"assert",
"observed_user_items",
"[",
"user_id",
"]",
".",
"dtype",
"==",
"recommendations",
"[",
"user_id",
"]",
".",
"dtype",
",",
"\"The user column in the two provided SFrames must have the same type.\"",
"assert",
"observed_user_items",
"[",
"item_id",
"]",
".",
"dtype",
"==",
"recommendations",
"[",
"item_id",
"]",
".",
"dtype",
",",
"\"The user column in the two provided SFrames must have the same type.\"",
"cutoffs",
"=",
"_array",
".",
"array",
"(",
"'f'",
",",
"cutoffs",
")",
"opts",
"=",
"{",
"'data'",
":",
"observed_user_items",
",",
"'recommendations'",
":",
"recommendations",
",",
"'cutoffs'",
":",
"cutoffs",
"}",
"response",
"=",
"_turicreate",
".",
"toolkits",
".",
"_main",
".",
"run",
"(",
"'evaluation_precision_recall_by_user'",
",",
"opts",
")",
"sf",
"=",
"_SFrame",
"(",
"None",
",",
"_proxy",
"=",
"response",
"[",
"'pr'",
"]",
")",
"return",
"sf",
".",
"sort",
"(",
"[",
"user_id",
",",
"'cutoff'",
"]",
")"
] | Compute precision and recall at a given cutoff for each user. In information
retrieval terms, precision represents the ratio of relevant, retrieved items
to the number of relevant items. Recall represents the ratio of relevant,
retrieved items to the number of relevant items.
Let :math:`p_k` be a vector of the first :math:`k` elements in the
recommendations for a particular user, and let :math:`a` be the set of items
in ``observed_user_items`` for that user. The "precision at cutoff k" for
this user is defined as
.. math::
P(k) = \\frac{ | a \cap p_k | }{k},
while "recall at cutoff k" is defined as
.. math::
R(k) = \\frac{ | a \cap p_k | }{|a|}
The order of the elements in the recommendations affects the returned
precision and recall scores.
Parameters
----------
observed_user_items : SFrame
An SFrame containing observed user item pairs, where the first
column contains user ids and the second column contains item ids.
recommendations : SFrame
An SFrame containing columns pertaining to the user id, the item id,
the score given to that pair, and the rank of that item among the
recommendations made for user id. For example, see the output of
recommend() produced by any turicreate.recommender model.
cutoffs : list[int], optional
The cutoffs to use when computing precision and recall.
Returns
-------
out : SFrame
An SFrame containing columns user id, cutoff, precision, recall, and
count where the precision and recall are reported for each user at
each requested cutoff, and count is the number of observations for
that user id.
Notes
-----
The corner cases that involve empty lists were chosen to be consistent
with the feasible set of precision-recall curves, which start at
(precision, recall) = (1,0) and end at (0,1). However, we do not believe
there is a well-known consensus on this choice.
Examples
--------
Given SFrames ``train_data`` and ``test_data`` with columns user_id
and item_id:
>>> from turicreate.toolkits.recommender.util import precision_recall_by_user
>>> m = turicreate.recommender.create(train_data)
>>> recs = m.recommend()
>>> precision_recall_by_user(test_data, recs, cutoffs=[5, 10]) | [
"Compute",
"precision",
"and",
"recall",
"at",
"a",
"given",
"cutoff",
"for",
"each",
"user",
".",
"In",
"information",
"retrieval",
"terms",
"precision",
"represents",
"the",
"ratio",
"of",
"relevant",
"retrieved",
"items",
"to",
"the",
"number",
"of",
"relevant",
"items",
".",
"Recall",
"represents",
"the",
"ratio",
"of",
"relevant",
"retrieved",
"items",
"to",
"the",
"number",
"of",
"relevant",
"items",
"."
] | 74514c3f99e25b46f22c6e02977fe3da69221c2e | https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/unity/python/turicreate/toolkits/recommender/util.py#L331-L427 |
28,972 | apple/turicreate | src/unity/python/turicreate/toolkits/recommender/util.py | random_split_by_user | def random_split_by_user(dataset,
user_id='user_id',
item_id='item_id',
max_num_users=1000,
item_test_proportion=.2,
random_seed=0):
"""Create a recommender-friendly train-test split of the provided data set.
The test dataset is generated by first choosing `max_num_users` out of the
total number of users in `dataset`. Then, for each of the chosen test users,
a portion of the user's items (determined by `item_test_proportion`) is
randomly chosen to be included in the test set. This split allows the
training data to retain enough information about the users in the testset,
so that adequate recommendations can be made. The total number of users
in the test set may be fewer than `max_num_users` if a user was chosen for
the test set but none of their items are selected.
Parameters
----------
dataset : SFrame
An SFrame containing (user, item) pairs.
user_id : str, optional
The name of the column in ``dataset`` that contains user ids.
item_id : str, optional
The name of the column in ``dataset`` that contains item ids.
max_num_users : int, optional
The maximum number of users to use to construct the test set. If
set to 'None', then use all available users.
item_test_proportion : float, optional
The desired probability that a test user's item will be chosen
for the test set.
random_seed : int, optional The random seed to use for
randomization. If None, then the random seed is different
every time; if numeric, then subsequent calls with the same
dataset and random seed with have the same split.
Returns
-------
train, test : SFrame
A tuple with two datasets to be used for training and testing.
Examples
--------
>>> import turicreate as tc
>>> sf = tc.SFrame('https://static.turi.com/datasets/audioscrobbler')
>>> train, test = tc.recommender.util.random_split_by_user(sf, max_num_users=100)
"""
assert user_id in dataset.column_names(), \
'Provided user column "{0}" not found in data set.'.format(user_id)
assert item_id in dataset.column_names(), \
'Provided item column "{0}" not found in data set.'.format(item_id)
if max_num_users == 'all':
max_num_users = None
if random_seed is None:
import time
random_seed = int(hash("%20f" % time.time()) % 2**63)
opts = {'dataset': dataset,
'user_id': user_id,
'item_id': item_id,
'max_num_users': max_num_users,
'item_test_proportion': item_test_proportion,
'random_seed': random_seed}
response = _turicreate.extensions._recsys.train_test_split(dataset, user_id, item_id,
max_num_users, item_test_proportion, random_seed)
train = response['train']
test = response['test']
return train, test | python | def random_split_by_user(dataset,
user_id='user_id',
item_id='item_id',
max_num_users=1000,
item_test_proportion=.2,
random_seed=0):
"""Create a recommender-friendly train-test split of the provided data set.
The test dataset is generated by first choosing `max_num_users` out of the
total number of users in `dataset`. Then, for each of the chosen test users,
a portion of the user's items (determined by `item_test_proportion`) is
randomly chosen to be included in the test set. This split allows the
training data to retain enough information about the users in the testset,
so that adequate recommendations can be made. The total number of users
in the test set may be fewer than `max_num_users` if a user was chosen for
the test set but none of their items are selected.
Parameters
----------
dataset : SFrame
An SFrame containing (user, item) pairs.
user_id : str, optional
The name of the column in ``dataset`` that contains user ids.
item_id : str, optional
The name of the column in ``dataset`` that contains item ids.
max_num_users : int, optional
The maximum number of users to use to construct the test set. If
set to 'None', then use all available users.
item_test_proportion : float, optional
The desired probability that a test user's item will be chosen
for the test set.
random_seed : int, optional The random seed to use for
randomization. If None, then the random seed is different
every time; if numeric, then subsequent calls with the same
dataset and random seed with have the same split.
Returns
-------
train, test : SFrame
A tuple with two datasets to be used for training and testing.
Examples
--------
>>> import turicreate as tc
>>> sf = tc.SFrame('https://static.turi.com/datasets/audioscrobbler')
>>> train, test = tc.recommender.util.random_split_by_user(sf, max_num_users=100)
"""
assert user_id in dataset.column_names(), \
'Provided user column "{0}" not found in data set.'.format(user_id)
assert item_id in dataset.column_names(), \
'Provided item column "{0}" not found in data set.'.format(item_id)
if max_num_users == 'all':
max_num_users = None
if random_seed is None:
import time
random_seed = int(hash("%20f" % time.time()) % 2**63)
opts = {'dataset': dataset,
'user_id': user_id,
'item_id': item_id,
'max_num_users': max_num_users,
'item_test_proportion': item_test_proportion,
'random_seed': random_seed}
response = _turicreate.extensions._recsys.train_test_split(dataset, user_id, item_id,
max_num_users, item_test_proportion, random_seed)
train = response['train']
test = response['test']
return train, test | [
"def",
"random_split_by_user",
"(",
"dataset",
",",
"user_id",
"=",
"'user_id'",
",",
"item_id",
"=",
"'item_id'",
",",
"max_num_users",
"=",
"1000",
",",
"item_test_proportion",
"=",
".2",
",",
"random_seed",
"=",
"0",
")",
":",
"assert",
"user_id",
"in",
"dataset",
".",
"column_names",
"(",
")",
",",
"'Provided user column \"{0}\" not found in data set.'",
".",
"format",
"(",
"user_id",
")",
"assert",
"item_id",
"in",
"dataset",
".",
"column_names",
"(",
")",
",",
"'Provided item column \"{0}\" not found in data set.'",
".",
"format",
"(",
"item_id",
")",
"if",
"max_num_users",
"==",
"'all'",
":",
"max_num_users",
"=",
"None",
"if",
"random_seed",
"is",
"None",
":",
"import",
"time",
"random_seed",
"=",
"int",
"(",
"hash",
"(",
"\"%20f\"",
"%",
"time",
".",
"time",
"(",
")",
")",
"%",
"2",
"**",
"63",
")",
"opts",
"=",
"{",
"'dataset'",
":",
"dataset",
",",
"'user_id'",
":",
"user_id",
",",
"'item_id'",
":",
"item_id",
",",
"'max_num_users'",
":",
"max_num_users",
",",
"'item_test_proportion'",
":",
"item_test_proportion",
",",
"'random_seed'",
":",
"random_seed",
"}",
"response",
"=",
"_turicreate",
".",
"extensions",
".",
"_recsys",
".",
"train_test_split",
"(",
"dataset",
",",
"user_id",
",",
"item_id",
",",
"max_num_users",
",",
"item_test_proportion",
",",
"random_seed",
")",
"train",
"=",
"response",
"[",
"'train'",
"]",
"test",
"=",
"response",
"[",
"'test'",
"]",
"return",
"train",
",",
"test"
] | Create a recommender-friendly train-test split of the provided data set.
The test dataset is generated by first choosing `max_num_users` out of the
total number of users in `dataset`. Then, for each of the chosen test users,
a portion of the user's items (determined by `item_test_proportion`) is
randomly chosen to be included in the test set. This split allows the
training data to retain enough information about the users in the testset,
so that adequate recommendations can be made. The total number of users
in the test set may be fewer than `max_num_users` if a user was chosen for
the test set but none of their items are selected.
Parameters
----------
dataset : SFrame
An SFrame containing (user, item) pairs.
user_id : str, optional
The name of the column in ``dataset`` that contains user ids.
item_id : str, optional
The name of the column in ``dataset`` that contains item ids.
max_num_users : int, optional
The maximum number of users to use to construct the test set. If
set to 'None', then use all available users.
item_test_proportion : float, optional
The desired probability that a test user's item will be chosen
for the test set.
random_seed : int, optional The random seed to use for
randomization. If None, then the random seed is different
every time; if numeric, then subsequent calls with the same
dataset and random seed with have the same split.
Returns
-------
train, test : SFrame
A tuple with two datasets to be used for training and testing.
Examples
--------
>>> import turicreate as tc
>>> sf = tc.SFrame('https://static.turi.com/datasets/audioscrobbler')
>>> train, test = tc.recommender.util.random_split_by_user(sf, max_num_users=100) | [
"Create",
"a",
"recommender",
"-",
"friendly",
"train",
"-",
"test",
"split",
"of",
"the",
"provided",
"data",
"set",
"."
] | 74514c3f99e25b46f22c6e02977fe3da69221c2e | https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/unity/python/turicreate/toolkits/recommender/util.py#L430-L508 |
28,973 | apple/turicreate | src/unity/python/turicreate/toolkits/recommender/util.py | _Recommender._list_fields | def _list_fields(self):
"""
Get the current settings of the model. The keys depend on the type of
model.
Returns
-------
out : list
A list of fields that can be queried using the ``get`` method.
"""
response = self.__proxy__.list_fields()
return [s for s in response['value'] if not s.startswith("_")] | python | def _list_fields(self):
"""
Get the current settings of the model. The keys depend on the type of
model.
Returns
-------
out : list
A list of fields that can be queried using the ``get`` method.
"""
response = self.__proxy__.list_fields()
return [s for s in response['value'] if not s.startswith("_")] | [
"def",
"_list_fields",
"(",
"self",
")",
":",
"response",
"=",
"self",
".",
"__proxy__",
".",
"list_fields",
"(",
")",
"return",
"[",
"s",
"for",
"s",
"in",
"response",
"[",
"'value'",
"]",
"if",
"not",
"s",
".",
"startswith",
"(",
"\"_\"",
")",
"]"
] | Get the current settings of the model. The keys depend on the type of
model.
Returns
-------
out : list
A list of fields that can be queried using the ``get`` method. | [
"Get",
"the",
"current",
"settings",
"of",
"the",
"model",
".",
"The",
"keys",
"depend",
"on",
"the",
"type",
"of",
"model",
"."
] | 74514c3f99e25b46f22c6e02977fe3da69221c2e | https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/unity/python/turicreate/toolkits/recommender/util.py#L543-L555 |
28,974 | apple/turicreate | src/unity/python/turicreate/toolkits/recommender/util.py | _Recommender._set_current_options | def _set_current_options(self, options):
"""
Set current options for a model.
Parameters
----------
options : dict
A dictionary of the desired option settings. The key should be the name
of the option and each value is the desired value of the option.
"""
opts = self._get_current_options()
opts.update(options)
response = self.__proxy__.set_current_options(opts)
return response | python | def _set_current_options(self, options):
"""
Set current options for a model.
Parameters
----------
options : dict
A dictionary of the desired option settings. The key should be the name
of the option and each value is the desired value of the option.
"""
opts = self._get_current_options()
opts.update(options)
response = self.__proxy__.set_current_options(opts)
return response | [
"def",
"_set_current_options",
"(",
"self",
",",
"options",
")",
":",
"opts",
"=",
"self",
".",
"_get_current_options",
"(",
")",
"opts",
".",
"update",
"(",
"options",
")",
"response",
"=",
"self",
".",
"__proxy__",
".",
"set_current_options",
"(",
"opts",
")",
"return",
"response"
] | Set current options for a model.
Parameters
----------
options : dict
A dictionary of the desired option settings. The key should be the name
of the option and each value is the desired value of the option. | [
"Set",
"current",
"options",
"for",
"a",
"model",
"."
] | 74514c3f99e25b46f22c6e02977fe3da69221c2e | https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/unity/python/turicreate/toolkits/recommender/util.py#L804-L818 |
28,975 | apple/turicreate | src/unity/python/turicreate/toolkits/recommender/util.py | _Recommender.__prepare_dataset_parameter | def __prepare_dataset_parameter(self, dataset):
"""
Processes the dataset parameter for type correctness.
Returns it as an SFrame.
"""
# Translate the dataset argument into the proper type
if not isinstance(dataset, _SFrame):
def raise_dataset_type_exception():
raise TypeError("The dataset parameter must be either an SFrame, "
"or a dictionary of (str : list) or (str : value).")
if type(dataset) is dict:
if not all(type(k) is str for k in _six.iterkeys(dataset)):
raise_dataset_type_exception()
if all(type(v) in (list, tuple, _array.array) for v in _six.itervalues(dataset)):
dataset = _SFrame(dataset)
else:
dataset = _SFrame({k : [v] for k, v in _six.iteritems(dataset)})
else:
raise_dataset_type_exception()
return dataset | python | def __prepare_dataset_parameter(self, dataset):
"""
Processes the dataset parameter for type correctness.
Returns it as an SFrame.
"""
# Translate the dataset argument into the proper type
if not isinstance(dataset, _SFrame):
def raise_dataset_type_exception():
raise TypeError("The dataset parameter must be either an SFrame, "
"or a dictionary of (str : list) or (str : value).")
if type(dataset) is dict:
if not all(type(k) is str for k in _six.iterkeys(dataset)):
raise_dataset_type_exception()
if all(type(v) in (list, tuple, _array.array) for v in _six.itervalues(dataset)):
dataset = _SFrame(dataset)
else:
dataset = _SFrame({k : [v] for k, v in _six.iteritems(dataset)})
else:
raise_dataset_type_exception()
return dataset | [
"def",
"__prepare_dataset_parameter",
"(",
"self",
",",
"dataset",
")",
":",
"# Translate the dataset argument into the proper type",
"if",
"not",
"isinstance",
"(",
"dataset",
",",
"_SFrame",
")",
":",
"def",
"raise_dataset_type_exception",
"(",
")",
":",
"raise",
"TypeError",
"(",
"\"The dataset parameter must be either an SFrame, \"",
"\"or a dictionary of (str : list) or (str : value).\"",
")",
"if",
"type",
"(",
"dataset",
")",
"is",
"dict",
":",
"if",
"not",
"all",
"(",
"type",
"(",
"k",
")",
"is",
"str",
"for",
"k",
"in",
"_six",
".",
"iterkeys",
"(",
"dataset",
")",
")",
":",
"raise_dataset_type_exception",
"(",
")",
"if",
"all",
"(",
"type",
"(",
"v",
")",
"in",
"(",
"list",
",",
"tuple",
",",
"_array",
".",
"array",
")",
"for",
"v",
"in",
"_six",
".",
"itervalues",
"(",
"dataset",
")",
")",
":",
"dataset",
"=",
"_SFrame",
"(",
"dataset",
")",
"else",
":",
"dataset",
"=",
"_SFrame",
"(",
"{",
"k",
":",
"[",
"v",
"]",
"for",
"k",
",",
"v",
"in",
"_six",
".",
"iteritems",
"(",
"dataset",
")",
"}",
")",
"else",
":",
"raise_dataset_type_exception",
"(",
")",
"return",
"dataset"
] | Processes the dataset parameter for type correctness.
Returns it as an SFrame. | [
"Processes",
"the",
"dataset",
"parameter",
"for",
"type",
"correctness",
".",
"Returns",
"it",
"as",
"an",
"SFrame",
"."
] | 74514c3f99e25b46f22c6e02977fe3da69221c2e | https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/unity/python/turicreate/toolkits/recommender/util.py#L820-L843 |
28,976 | apple/turicreate | src/unity/python/turicreate/toolkits/recommender/util.py | _Recommender.predict | def predict(self, dataset,
new_observation_data=None, new_user_data=None, new_item_data=None):
"""
Return a score prediction for the user ids and item ids in the provided
data set.
Parameters
----------
dataset : SFrame
Dataset in the same form used for training.
new_observation_data : SFrame, optional
``new_observation_data`` gives additional observation data
to the model, which may be used by the models to improve
score accuracy. Must be in the same format as the
observation data passed to ``create``. How this data is
used varies by model.
new_user_data : SFrame, optional
``new_user_data`` may give additional user data to the
model. If present, scoring is done with reference to this
new information. If there is any overlap with the side
information present at training time, then this new side
data is preferred. Must be in the same format as the user
data passed to ``create``.
new_item_data : SFrame, optional
``new_item_data`` may give additional item data to the
model. If present, scoring is done with reference to this
new information. If there is any overlap with the side
information present at training time, then this new side
data is preferred. Must be in the same format as the item
data passed to ``create``.
Returns
-------
out : SArray
An SArray with predicted scores for each given observation
predicted by the model.
See Also
--------
recommend, evaluate
"""
if new_observation_data is None:
new_observation_data = _SFrame()
if new_user_data is None:
new_user_data = _SFrame()
if new_item_data is None:
new_item_data = _SFrame()
dataset = self.__prepare_dataset_parameter(dataset)
def check_type(arg, arg_name, required_type, allowed_types):
if not isinstance(arg, required_type):
raise TypeError("Parameter " + arg_name + " must be of type(s) "
+ (", ".join(allowed_types))
+ "; Type '" + str(type(arg)) + "' not recognized.")
check_type(new_observation_data, "new_observation_data", _SFrame, ["SFrame"])
check_type(new_user_data, "new_user_data", _SFrame, ["SFrame"])
check_type(new_item_data, "new_item_data", _SFrame, ["SFrame"])
response = self.__proxy__.predict(dataset, new_user_data, new_item_data)
return response['prediction'] | python | def predict(self, dataset,
new_observation_data=None, new_user_data=None, new_item_data=None):
"""
Return a score prediction for the user ids and item ids in the provided
data set.
Parameters
----------
dataset : SFrame
Dataset in the same form used for training.
new_observation_data : SFrame, optional
``new_observation_data`` gives additional observation data
to the model, which may be used by the models to improve
score accuracy. Must be in the same format as the
observation data passed to ``create``. How this data is
used varies by model.
new_user_data : SFrame, optional
``new_user_data`` may give additional user data to the
model. If present, scoring is done with reference to this
new information. If there is any overlap with the side
information present at training time, then this new side
data is preferred. Must be in the same format as the user
data passed to ``create``.
new_item_data : SFrame, optional
``new_item_data`` may give additional item data to the
model. If present, scoring is done with reference to this
new information. If there is any overlap with the side
information present at training time, then this new side
data is preferred. Must be in the same format as the item
data passed to ``create``.
Returns
-------
out : SArray
An SArray with predicted scores for each given observation
predicted by the model.
See Also
--------
recommend, evaluate
"""
if new_observation_data is None:
new_observation_data = _SFrame()
if new_user_data is None:
new_user_data = _SFrame()
if new_item_data is None:
new_item_data = _SFrame()
dataset = self.__prepare_dataset_parameter(dataset)
def check_type(arg, arg_name, required_type, allowed_types):
if not isinstance(arg, required_type):
raise TypeError("Parameter " + arg_name + " must be of type(s) "
+ (", ".join(allowed_types))
+ "; Type '" + str(type(arg)) + "' not recognized.")
check_type(new_observation_data, "new_observation_data", _SFrame, ["SFrame"])
check_type(new_user_data, "new_user_data", _SFrame, ["SFrame"])
check_type(new_item_data, "new_item_data", _SFrame, ["SFrame"])
response = self.__proxy__.predict(dataset, new_user_data, new_item_data)
return response['prediction'] | [
"def",
"predict",
"(",
"self",
",",
"dataset",
",",
"new_observation_data",
"=",
"None",
",",
"new_user_data",
"=",
"None",
",",
"new_item_data",
"=",
"None",
")",
":",
"if",
"new_observation_data",
"is",
"None",
":",
"new_observation_data",
"=",
"_SFrame",
"(",
")",
"if",
"new_user_data",
"is",
"None",
":",
"new_user_data",
"=",
"_SFrame",
"(",
")",
"if",
"new_item_data",
"is",
"None",
":",
"new_item_data",
"=",
"_SFrame",
"(",
")",
"dataset",
"=",
"self",
".",
"__prepare_dataset_parameter",
"(",
"dataset",
")",
"def",
"check_type",
"(",
"arg",
",",
"arg_name",
",",
"required_type",
",",
"allowed_types",
")",
":",
"if",
"not",
"isinstance",
"(",
"arg",
",",
"required_type",
")",
":",
"raise",
"TypeError",
"(",
"\"Parameter \"",
"+",
"arg_name",
"+",
"\" must be of type(s) \"",
"+",
"(",
"\", \"",
".",
"join",
"(",
"allowed_types",
")",
")",
"+",
"\"; Type '\"",
"+",
"str",
"(",
"type",
"(",
"arg",
")",
")",
"+",
"\"' not recognized.\"",
")",
"check_type",
"(",
"new_observation_data",
",",
"\"new_observation_data\"",
",",
"_SFrame",
",",
"[",
"\"SFrame\"",
"]",
")",
"check_type",
"(",
"new_user_data",
",",
"\"new_user_data\"",
",",
"_SFrame",
",",
"[",
"\"SFrame\"",
"]",
")",
"check_type",
"(",
"new_item_data",
",",
"\"new_item_data\"",
",",
"_SFrame",
",",
"[",
"\"SFrame\"",
"]",
")",
"response",
"=",
"self",
".",
"__proxy__",
".",
"predict",
"(",
"dataset",
",",
"new_user_data",
",",
"new_item_data",
")",
"return",
"response",
"[",
"'prediction'",
"]"
] | Return a score prediction for the user ids and item ids in the provided
data set.
Parameters
----------
dataset : SFrame
Dataset in the same form used for training.
new_observation_data : SFrame, optional
``new_observation_data`` gives additional observation data
to the model, which may be used by the models to improve
score accuracy. Must be in the same format as the
observation data passed to ``create``. How this data is
used varies by model.
new_user_data : SFrame, optional
``new_user_data`` may give additional user data to the
model. If present, scoring is done with reference to this
new information. If there is any overlap with the side
information present at training time, then this new side
data is preferred. Must be in the same format as the user
data passed to ``create``.
new_item_data : SFrame, optional
``new_item_data`` may give additional item data to the
model. If present, scoring is done with reference to this
new information. If there is any overlap with the side
information present at training time, then this new side
data is preferred. Must be in the same format as the item
data passed to ``create``.
Returns
-------
out : SArray
An SArray with predicted scores for each given observation
predicted by the model.
See Also
--------
recommend, evaluate | [
"Return",
"a",
"score",
"prediction",
"for",
"the",
"user",
"ids",
"and",
"item",
"ids",
"in",
"the",
"provided",
"data",
"set",
"."
] | 74514c3f99e25b46f22c6e02977fe3da69221c2e | https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/unity/python/turicreate/toolkits/recommender/util.py#L859-L925 |
28,977 | apple/turicreate | src/unity/python/turicreate/toolkits/recommender/util.py | _Recommender.get_similar_items | def get_similar_items(self, items=None, k=10, verbose=False):
"""
Get the k most similar items for each item in items.
Each type of recommender has its own model for the similarity
between items. For example, the item_similarity_recommender will
return the most similar items according to the user-chosen
similarity; the factorization_recommender will return the
nearest items based on the cosine similarity between latent item
factors.
Parameters
----------
items : SArray or list; optional
An :class:`~turicreate.SArray` or list of item ids for which to get
similar items. If 'None', then return the `k` most similar items for
all items in the training set.
k : int, optional
The number of similar items for each item.
verbose : bool, optional
Progress printing is shown.
Returns
-------
out : SFrame
A SFrame with the top ranked similar items for each item. The
columns `item`, 'similar', 'score' and 'rank', where
`item` matches the item column name specified at training time.
The 'rank' is between 1 and `k` and 'score' gives the similarity
score of that item. The value of the score depends on the method
used for computing item similarities.
Examples
--------
>>> sf = turicreate.SFrame({'user_id': ["0", "0", "0", "1", "1", "2", "2", "2"],
'item_id': ["a", "b", "c", "a", "b", "b", "c", "d"]})
>>> m = turicreate.item_similarity_recommender.create(sf)
>>> nn = m.get_similar_items()
"""
if items is None:
get_all_items = True
items = _SArray()
else:
get_all_items = False
if isinstance(items, list):
items = _SArray(items)
def check_type(arg, arg_name, required_type, allowed_types):
if not isinstance(arg, required_type):
raise TypeError("Parameter " + arg_name + " must be of type(s) "
+ (", ".join(allowed_types) )
+ "; Type '" + str(type(arg)) + "' not recognized.")
check_type(items, "items", _SArray, ["SArray", "list"])
check_type(k, "k", int, ["int"])
return self.__proxy__.get_similar_items(items, k, verbose, get_all_items) | python | def get_similar_items(self, items=None, k=10, verbose=False):
"""
Get the k most similar items for each item in items.
Each type of recommender has its own model for the similarity
between items. For example, the item_similarity_recommender will
return the most similar items according to the user-chosen
similarity; the factorization_recommender will return the
nearest items based on the cosine similarity between latent item
factors.
Parameters
----------
items : SArray or list; optional
An :class:`~turicreate.SArray` or list of item ids for which to get
similar items. If 'None', then return the `k` most similar items for
all items in the training set.
k : int, optional
The number of similar items for each item.
verbose : bool, optional
Progress printing is shown.
Returns
-------
out : SFrame
A SFrame with the top ranked similar items for each item. The
columns `item`, 'similar', 'score' and 'rank', where
`item` matches the item column name specified at training time.
The 'rank' is between 1 and `k` and 'score' gives the similarity
score of that item. The value of the score depends on the method
used for computing item similarities.
Examples
--------
>>> sf = turicreate.SFrame({'user_id': ["0", "0", "0", "1", "1", "2", "2", "2"],
'item_id': ["a", "b", "c", "a", "b", "b", "c", "d"]})
>>> m = turicreate.item_similarity_recommender.create(sf)
>>> nn = m.get_similar_items()
"""
if items is None:
get_all_items = True
items = _SArray()
else:
get_all_items = False
if isinstance(items, list):
items = _SArray(items)
def check_type(arg, arg_name, required_type, allowed_types):
if not isinstance(arg, required_type):
raise TypeError("Parameter " + arg_name + " must be of type(s) "
+ (", ".join(allowed_types) )
+ "; Type '" + str(type(arg)) + "' not recognized.")
check_type(items, "items", _SArray, ["SArray", "list"])
check_type(k, "k", int, ["int"])
return self.__proxy__.get_similar_items(items, k, verbose, get_all_items) | [
"def",
"get_similar_items",
"(",
"self",
",",
"items",
"=",
"None",
",",
"k",
"=",
"10",
",",
"verbose",
"=",
"False",
")",
":",
"if",
"items",
"is",
"None",
":",
"get_all_items",
"=",
"True",
"items",
"=",
"_SArray",
"(",
")",
"else",
":",
"get_all_items",
"=",
"False",
"if",
"isinstance",
"(",
"items",
",",
"list",
")",
":",
"items",
"=",
"_SArray",
"(",
"items",
")",
"def",
"check_type",
"(",
"arg",
",",
"arg_name",
",",
"required_type",
",",
"allowed_types",
")",
":",
"if",
"not",
"isinstance",
"(",
"arg",
",",
"required_type",
")",
":",
"raise",
"TypeError",
"(",
"\"Parameter \"",
"+",
"arg_name",
"+",
"\" must be of type(s) \"",
"+",
"(",
"\", \"",
".",
"join",
"(",
"allowed_types",
")",
")",
"+",
"\"; Type '\"",
"+",
"str",
"(",
"type",
"(",
"arg",
")",
")",
"+",
"\"' not recognized.\"",
")",
"check_type",
"(",
"items",
",",
"\"items\"",
",",
"_SArray",
",",
"[",
"\"SArray\"",
",",
"\"list\"",
"]",
")",
"check_type",
"(",
"k",
",",
"\"k\"",
",",
"int",
",",
"[",
"\"int\"",
"]",
")",
"return",
"self",
".",
"__proxy__",
".",
"get_similar_items",
"(",
"items",
",",
"k",
",",
"verbose",
",",
"get_all_items",
")"
] | Get the k most similar items for each item in items.
Each type of recommender has its own model for the similarity
between items. For example, the item_similarity_recommender will
return the most similar items according to the user-chosen
similarity; the factorization_recommender will return the
nearest items based on the cosine similarity between latent item
factors.
Parameters
----------
items : SArray or list; optional
An :class:`~turicreate.SArray` or list of item ids for which to get
similar items. If 'None', then return the `k` most similar items for
all items in the training set.
k : int, optional
The number of similar items for each item.
verbose : bool, optional
Progress printing is shown.
Returns
-------
out : SFrame
A SFrame with the top ranked similar items for each item. The
columns `item`, 'similar', 'score' and 'rank', where
`item` matches the item column name specified at training time.
The 'rank' is between 1 and `k` and 'score' gives the similarity
score of that item. The value of the score depends on the method
used for computing item similarities.
Examples
--------
>>> sf = turicreate.SFrame({'user_id': ["0", "0", "0", "1", "1", "2", "2", "2"],
'item_id': ["a", "b", "c", "a", "b", "b", "c", "d"]})
>>> m = turicreate.item_similarity_recommender.create(sf)
>>> nn = m.get_similar_items() | [
"Get",
"the",
"k",
"most",
"similar",
"items",
"for",
"each",
"item",
"in",
"items",
"."
] | 74514c3f99e25b46f22c6e02977fe3da69221c2e | https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/unity/python/turicreate/toolkits/recommender/util.py#L927-L988 |
28,978 | apple/turicreate | src/unity/python/turicreate/toolkits/recommender/util.py | _Recommender.get_similar_users | def get_similar_users(self, users=None, k=10):
"""Get the k most similar users for each entry in `users`.
Each type of recommender has its own model for the similarity
between users. For example, the factorization_recommender will
return the nearest users based on the cosine similarity
between latent user factors. (This method is not currently
available for item_similarity models.)
Parameters
----------
users : SArray or list; optional
An :class:`~turicreate.SArray` or list of user ids for which to get
similar users. If 'None', then return the `k` most similar users for
all users in the training set.
k : int, optional
The number of neighbors to return for each user.
Returns
-------
out : SFrame
A SFrame with the top ranked similar users for each user. The
columns `user`, 'similar', 'score' and 'rank', where
`user` matches the user column name specified at training time.
The 'rank' is between 1 and `k` and 'score' gives the similarity
score of that user. The value of the score depends on the method
used for computing user similarities.
Examples
--------
>>> sf = turicreate.SFrame({'user_id': ["0", "0", "0", "1", "1", "2", "2", "2"],
'item_id': ["a", "b", "c", "a", "b", "b", "c", "d"]})
>>> m = turicreate.factorization_recommender.create(sf)
>>> nn = m.get_similar_users()
"""
if users is None:
get_all_users = True
users = _SArray()
else:
get_all_users = False
if isinstance(users, list):
users = _SArray(users)
def check_type(arg, arg_name, required_type, allowed_types):
if not isinstance(arg, required_type):
raise TypeError("Parameter " + arg_name + " must be of type(s) "
+ (", ".join(allowed_types) )
+ "; Type '" + str(type(arg)) + "' not recognized.")
check_type(users, "users", _SArray, ["SArray", "list"])
check_type(k, "k", int, ["int"])
opt = {'model': self.__proxy__,
'users': users,
'get_all_users' : get_all_users,
'k': k}
response = self.__proxy__.get_similar_users(users, k, get_all_users)
return response | python | def get_similar_users(self, users=None, k=10):
"""Get the k most similar users for each entry in `users`.
Each type of recommender has its own model for the similarity
between users. For example, the factorization_recommender will
return the nearest users based on the cosine similarity
between latent user factors. (This method is not currently
available for item_similarity models.)
Parameters
----------
users : SArray or list; optional
An :class:`~turicreate.SArray` or list of user ids for which to get
similar users. If 'None', then return the `k` most similar users for
all users in the training set.
k : int, optional
The number of neighbors to return for each user.
Returns
-------
out : SFrame
A SFrame with the top ranked similar users for each user. The
columns `user`, 'similar', 'score' and 'rank', where
`user` matches the user column name specified at training time.
The 'rank' is between 1 and `k` and 'score' gives the similarity
score of that user. The value of the score depends on the method
used for computing user similarities.
Examples
--------
>>> sf = turicreate.SFrame({'user_id': ["0", "0", "0", "1", "1", "2", "2", "2"],
'item_id': ["a", "b", "c", "a", "b", "b", "c", "d"]})
>>> m = turicreate.factorization_recommender.create(sf)
>>> nn = m.get_similar_users()
"""
if users is None:
get_all_users = True
users = _SArray()
else:
get_all_users = False
if isinstance(users, list):
users = _SArray(users)
def check_type(arg, arg_name, required_type, allowed_types):
if not isinstance(arg, required_type):
raise TypeError("Parameter " + arg_name + " must be of type(s) "
+ (", ".join(allowed_types) )
+ "; Type '" + str(type(arg)) + "' not recognized.")
check_type(users, "users", _SArray, ["SArray", "list"])
check_type(k, "k", int, ["int"])
opt = {'model': self.__proxy__,
'users': users,
'get_all_users' : get_all_users,
'k': k}
response = self.__proxy__.get_similar_users(users, k, get_all_users)
return response | [
"def",
"get_similar_users",
"(",
"self",
",",
"users",
"=",
"None",
",",
"k",
"=",
"10",
")",
":",
"if",
"users",
"is",
"None",
":",
"get_all_users",
"=",
"True",
"users",
"=",
"_SArray",
"(",
")",
"else",
":",
"get_all_users",
"=",
"False",
"if",
"isinstance",
"(",
"users",
",",
"list",
")",
":",
"users",
"=",
"_SArray",
"(",
"users",
")",
"def",
"check_type",
"(",
"arg",
",",
"arg_name",
",",
"required_type",
",",
"allowed_types",
")",
":",
"if",
"not",
"isinstance",
"(",
"arg",
",",
"required_type",
")",
":",
"raise",
"TypeError",
"(",
"\"Parameter \"",
"+",
"arg_name",
"+",
"\" must be of type(s) \"",
"+",
"(",
"\", \"",
".",
"join",
"(",
"allowed_types",
")",
")",
"+",
"\"; Type '\"",
"+",
"str",
"(",
"type",
"(",
"arg",
")",
")",
"+",
"\"' not recognized.\"",
")",
"check_type",
"(",
"users",
",",
"\"users\"",
",",
"_SArray",
",",
"[",
"\"SArray\"",
",",
"\"list\"",
"]",
")",
"check_type",
"(",
"k",
",",
"\"k\"",
",",
"int",
",",
"[",
"\"int\"",
"]",
")",
"opt",
"=",
"{",
"'model'",
":",
"self",
".",
"__proxy__",
",",
"'users'",
":",
"users",
",",
"'get_all_users'",
":",
"get_all_users",
",",
"'k'",
":",
"k",
"}",
"response",
"=",
"self",
".",
"__proxy__",
".",
"get_similar_users",
"(",
"users",
",",
"k",
",",
"get_all_users",
")",
"return",
"response"
] | Get the k most similar users for each entry in `users`.
Each type of recommender has its own model for the similarity
between users. For example, the factorization_recommender will
return the nearest users based on the cosine similarity
between latent user factors. (This method is not currently
available for item_similarity models.)
Parameters
----------
users : SArray or list; optional
An :class:`~turicreate.SArray` or list of user ids for which to get
similar users. If 'None', then return the `k` most similar users for
all users in the training set.
k : int, optional
The number of neighbors to return for each user.
Returns
-------
out : SFrame
A SFrame with the top ranked similar users for each user. The
columns `user`, 'similar', 'score' and 'rank', where
`user` matches the user column name specified at training time.
The 'rank' is between 1 and `k` and 'score' gives the similarity
score of that user. The value of the score depends on the method
used for computing user similarities.
Examples
--------
>>> sf = turicreate.SFrame({'user_id': ["0", "0", "0", "1", "1", "2", "2", "2"],
'item_id': ["a", "b", "c", "a", "b", "b", "c", "d"]})
>>> m = turicreate.factorization_recommender.create(sf)
>>> nn = m.get_similar_users() | [
"Get",
"the",
"k",
"most",
"similar",
"users",
"for",
"each",
"entry",
"in",
"users",
"."
] | 74514c3f99e25b46f22c6e02977fe3da69221c2e | https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/unity/python/turicreate/toolkits/recommender/util.py#L990-L1053 |
28,979 | apple/turicreate | src/unity/python/turicreate/toolkits/recommender/util.py | _Recommender.recommend_from_interactions | def recommend_from_interactions(
self, observed_items, k=10, exclude=None, items=None,
new_user_data=None, new_item_data=None,
exclude_known=True, diversity=0, random_seed=None,
verbose=True):
"""
Recommend the ``k`` highest scored items based on the
interactions given in `observed_items.`
Parameters
----------
observed_items : SArray, SFrame, or list
A list/SArray of items to use to make recommendations, or
an SFrame of items and optionally ratings and/or other
interaction data. The model will then recommend the most
similar items to those given. If ``observed_items`` has a user
column, then it must be only one user, and the additional
interaction data stored in the model is also used to make
recommendations.
k : int, optional
The number of recommendations to generate.
items : SArray, SFrame, or list, optional
Restricts the items from which recommendations can be
made. ``items`` must be an SArray, list, or SFrame with a
single column containing items, and all recommendations
will be made from this pool of items. This can be used,
for example, to restrict the recommendations to items
within a particular category or genre. By default,
recommendations are made from all items present when the
model was trained.
new_user_data : SFrame, optional
``new_user_data`` may give additional user data to the
model. If present, scoring is done with reference to this
new information. If there is any overlap with the side
information present at training time, then this new side
data is preferred. Must be in the same format as the user
data passed to ``create``.
new_item_data : SFrame, optional
``new_item_data`` may give additional item data to the
model. If present, scoring is done with reference to this
new information. If there is any overlap with the side
information present at training time, then this new side
data is preferred. Must be in the same format as the item
data passed to ``create``.
exclude : SFrame, optional
An :class:`~turicreate.SFrame` of items or user / item
pairs. The column names must be equal to the user and
item columns of the main data, and it provides the model
with user/item pairs to exclude from the recommendations.
These user-item-pairs are always excluded from the
predictions, even if exclude_known is False.
exclude_known : bool, optional
By default, all user-item interactions previously seen in
the training data, or in any new data provided using
new_observation_data.., are excluded from the
recommendations. Passing in ``exclude_known = False``
overrides this behavior.
diversity : non-negative float, optional
If given, then the recommend function attempts chooses a set
of `k` items that are both highly scored and different from
other items in that set. It does this by first retrieving
``k*(1+diversity)`` recommended items, then randomly
choosing a diverse set from these items. Suggested values
for diversity are between 1 and 3.
random_seed : int, optional
If diversity is larger than 0, then some randomness is used;
this controls the random seed to use for randomization. If
None, then it will be different each time.
verbose : bool, optional
If True, print the progress of generating recommendation.
Returns
-------
out : SFrame
A SFrame with the top ranked items for each user. The
columns are: ``item_id``, *score*, and *rank*, where
``user_id`` and ``item_id`` match the user and item column
names specified at training time. The rank column is
between 1 and ``k`` and gives the relative score of that
item. The value of score depends on the method used for
recommendations.
observed_items: list, SArray, or SFrame
"""
column_types = self._get_data_schema()
user_id = self.user_id
item_id = self.item_id
user_type = column_types[user_id]
item_type = column_types[item_id]
if not hasattr(self, "_implicit_user_name"):
import hashlib
import time
self._implicit_user_name = None #("implicit-user-%s"
# % hashlib.md5("%0.20f" % time.time()).hexdigest()[:12])
if isinstance(observed_items, list):
observed_items = _SArray(observed_items, dtype = item_type)
if isinstance(observed_items, _SArray):
observed_items = _SFrame({self.item_id : observed_items})
if not isinstance(observed_items, _SFrame):
raise TypeError("observed_items must be a list or SArray of items, or an SFrame of items "
"and optionally ratings or other interaction information.")
# Don't modify the user's argument (if it's an SFrame).
observed_items = observed_items.copy()
# If a user id is present, then use that as the query user id
# (making sure there is only one present). If not, then use
# the local fake user id.
if user_id in observed_items.column_names():
main_user_value = observed_items[user_id][0]
if (observed_items[user_id] != main_user_value).any():
raise ValueError("To recommend items for more than one user, use `recommend()` and "
"supply new interactions using new_observation_data.")
users = _SArray([main_user_value], dtype = user_type)
else:
users = _SArray([self._implicit_user_name], dtype = user_type)
observed_items[user_id] = self._implicit_user_name
if observed_items[user_id].dtype != user_type:
observed_items[user_id] = observed_items[user_id].astype(user_type)
# Check the rest of the arguments.
if exclude is not None:
if isinstance(exclude, list):
exclude = _SArray(exclude, dtype = item_type)
if isinstance(exclude, _SArray):
exclude = _SFrame({item_id : exclude})
if user_id not in exclude.column_names():
exclude[user_id] = self._implicit_user_name
exclude[user_id] = exclude[user_id].astype(user_type)
recommendations = self.recommend(
users = users,
new_observation_data = observed_items,
k = k,
items = items,
new_user_data = new_user_data,
new_item_data = new_item_data,
exclude_known = exclude_known,
diversity = diversity,
random_seed = random_seed,
verbose = verbose)
del recommendations[user_id]
return recommendations | python | def recommend_from_interactions(
self, observed_items, k=10, exclude=None, items=None,
new_user_data=None, new_item_data=None,
exclude_known=True, diversity=0, random_seed=None,
verbose=True):
"""
Recommend the ``k`` highest scored items based on the
interactions given in `observed_items.`
Parameters
----------
observed_items : SArray, SFrame, or list
A list/SArray of items to use to make recommendations, or
an SFrame of items and optionally ratings and/or other
interaction data. The model will then recommend the most
similar items to those given. If ``observed_items`` has a user
column, then it must be only one user, and the additional
interaction data stored in the model is also used to make
recommendations.
k : int, optional
The number of recommendations to generate.
items : SArray, SFrame, or list, optional
Restricts the items from which recommendations can be
made. ``items`` must be an SArray, list, or SFrame with a
single column containing items, and all recommendations
will be made from this pool of items. This can be used,
for example, to restrict the recommendations to items
within a particular category or genre. By default,
recommendations are made from all items present when the
model was trained.
new_user_data : SFrame, optional
``new_user_data`` may give additional user data to the
model. If present, scoring is done with reference to this
new information. If there is any overlap with the side
information present at training time, then this new side
data is preferred. Must be in the same format as the user
data passed to ``create``.
new_item_data : SFrame, optional
``new_item_data`` may give additional item data to the
model. If present, scoring is done with reference to this
new information. If there is any overlap with the side
information present at training time, then this new side
data is preferred. Must be in the same format as the item
data passed to ``create``.
exclude : SFrame, optional
An :class:`~turicreate.SFrame` of items or user / item
pairs. The column names must be equal to the user and
item columns of the main data, and it provides the model
with user/item pairs to exclude from the recommendations.
These user-item-pairs are always excluded from the
predictions, even if exclude_known is False.
exclude_known : bool, optional
By default, all user-item interactions previously seen in
the training data, or in any new data provided using
new_observation_data.., are excluded from the
recommendations. Passing in ``exclude_known = False``
overrides this behavior.
diversity : non-negative float, optional
If given, then the recommend function attempts chooses a set
of `k` items that are both highly scored and different from
other items in that set. It does this by first retrieving
``k*(1+diversity)`` recommended items, then randomly
choosing a diverse set from these items. Suggested values
for diversity are between 1 and 3.
random_seed : int, optional
If diversity is larger than 0, then some randomness is used;
this controls the random seed to use for randomization. If
None, then it will be different each time.
verbose : bool, optional
If True, print the progress of generating recommendation.
Returns
-------
out : SFrame
A SFrame with the top ranked items for each user. The
columns are: ``item_id``, *score*, and *rank*, where
``user_id`` and ``item_id`` match the user and item column
names specified at training time. The rank column is
between 1 and ``k`` and gives the relative score of that
item. The value of score depends on the method used for
recommendations.
observed_items: list, SArray, or SFrame
"""
column_types = self._get_data_schema()
user_id = self.user_id
item_id = self.item_id
user_type = column_types[user_id]
item_type = column_types[item_id]
if not hasattr(self, "_implicit_user_name"):
import hashlib
import time
self._implicit_user_name = None #("implicit-user-%s"
# % hashlib.md5("%0.20f" % time.time()).hexdigest()[:12])
if isinstance(observed_items, list):
observed_items = _SArray(observed_items, dtype = item_type)
if isinstance(observed_items, _SArray):
observed_items = _SFrame({self.item_id : observed_items})
if not isinstance(observed_items, _SFrame):
raise TypeError("observed_items must be a list or SArray of items, or an SFrame of items "
"and optionally ratings or other interaction information.")
# Don't modify the user's argument (if it's an SFrame).
observed_items = observed_items.copy()
# If a user id is present, then use that as the query user id
# (making sure there is only one present). If not, then use
# the local fake user id.
if user_id in observed_items.column_names():
main_user_value = observed_items[user_id][0]
if (observed_items[user_id] != main_user_value).any():
raise ValueError("To recommend items for more than one user, use `recommend()` and "
"supply new interactions using new_observation_data.")
users = _SArray([main_user_value], dtype = user_type)
else:
users = _SArray([self._implicit_user_name], dtype = user_type)
observed_items[user_id] = self._implicit_user_name
if observed_items[user_id].dtype != user_type:
observed_items[user_id] = observed_items[user_id].astype(user_type)
# Check the rest of the arguments.
if exclude is not None:
if isinstance(exclude, list):
exclude = _SArray(exclude, dtype = item_type)
if isinstance(exclude, _SArray):
exclude = _SFrame({item_id : exclude})
if user_id not in exclude.column_names():
exclude[user_id] = self._implicit_user_name
exclude[user_id] = exclude[user_id].astype(user_type)
recommendations = self.recommend(
users = users,
new_observation_data = observed_items,
k = k,
items = items,
new_user_data = new_user_data,
new_item_data = new_item_data,
exclude_known = exclude_known,
diversity = diversity,
random_seed = random_seed,
verbose = verbose)
del recommendations[user_id]
return recommendations | [
"def",
"recommend_from_interactions",
"(",
"self",
",",
"observed_items",
",",
"k",
"=",
"10",
",",
"exclude",
"=",
"None",
",",
"items",
"=",
"None",
",",
"new_user_data",
"=",
"None",
",",
"new_item_data",
"=",
"None",
",",
"exclude_known",
"=",
"True",
",",
"diversity",
"=",
"0",
",",
"random_seed",
"=",
"None",
",",
"verbose",
"=",
"True",
")",
":",
"column_types",
"=",
"self",
".",
"_get_data_schema",
"(",
")",
"user_id",
"=",
"self",
".",
"user_id",
"item_id",
"=",
"self",
".",
"item_id",
"user_type",
"=",
"column_types",
"[",
"user_id",
"]",
"item_type",
"=",
"column_types",
"[",
"item_id",
"]",
"if",
"not",
"hasattr",
"(",
"self",
",",
"\"_implicit_user_name\"",
")",
":",
"import",
"hashlib",
"import",
"time",
"self",
".",
"_implicit_user_name",
"=",
"None",
"#(\"implicit-user-%s\"",
"# % hashlib.md5(\"%0.20f\" % time.time()).hexdigest()[:12])",
"if",
"isinstance",
"(",
"observed_items",
",",
"list",
")",
":",
"observed_items",
"=",
"_SArray",
"(",
"observed_items",
",",
"dtype",
"=",
"item_type",
")",
"if",
"isinstance",
"(",
"observed_items",
",",
"_SArray",
")",
":",
"observed_items",
"=",
"_SFrame",
"(",
"{",
"self",
".",
"item_id",
":",
"observed_items",
"}",
")",
"if",
"not",
"isinstance",
"(",
"observed_items",
",",
"_SFrame",
")",
":",
"raise",
"TypeError",
"(",
"\"observed_items must be a list or SArray of items, or an SFrame of items \"",
"\"and optionally ratings or other interaction information.\"",
")",
"# Don't modify the user's argument (if it's an SFrame).",
"observed_items",
"=",
"observed_items",
".",
"copy",
"(",
")",
"# If a user id is present, then use that as the query user id",
"# (making sure there is only one present). If not, then use",
"# the local fake user id.",
"if",
"user_id",
"in",
"observed_items",
".",
"column_names",
"(",
")",
":",
"main_user_value",
"=",
"observed_items",
"[",
"user_id",
"]",
"[",
"0",
"]",
"if",
"(",
"observed_items",
"[",
"user_id",
"]",
"!=",
"main_user_value",
")",
".",
"any",
"(",
")",
":",
"raise",
"ValueError",
"(",
"\"To recommend items for more than one user, use `recommend()` and \"",
"\"supply new interactions using new_observation_data.\"",
")",
"users",
"=",
"_SArray",
"(",
"[",
"main_user_value",
"]",
",",
"dtype",
"=",
"user_type",
")",
"else",
":",
"users",
"=",
"_SArray",
"(",
"[",
"self",
".",
"_implicit_user_name",
"]",
",",
"dtype",
"=",
"user_type",
")",
"observed_items",
"[",
"user_id",
"]",
"=",
"self",
".",
"_implicit_user_name",
"if",
"observed_items",
"[",
"user_id",
"]",
".",
"dtype",
"!=",
"user_type",
":",
"observed_items",
"[",
"user_id",
"]",
"=",
"observed_items",
"[",
"user_id",
"]",
".",
"astype",
"(",
"user_type",
")",
"# Check the rest of the arguments.",
"if",
"exclude",
"is",
"not",
"None",
":",
"if",
"isinstance",
"(",
"exclude",
",",
"list",
")",
":",
"exclude",
"=",
"_SArray",
"(",
"exclude",
",",
"dtype",
"=",
"item_type",
")",
"if",
"isinstance",
"(",
"exclude",
",",
"_SArray",
")",
":",
"exclude",
"=",
"_SFrame",
"(",
"{",
"item_id",
":",
"exclude",
"}",
")",
"if",
"user_id",
"not",
"in",
"exclude",
".",
"column_names",
"(",
")",
":",
"exclude",
"[",
"user_id",
"]",
"=",
"self",
".",
"_implicit_user_name",
"exclude",
"[",
"user_id",
"]",
"=",
"exclude",
"[",
"user_id",
"]",
".",
"astype",
"(",
"user_type",
")",
"recommendations",
"=",
"self",
".",
"recommend",
"(",
"users",
"=",
"users",
",",
"new_observation_data",
"=",
"observed_items",
",",
"k",
"=",
"k",
",",
"items",
"=",
"items",
",",
"new_user_data",
"=",
"new_user_data",
",",
"new_item_data",
"=",
"new_item_data",
",",
"exclude_known",
"=",
"exclude_known",
",",
"diversity",
"=",
"diversity",
",",
"random_seed",
"=",
"random_seed",
",",
"verbose",
"=",
"verbose",
")",
"del",
"recommendations",
"[",
"user_id",
"]",
"return",
"recommendations"
] | Recommend the ``k`` highest scored items based on the
interactions given in `observed_items.`
Parameters
----------
observed_items : SArray, SFrame, or list
A list/SArray of items to use to make recommendations, or
an SFrame of items and optionally ratings and/or other
interaction data. The model will then recommend the most
similar items to those given. If ``observed_items`` has a user
column, then it must be only one user, and the additional
interaction data stored in the model is also used to make
recommendations.
k : int, optional
The number of recommendations to generate.
items : SArray, SFrame, or list, optional
Restricts the items from which recommendations can be
made. ``items`` must be an SArray, list, or SFrame with a
single column containing items, and all recommendations
will be made from this pool of items. This can be used,
for example, to restrict the recommendations to items
within a particular category or genre. By default,
recommendations are made from all items present when the
model was trained.
new_user_data : SFrame, optional
``new_user_data`` may give additional user data to the
model. If present, scoring is done with reference to this
new information. If there is any overlap with the side
information present at training time, then this new side
data is preferred. Must be in the same format as the user
data passed to ``create``.
new_item_data : SFrame, optional
``new_item_data`` may give additional item data to the
model. If present, scoring is done with reference to this
new information. If there is any overlap with the side
information present at training time, then this new side
data is preferred. Must be in the same format as the item
data passed to ``create``.
exclude : SFrame, optional
An :class:`~turicreate.SFrame` of items or user / item
pairs. The column names must be equal to the user and
item columns of the main data, and it provides the model
with user/item pairs to exclude from the recommendations.
These user-item-pairs are always excluded from the
predictions, even if exclude_known is False.
exclude_known : bool, optional
By default, all user-item interactions previously seen in
the training data, or in any new data provided using
new_observation_data.., are excluded from the
recommendations. Passing in ``exclude_known = False``
overrides this behavior.
diversity : non-negative float, optional
If given, then the recommend function attempts chooses a set
of `k` items that are both highly scored and different from
other items in that set. It does this by first retrieving
``k*(1+diversity)`` recommended items, then randomly
choosing a diverse set from these items. Suggested values
for diversity are between 1 and 3.
random_seed : int, optional
If diversity is larger than 0, then some randomness is used;
this controls the random seed to use for randomization. If
None, then it will be different each time.
verbose : bool, optional
If True, print the progress of generating recommendation.
Returns
-------
out : SFrame
A SFrame with the top ranked items for each user. The
columns are: ``item_id``, *score*, and *rank*, where
``user_id`` and ``item_id`` match the user and item column
names specified at training time. The rank column is
between 1 and ``k`` and gives the relative score of that
item. The value of score depends on the method used for
recommendations.
observed_items: list, SArray, or SFrame | [
"Recommend",
"the",
"k",
"highest",
"scored",
"items",
"based",
"on",
"the",
"interactions",
"given",
"in",
"observed_items",
"."
] | 74514c3f99e25b46f22c6e02977fe3da69221c2e | https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/unity/python/turicreate/toolkits/recommender/util.py#L1310-L1470 |
28,980 | apple/turicreate | src/unity/python/turicreate/toolkits/recommender/util.py | _Recommender.evaluate_precision_recall | def evaluate_precision_recall(self, dataset, cutoffs=list(range(1,11,1))+list(range(11,50,5)),
skip_set=None, exclude_known=True,
verbose=True, **kwargs):
"""
Compute a model's precision and recall scores for a particular dataset.
Parameters
----------
dataset : SFrame
An SFrame in the same format as the one used during training.
This will be compared to the model's recommendations, which exclude
the (user, item) pairs seen at training time.
cutoffs : list, optional
A list of cutoff values for which one wants to evaluate precision
and recall, i.e. the value of k in "precision at k".
skip_set : SFrame, optional
Passed to :meth:`recommend` as ``exclude``.
exclude_known : bool, optional
Passed to :meth:`recommend` as ``exclude_known``. If True, exclude
training item from recommendation.
verbose : bool, optional
Enables verbose output. Default is verbose.
**kwargs
Additional keyword arguments are passed to the recommend
function, whose returned recommendations are used for evaluating
precision and recall of the model.
Returns
-------
out : dict
Contains the precision and recall at each cutoff value and each
user in ``dataset``.
Examples
--------
>>> import turicreate as tc
>>> sf = tc.SFrame('https://static.turi.com/datasets/audioscrobbler')
>>> train, test = tc.recommender.util.random_split_by_user(sf)
>>> m = tc.recommender.create(train)
>>> m.evaluate_precision_recall(test)
See Also
--------
turicreate.recommender.util.precision_recall_by_user
"""
user_column = self.user_id
item_column = self.item_id
assert user_column in dataset.column_names() and \
item_column in dataset.column_names(), \
'Provided data set must have a column pertaining to user ids and \
item ids, similar to what we had during training.'
dataset = self.__prepare_dataset_parameter(dataset)
users = dataset[self.user_id].unique()
dataset = dataset[[self.user_id, self.item_id]]
recs = self.recommend(users=users, k=max(cutoffs), exclude=skip_set,
exclude_known=exclude_known,
verbose=verbose,
**kwargs)
precision_recall_by_user = self.__proxy__.precision_recall_by_user(dataset, recs, cutoffs)
ret = {'precision_recall_by_user': precision_recall_by_user}
pr_agg = precision_recall_by_user.groupby(
'cutoff',
operations={'precision' : _Aggregate.MEAN('precision'),
'recall' : _Aggregate.MEAN('recall')})
pr_agg = pr_agg[['cutoff', 'precision', 'recall']]
ret["precision_recall_overall"] = pr_agg.sort("cutoff")
return ret | python | def evaluate_precision_recall(self, dataset, cutoffs=list(range(1,11,1))+list(range(11,50,5)),
skip_set=None, exclude_known=True,
verbose=True, **kwargs):
"""
Compute a model's precision and recall scores for a particular dataset.
Parameters
----------
dataset : SFrame
An SFrame in the same format as the one used during training.
This will be compared to the model's recommendations, which exclude
the (user, item) pairs seen at training time.
cutoffs : list, optional
A list of cutoff values for which one wants to evaluate precision
and recall, i.e. the value of k in "precision at k".
skip_set : SFrame, optional
Passed to :meth:`recommend` as ``exclude``.
exclude_known : bool, optional
Passed to :meth:`recommend` as ``exclude_known``. If True, exclude
training item from recommendation.
verbose : bool, optional
Enables verbose output. Default is verbose.
**kwargs
Additional keyword arguments are passed to the recommend
function, whose returned recommendations are used for evaluating
precision and recall of the model.
Returns
-------
out : dict
Contains the precision and recall at each cutoff value and each
user in ``dataset``.
Examples
--------
>>> import turicreate as tc
>>> sf = tc.SFrame('https://static.turi.com/datasets/audioscrobbler')
>>> train, test = tc.recommender.util.random_split_by_user(sf)
>>> m = tc.recommender.create(train)
>>> m.evaluate_precision_recall(test)
See Also
--------
turicreate.recommender.util.precision_recall_by_user
"""
user_column = self.user_id
item_column = self.item_id
assert user_column in dataset.column_names() and \
item_column in dataset.column_names(), \
'Provided data set must have a column pertaining to user ids and \
item ids, similar to what we had during training.'
dataset = self.__prepare_dataset_parameter(dataset)
users = dataset[self.user_id].unique()
dataset = dataset[[self.user_id, self.item_id]]
recs = self.recommend(users=users, k=max(cutoffs), exclude=skip_set,
exclude_known=exclude_known,
verbose=verbose,
**kwargs)
precision_recall_by_user = self.__proxy__.precision_recall_by_user(dataset, recs, cutoffs)
ret = {'precision_recall_by_user': precision_recall_by_user}
pr_agg = precision_recall_by_user.groupby(
'cutoff',
operations={'precision' : _Aggregate.MEAN('precision'),
'recall' : _Aggregate.MEAN('recall')})
pr_agg = pr_agg[['cutoff', 'precision', 'recall']]
ret["precision_recall_overall"] = pr_agg.sort("cutoff")
return ret | [
"def",
"evaluate_precision_recall",
"(",
"self",
",",
"dataset",
",",
"cutoffs",
"=",
"list",
"(",
"range",
"(",
"1",
",",
"11",
",",
"1",
")",
")",
"+",
"list",
"(",
"range",
"(",
"11",
",",
"50",
",",
"5",
")",
")",
",",
"skip_set",
"=",
"None",
",",
"exclude_known",
"=",
"True",
",",
"verbose",
"=",
"True",
",",
"*",
"*",
"kwargs",
")",
":",
"user_column",
"=",
"self",
".",
"user_id",
"item_column",
"=",
"self",
".",
"item_id",
"assert",
"user_column",
"in",
"dataset",
".",
"column_names",
"(",
")",
"and",
"item_column",
"in",
"dataset",
".",
"column_names",
"(",
")",
",",
"'Provided data set must have a column pertaining to user ids and \\\n item ids, similar to what we had during training.'",
"dataset",
"=",
"self",
".",
"__prepare_dataset_parameter",
"(",
"dataset",
")",
"users",
"=",
"dataset",
"[",
"self",
".",
"user_id",
"]",
".",
"unique",
"(",
")",
"dataset",
"=",
"dataset",
"[",
"[",
"self",
".",
"user_id",
",",
"self",
".",
"item_id",
"]",
"]",
"recs",
"=",
"self",
".",
"recommend",
"(",
"users",
"=",
"users",
",",
"k",
"=",
"max",
"(",
"cutoffs",
")",
",",
"exclude",
"=",
"skip_set",
",",
"exclude_known",
"=",
"exclude_known",
",",
"verbose",
"=",
"verbose",
",",
"*",
"*",
"kwargs",
")",
"precision_recall_by_user",
"=",
"self",
".",
"__proxy__",
".",
"precision_recall_by_user",
"(",
"dataset",
",",
"recs",
",",
"cutoffs",
")",
"ret",
"=",
"{",
"'precision_recall_by_user'",
":",
"precision_recall_by_user",
"}",
"pr_agg",
"=",
"precision_recall_by_user",
".",
"groupby",
"(",
"'cutoff'",
",",
"operations",
"=",
"{",
"'precision'",
":",
"_Aggregate",
".",
"MEAN",
"(",
"'precision'",
")",
",",
"'recall'",
":",
"_Aggregate",
".",
"MEAN",
"(",
"'recall'",
")",
"}",
")",
"pr_agg",
"=",
"pr_agg",
"[",
"[",
"'cutoff'",
",",
"'precision'",
",",
"'recall'",
"]",
"]",
"ret",
"[",
"\"precision_recall_overall\"",
"]",
"=",
"pr_agg",
".",
"sort",
"(",
"\"cutoff\"",
")",
"return",
"ret"
] | Compute a model's precision and recall scores for a particular dataset.
Parameters
----------
dataset : SFrame
An SFrame in the same format as the one used during training.
This will be compared to the model's recommendations, which exclude
the (user, item) pairs seen at training time.
cutoffs : list, optional
A list of cutoff values for which one wants to evaluate precision
and recall, i.e. the value of k in "precision at k".
skip_set : SFrame, optional
Passed to :meth:`recommend` as ``exclude``.
exclude_known : bool, optional
Passed to :meth:`recommend` as ``exclude_known``. If True, exclude
training item from recommendation.
verbose : bool, optional
Enables verbose output. Default is verbose.
**kwargs
Additional keyword arguments are passed to the recommend
function, whose returned recommendations are used for evaluating
precision and recall of the model.
Returns
-------
out : dict
Contains the precision and recall at each cutoff value and each
user in ``dataset``.
Examples
--------
>>> import turicreate as tc
>>> sf = tc.SFrame('https://static.turi.com/datasets/audioscrobbler')
>>> train, test = tc.recommender.util.random_split_by_user(sf)
>>> m = tc.recommender.create(train)
>>> m.evaluate_precision_recall(test)
See Also
--------
turicreate.recommender.util.precision_recall_by_user | [
"Compute",
"a",
"model",
"s",
"precision",
"and",
"recall",
"scores",
"for",
"a",
"particular",
"dataset",
"."
] | 74514c3f99e25b46f22c6e02977fe3da69221c2e | https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/unity/python/turicreate/toolkits/recommender/util.py#L1492-L1574 |
28,981 | apple/turicreate | src/unity/python/turicreate/toolkits/recommender/util.py | _Recommender.evaluate_rmse | def evaluate_rmse(self, dataset, target):
"""
Evaluate the prediction error for each user-item pair in the given data
set.
Parameters
----------
dataset : SFrame
An SFrame in the same format as the one used during training.
target : str
The name of the target rating column in `dataset`.
Returns
-------
out : dict
A dictionary with three items: 'rmse_by_user' and 'rmse_by_item',
which are SFrames containing the average rmse for each user and
item, respectively; and 'rmse_overall', which is a float.
Examples
--------
>>> import turicreate as tc
>>> sf = tc.SFrame('https://static.turi.com/datasets/audioscrobbler')
>>> train, test = tc.recommender.util.random_split_by_user(sf)
>>> m = tc.recommender.create(train, target='target')
>>> m.evaluate_rmse(test, target='target')
See Also
--------
turicreate.evaluation.rmse
"""
assert target in dataset.column_names(), \
'Provided dataset must contain a target column with the same \
name as the target used during training.'
y = dataset[target]
yhat = self.predict(dataset)
user_column = self.user_id
item_column = self.item_id
assert user_column in dataset.column_names() and \
item_column in dataset.column_names(), \
'Provided data set must have a column pertaining to user ids and \
item ids, similar to what we had during training.'
result = dataset[[user_column, item_column]]
result['sq_error'] = (y - yhat) * (y - yhat)
rmse_by_user = result.groupby(user_column,
{'rmse':_turicreate.aggregate.AVG('sq_error'),
'count':_turicreate.aggregate.COUNT})
rmse_by_user['rmse'] = rmse_by_user['rmse'].apply(lambda x: x**.5)
rmse_by_item = result.groupby(item_column,
{'rmse':_turicreate.aggregate.AVG('sq_error'),
'count':_turicreate.aggregate.COUNT})
rmse_by_item['rmse'] = rmse_by_item['rmse'].apply(lambda x: x**.5)
overall_rmse = result['sq_error'].mean() ** .5
return {'rmse_by_user': rmse_by_user,
'rmse_by_item': rmse_by_item,
'rmse_overall': overall_rmse} | python | def evaluate_rmse(self, dataset, target):
"""
Evaluate the prediction error for each user-item pair in the given data
set.
Parameters
----------
dataset : SFrame
An SFrame in the same format as the one used during training.
target : str
The name of the target rating column in `dataset`.
Returns
-------
out : dict
A dictionary with three items: 'rmse_by_user' and 'rmse_by_item',
which are SFrames containing the average rmse for each user and
item, respectively; and 'rmse_overall', which is a float.
Examples
--------
>>> import turicreate as tc
>>> sf = tc.SFrame('https://static.turi.com/datasets/audioscrobbler')
>>> train, test = tc.recommender.util.random_split_by_user(sf)
>>> m = tc.recommender.create(train, target='target')
>>> m.evaluate_rmse(test, target='target')
See Also
--------
turicreate.evaluation.rmse
"""
assert target in dataset.column_names(), \
'Provided dataset must contain a target column with the same \
name as the target used during training.'
y = dataset[target]
yhat = self.predict(dataset)
user_column = self.user_id
item_column = self.item_id
assert user_column in dataset.column_names() and \
item_column in dataset.column_names(), \
'Provided data set must have a column pertaining to user ids and \
item ids, similar to what we had during training.'
result = dataset[[user_column, item_column]]
result['sq_error'] = (y - yhat) * (y - yhat)
rmse_by_user = result.groupby(user_column,
{'rmse':_turicreate.aggregate.AVG('sq_error'),
'count':_turicreate.aggregate.COUNT})
rmse_by_user['rmse'] = rmse_by_user['rmse'].apply(lambda x: x**.5)
rmse_by_item = result.groupby(item_column,
{'rmse':_turicreate.aggregate.AVG('sq_error'),
'count':_turicreate.aggregate.COUNT})
rmse_by_item['rmse'] = rmse_by_item['rmse'].apply(lambda x: x**.5)
overall_rmse = result['sq_error'].mean() ** .5
return {'rmse_by_user': rmse_by_user,
'rmse_by_item': rmse_by_item,
'rmse_overall': overall_rmse} | [
"def",
"evaluate_rmse",
"(",
"self",
",",
"dataset",
",",
"target",
")",
":",
"assert",
"target",
"in",
"dataset",
".",
"column_names",
"(",
")",
",",
"'Provided dataset must contain a target column with the same \\\n name as the target used during training.'",
"y",
"=",
"dataset",
"[",
"target",
"]",
"yhat",
"=",
"self",
".",
"predict",
"(",
"dataset",
")",
"user_column",
"=",
"self",
".",
"user_id",
"item_column",
"=",
"self",
".",
"item_id",
"assert",
"user_column",
"in",
"dataset",
".",
"column_names",
"(",
")",
"and",
"item_column",
"in",
"dataset",
".",
"column_names",
"(",
")",
",",
"'Provided data set must have a column pertaining to user ids and \\\n item ids, similar to what we had during training.'",
"result",
"=",
"dataset",
"[",
"[",
"user_column",
",",
"item_column",
"]",
"]",
"result",
"[",
"'sq_error'",
"]",
"=",
"(",
"y",
"-",
"yhat",
")",
"*",
"(",
"y",
"-",
"yhat",
")",
"rmse_by_user",
"=",
"result",
".",
"groupby",
"(",
"user_column",
",",
"{",
"'rmse'",
":",
"_turicreate",
".",
"aggregate",
".",
"AVG",
"(",
"'sq_error'",
")",
",",
"'count'",
":",
"_turicreate",
".",
"aggregate",
".",
"COUNT",
"}",
")",
"rmse_by_user",
"[",
"'rmse'",
"]",
"=",
"rmse_by_user",
"[",
"'rmse'",
"]",
".",
"apply",
"(",
"lambda",
"x",
":",
"x",
"**",
".5",
")",
"rmse_by_item",
"=",
"result",
".",
"groupby",
"(",
"item_column",
",",
"{",
"'rmse'",
":",
"_turicreate",
".",
"aggregate",
".",
"AVG",
"(",
"'sq_error'",
")",
",",
"'count'",
":",
"_turicreate",
".",
"aggregate",
".",
"COUNT",
"}",
")",
"rmse_by_item",
"[",
"'rmse'",
"]",
"=",
"rmse_by_item",
"[",
"'rmse'",
"]",
".",
"apply",
"(",
"lambda",
"x",
":",
"x",
"**",
".5",
")",
"overall_rmse",
"=",
"result",
"[",
"'sq_error'",
"]",
".",
"mean",
"(",
")",
"**",
".5",
"return",
"{",
"'rmse_by_user'",
":",
"rmse_by_user",
",",
"'rmse_by_item'",
":",
"rmse_by_item",
",",
"'rmse_overall'",
":",
"overall_rmse",
"}"
] | Evaluate the prediction error for each user-item pair in the given data
set.
Parameters
----------
dataset : SFrame
An SFrame in the same format as the one used during training.
target : str
The name of the target rating column in `dataset`.
Returns
-------
out : dict
A dictionary with three items: 'rmse_by_user' and 'rmse_by_item',
which are SFrames containing the average rmse for each user and
item, respectively; and 'rmse_overall', which is a float.
Examples
--------
>>> import turicreate as tc
>>> sf = tc.SFrame('https://static.turi.com/datasets/audioscrobbler')
>>> train, test = tc.recommender.util.random_split_by_user(sf)
>>> m = tc.recommender.create(train, target='target')
>>> m.evaluate_rmse(test, target='target')
See Also
--------
turicreate.evaluation.rmse | [
"Evaluate",
"the",
"prediction",
"error",
"for",
"each",
"user",
"-",
"item",
"pair",
"in",
"the",
"given",
"data",
"set",
"."
] | 74514c3f99e25b46f22c6e02977fe3da69221c2e | https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/unity/python/turicreate/toolkits/recommender/util.py#L1576-L1635 |
28,982 | apple/turicreate | src/unity/python/turicreate/toolkits/recommender/util.py | _Recommender.evaluate | def evaluate(self, dataset, metric='auto',
exclude_known_for_precision_recall=True,
target=None,
verbose=True, **kwargs):
r"""
Evaluate the model's ability to make rating predictions or
recommendations.
If the model is trained to predict a particular target, the
default metric used for model comparison is root-mean-squared error
(RMSE). Suppose :math:`y` and :math:`\widehat{y}` are vectors of length
:math:`N`, where :math:`y` contains the actual ratings and
:math:`\widehat{y}` the predicted ratings. Then the RMSE is defined as
.. math::
RMSE = \sqrt{\frac{1}{N} \sum_{i=1}^N (\widehat{y}_i - y_i)^2} .
If the model was not trained on a target column, the default metrics for
model comparison are precision and recall. Let
:math:`p_k` be a vector of the :math:`k` highest ranked recommendations
for a particular user, and let :math:`a` be the set of items for that
user in the groundtruth `dataset`. The "precision at cutoff k" is
defined as
.. math:: P(k) = \frac{ | a \cap p_k | }{k}
while "recall at cutoff k" is defined as
.. math:: R(k) = \frac{ | a \cap p_k | }{|a|}
Parameters
----------
dataset : SFrame
An SFrame that is in the same format as provided for training.
metric : str, {'auto', 'rmse', 'precision_recall'}, optional
Metric to use for evaluation. The default automatically chooses
'rmse' for models trained with a `target`, and 'precision_recall'
otherwise.
exclude_known_for_precision_recall : bool, optional
A useful option for evaluating precision-recall. Recommender models
have the option to exclude items seen in the training data from the
final recommendation list. Set this option to True when evaluating
on test data, and False when evaluating precision-recall on training
data.
target : str, optional
The name of the target column for evaluating rmse. If the model is
trained with a target column, the default is to using the same
column. If the model is trained without a target column and `metric`
is set to 'rmse', this option must provided by user.
verbose : bool, optional
Enables verbose output. Default is verbose.
**kwargs
When `metric` is set to 'precision_recall', these parameters
are passed on to :meth:`evaluate_precision_recall`.
Returns
-------
out : SFrame or dict
Results from the model evaluation procedure. If the model is trained
on a target (i.e. RMSE is the evaluation criterion), a dictionary
with three items is returned: items *rmse_by_user* and
*rmse_by_item* are SFrames with per-user and per-item RMSE, while
*rmse_overall* is the overall RMSE (a float). If the model is
trained without a target (i.e. precision and recall are the
evaluation criteria) an :py:class:`~turicreate.SFrame` is returned
with both of these metrics for each user at several cutoff values.
Examples
--------
>>> import turicreate as tc
>>> sf = tc.SFrame('https://static.turi.com/datasets/audioscrobbler')
>>> train, test = tc.recommender.util.random_split_by_user(sf)
>>> m = tc.recommender.create(train, target='target')
>>> eval = m.evaluate(test)
See Also
--------
evaluate_precision_recall, evaluate_rmse, precision_recall_by_user
"""
ret = {}
dataset = self.__prepare_dataset_parameter(dataset)
# If the model does not have a target column, compute prec-recall.
if metric in ['precision_recall', 'auto']:
results = self.evaluate_precision_recall(dataset,
exclude_known=exclude_known_for_precision_recall,
verbose=verbose,
**kwargs)
ret.update(results)
if verbose:
print("\nPrecision and recall summary statistics by cutoff")
print(results['precision_recall_by_user'].groupby('cutoff', \
{'mean_precision': _turicreate.aggregate.AVG('precision'),
'mean_recall': _turicreate.aggregate.AVG('recall')}).topk('cutoff', reverse=True))
if metric in ['rmse', 'auto']:
if target is None:
target = self.target
if target is None or target == "":
_logging.warning("Model trained without a target. Skipping RMSE computation.")
else:
results = self.evaluate_rmse(dataset, target)
ret.update(results)
if verbose:
print("\nOverall RMSE:", results['rmse_overall'])
print("\nPer User RMSE (best)")
print(results['rmse_by_user'].topk('rmse', 1, reverse=True))
print("\nPer User RMSE (worst)")
print(results['rmse_by_user'].topk('rmse', 1))
print("\nPer Item RMSE (best)")
print(results['rmse_by_item'].topk('rmse', 1, reverse=True))
print("\nPer Item RMSE (worst)")
print(results['rmse_by_item'].topk('rmse', 1))
if metric not in ['rmse', 'precision_recall', 'auto']:
raise ValueError('Unknown evaluation metric %s, supported metrics are [\"rmse\", \"precision_recall\"]' % metric)
return ret | python | def evaluate(self, dataset, metric='auto',
exclude_known_for_precision_recall=True,
target=None,
verbose=True, **kwargs):
r"""
Evaluate the model's ability to make rating predictions or
recommendations.
If the model is trained to predict a particular target, the
default metric used for model comparison is root-mean-squared error
(RMSE). Suppose :math:`y` and :math:`\widehat{y}` are vectors of length
:math:`N`, where :math:`y` contains the actual ratings and
:math:`\widehat{y}` the predicted ratings. Then the RMSE is defined as
.. math::
RMSE = \sqrt{\frac{1}{N} \sum_{i=1}^N (\widehat{y}_i - y_i)^2} .
If the model was not trained on a target column, the default metrics for
model comparison are precision and recall. Let
:math:`p_k` be a vector of the :math:`k` highest ranked recommendations
for a particular user, and let :math:`a` be the set of items for that
user in the groundtruth `dataset`. The "precision at cutoff k" is
defined as
.. math:: P(k) = \frac{ | a \cap p_k | }{k}
while "recall at cutoff k" is defined as
.. math:: R(k) = \frac{ | a \cap p_k | }{|a|}
Parameters
----------
dataset : SFrame
An SFrame that is in the same format as provided for training.
metric : str, {'auto', 'rmse', 'precision_recall'}, optional
Metric to use for evaluation. The default automatically chooses
'rmse' for models trained with a `target`, and 'precision_recall'
otherwise.
exclude_known_for_precision_recall : bool, optional
A useful option for evaluating precision-recall. Recommender models
have the option to exclude items seen in the training data from the
final recommendation list. Set this option to True when evaluating
on test data, and False when evaluating precision-recall on training
data.
target : str, optional
The name of the target column for evaluating rmse. If the model is
trained with a target column, the default is to using the same
column. If the model is trained without a target column and `metric`
is set to 'rmse', this option must provided by user.
verbose : bool, optional
Enables verbose output. Default is verbose.
**kwargs
When `metric` is set to 'precision_recall', these parameters
are passed on to :meth:`evaluate_precision_recall`.
Returns
-------
out : SFrame or dict
Results from the model evaluation procedure. If the model is trained
on a target (i.e. RMSE is the evaluation criterion), a dictionary
with three items is returned: items *rmse_by_user* and
*rmse_by_item* are SFrames with per-user and per-item RMSE, while
*rmse_overall* is the overall RMSE (a float). If the model is
trained without a target (i.e. precision and recall are the
evaluation criteria) an :py:class:`~turicreate.SFrame` is returned
with both of these metrics for each user at several cutoff values.
Examples
--------
>>> import turicreate as tc
>>> sf = tc.SFrame('https://static.turi.com/datasets/audioscrobbler')
>>> train, test = tc.recommender.util.random_split_by_user(sf)
>>> m = tc.recommender.create(train, target='target')
>>> eval = m.evaluate(test)
See Also
--------
evaluate_precision_recall, evaluate_rmse, precision_recall_by_user
"""
ret = {}
dataset = self.__prepare_dataset_parameter(dataset)
# If the model does not have a target column, compute prec-recall.
if metric in ['precision_recall', 'auto']:
results = self.evaluate_precision_recall(dataset,
exclude_known=exclude_known_for_precision_recall,
verbose=verbose,
**kwargs)
ret.update(results)
if verbose:
print("\nPrecision and recall summary statistics by cutoff")
print(results['precision_recall_by_user'].groupby('cutoff', \
{'mean_precision': _turicreate.aggregate.AVG('precision'),
'mean_recall': _turicreate.aggregate.AVG('recall')}).topk('cutoff', reverse=True))
if metric in ['rmse', 'auto']:
if target is None:
target = self.target
if target is None or target == "":
_logging.warning("Model trained without a target. Skipping RMSE computation.")
else:
results = self.evaluate_rmse(dataset, target)
ret.update(results)
if verbose:
print("\nOverall RMSE:", results['rmse_overall'])
print("\nPer User RMSE (best)")
print(results['rmse_by_user'].topk('rmse', 1, reverse=True))
print("\nPer User RMSE (worst)")
print(results['rmse_by_user'].topk('rmse', 1))
print("\nPer Item RMSE (best)")
print(results['rmse_by_item'].topk('rmse', 1, reverse=True))
print("\nPer Item RMSE (worst)")
print(results['rmse_by_item'].topk('rmse', 1))
if metric not in ['rmse', 'precision_recall', 'auto']:
raise ValueError('Unknown evaluation metric %s, supported metrics are [\"rmse\", \"precision_recall\"]' % metric)
return ret | [
"def",
"evaluate",
"(",
"self",
",",
"dataset",
",",
"metric",
"=",
"'auto'",
",",
"exclude_known_for_precision_recall",
"=",
"True",
",",
"target",
"=",
"None",
",",
"verbose",
"=",
"True",
",",
"*",
"*",
"kwargs",
")",
":",
"ret",
"=",
"{",
"}",
"dataset",
"=",
"self",
".",
"__prepare_dataset_parameter",
"(",
"dataset",
")",
"# If the model does not have a target column, compute prec-recall.",
"if",
"metric",
"in",
"[",
"'precision_recall'",
",",
"'auto'",
"]",
":",
"results",
"=",
"self",
".",
"evaluate_precision_recall",
"(",
"dataset",
",",
"exclude_known",
"=",
"exclude_known_for_precision_recall",
",",
"verbose",
"=",
"verbose",
",",
"*",
"*",
"kwargs",
")",
"ret",
".",
"update",
"(",
"results",
")",
"if",
"verbose",
":",
"print",
"(",
"\"\\nPrecision and recall summary statistics by cutoff\"",
")",
"print",
"(",
"results",
"[",
"'precision_recall_by_user'",
"]",
".",
"groupby",
"(",
"'cutoff'",
",",
"{",
"'mean_precision'",
":",
"_turicreate",
".",
"aggregate",
".",
"AVG",
"(",
"'precision'",
")",
",",
"'mean_recall'",
":",
"_turicreate",
".",
"aggregate",
".",
"AVG",
"(",
"'recall'",
")",
"}",
")",
".",
"topk",
"(",
"'cutoff'",
",",
"reverse",
"=",
"True",
")",
")",
"if",
"metric",
"in",
"[",
"'rmse'",
",",
"'auto'",
"]",
":",
"if",
"target",
"is",
"None",
":",
"target",
"=",
"self",
".",
"target",
"if",
"target",
"is",
"None",
"or",
"target",
"==",
"\"\"",
":",
"_logging",
".",
"warning",
"(",
"\"Model trained without a target. Skipping RMSE computation.\"",
")",
"else",
":",
"results",
"=",
"self",
".",
"evaluate_rmse",
"(",
"dataset",
",",
"target",
")",
"ret",
".",
"update",
"(",
"results",
")",
"if",
"verbose",
":",
"print",
"(",
"\"\\nOverall RMSE:\"",
",",
"results",
"[",
"'rmse_overall'",
"]",
")",
"print",
"(",
"\"\\nPer User RMSE (best)\"",
")",
"print",
"(",
"results",
"[",
"'rmse_by_user'",
"]",
".",
"topk",
"(",
"'rmse'",
",",
"1",
",",
"reverse",
"=",
"True",
")",
")",
"print",
"(",
"\"\\nPer User RMSE (worst)\"",
")",
"print",
"(",
"results",
"[",
"'rmse_by_user'",
"]",
".",
"topk",
"(",
"'rmse'",
",",
"1",
")",
")",
"print",
"(",
"\"\\nPer Item RMSE (best)\"",
")",
"print",
"(",
"results",
"[",
"'rmse_by_item'",
"]",
".",
"topk",
"(",
"'rmse'",
",",
"1",
",",
"reverse",
"=",
"True",
")",
")",
"print",
"(",
"\"\\nPer Item RMSE (worst)\"",
")",
"print",
"(",
"results",
"[",
"'rmse_by_item'",
"]",
".",
"topk",
"(",
"'rmse'",
",",
"1",
")",
")",
"if",
"metric",
"not",
"in",
"[",
"'rmse'",
",",
"'precision_recall'",
",",
"'auto'",
"]",
":",
"raise",
"ValueError",
"(",
"'Unknown evaluation metric %s, supported metrics are [\\\"rmse\\\", \\\"precision_recall\\\"]'",
"%",
"metric",
")",
"return",
"ret"
] | r"""
Evaluate the model's ability to make rating predictions or
recommendations.
If the model is trained to predict a particular target, the
default metric used for model comparison is root-mean-squared error
(RMSE). Suppose :math:`y` and :math:`\widehat{y}` are vectors of length
:math:`N`, where :math:`y` contains the actual ratings and
:math:`\widehat{y}` the predicted ratings. Then the RMSE is defined as
.. math::
RMSE = \sqrt{\frac{1}{N} \sum_{i=1}^N (\widehat{y}_i - y_i)^2} .
If the model was not trained on a target column, the default metrics for
model comparison are precision and recall. Let
:math:`p_k` be a vector of the :math:`k` highest ranked recommendations
for a particular user, and let :math:`a` be the set of items for that
user in the groundtruth `dataset`. The "precision at cutoff k" is
defined as
.. math:: P(k) = \frac{ | a \cap p_k | }{k}
while "recall at cutoff k" is defined as
.. math:: R(k) = \frac{ | a \cap p_k | }{|a|}
Parameters
----------
dataset : SFrame
An SFrame that is in the same format as provided for training.
metric : str, {'auto', 'rmse', 'precision_recall'}, optional
Metric to use for evaluation. The default automatically chooses
'rmse' for models trained with a `target`, and 'precision_recall'
otherwise.
exclude_known_for_precision_recall : bool, optional
A useful option for evaluating precision-recall. Recommender models
have the option to exclude items seen in the training data from the
final recommendation list. Set this option to True when evaluating
on test data, and False when evaluating precision-recall on training
data.
target : str, optional
The name of the target column for evaluating rmse. If the model is
trained with a target column, the default is to using the same
column. If the model is trained without a target column and `metric`
is set to 'rmse', this option must provided by user.
verbose : bool, optional
Enables verbose output. Default is verbose.
**kwargs
When `metric` is set to 'precision_recall', these parameters
are passed on to :meth:`evaluate_precision_recall`.
Returns
-------
out : SFrame or dict
Results from the model evaluation procedure. If the model is trained
on a target (i.e. RMSE is the evaluation criterion), a dictionary
with three items is returned: items *rmse_by_user* and
*rmse_by_item* are SFrames with per-user and per-item RMSE, while
*rmse_overall* is the overall RMSE (a float). If the model is
trained without a target (i.e. precision and recall are the
evaluation criteria) an :py:class:`~turicreate.SFrame` is returned
with both of these metrics for each user at several cutoff values.
Examples
--------
>>> import turicreate as tc
>>> sf = tc.SFrame('https://static.turi.com/datasets/audioscrobbler')
>>> train, test = tc.recommender.util.random_split_by_user(sf)
>>> m = tc.recommender.create(train, target='target')
>>> eval = m.evaluate(test)
See Also
--------
evaluate_precision_recall, evaluate_rmse, precision_recall_by_user | [
"r",
"Evaluate",
"the",
"model",
"s",
"ability",
"to",
"make",
"rating",
"predictions",
"or",
"recommendations",
"."
] | 74514c3f99e25b46f22c6e02977fe3da69221c2e | https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/unity/python/turicreate/toolkits/recommender/util.py#L1637-L1761 |
28,983 | apple/turicreate | src/unity/python/turicreate/toolkits/recommender/util.py | _Recommender._get_popularity_baseline | def _get_popularity_baseline(self):
"""
Returns a new popularity model matching the data set this model was
trained with. Can be used for comparison purposes.
"""
response = self.__proxy__.get_popularity_baseline()
from .popularity_recommender import PopularityRecommender
return PopularityRecommender(response) | python | def _get_popularity_baseline(self):
"""
Returns a new popularity model matching the data set this model was
trained with. Can be used for comparison purposes.
"""
response = self.__proxy__.get_popularity_baseline()
from .popularity_recommender import PopularityRecommender
return PopularityRecommender(response) | [
"def",
"_get_popularity_baseline",
"(",
"self",
")",
":",
"response",
"=",
"self",
".",
"__proxy__",
".",
"get_popularity_baseline",
"(",
")",
"from",
".",
"popularity_recommender",
"import",
"PopularityRecommender",
"return",
"PopularityRecommender",
"(",
"response",
")"
] | Returns a new popularity model matching the data set this model was
trained with. Can be used for comparison purposes. | [
"Returns",
"a",
"new",
"popularity",
"model",
"matching",
"the",
"data",
"set",
"this",
"model",
"was",
"trained",
"with",
".",
"Can",
"be",
"used",
"for",
"comparison",
"purposes",
"."
] | 74514c3f99e25b46f22c6e02977fe3da69221c2e | https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/unity/python/turicreate/toolkits/recommender/util.py#L1763-L1772 |
28,984 | apple/turicreate | src/unity/python/turicreate/toolkits/recommender/util.py | _Recommender._get_item_intersection_info | def _get_item_intersection_info(self, item_pairs):
"""
For a collection of item -> item pairs, returns information about the
users in that intersection.
Parameters
----------
item_pairs : 2-column SFrame of two item columns, or a list of
(item_1, item_2) tuples.
Returns
-------
out : SFrame
A SFrame with the two item columns given above, the number of
users that rated each, and a dictionary mapping the user to a
pair of the ratings, with the first rating being the rating of
the first item and the second being the rating of the second item.
If no ratings are provided, these values are always 1.0.
"""
if type(item_pairs) is list:
if not all(type(t) in [list, tuple] and len(t) == 2 for t in item_pairs):
raise TypeError("item_pairs must be 2-column SFrame of two item "
"columns, or a list of (item_1, item_2) tuples. ")
item_name = self.item_id
item_pairs = _turicreate.SFrame({item_name + "_1" : [v1 for v1, v2 in item_pairs],
item_name + "_2" : [v2 for v1, v2 in item_pairs]})
if not isinstance(item_pairs, _turicreate.SFrame):
raise TypeError("item_pairs must be 2-column SFrame of two item "
"columns, or a list of (item_1, item_2) tuples. ")
response = self.__proxy__.get_item_intersection_info(item_pairs)
return response | python | def _get_item_intersection_info(self, item_pairs):
"""
For a collection of item -> item pairs, returns information about the
users in that intersection.
Parameters
----------
item_pairs : 2-column SFrame of two item columns, or a list of
(item_1, item_2) tuples.
Returns
-------
out : SFrame
A SFrame with the two item columns given above, the number of
users that rated each, and a dictionary mapping the user to a
pair of the ratings, with the first rating being the rating of
the first item and the second being the rating of the second item.
If no ratings are provided, these values are always 1.0.
"""
if type(item_pairs) is list:
if not all(type(t) in [list, tuple] and len(t) == 2 for t in item_pairs):
raise TypeError("item_pairs must be 2-column SFrame of two item "
"columns, or a list of (item_1, item_2) tuples. ")
item_name = self.item_id
item_pairs = _turicreate.SFrame({item_name + "_1" : [v1 for v1, v2 in item_pairs],
item_name + "_2" : [v2 for v1, v2 in item_pairs]})
if not isinstance(item_pairs, _turicreate.SFrame):
raise TypeError("item_pairs must be 2-column SFrame of two item "
"columns, or a list of (item_1, item_2) tuples. ")
response = self.__proxy__.get_item_intersection_info(item_pairs)
return response | [
"def",
"_get_item_intersection_info",
"(",
"self",
",",
"item_pairs",
")",
":",
"if",
"type",
"(",
"item_pairs",
")",
"is",
"list",
":",
"if",
"not",
"all",
"(",
"type",
"(",
"t",
")",
"in",
"[",
"list",
",",
"tuple",
"]",
"and",
"len",
"(",
"t",
")",
"==",
"2",
"for",
"t",
"in",
"item_pairs",
")",
":",
"raise",
"TypeError",
"(",
"\"item_pairs must be 2-column SFrame of two item \"",
"\"columns, or a list of (item_1, item_2) tuples. \"",
")",
"item_name",
"=",
"self",
".",
"item_id",
"item_pairs",
"=",
"_turicreate",
".",
"SFrame",
"(",
"{",
"item_name",
"+",
"\"_1\"",
":",
"[",
"v1",
"for",
"v1",
",",
"v2",
"in",
"item_pairs",
"]",
",",
"item_name",
"+",
"\"_2\"",
":",
"[",
"v2",
"for",
"v1",
",",
"v2",
"in",
"item_pairs",
"]",
"}",
")",
"if",
"not",
"isinstance",
"(",
"item_pairs",
",",
"_turicreate",
".",
"SFrame",
")",
":",
"raise",
"TypeError",
"(",
"\"item_pairs must be 2-column SFrame of two item \"",
"\"columns, or a list of (item_1, item_2) tuples. \"",
")",
"response",
"=",
"self",
".",
"__proxy__",
".",
"get_item_intersection_info",
"(",
"item_pairs",
")",
"return",
"response"
] | For a collection of item -> item pairs, returns information about the
users in that intersection.
Parameters
----------
item_pairs : 2-column SFrame of two item columns, or a list of
(item_1, item_2) tuples.
Returns
-------
out : SFrame
A SFrame with the two item columns given above, the number of
users that rated each, and a dictionary mapping the user to a
pair of the ratings, with the first rating being the rating of
the first item and the second being the rating of the second item.
If no ratings are provided, these values are always 1.0. | [
"For",
"a",
"collection",
"of",
"item",
"-",
">",
"item",
"pairs",
"returns",
"information",
"about",
"the",
"users",
"in",
"that",
"intersection",
"."
] | 74514c3f99e25b46f22c6e02977fe3da69221c2e | https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/unity/python/turicreate/toolkits/recommender/util.py#L1774-L1809 |
28,985 | apple/turicreate | deps/src/boost_1_68_0/libs/predef/tools/ci/common.py | utils.query_boost_version | def query_boost_version(boost_root):
'''
Read in the Boost version from a given boost_root.
'''
boost_version = None
if os.path.exists(os.path.join(boost_root,'Jamroot')):
with codecs.open(os.path.join(boost_root,'Jamroot'), 'r', 'utf-8') as f:
for line in f.readlines():
parts = line.split()
if len(parts) >= 5 and parts[1] == 'BOOST_VERSION':
boost_version = parts[3]
break
if not boost_version:
boost_version = 'default'
return boost_version | python | def query_boost_version(boost_root):
'''
Read in the Boost version from a given boost_root.
'''
boost_version = None
if os.path.exists(os.path.join(boost_root,'Jamroot')):
with codecs.open(os.path.join(boost_root,'Jamroot'), 'r', 'utf-8') as f:
for line in f.readlines():
parts = line.split()
if len(parts) >= 5 and parts[1] == 'BOOST_VERSION':
boost_version = parts[3]
break
if not boost_version:
boost_version = 'default'
return boost_version | [
"def",
"query_boost_version",
"(",
"boost_root",
")",
":",
"boost_version",
"=",
"None",
"if",
"os",
".",
"path",
".",
"exists",
"(",
"os",
".",
"path",
".",
"join",
"(",
"boost_root",
",",
"'Jamroot'",
")",
")",
":",
"with",
"codecs",
".",
"open",
"(",
"os",
".",
"path",
".",
"join",
"(",
"boost_root",
",",
"'Jamroot'",
")",
",",
"'r'",
",",
"'utf-8'",
")",
"as",
"f",
":",
"for",
"line",
"in",
"f",
".",
"readlines",
"(",
")",
":",
"parts",
"=",
"line",
".",
"split",
"(",
")",
"if",
"len",
"(",
"parts",
")",
">=",
"5",
"and",
"parts",
"[",
"1",
"]",
"==",
"'BOOST_VERSION'",
":",
"boost_version",
"=",
"parts",
"[",
"3",
"]",
"break",
"if",
"not",
"boost_version",
":",
"boost_version",
"=",
"'default'",
"return",
"boost_version"
] | Read in the Boost version from a given boost_root. | [
"Read",
"in",
"the",
"Boost",
"version",
"from",
"a",
"given",
"boost_root",
"."
] | 74514c3f99e25b46f22c6e02977fe3da69221c2e | https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/deps/src/boost_1_68_0/libs/predef/tools/ci/common.py#L421-L435 |
28,986 | apple/turicreate | deps/src/boost_1_68_0/libs/predef/tools/ci/common.py | utils.git_clone | def git_clone(sub_repo, branch, commit = None, cwd = None, no_submodules = False):
'''
This clone mimicks the way Travis-CI clones a project's repo. So far
Travis-CI is the most limiting in the sense of only fetching partial
history of the repo.
'''
if not cwd:
cwd = cwd = os.getcwd()
root_dir = os.path.join(cwd,'boostorg',sub_repo)
if not os.path.exists(os.path.join(root_dir,'.git')):
utils.check_call("git","clone",
"--depth=1",
"--branch=%s"%(branch),
"https://github.com/boostorg/%s.git"%(sub_repo),
root_dir)
os.chdir(root_dir)
else:
os.chdir(root_dir)
utils.check_call("git","pull",
# "--depth=1", # Can't do depth as we get merge errors.
"--quiet","--no-recurse-submodules")
if commit:
utils.check_call("git","checkout","-qf",commit)
if os.path.exists(os.path.join('.git','modules')):
if sys.platform == 'win32':
utils.check_call('dir',os.path.join('.git','modules'))
else:
utils.check_call('ls','-la',os.path.join('.git','modules'))
if not no_submodules:
utils.check_call("git","submodule","--quiet","update",
"--quiet","--init","--recursive",
)
utils.check_call("git","submodule","--quiet","foreach","git","fetch")
return root_dir | python | def git_clone(sub_repo, branch, commit = None, cwd = None, no_submodules = False):
'''
This clone mimicks the way Travis-CI clones a project's repo. So far
Travis-CI is the most limiting in the sense of only fetching partial
history of the repo.
'''
if not cwd:
cwd = cwd = os.getcwd()
root_dir = os.path.join(cwd,'boostorg',sub_repo)
if not os.path.exists(os.path.join(root_dir,'.git')):
utils.check_call("git","clone",
"--depth=1",
"--branch=%s"%(branch),
"https://github.com/boostorg/%s.git"%(sub_repo),
root_dir)
os.chdir(root_dir)
else:
os.chdir(root_dir)
utils.check_call("git","pull",
# "--depth=1", # Can't do depth as we get merge errors.
"--quiet","--no-recurse-submodules")
if commit:
utils.check_call("git","checkout","-qf",commit)
if os.path.exists(os.path.join('.git','modules')):
if sys.platform == 'win32':
utils.check_call('dir',os.path.join('.git','modules'))
else:
utils.check_call('ls','-la',os.path.join('.git','modules'))
if not no_submodules:
utils.check_call("git","submodule","--quiet","update",
"--quiet","--init","--recursive",
)
utils.check_call("git","submodule","--quiet","foreach","git","fetch")
return root_dir | [
"def",
"git_clone",
"(",
"sub_repo",
",",
"branch",
",",
"commit",
"=",
"None",
",",
"cwd",
"=",
"None",
",",
"no_submodules",
"=",
"False",
")",
":",
"if",
"not",
"cwd",
":",
"cwd",
"=",
"cwd",
"=",
"os",
".",
"getcwd",
"(",
")",
"root_dir",
"=",
"os",
".",
"path",
".",
"join",
"(",
"cwd",
",",
"'boostorg'",
",",
"sub_repo",
")",
"if",
"not",
"os",
".",
"path",
".",
"exists",
"(",
"os",
".",
"path",
".",
"join",
"(",
"root_dir",
",",
"'.git'",
")",
")",
":",
"utils",
".",
"check_call",
"(",
"\"git\"",
",",
"\"clone\"",
",",
"\"--depth=1\"",
",",
"\"--branch=%s\"",
"%",
"(",
"branch",
")",
",",
"\"https://github.com/boostorg/%s.git\"",
"%",
"(",
"sub_repo",
")",
",",
"root_dir",
")",
"os",
".",
"chdir",
"(",
"root_dir",
")",
"else",
":",
"os",
".",
"chdir",
"(",
"root_dir",
")",
"utils",
".",
"check_call",
"(",
"\"git\"",
",",
"\"pull\"",
",",
"# \"--depth=1\", # Can't do depth as we get merge errors.",
"\"--quiet\"",
",",
"\"--no-recurse-submodules\"",
")",
"if",
"commit",
":",
"utils",
".",
"check_call",
"(",
"\"git\"",
",",
"\"checkout\"",
",",
"\"-qf\"",
",",
"commit",
")",
"if",
"os",
".",
"path",
".",
"exists",
"(",
"os",
".",
"path",
".",
"join",
"(",
"'.git'",
",",
"'modules'",
")",
")",
":",
"if",
"sys",
".",
"platform",
"==",
"'win32'",
":",
"utils",
".",
"check_call",
"(",
"'dir'",
",",
"os",
".",
"path",
".",
"join",
"(",
"'.git'",
",",
"'modules'",
")",
")",
"else",
":",
"utils",
".",
"check_call",
"(",
"'ls'",
",",
"'-la'",
",",
"os",
".",
"path",
".",
"join",
"(",
"'.git'",
",",
"'modules'",
")",
")",
"if",
"not",
"no_submodules",
":",
"utils",
".",
"check_call",
"(",
"\"git\"",
",",
"\"submodule\"",
",",
"\"--quiet\"",
",",
"\"update\"",
",",
"\"--quiet\"",
",",
"\"--init\"",
",",
"\"--recursive\"",
",",
")",
"utils",
".",
"check_call",
"(",
"\"git\"",
",",
"\"submodule\"",
",",
"\"--quiet\"",
",",
"\"foreach\"",
",",
"\"git\"",
",",
"\"fetch\"",
")",
"return",
"root_dir"
] | This clone mimicks the way Travis-CI clones a project's repo. So far
Travis-CI is the most limiting in the sense of only fetching partial
history of the repo. | [
"This",
"clone",
"mimicks",
"the",
"way",
"Travis",
"-",
"CI",
"clones",
"a",
"project",
"s",
"repo",
".",
"So",
"far",
"Travis",
"-",
"CI",
"is",
"the",
"most",
"limiting",
"in",
"the",
"sense",
"of",
"only",
"fetching",
"partial",
"history",
"of",
"the",
"repo",
"."
] | 74514c3f99e25b46f22c6e02977fe3da69221c2e | https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/deps/src/boost_1_68_0/libs/predef/tools/ci/common.py#L438-L471 |
28,987 | apple/turicreate | deps/src/boost_1_68_0/libs/predef/tools/ci/common.py | ci_travis.install_toolset | def install_toolset(self, toolset):
'''
Installs specific toolset on CI system.
'''
info = toolset_info[toolset]
if sys.platform.startswith('linux'):
os.chdir(self.work_dir)
if 'ppa' in info:
for ppa in info['ppa']:
utils.check_call(
'sudo','add-apt-repository','--yes',ppa)
if 'deb' in info:
utils.make_file('sources.list',
"deb %s"%(' '.join(info['deb'])),
"deb-src %s"%(' '.join(info['deb'])))
utils.check_call('sudo','bash','-c','cat sources.list >> /etc/apt/sources.list')
if 'apt-key' in info:
for key in info['apt-key']:
utils.check_call('wget',key,'-O','apt.key')
utils.check_call('sudo','apt-key','add','apt.key')
utils.check_call(
'sudo','apt-get','update','-qq')
utils.check_call(
'sudo','apt-get','install','-qq',info['package'])
if 'debugpackage' in info and info['debugpackage']:
utils.check_call(
'sudo','apt-get','install','-qq',info['debugpackage']) | python | def install_toolset(self, toolset):
'''
Installs specific toolset on CI system.
'''
info = toolset_info[toolset]
if sys.platform.startswith('linux'):
os.chdir(self.work_dir)
if 'ppa' in info:
for ppa in info['ppa']:
utils.check_call(
'sudo','add-apt-repository','--yes',ppa)
if 'deb' in info:
utils.make_file('sources.list',
"deb %s"%(' '.join(info['deb'])),
"deb-src %s"%(' '.join(info['deb'])))
utils.check_call('sudo','bash','-c','cat sources.list >> /etc/apt/sources.list')
if 'apt-key' in info:
for key in info['apt-key']:
utils.check_call('wget',key,'-O','apt.key')
utils.check_call('sudo','apt-key','add','apt.key')
utils.check_call(
'sudo','apt-get','update','-qq')
utils.check_call(
'sudo','apt-get','install','-qq',info['package'])
if 'debugpackage' in info and info['debugpackage']:
utils.check_call(
'sudo','apt-get','install','-qq',info['debugpackage']) | [
"def",
"install_toolset",
"(",
"self",
",",
"toolset",
")",
":",
"info",
"=",
"toolset_info",
"[",
"toolset",
"]",
"if",
"sys",
".",
"platform",
".",
"startswith",
"(",
"'linux'",
")",
":",
"os",
".",
"chdir",
"(",
"self",
".",
"work_dir",
")",
"if",
"'ppa'",
"in",
"info",
":",
"for",
"ppa",
"in",
"info",
"[",
"'ppa'",
"]",
":",
"utils",
".",
"check_call",
"(",
"'sudo'",
",",
"'add-apt-repository'",
",",
"'--yes'",
",",
"ppa",
")",
"if",
"'deb'",
"in",
"info",
":",
"utils",
".",
"make_file",
"(",
"'sources.list'",
",",
"\"deb %s\"",
"%",
"(",
"' '",
".",
"join",
"(",
"info",
"[",
"'deb'",
"]",
")",
")",
",",
"\"deb-src %s\"",
"%",
"(",
"' '",
".",
"join",
"(",
"info",
"[",
"'deb'",
"]",
")",
")",
")",
"utils",
".",
"check_call",
"(",
"'sudo'",
",",
"'bash'",
",",
"'-c'",
",",
"'cat sources.list >> /etc/apt/sources.list'",
")",
"if",
"'apt-key'",
"in",
"info",
":",
"for",
"key",
"in",
"info",
"[",
"'apt-key'",
"]",
":",
"utils",
".",
"check_call",
"(",
"'wget'",
",",
"key",
",",
"'-O'",
",",
"'apt.key'",
")",
"utils",
".",
"check_call",
"(",
"'sudo'",
",",
"'apt-key'",
",",
"'add'",
",",
"'apt.key'",
")",
"utils",
".",
"check_call",
"(",
"'sudo'",
",",
"'apt-get'",
",",
"'update'",
",",
"'-qq'",
")",
"utils",
".",
"check_call",
"(",
"'sudo'",
",",
"'apt-get'",
",",
"'install'",
",",
"'-qq'",
",",
"info",
"[",
"'package'",
"]",
")",
"if",
"'debugpackage'",
"in",
"info",
"and",
"info",
"[",
"'debugpackage'",
"]",
":",
"utils",
".",
"check_call",
"(",
"'sudo'",
",",
"'apt-get'",
",",
"'install'",
",",
"'-qq'",
",",
"info",
"[",
"'debugpackage'",
"]",
")"
] | Installs specific toolset on CI system. | [
"Installs",
"specific",
"toolset",
"on",
"CI",
"system",
"."
] | 74514c3f99e25b46f22c6e02977fe3da69221c2e | https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/deps/src/boost_1_68_0/libs/predef/tools/ci/common.py#L683-L709 |
28,988 | apple/turicreate | src/external/coremltools_wrap/coremltools/coremltools/converters/keras/_keras2_converter.py | _load_keras_model | def _load_keras_model(model_network_path, model_weight_path, custom_objects=None):
"""Load a keras model from disk
Parameters
----------
model_network_path: str
Path where the model network path is (json file)
model_weight_path: str
Path where the model network weights are (hd5 file)
custom_objects:
A dictionary of layers or other custom classes
or functions used by the model
Returns
-------
model: A keras model
"""
from keras.models import model_from_json
import json
# Load the model network
json_file = open(model_network_path, 'r')
loaded_model_json = json_file.read()
json_file.close()
if not custom_objects:
custom_objects = {}
# Load the model weights
loaded_model = model_from_json(loaded_model_json, custom_objects=custom_objects)
loaded_model.load_weights(model_weight_path)
return loaded_model | python | def _load_keras_model(model_network_path, model_weight_path, custom_objects=None):
"""Load a keras model from disk
Parameters
----------
model_network_path: str
Path where the model network path is (json file)
model_weight_path: str
Path where the model network weights are (hd5 file)
custom_objects:
A dictionary of layers or other custom classes
or functions used by the model
Returns
-------
model: A keras model
"""
from keras.models import model_from_json
import json
# Load the model network
json_file = open(model_network_path, 'r')
loaded_model_json = json_file.read()
json_file.close()
if not custom_objects:
custom_objects = {}
# Load the model weights
loaded_model = model_from_json(loaded_model_json, custom_objects=custom_objects)
loaded_model.load_weights(model_weight_path)
return loaded_model | [
"def",
"_load_keras_model",
"(",
"model_network_path",
",",
"model_weight_path",
",",
"custom_objects",
"=",
"None",
")",
":",
"from",
"keras",
".",
"models",
"import",
"model_from_json",
"import",
"json",
"# Load the model network",
"json_file",
"=",
"open",
"(",
"model_network_path",
",",
"'r'",
")",
"loaded_model_json",
"=",
"json_file",
".",
"read",
"(",
")",
"json_file",
".",
"close",
"(",
")",
"if",
"not",
"custom_objects",
":",
"custom_objects",
"=",
"{",
"}",
"# Load the model weights",
"loaded_model",
"=",
"model_from_json",
"(",
"loaded_model_json",
",",
"custom_objects",
"=",
"custom_objects",
")",
"loaded_model",
".",
"load_weights",
"(",
"model_weight_path",
")",
"return",
"loaded_model"
] | Load a keras model from disk
Parameters
----------
model_network_path: str
Path where the model network path is (json file)
model_weight_path: str
Path where the model network weights are (hd5 file)
custom_objects:
A dictionary of layers or other custom classes
or functions used by the model
Returns
-------
model: A keras model | [
"Load",
"a",
"keras",
"model",
"from",
"disk"
] | 74514c3f99e25b46f22c6e02977fe3da69221c2e | https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/external/coremltools_wrap/coremltools/coremltools/converters/keras/_keras2_converter.py#L134-L168 |
28,989 | apple/turicreate | src/unity/python/turicreate/visualization/_plot.py | Plot.show | def show(self):
"""
A method for displaying the Plot object
Notes
-----
- The plot will render either inline in a Jupyter Notebook, or in a
native GUI window, depending on the value provided in
`turicreate.visualization.set_target` (defaults to 'auto').
Examples
--------
Suppose 'plt' is an Plot Object
We can view it using:
>>> plt.show()
"""
global _target
display = False
try:
if _target == 'auto' and \
get_ipython().__class__.__name__ == "ZMQInteractiveShell":
self._repr_javascript_()
display = True
except NameError:
pass
finally:
if not display:
if _sys.platform != 'darwin' and _sys.platform != 'linux2' and _sys.platform != 'linux':
raise NotImplementedError('Visualization is currently supported only on macOS and Linux.')
path_to_client = _get_client_app_path()
# TODO: allow autodetection of light/dark mode.
# Disabled for now, since the GUI side needs some work (ie. background color).
plot_variation = 0x10 # force light mode
self.__proxy__.call_function('show', {'path_to_client': path_to_client, 'variation': plot_variation}) | python | def show(self):
"""
A method for displaying the Plot object
Notes
-----
- The plot will render either inline in a Jupyter Notebook, or in a
native GUI window, depending on the value provided in
`turicreate.visualization.set_target` (defaults to 'auto').
Examples
--------
Suppose 'plt' is an Plot Object
We can view it using:
>>> plt.show()
"""
global _target
display = False
try:
if _target == 'auto' and \
get_ipython().__class__.__name__ == "ZMQInteractiveShell":
self._repr_javascript_()
display = True
except NameError:
pass
finally:
if not display:
if _sys.platform != 'darwin' and _sys.platform != 'linux2' and _sys.platform != 'linux':
raise NotImplementedError('Visualization is currently supported only on macOS and Linux.')
path_to_client = _get_client_app_path()
# TODO: allow autodetection of light/dark mode.
# Disabled for now, since the GUI side needs some work (ie. background color).
plot_variation = 0x10 # force light mode
self.__proxy__.call_function('show', {'path_to_client': path_to_client, 'variation': plot_variation}) | [
"def",
"show",
"(",
"self",
")",
":",
"global",
"_target",
"display",
"=",
"False",
"try",
":",
"if",
"_target",
"==",
"'auto'",
"and",
"get_ipython",
"(",
")",
".",
"__class__",
".",
"__name__",
"==",
"\"ZMQInteractiveShell\"",
":",
"self",
".",
"_repr_javascript_",
"(",
")",
"display",
"=",
"True",
"except",
"NameError",
":",
"pass",
"finally",
":",
"if",
"not",
"display",
":",
"if",
"_sys",
".",
"platform",
"!=",
"'darwin'",
"and",
"_sys",
".",
"platform",
"!=",
"'linux2'",
"and",
"_sys",
".",
"platform",
"!=",
"'linux'",
":",
"raise",
"NotImplementedError",
"(",
"'Visualization is currently supported only on macOS and Linux.'",
")",
"path_to_client",
"=",
"_get_client_app_path",
"(",
")",
"# TODO: allow autodetection of light/dark mode.",
"# Disabled for now, since the GUI side needs some work (ie. background color).",
"plot_variation",
"=",
"0x10",
"# force light mode",
"self",
".",
"__proxy__",
".",
"call_function",
"(",
"'show'",
",",
"{",
"'path_to_client'",
":",
"path_to_client",
",",
"'variation'",
":",
"plot_variation",
"}",
")"
] | A method for displaying the Plot object
Notes
-----
- The plot will render either inline in a Jupyter Notebook, or in a
native GUI window, depending on the value provided in
`turicreate.visualization.set_target` (defaults to 'auto').
Examples
--------
Suppose 'plt' is an Plot Object
We can view it using:
>>> plt.show() | [
"A",
"method",
"for",
"displaying",
"the",
"Plot",
"object"
] | 74514c3f99e25b46f22c6e02977fe3da69221c2e | https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/unity/python/turicreate/visualization/_plot.py#L104-L142 |
28,990 | apple/turicreate | src/unity/python/turicreate/visualization/_plot.py | Plot.save | def save(self, filepath):
"""
A method for saving the Plot object in a vega representation
Parameters
----------
filepath: string
The destination filepath where the plot object must be saved as.
The extension of this filepath determines what format the plot will
be saved as. Currently supported formats are JSON, PNG, and SVG.
Examples
--------
Suppose 'plt' is an Plot Object
We can save it using:
>>> plt.save('vega_spec.json')
We can also save the vega representation of the plot without data:
>>> plt.save('vega_spec.json', False)
We can save the plot as a PNG/SVG using:
>>> plt.save('test.png')
>>> plt.save('test.svg')
"""
if type(filepath) != str:
raise ValueError("filepath provided is not a string")
if filepath.endswith(".json"):
# save as vega json
spec = self.get_vega(include_data = True)
with open(filepath, 'w') as fp:
_json.dump(spec, fp)
elif filepath.endswith(".png") or filepath.endswith(".svg"):
# save as png/svg, but json first
spec = self.get_vega(include_data = True)
EXTENSION_START_INDEX = -3
extension = filepath[EXTENSION_START_INDEX:]
temp_file_tuple = _mkstemp()
temp_file_path = temp_file_tuple[1]
with open(temp_file_path, 'w') as fp:
_json.dump(spec, fp)
dirname = _os.path.dirname(__file__)
relative_path_to_vg2png_vg2svg = "../vg2" + extension
absolute_path_to_vg2png_vg2svg = _os.path.join(dirname,
relative_path_to_vg2png_vg2svg)
# try node vg2[png|svg] json_filepath out_filepath
(exitcode, stdout, stderr) = _run_cmdline("node " +
absolute_path_to_vg2png_vg2svg + " "
+ temp_file_path + " " + filepath)
if exitcode == _NODE_NOT_FOUND_ERROR_CODE:
# user doesn't have node installed
raise RuntimeError("Node.js not found. Saving as PNG and SVG" +
" requires Node.js, please download and install Node.js " +
"from here and try again: https://nodejs.org/en/download/")
elif exitcode == _CANVAS_PREBUILT_NOT_FOUND_ERROR:
# try to see if canvas-prebuilt is globally installed
# if it is, then link it
# if not, tell the user to install it
(is_installed_exitcode,
is_installed_stdout,
is_installed_stderr) = _run_cmdline(
"npm ls -g -json | grep canvas-prebuilt")
if is_installed_exitcode == _SUCCESS:
# npm link canvas-prebuilt
link_exitcode, link_stdout, link_stderr = _run_cmdline(
"npm link canvas-prebuilt")
if link_exitcode == _PERMISSION_DENIED_ERROR_CODE:
# They don't have permission, tell them.
raise RuntimeError(link_stderr + '\n\n' +
"`npm link canvas-prebuilt` failed, " +
"Permission Denied.")
elif link_exitcode == _SUCCESS:
# canvas-prebuilt link is now successful, so run the
# node vg2[png|svg] json_filepath out_filepath
# command again.
(exitcode, stdout, stderr) = _run_cmdline("node " +
absolute_path_to_vg2png_vg2svg + " "
+ temp_file_path + " " + filepath)
if exitcode != _SUCCESS:
# something else that we have not identified yet
# happened.
raise RuntimeError(stderr)
else:
raise RuntimeError(link_stderr)
else:
raise RuntimeError("canvas-prebuilt not found. " +
"Saving as PNG and SVG requires canvas-prebuilt, " +
"please download and install canvas-prebuilt by " +
"running this command, and try again: " +
"`npm install -g canvas-prebuilt`")
elif exitcode == _SUCCESS:
pass
else:
raise RuntimeError(stderr)
# delete temp file that user didn't ask for
_run_cmdline("rm " + temp_file_path)
else:
raise NotImplementedError("filename must end in" +
" .json, .svg, or .png") | python | def save(self, filepath):
"""
A method for saving the Plot object in a vega representation
Parameters
----------
filepath: string
The destination filepath where the plot object must be saved as.
The extension of this filepath determines what format the plot will
be saved as. Currently supported formats are JSON, PNG, and SVG.
Examples
--------
Suppose 'plt' is an Plot Object
We can save it using:
>>> plt.save('vega_spec.json')
We can also save the vega representation of the plot without data:
>>> plt.save('vega_spec.json', False)
We can save the plot as a PNG/SVG using:
>>> plt.save('test.png')
>>> plt.save('test.svg')
"""
if type(filepath) != str:
raise ValueError("filepath provided is not a string")
if filepath.endswith(".json"):
# save as vega json
spec = self.get_vega(include_data = True)
with open(filepath, 'w') as fp:
_json.dump(spec, fp)
elif filepath.endswith(".png") or filepath.endswith(".svg"):
# save as png/svg, but json first
spec = self.get_vega(include_data = True)
EXTENSION_START_INDEX = -3
extension = filepath[EXTENSION_START_INDEX:]
temp_file_tuple = _mkstemp()
temp_file_path = temp_file_tuple[1]
with open(temp_file_path, 'w') as fp:
_json.dump(spec, fp)
dirname = _os.path.dirname(__file__)
relative_path_to_vg2png_vg2svg = "../vg2" + extension
absolute_path_to_vg2png_vg2svg = _os.path.join(dirname,
relative_path_to_vg2png_vg2svg)
# try node vg2[png|svg] json_filepath out_filepath
(exitcode, stdout, stderr) = _run_cmdline("node " +
absolute_path_to_vg2png_vg2svg + " "
+ temp_file_path + " " + filepath)
if exitcode == _NODE_NOT_FOUND_ERROR_CODE:
# user doesn't have node installed
raise RuntimeError("Node.js not found. Saving as PNG and SVG" +
" requires Node.js, please download and install Node.js " +
"from here and try again: https://nodejs.org/en/download/")
elif exitcode == _CANVAS_PREBUILT_NOT_FOUND_ERROR:
# try to see if canvas-prebuilt is globally installed
# if it is, then link it
# if not, tell the user to install it
(is_installed_exitcode,
is_installed_stdout,
is_installed_stderr) = _run_cmdline(
"npm ls -g -json | grep canvas-prebuilt")
if is_installed_exitcode == _SUCCESS:
# npm link canvas-prebuilt
link_exitcode, link_stdout, link_stderr = _run_cmdline(
"npm link canvas-prebuilt")
if link_exitcode == _PERMISSION_DENIED_ERROR_CODE:
# They don't have permission, tell them.
raise RuntimeError(link_stderr + '\n\n' +
"`npm link canvas-prebuilt` failed, " +
"Permission Denied.")
elif link_exitcode == _SUCCESS:
# canvas-prebuilt link is now successful, so run the
# node vg2[png|svg] json_filepath out_filepath
# command again.
(exitcode, stdout, stderr) = _run_cmdline("node " +
absolute_path_to_vg2png_vg2svg + " "
+ temp_file_path + " " + filepath)
if exitcode != _SUCCESS:
# something else that we have not identified yet
# happened.
raise RuntimeError(stderr)
else:
raise RuntimeError(link_stderr)
else:
raise RuntimeError("canvas-prebuilt not found. " +
"Saving as PNG and SVG requires canvas-prebuilt, " +
"please download and install canvas-prebuilt by " +
"running this command, and try again: " +
"`npm install -g canvas-prebuilt`")
elif exitcode == _SUCCESS:
pass
else:
raise RuntimeError(stderr)
# delete temp file that user didn't ask for
_run_cmdline("rm " + temp_file_path)
else:
raise NotImplementedError("filename must end in" +
" .json, .svg, or .png") | [
"def",
"save",
"(",
"self",
",",
"filepath",
")",
":",
"if",
"type",
"(",
"filepath",
")",
"!=",
"str",
":",
"raise",
"ValueError",
"(",
"\"filepath provided is not a string\"",
")",
"if",
"filepath",
".",
"endswith",
"(",
"\".json\"",
")",
":",
"# save as vega json",
"spec",
"=",
"self",
".",
"get_vega",
"(",
"include_data",
"=",
"True",
")",
"with",
"open",
"(",
"filepath",
",",
"'w'",
")",
"as",
"fp",
":",
"_json",
".",
"dump",
"(",
"spec",
",",
"fp",
")",
"elif",
"filepath",
".",
"endswith",
"(",
"\".png\"",
")",
"or",
"filepath",
".",
"endswith",
"(",
"\".svg\"",
")",
":",
"# save as png/svg, but json first",
"spec",
"=",
"self",
".",
"get_vega",
"(",
"include_data",
"=",
"True",
")",
"EXTENSION_START_INDEX",
"=",
"-",
"3",
"extension",
"=",
"filepath",
"[",
"EXTENSION_START_INDEX",
":",
"]",
"temp_file_tuple",
"=",
"_mkstemp",
"(",
")",
"temp_file_path",
"=",
"temp_file_tuple",
"[",
"1",
"]",
"with",
"open",
"(",
"temp_file_path",
",",
"'w'",
")",
"as",
"fp",
":",
"_json",
".",
"dump",
"(",
"spec",
",",
"fp",
")",
"dirname",
"=",
"_os",
".",
"path",
".",
"dirname",
"(",
"__file__",
")",
"relative_path_to_vg2png_vg2svg",
"=",
"\"../vg2\"",
"+",
"extension",
"absolute_path_to_vg2png_vg2svg",
"=",
"_os",
".",
"path",
".",
"join",
"(",
"dirname",
",",
"relative_path_to_vg2png_vg2svg",
")",
"# try node vg2[png|svg] json_filepath out_filepath",
"(",
"exitcode",
",",
"stdout",
",",
"stderr",
")",
"=",
"_run_cmdline",
"(",
"\"node \"",
"+",
"absolute_path_to_vg2png_vg2svg",
"+",
"\" \"",
"+",
"temp_file_path",
"+",
"\" \"",
"+",
"filepath",
")",
"if",
"exitcode",
"==",
"_NODE_NOT_FOUND_ERROR_CODE",
":",
"# user doesn't have node installed",
"raise",
"RuntimeError",
"(",
"\"Node.js not found. Saving as PNG and SVG\"",
"+",
"\" requires Node.js, please download and install Node.js \"",
"+",
"\"from here and try again: https://nodejs.org/en/download/\"",
")",
"elif",
"exitcode",
"==",
"_CANVAS_PREBUILT_NOT_FOUND_ERROR",
":",
"# try to see if canvas-prebuilt is globally installed",
"# if it is, then link it",
"# if not, tell the user to install it",
"(",
"is_installed_exitcode",
",",
"is_installed_stdout",
",",
"is_installed_stderr",
")",
"=",
"_run_cmdline",
"(",
"\"npm ls -g -json | grep canvas-prebuilt\"",
")",
"if",
"is_installed_exitcode",
"==",
"_SUCCESS",
":",
"# npm link canvas-prebuilt ",
"link_exitcode",
",",
"link_stdout",
",",
"link_stderr",
"=",
"_run_cmdline",
"(",
"\"npm link canvas-prebuilt\"",
")",
"if",
"link_exitcode",
"==",
"_PERMISSION_DENIED_ERROR_CODE",
":",
"# They don't have permission, tell them.",
"raise",
"RuntimeError",
"(",
"link_stderr",
"+",
"'\\n\\n'",
"+",
"\"`npm link canvas-prebuilt` failed, \"",
"+",
"\"Permission Denied.\"",
")",
"elif",
"link_exitcode",
"==",
"_SUCCESS",
":",
"# canvas-prebuilt link is now successful, so run the ",
"# node vg2[png|svg] json_filepath out_filepath",
"# command again.",
"(",
"exitcode",
",",
"stdout",
",",
"stderr",
")",
"=",
"_run_cmdline",
"(",
"\"node \"",
"+",
"absolute_path_to_vg2png_vg2svg",
"+",
"\" \"",
"+",
"temp_file_path",
"+",
"\" \"",
"+",
"filepath",
")",
"if",
"exitcode",
"!=",
"_SUCCESS",
":",
"# something else that we have not identified yet",
"# happened.",
"raise",
"RuntimeError",
"(",
"stderr",
")",
"else",
":",
"raise",
"RuntimeError",
"(",
"link_stderr",
")",
"else",
":",
"raise",
"RuntimeError",
"(",
"\"canvas-prebuilt not found. \"",
"+",
"\"Saving as PNG and SVG requires canvas-prebuilt, \"",
"+",
"\"please download and install canvas-prebuilt by \"",
"+",
"\"running this command, and try again: \"",
"+",
"\"`npm install -g canvas-prebuilt`\"",
")",
"elif",
"exitcode",
"==",
"_SUCCESS",
":",
"pass",
"else",
":",
"raise",
"RuntimeError",
"(",
"stderr",
")",
"# delete temp file that user didn't ask for",
"_run_cmdline",
"(",
"\"rm \"",
"+",
"temp_file_path",
")",
"else",
":",
"raise",
"NotImplementedError",
"(",
"\"filename must end in\"",
"+",
"\" .json, .svg, or .png\"",
")"
] | A method for saving the Plot object in a vega representation
Parameters
----------
filepath: string
The destination filepath where the plot object must be saved as.
The extension of this filepath determines what format the plot will
be saved as. Currently supported formats are JSON, PNG, and SVG.
Examples
--------
Suppose 'plt' is an Plot Object
We can save it using:
>>> plt.save('vega_spec.json')
We can also save the vega representation of the plot without data:
>>> plt.save('vega_spec.json', False)
We can save the plot as a PNG/SVG using:
>>> plt.save('test.png')
>>> plt.save('test.svg') | [
"A",
"method",
"for",
"saving",
"the",
"Plot",
"object",
"in",
"a",
"vega",
"representation"
] | 74514c3f99e25b46f22c6e02977fe3da69221c2e | https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/unity/python/turicreate/visualization/_plot.py#L144-L248 |
28,991 | apple/turicreate | src/external/coremltools_wrap/coremltools/coremltools/converters/sklearn/_tree_ensemble.py | _get_value | def _get_value(scikit_value, mode = 'regressor', scaling = 1.0, n_classes = 2, tree_index = 0):
""" Get the right value from the scikit-tree
"""
# Regression
if mode == 'regressor':
return scikit_value[0] * scaling
# Binary classification
if n_classes == 2:
# Decision tree
if len(scikit_value[0]) != 1:
value = scikit_value[0][1] * scaling / scikit_value[0].sum()
# boosted tree
else:
value = scikit_value[0][0] * scaling
if value == 0.5:
value = value - 1e-7
# Multiclass classification
else:
# Decision tree
if len(scikit_value[0]) != 1:
value = scikit_value[0] / scikit_value[0].sum()
# boosted tree
else:
value = {tree_index: scikit_value[0] * scaling}
return value | python | def _get_value(scikit_value, mode = 'regressor', scaling = 1.0, n_classes = 2, tree_index = 0):
""" Get the right value from the scikit-tree
"""
# Regression
if mode == 'regressor':
return scikit_value[0] * scaling
# Binary classification
if n_classes == 2:
# Decision tree
if len(scikit_value[0]) != 1:
value = scikit_value[0][1] * scaling / scikit_value[0].sum()
# boosted tree
else:
value = scikit_value[0][0] * scaling
if value == 0.5:
value = value - 1e-7
# Multiclass classification
else:
# Decision tree
if len(scikit_value[0]) != 1:
value = scikit_value[0] / scikit_value[0].sum()
# boosted tree
else:
value = {tree_index: scikit_value[0] * scaling}
return value | [
"def",
"_get_value",
"(",
"scikit_value",
",",
"mode",
"=",
"'regressor'",
",",
"scaling",
"=",
"1.0",
",",
"n_classes",
"=",
"2",
",",
"tree_index",
"=",
"0",
")",
":",
"# Regression",
"if",
"mode",
"==",
"'regressor'",
":",
"return",
"scikit_value",
"[",
"0",
"]",
"*",
"scaling",
"# Binary classification",
"if",
"n_classes",
"==",
"2",
":",
"# Decision tree",
"if",
"len",
"(",
"scikit_value",
"[",
"0",
"]",
")",
"!=",
"1",
":",
"value",
"=",
"scikit_value",
"[",
"0",
"]",
"[",
"1",
"]",
"*",
"scaling",
"/",
"scikit_value",
"[",
"0",
"]",
".",
"sum",
"(",
")",
"# boosted tree",
"else",
":",
"value",
"=",
"scikit_value",
"[",
"0",
"]",
"[",
"0",
"]",
"*",
"scaling",
"if",
"value",
"==",
"0.5",
":",
"value",
"=",
"value",
"-",
"1e-7",
"# Multiclass classification",
"else",
":",
"# Decision tree",
"if",
"len",
"(",
"scikit_value",
"[",
"0",
"]",
")",
"!=",
"1",
":",
"value",
"=",
"scikit_value",
"[",
"0",
"]",
"/",
"scikit_value",
"[",
"0",
"]",
".",
"sum",
"(",
")",
"# boosted tree",
"else",
":",
"value",
"=",
"{",
"tree_index",
":",
"scikit_value",
"[",
"0",
"]",
"*",
"scaling",
"}",
"return",
"value"
] | Get the right value from the scikit-tree | [
"Get",
"the",
"right",
"value",
"from",
"the",
"scikit",
"-",
"tree"
] | 74514c3f99e25b46f22c6e02977fe3da69221c2e | https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/external/coremltools_wrap/coremltools/coremltools/converters/sklearn/_tree_ensemble.py#L16-L42 |
28,992 | apple/turicreate | src/external/coremltools_wrap/coremltools/coremltools/converters/sklearn/_tree_ensemble.py | convert_tree_ensemble | def convert_tree_ensemble(model, input_features,
output_features = ('predicted_class', float),
mode = 'regressor',
base_prediction = None,
class_labels = None,
post_evaluation_transform = None):
"""
Convert a generic tree regressor model to the protobuf spec.
This currently supports:
* Decision tree regression
* Gradient boosted tree regression
* Random forest regression
* Decision tree classifier.
* Gradient boosted tree classifier.
* Random forest classifier.
----------
Parameters
model: [DecisionTreeRegressor | GradientBoostingRegression | RandomForestRegressor]
A scikit learn tree model.
feature_names : list of strings, optional (default=None)
Names of each of the features.
target: str
Name of the output column.
base_prediction: double
Base prediction value.
mode: str in ['regressor', 'classifier']
Mode of the tree model.
class_labels: list[int]
List of classes
post_evaluation_transform: list[int]
Post evaluation transform
Returns
-------
model_spec: An object of type Model_pb.
Protobuf representation of the model
"""
num_dimensions = get_input_dimension(model)
features = process_or_validate_features(input_features, num_dimensions)
n_classes = None
if mode == 'classifier':
n_classes = model.n_classes_
if class_labels is None:
class_labels = range(n_classes)
else:
if len(class_labels) != n_classes:
raise ValueError("Number of classes in model (%d) does not match "
"length of supplied class list (%d)."
% (n_classes, len(class_labels)))
coreml_tree = TreeEnsembleClassifier(input_features, class_labels, output_features)
if post_evaluation_transform is not None:
coreml_tree.set_post_evaluation_transform(post_evaluation_transform)
# Base prediction not provided
if base_prediction is None:
if n_classes == 2:
base_prediction = [0.0]
else:
base_prediction = [0.0 for c in range(n_classes)]
coreml_tree.set_default_prediction_value(base_prediction)
else:
if base_prediction is None:
base_prediction = 0.0
coreml_tree = TreeEnsembleRegressor(input_features, output_features)
coreml_tree.set_default_prediction_value(base_prediction)
# Single tree
if hasattr(model, 'tree_'):
_recurse(coreml_tree, model.tree_, tree_id = 0, node_id = 0,
mode = mode, n_classes = n_classes)
# Multiple trees
elif hasattr(model, 'estimators_'):
is_ensembling_in_separate_trees = False
if type(model.estimators_) != list:
is_ensembling_in_separate_trees = len(model.estimators_.shape) > 0 and model.estimators_.shape[1] > 1
estimators = model.estimators_.flatten()
else:
estimators = model.estimators_
scaling = model.learning_rate if hasattr(model, 'learning_rate') else 1.0 / len(estimators)
for tree_id, base_model in enumerate(estimators):
if is_ensembling_in_separate_trees:
tree_index = tree_id % n_classes
else:
tree_index = 0
_recurse(coreml_tree, base_model.tree_, tree_id, node_id = 0,
scaling = scaling, mode = mode, n_classes = n_classes, tree_index = tree_index)
else:
raise TypeError('Unknown scikit-learn tree model type.')
return coreml_tree.spec | python | def convert_tree_ensemble(model, input_features,
output_features = ('predicted_class', float),
mode = 'regressor',
base_prediction = None,
class_labels = None,
post_evaluation_transform = None):
"""
Convert a generic tree regressor model to the protobuf spec.
This currently supports:
* Decision tree regression
* Gradient boosted tree regression
* Random forest regression
* Decision tree classifier.
* Gradient boosted tree classifier.
* Random forest classifier.
----------
Parameters
model: [DecisionTreeRegressor | GradientBoostingRegression | RandomForestRegressor]
A scikit learn tree model.
feature_names : list of strings, optional (default=None)
Names of each of the features.
target: str
Name of the output column.
base_prediction: double
Base prediction value.
mode: str in ['regressor', 'classifier']
Mode of the tree model.
class_labels: list[int]
List of classes
post_evaluation_transform: list[int]
Post evaluation transform
Returns
-------
model_spec: An object of type Model_pb.
Protobuf representation of the model
"""
num_dimensions = get_input_dimension(model)
features = process_or_validate_features(input_features, num_dimensions)
n_classes = None
if mode == 'classifier':
n_classes = model.n_classes_
if class_labels is None:
class_labels = range(n_classes)
else:
if len(class_labels) != n_classes:
raise ValueError("Number of classes in model (%d) does not match "
"length of supplied class list (%d)."
% (n_classes, len(class_labels)))
coreml_tree = TreeEnsembleClassifier(input_features, class_labels, output_features)
if post_evaluation_transform is not None:
coreml_tree.set_post_evaluation_transform(post_evaluation_transform)
# Base prediction not provided
if base_prediction is None:
if n_classes == 2:
base_prediction = [0.0]
else:
base_prediction = [0.0 for c in range(n_classes)]
coreml_tree.set_default_prediction_value(base_prediction)
else:
if base_prediction is None:
base_prediction = 0.0
coreml_tree = TreeEnsembleRegressor(input_features, output_features)
coreml_tree.set_default_prediction_value(base_prediction)
# Single tree
if hasattr(model, 'tree_'):
_recurse(coreml_tree, model.tree_, tree_id = 0, node_id = 0,
mode = mode, n_classes = n_classes)
# Multiple trees
elif hasattr(model, 'estimators_'):
is_ensembling_in_separate_trees = False
if type(model.estimators_) != list:
is_ensembling_in_separate_trees = len(model.estimators_.shape) > 0 and model.estimators_.shape[1] > 1
estimators = model.estimators_.flatten()
else:
estimators = model.estimators_
scaling = model.learning_rate if hasattr(model, 'learning_rate') else 1.0 / len(estimators)
for tree_id, base_model in enumerate(estimators):
if is_ensembling_in_separate_trees:
tree_index = tree_id % n_classes
else:
tree_index = 0
_recurse(coreml_tree, base_model.tree_, tree_id, node_id = 0,
scaling = scaling, mode = mode, n_classes = n_classes, tree_index = tree_index)
else:
raise TypeError('Unknown scikit-learn tree model type.')
return coreml_tree.spec | [
"def",
"convert_tree_ensemble",
"(",
"model",
",",
"input_features",
",",
"output_features",
"=",
"(",
"'predicted_class'",
",",
"float",
")",
",",
"mode",
"=",
"'regressor'",
",",
"base_prediction",
"=",
"None",
",",
"class_labels",
"=",
"None",
",",
"post_evaluation_transform",
"=",
"None",
")",
":",
"num_dimensions",
"=",
"get_input_dimension",
"(",
"model",
")",
"features",
"=",
"process_or_validate_features",
"(",
"input_features",
",",
"num_dimensions",
")",
"n_classes",
"=",
"None",
"if",
"mode",
"==",
"'classifier'",
":",
"n_classes",
"=",
"model",
".",
"n_classes_",
"if",
"class_labels",
"is",
"None",
":",
"class_labels",
"=",
"range",
"(",
"n_classes",
")",
"else",
":",
"if",
"len",
"(",
"class_labels",
")",
"!=",
"n_classes",
":",
"raise",
"ValueError",
"(",
"\"Number of classes in model (%d) does not match \"",
"\"length of supplied class list (%d).\"",
"%",
"(",
"n_classes",
",",
"len",
"(",
"class_labels",
")",
")",
")",
"coreml_tree",
"=",
"TreeEnsembleClassifier",
"(",
"input_features",
",",
"class_labels",
",",
"output_features",
")",
"if",
"post_evaluation_transform",
"is",
"not",
"None",
":",
"coreml_tree",
".",
"set_post_evaluation_transform",
"(",
"post_evaluation_transform",
")",
"# Base prediction not provided",
"if",
"base_prediction",
"is",
"None",
":",
"if",
"n_classes",
"==",
"2",
":",
"base_prediction",
"=",
"[",
"0.0",
"]",
"else",
":",
"base_prediction",
"=",
"[",
"0.0",
"for",
"c",
"in",
"range",
"(",
"n_classes",
")",
"]",
"coreml_tree",
".",
"set_default_prediction_value",
"(",
"base_prediction",
")",
"else",
":",
"if",
"base_prediction",
"is",
"None",
":",
"base_prediction",
"=",
"0.0",
"coreml_tree",
"=",
"TreeEnsembleRegressor",
"(",
"input_features",
",",
"output_features",
")",
"coreml_tree",
".",
"set_default_prediction_value",
"(",
"base_prediction",
")",
"# Single tree",
"if",
"hasattr",
"(",
"model",
",",
"'tree_'",
")",
":",
"_recurse",
"(",
"coreml_tree",
",",
"model",
".",
"tree_",
",",
"tree_id",
"=",
"0",
",",
"node_id",
"=",
"0",
",",
"mode",
"=",
"mode",
",",
"n_classes",
"=",
"n_classes",
")",
"# Multiple trees",
"elif",
"hasattr",
"(",
"model",
",",
"'estimators_'",
")",
":",
"is_ensembling_in_separate_trees",
"=",
"False",
"if",
"type",
"(",
"model",
".",
"estimators_",
")",
"!=",
"list",
":",
"is_ensembling_in_separate_trees",
"=",
"len",
"(",
"model",
".",
"estimators_",
".",
"shape",
")",
">",
"0",
"and",
"model",
".",
"estimators_",
".",
"shape",
"[",
"1",
"]",
">",
"1",
"estimators",
"=",
"model",
".",
"estimators_",
".",
"flatten",
"(",
")",
"else",
":",
"estimators",
"=",
"model",
".",
"estimators_",
"scaling",
"=",
"model",
".",
"learning_rate",
"if",
"hasattr",
"(",
"model",
",",
"'learning_rate'",
")",
"else",
"1.0",
"/",
"len",
"(",
"estimators",
")",
"for",
"tree_id",
",",
"base_model",
"in",
"enumerate",
"(",
"estimators",
")",
":",
"if",
"is_ensembling_in_separate_trees",
":",
"tree_index",
"=",
"tree_id",
"%",
"n_classes",
"else",
":",
"tree_index",
"=",
"0",
"_recurse",
"(",
"coreml_tree",
",",
"base_model",
".",
"tree_",
",",
"tree_id",
",",
"node_id",
"=",
"0",
",",
"scaling",
"=",
"scaling",
",",
"mode",
"=",
"mode",
",",
"n_classes",
"=",
"n_classes",
",",
"tree_index",
"=",
"tree_index",
")",
"else",
":",
"raise",
"TypeError",
"(",
"'Unknown scikit-learn tree model type.'",
")",
"return",
"coreml_tree",
".",
"spec"
] | Convert a generic tree regressor model to the protobuf spec.
This currently supports:
* Decision tree regression
* Gradient boosted tree regression
* Random forest regression
* Decision tree classifier.
* Gradient boosted tree classifier.
* Random forest classifier.
----------
Parameters
model: [DecisionTreeRegressor | GradientBoostingRegression | RandomForestRegressor]
A scikit learn tree model.
feature_names : list of strings, optional (default=None)
Names of each of the features.
target: str
Name of the output column.
base_prediction: double
Base prediction value.
mode: str in ['regressor', 'classifier']
Mode of the tree model.
class_labels: list[int]
List of classes
post_evaluation_transform: list[int]
Post evaluation transform
Returns
-------
model_spec: An object of type Model_pb.
Protobuf representation of the model | [
"Convert",
"a",
"generic",
"tree",
"regressor",
"model",
"to",
"the",
"protobuf",
"spec",
"."
] | 74514c3f99e25b46f22c6e02977fe3da69221c2e | https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/external/coremltools_wrap/coremltools/coremltools/converters/sklearn/_tree_ensemble.py#L97-L199 |
28,993 | apple/turicreate | src/unity/python/turicreate/toolkits/style_transfer/style_transfer.py | StyleTransfer.get_styles | def get_styles(self, style=None):
"""
Returns SFrame of style images used for training the model
Parameters
----------
style: int or list, optional
The selected style or list of styles to return. If `None`, all
styles will be returned
See Also
--------
stylize
Examples
--------
>>> model.get_styles()
Columns:
style int
image Image
Rows: 4
Data:
+-------+--------------------------+
| style | image |
+-------+--------------------------+
| 0 | Height: 642 Width: 642 |
| 1 | Height: 642 Width: 642 |
| 2 | Height: 642 Width: 642 |
| 3 | Height: 642 Width: 642 |
+-------+--------------------------+
"""
style, _ = self._style_input_check(style)
return self.styles.filter_by(style, self._index_column) | python | def get_styles(self, style=None):
"""
Returns SFrame of style images used for training the model
Parameters
----------
style: int or list, optional
The selected style or list of styles to return. If `None`, all
styles will be returned
See Also
--------
stylize
Examples
--------
>>> model.get_styles()
Columns:
style int
image Image
Rows: 4
Data:
+-------+--------------------------+
| style | image |
+-------+--------------------------+
| 0 | Height: 642 Width: 642 |
| 1 | Height: 642 Width: 642 |
| 2 | Height: 642 Width: 642 |
| 3 | Height: 642 Width: 642 |
+-------+--------------------------+
"""
style, _ = self._style_input_check(style)
return self.styles.filter_by(style, self._index_column) | [
"def",
"get_styles",
"(",
"self",
",",
"style",
"=",
"None",
")",
":",
"style",
",",
"_",
"=",
"self",
".",
"_style_input_check",
"(",
"style",
")",
"return",
"self",
".",
"styles",
".",
"filter_by",
"(",
"style",
",",
"self",
".",
"_index_column",
")"
] | Returns SFrame of style images used for training the model
Parameters
----------
style: int or list, optional
The selected style or list of styles to return. If `None`, all
styles will be returned
See Also
--------
stylize
Examples
--------
>>> model.get_styles()
Columns:
style int
image Image
Rows: 4
Data:
+-------+--------------------------+
| style | image |
+-------+--------------------------+
| 0 | Height: 642 Width: 642 |
| 1 | Height: 642 Width: 642 |
| 2 | Height: 642 Width: 642 |
| 3 | Height: 642 Width: 642 |
+-------+--------------------------+ | [
"Returns",
"SFrame",
"of",
"style",
"images",
"used",
"for",
"training",
"the",
"model"
] | 74514c3f99e25b46f22c6e02977fe3da69221c2e | https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/unity/python/turicreate/toolkits/style_transfer/style_transfer.py#L876-L911 |
28,994 | apple/turicreate | src/external/coremltools_wrap/coremltools/coremltools/converters/libsvm/_libsvm_util.py | load_model | def load_model(model_path):
"""Load a libsvm model from a path on disk.
This currently supports:
* C-SVC
* NU-SVC
* Epsilon-SVR
* NU-SVR
Parameters
----------
model_path: str
Path on disk where the libsvm model representation is.
Returns
-------
model: libsvm_model
A model of the libsvm format.
"""
if not(HAS_LIBSVM):
raise RuntimeError('libsvm not found. libsvm conversion API is disabled.')
from svmutil import svm_load_model # From libsvm
import os
if (not os.path.exists(model_path)):
raise IOError("Expected a valid file path. %s does not exist" % model_path)
return svm_load_model(model_path) | python | def load_model(model_path):
"""Load a libsvm model from a path on disk.
This currently supports:
* C-SVC
* NU-SVC
* Epsilon-SVR
* NU-SVR
Parameters
----------
model_path: str
Path on disk where the libsvm model representation is.
Returns
-------
model: libsvm_model
A model of the libsvm format.
"""
if not(HAS_LIBSVM):
raise RuntimeError('libsvm not found. libsvm conversion API is disabled.')
from svmutil import svm_load_model # From libsvm
import os
if (not os.path.exists(model_path)):
raise IOError("Expected a valid file path. %s does not exist" % model_path)
return svm_load_model(model_path) | [
"def",
"load_model",
"(",
"model_path",
")",
":",
"if",
"not",
"(",
"HAS_LIBSVM",
")",
":",
"raise",
"RuntimeError",
"(",
"'libsvm not found. libsvm conversion API is disabled.'",
")",
"from",
"svmutil",
"import",
"svm_load_model",
"# From libsvm",
"import",
"os",
"if",
"(",
"not",
"os",
".",
"path",
".",
"exists",
"(",
"model_path",
")",
")",
":",
"raise",
"IOError",
"(",
"\"Expected a valid file path. %s does not exist\"",
"%",
"model_path",
")",
"return",
"svm_load_model",
"(",
"model_path",
")"
] | Load a libsvm model from a path on disk.
This currently supports:
* C-SVC
* NU-SVC
* Epsilon-SVR
* NU-SVR
Parameters
----------
model_path: str
Path on disk where the libsvm model representation is.
Returns
-------
model: libsvm_model
A model of the libsvm format. | [
"Load",
"a",
"libsvm",
"model",
"from",
"a",
"path",
"on",
"disk",
"."
] | 74514c3f99e25b46f22c6e02977fe3da69221c2e | https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/external/coremltools_wrap/coremltools/coremltools/converters/libsvm/_libsvm_util.py#L8-L34 |
28,995 | apple/turicreate | src/external/coremltools_wrap/coremltools/coremltools/models/neural_network/flexible_shape_utils.py | add_enumerated_multiarray_shapes | def add_enumerated_multiarray_shapes(spec, feature_name, shapes):
"""
Annotate an input or output multiArray feature in a Neural Network spec to
to accommodate a list of enumerated array shapes
:param spec: MLModel
The MLModel spec containing the feature
:param feature_name: str
The name of the image feature for which to add shape information.
If the feature is not found in the input or output descriptions then
an exception is thrown
:param shapes: [] | NeuralNetworkMultiArrayShape
A single or a list of NeuralNetworkImageSize objects which encode valid
size information for a image feature
Examples
--------
.. sourcecode:: python
>>> import coremltools
>>> from coremltools.models.neural_network import flexible_shape_utils
>>> spec = coremltools.utils.load_spec('mymodel.mlmodel')
>>> array_shapes = [flexible_shape_utils.NeuralNetworkMultiArrayShape(3)]
>>> second_shape = flexible_shape_utils.NeuralNetworkMultiArrayShape()
>>> second_shape.set_channel_shape(3)
>>> second_shape.set_height_shape(10)
>>> second_shape.set_width_shape(15)
>>> array_shapes.append(second_shape)
>>> flexible_shape_utils.add_enumerated_multiarray_shapes(spec, feature_name='my_multiarray_featurename', shapes=array_shapes)
:return:
None. The spec object is updated
"""
if not isinstance(shapes, list):
shapes = [shapes]
for shape in shapes:
if not isinstance(shape, NeuralNetworkMultiArrayShape):
raise Exception(
'Shape ranges should be of type NeuralNetworkMultiArrayShape')
shape._validate_multiarray_shape()
feature = _get_feature(spec, feature_name)
if feature.type.WhichOneof('Type') != 'multiArrayType':
raise Exception('Trying to add enumerated shapes to '
'a non-multiArray feature type')
if feature.type.multiArrayType.WhichOneof(
'ShapeFlexibility') != 'enumeratedShapes':
feature.type.multiArrayType.ClearField('ShapeFlexibility')
eshape_len = len(feature.type.multiArrayType.enumeratedShapes.shapes)
# Add default array shape to list of enumerated shapes if enumerated shapes
# field is currently empty
if eshape_len == 0:
fixed_shape = feature.type.multiArrayType.shape
if len(fixed_shape) == 1:
fs = NeuralNetworkMultiArrayShape(fixed_shape[0])
shapes.append(fs)
elif len(fixed_shape) == 3:
fs = NeuralNetworkMultiArrayShape()
fs.set_channel_shape(fixed_shape[0])
fs.set_height_shape(fixed_shape[1])
fs.set_width_shape(fixed_shape[2])
shapes.append(fs)
else:
raise Exception('Original fixed multiArray shape for {} is invalid'
.format(feature_name))
for shape in shapes:
s = feature.type.multiArrayType.enumeratedShapes.shapes.add()
s.shape.extend(shape.multiarray_shape)
# Bump up specification version
spec.specificationVersion = max(_MINIMUM_FLEXIBLE_SHAPES_SPEC_VERSION,
spec.specificationVersion) | python | def add_enumerated_multiarray_shapes(spec, feature_name, shapes):
"""
Annotate an input or output multiArray feature in a Neural Network spec to
to accommodate a list of enumerated array shapes
:param spec: MLModel
The MLModel spec containing the feature
:param feature_name: str
The name of the image feature for which to add shape information.
If the feature is not found in the input or output descriptions then
an exception is thrown
:param shapes: [] | NeuralNetworkMultiArrayShape
A single or a list of NeuralNetworkImageSize objects which encode valid
size information for a image feature
Examples
--------
.. sourcecode:: python
>>> import coremltools
>>> from coremltools.models.neural_network import flexible_shape_utils
>>> spec = coremltools.utils.load_spec('mymodel.mlmodel')
>>> array_shapes = [flexible_shape_utils.NeuralNetworkMultiArrayShape(3)]
>>> second_shape = flexible_shape_utils.NeuralNetworkMultiArrayShape()
>>> second_shape.set_channel_shape(3)
>>> second_shape.set_height_shape(10)
>>> second_shape.set_width_shape(15)
>>> array_shapes.append(second_shape)
>>> flexible_shape_utils.add_enumerated_multiarray_shapes(spec, feature_name='my_multiarray_featurename', shapes=array_shapes)
:return:
None. The spec object is updated
"""
if not isinstance(shapes, list):
shapes = [shapes]
for shape in shapes:
if not isinstance(shape, NeuralNetworkMultiArrayShape):
raise Exception(
'Shape ranges should be of type NeuralNetworkMultiArrayShape')
shape._validate_multiarray_shape()
feature = _get_feature(spec, feature_name)
if feature.type.WhichOneof('Type') != 'multiArrayType':
raise Exception('Trying to add enumerated shapes to '
'a non-multiArray feature type')
if feature.type.multiArrayType.WhichOneof(
'ShapeFlexibility') != 'enumeratedShapes':
feature.type.multiArrayType.ClearField('ShapeFlexibility')
eshape_len = len(feature.type.multiArrayType.enumeratedShapes.shapes)
# Add default array shape to list of enumerated shapes if enumerated shapes
# field is currently empty
if eshape_len == 0:
fixed_shape = feature.type.multiArrayType.shape
if len(fixed_shape) == 1:
fs = NeuralNetworkMultiArrayShape(fixed_shape[0])
shapes.append(fs)
elif len(fixed_shape) == 3:
fs = NeuralNetworkMultiArrayShape()
fs.set_channel_shape(fixed_shape[0])
fs.set_height_shape(fixed_shape[1])
fs.set_width_shape(fixed_shape[2])
shapes.append(fs)
else:
raise Exception('Original fixed multiArray shape for {} is invalid'
.format(feature_name))
for shape in shapes:
s = feature.type.multiArrayType.enumeratedShapes.shapes.add()
s.shape.extend(shape.multiarray_shape)
# Bump up specification version
spec.specificationVersion = max(_MINIMUM_FLEXIBLE_SHAPES_SPEC_VERSION,
spec.specificationVersion) | [
"def",
"add_enumerated_multiarray_shapes",
"(",
"spec",
",",
"feature_name",
",",
"shapes",
")",
":",
"if",
"not",
"isinstance",
"(",
"shapes",
",",
"list",
")",
":",
"shapes",
"=",
"[",
"shapes",
"]",
"for",
"shape",
"in",
"shapes",
":",
"if",
"not",
"isinstance",
"(",
"shape",
",",
"NeuralNetworkMultiArrayShape",
")",
":",
"raise",
"Exception",
"(",
"'Shape ranges should be of type NeuralNetworkMultiArrayShape'",
")",
"shape",
".",
"_validate_multiarray_shape",
"(",
")",
"feature",
"=",
"_get_feature",
"(",
"spec",
",",
"feature_name",
")",
"if",
"feature",
".",
"type",
".",
"WhichOneof",
"(",
"'Type'",
")",
"!=",
"'multiArrayType'",
":",
"raise",
"Exception",
"(",
"'Trying to add enumerated shapes to '",
"'a non-multiArray feature type'",
")",
"if",
"feature",
".",
"type",
".",
"multiArrayType",
".",
"WhichOneof",
"(",
"'ShapeFlexibility'",
")",
"!=",
"'enumeratedShapes'",
":",
"feature",
".",
"type",
".",
"multiArrayType",
".",
"ClearField",
"(",
"'ShapeFlexibility'",
")",
"eshape_len",
"=",
"len",
"(",
"feature",
".",
"type",
".",
"multiArrayType",
".",
"enumeratedShapes",
".",
"shapes",
")",
"# Add default array shape to list of enumerated shapes if enumerated shapes",
"# field is currently empty",
"if",
"eshape_len",
"==",
"0",
":",
"fixed_shape",
"=",
"feature",
".",
"type",
".",
"multiArrayType",
".",
"shape",
"if",
"len",
"(",
"fixed_shape",
")",
"==",
"1",
":",
"fs",
"=",
"NeuralNetworkMultiArrayShape",
"(",
"fixed_shape",
"[",
"0",
"]",
")",
"shapes",
".",
"append",
"(",
"fs",
")",
"elif",
"len",
"(",
"fixed_shape",
")",
"==",
"3",
":",
"fs",
"=",
"NeuralNetworkMultiArrayShape",
"(",
")",
"fs",
".",
"set_channel_shape",
"(",
"fixed_shape",
"[",
"0",
"]",
")",
"fs",
".",
"set_height_shape",
"(",
"fixed_shape",
"[",
"1",
"]",
")",
"fs",
".",
"set_width_shape",
"(",
"fixed_shape",
"[",
"2",
"]",
")",
"shapes",
".",
"append",
"(",
"fs",
")",
"else",
":",
"raise",
"Exception",
"(",
"'Original fixed multiArray shape for {} is invalid'",
".",
"format",
"(",
"feature_name",
")",
")",
"for",
"shape",
"in",
"shapes",
":",
"s",
"=",
"feature",
".",
"type",
".",
"multiArrayType",
".",
"enumeratedShapes",
".",
"shapes",
".",
"add",
"(",
")",
"s",
".",
"shape",
".",
"extend",
"(",
"shape",
".",
"multiarray_shape",
")",
"# Bump up specification version",
"spec",
".",
"specificationVersion",
"=",
"max",
"(",
"_MINIMUM_FLEXIBLE_SHAPES_SPEC_VERSION",
",",
"spec",
".",
"specificationVersion",
")"
] | Annotate an input or output multiArray feature in a Neural Network spec to
to accommodate a list of enumerated array shapes
:param spec: MLModel
The MLModel spec containing the feature
:param feature_name: str
The name of the image feature for which to add shape information.
If the feature is not found in the input or output descriptions then
an exception is thrown
:param shapes: [] | NeuralNetworkMultiArrayShape
A single or a list of NeuralNetworkImageSize objects which encode valid
size information for a image feature
Examples
--------
.. sourcecode:: python
>>> import coremltools
>>> from coremltools.models.neural_network import flexible_shape_utils
>>> spec = coremltools.utils.load_spec('mymodel.mlmodel')
>>> array_shapes = [flexible_shape_utils.NeuralNetworkMultiArrayShape(3)]
>>> second_shape = flexible_shape_utils.NeuralNetworkMultiArrayShape()
>>> second_shape.set_channel_shape(3)
>>> second_shape.set_height_shape(10)
>>> second_shape.set_width_shape(15)
>>> array_shapes.append(second_shape)
>>> flexible_shape_utils.add_enumerated_multiarray_shapes(spec, feature_name='my_multiarray_featurename', shapes=array_shapes)
:return:
None. The spec object is updated | [
"Annotate",
"an",
"input",
"or",
"output",
"multiArray",
"feature",
"in",
"a",
"Neural",
"Network",
"spec",
"to",
"to",
"accommodate",
"a",
"list",
"of",
"enumerated",
"array",
"shapes"
] | 74514c3f99e25b46f22c6e02977fe3da69221c2e | https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/external/coremltools_wrap/coremltools/coremltools/models/neural_network/flexible_shape_utils.py#L291-L370 |
28,996 | apple/turicreate | src/external/coremltools_wrap/coremltools/coremltools/models/neural_network/flexible_shape_utils.py | add_enumerated_image_sizes | def add_enumerated_image_sizes(spec, feature_name, sizes):
"""
Annotate an input or output image feature in a Neural Network spec to
to accommodate a list of enumerated image sizes
:param spec: MLModel
The MLModel spec containing the feature
:param feature_name: str
The name of the image feature for which to add size information.
If the feature is not found in the input or output descriptions then
an exception is thrown
:param sizes: [] | NeuralNetworkImageSize
A single or a list of NeuralNetworkImageSize objects which encode valid
size information for a image feature
Examples
--------
.. sourcecode:: python
>>> import coremltools
>>> from coremltools.models.neural_network import flexible_shape_utils
>>> spec = coremltools.utils.load_spec('mymodel.mlmodel')
>>> image_sizes = [flexible_shape_utils.NeuralNetworkImageSize(128, 128)]
>>> image_sizes.append(flexible_shape_utils.NeuralNetworkImageSize(256, 256))
>>> flexible_shape_utils.add_enumerated_image_sizes(spec, feature_name='my_multiarray_featurename', sizes=image_sizes)
:return:
None. The spec object is updated
"""
if not isinstance(sizes, list):
sizes = [sizes]
for size in sizes:
if not isinstance(size, NeuralNetworkImageSize):
raise Exception(
'Shape ranges should be of type NeuralNetworkImageSize')
feature = _get_feature(spec, feature_name)
if feature.type.WhichOneof('Type') != 'imageType':
raise Exception('Trying to add enumerated sizes to '
'a non-image feature type')
if feature.type.imageType.WhichOneof(
'SizeFlexibility') != 'enumeratedSizes':
feature.type.imageType.ClearField('SizeFlexibility')
esizes_len = len(feature.type.imageType.enumeratedSizes.sizes)
# Add default image size to list of enumerated sizes if enumerated sizes
# field is currently empty
if esizes_len == 0:
fixed_height = feature.type.imageType.height
fixed_width = feature.type.imageType.width
sizes.append(NeuralNetworkImageSize(fixed_height, fixed_width))
for size in sizes:
s = feature.type.imageType.enumeratedSizes.sizes.add()
s.height = size.height
s.width = size.width
# Bump up specification version
spec.specificationVersion = max(_MINIMUM_FLEXIBLE_SHAPES_SPEC_VERSION,
spec.specificationVersion) | python | def add_enumerated_image_sizes(spec, feature_name, sizes):
"""
Annotate an input or output image feature in a Neural Network spec to
to accommodate a list of enumerated image sizes
:param spec: MLModel
The MLModel spec containing the feature
:param feature_name: str
The name of the image feature for which to add size information.
If the feature is not found in the input or output descriptions then
an exception is thrown
:param sizes: [] | NeuralNetworkImageSize
A single or a list of NeuralNetworkImageSize objects which encode valid
size information for a image feature
Examples
--------
.. sourcecode:: python
>>> import coremltools
>>> from coremltools.models.neural_network import flexible_shape_utils
>>> spec = coremltools.utils.load_spec('mymodel.mlmodel')
>>> image_sizes = [flexible_shape_utils.NeuralNetworkImageSize(128, 128)]
>>> image_sizes.append(flexible_shape_utils.NeuralNetworkImageSize(256, 256))
>>> flexible_shape_utils.add_enumerated_image_sizes(spec, feature_name='my_multiarray_featurename', sizes=image_sizes)
:return:
None. The spec object is updated
"""
if not isinstance(sizes, list):
sizes = [sizes]
for size in sizes:
if not isinstance(size, NeuralNetworkImageSize):
raise Exception(
'Shape ranges should be of type NeuralNetworkImageSize')
feature = _get_feature(spec, feature_name)
if feature.type.WhichOneof('Type') != 'imageType':
raise Exception('Trying to add enumerated sizes to '
'a non-image feature type')
if feature.type.imageType.WhichOneof(
'SizeFlexibility') != 'enumeratedSizes':
feature.type.imageType.ClearField('SizeFlexibility')
esizes_len = len(feature.type.imageType.enumeratedSizes.sizes)
# Add default image size to list of enumerated sizes if enumerated sizes
# field is currently empty
if esizes_len == 0:
fixed_height = feature.type.imageType.height
fixed_width = feature.type.imageType.width
sizes.append(NeuralNetworkImageSize(fixed_height, fixed_width))
for size in sizes:
s = feature.type.imageType.enumeratedSizes.sizes.add()
s.height = size.height
s.width = size.width
# Bump up specification version
spec.specificationVersion = max(_MINIMUM_FLEXIBLE_SHAPES_SPEC_VERSION,
spec.specificationVersion) | [
"def",
"add_enumerated_image_sizes",
"(",
"spec",
",",
"feature_name",
",",
"sizes",
")",
":",
"if",
"not",
"isinstance",
"(",
"sizes",
",",
"list",
")",
":",
"sizes",
"=",
"[",
"sizes",
"]",
"for",
"size",
"in",
"sizes",
":",
"if",
"not",
"isinstance",
"(",
"size",
",",
"NeuralNetworkImageSize",
")",
":",
"raise",
"Exception",
"(",
"'Shape ranges should be of type NeuralNetworkImageSize'",
")",
"feature",
"=",
"_get_feature",
"(",
"spec",
",",
"feature_name",
")",
"if",
"feature",
".",
"type",
".",
"WhichOneof",
"(",
"'Type'",
")",
"!=",
"'imageType'",
":",
"raise",
"Exception",
"(",
"'Trying to add enumerated sizes to '",
"'a non-image feature type'",
")",
"if",
"feature",
".",
"type",
".",
"imageType",
".",
"WhichOneof",
"(",
"'SizeFlexibility'",
")",
"!=",
"'enumeratedSizes'",
":",
"feature",
".",
"type",
".",
"imageType",
".",
"ClearField",
"(",
"'SizeFlexibility'",
")",
"esizes_len",
"=",
"len",
"(",
"feature",
".",
"type",
".",
"imageType",
".",
"enumeratedSizes",
".",
"sizes",
")",
"# Add default image size to list of enumerated sizes if enumerated sizes",
"# field is currently empty",
"if",
"esizes_len",
"==",
"0",
":",
"fixed_height",
"=",
"feature",
".",
"type",
".",
"imageType",
".",
"height",
"fixed_width",
"=",
"feature",
".",
"type",
".",
"imageType",
".",
"width",
"sizes",
".",
"append",
"(",
"NeuralNetworkImageSize",
"(",
"fixed_height",
",",
"fixed_width",
")",
")",
"for",
"size",
"in",
"sizes",
":",
"s",
"=",
"feature",
".",
"type",
".",
"imageType",
".",
"enumeratedSizes",
".",
"sizes",
".",
"add",
"(",
")",
"s",
".",
"height",
"=",
"size",
".",
"height",
"s",
".",
"width",
"=",
"size",
".",
"width",
"# Bump up specification version",
"spec",
".",
"specificationVersion",
"=",
"max",
"(",
"_MINIMUM_FLEXIBLE_SHAPES_SPEC_VERSION",
",",
"spec",
".",
"specificationVersion",
")"
] | Annotate an input or output image feature in a Neural Network spec to
to accommodate a list of enumerated image sizes
:param spec: MLModel
The MLModel spec containing the feature
:param feature_name: str
The name of the image feature for which to add size information.
If the feature is not found in the input or output descriptions then
an exception is thrown
:param sizes: [] | NeuralNetworkImageSize
A single or a list of NeuralNetworkImageSize objects which encode valid
size information for a image feature
Examples
--------
.. sourcecode:: python
>>> import coremltools
>>> from coremltools.models.neural_network import flexible_shape_utils
>>> spec = coremltools.utils.load_spec('mymodel.mlmodel')
>>> image_sizes = [flexible_shape_utils.NeuralNetworkImageSize(128, 128)]
>>> image_sizes.append(flexible_shape_utils.NeuralNetworkImageSize(256, 256))
>>> flexible_shape_utils.add_enumerated_image_sizes(spec, feature_name='my_multiarray_featurename', sizes=image_sizes)
:return:
None. The spec object is updated | [
"Annotate",
"an",
"input",
"or",
"output",
"image",
"feature",
"in",
"a",
"Neural",
"Network",
"spec",
"to",
"to",
"accommodate",
"a",
"list",
"of",
"enumerated",
"image",
"sizes"
] | 74514c3f99e25b46f22c6e02977fe3da69221c2e | https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/external/coremltools_wrap/coremltools/coremltools/models/neural_network/flexible_shape_utils.py#L373-L437 |
28,997 | apple/turicreate | src/external/coremltools_wrap/coremltools/coremltools/models/neural_network/flexible_shape_utils.py | update_image_size_range | def update_image_size_range(spec, feature_name, size_range):
"""
Annotate an input or output Image feature in a Neural Network spec to
to accommodate a range of image sizes
:param spec: MLModel
The MLModel spec containing the feature
:param feature_name: str
The name of the Image feature for which to add shape information.
If the feature is not found in the input or output descriptions then
an exception is thrown
:param size_range: NeuralNetworkImageSizeRange
A NeuralNetworkImageSizeRange object with the populated image size
range information.
Examples
--------
.. sourcecode:: python
>>> import coremltools
>>> from coremltools.models.neural_network import flexible_shape_utils
>>> spec = coremltools.utils.load_spec('mymodel.mlmodel')
>>> img_size_ranges = flexible_shape_utils.NeuralNetworkImageSizeRange()
>>> img_size_ranges.add_height_range(64, 128)
>>> img_size_ranges.add_width_range(128, -1)
>>> flexible_shape_utils.update_image_size_range(spec, feature_name='my_multiarray_featurename', size_range=img_size_ranges)
:return:
None. The spec object is updated
"""
if not isinstance(size_range, NeuralNetworkImageSizeRange):
raise Exception(
'Shape ranges should be of type NeuralNetworkImageSizeRange')
feature = _get_feature(spec, feature_name)
if feature.type.WhichOneof('Type') != 'imageType':
raise Exception('Trying to add size ranges for '
'a non-image feature type')
feature.type.imageType.ClearField('SizeFlexibility')
feature.type.imageType.imageSizeRange.heightRange.lowerBound = size_range.get_height_range().lowerBound
feature.type.imageType.imageSizeRange.heightRange.upperBound = size_range.get_height_range().upperBound
feature.type.imageType.imageSizeRange.widthRange.lowerBound = size_range.get_width_range().lowerBound
feature.type.imageType.imageSizeRange.widthRange.upperBound = size_range.get_width_range().upperBound
# Bump up specification version
spec.specificationVersion = max(_MINIMUM_FLEXIBLE_SHAPES_SPEC_VERSION,
spec.specificationVersion) | python | def update_image_size_range(spec, feature_name, size_range):
"""
Annotate an input or output Image feature in a Neural Network spec to
to accommodate a range of image sizes
:param spec: MLModel
The MLModel spec containing the feature
:param feature_name: str
The name of the Image feature for which to add shape information.
If the feature is not found in the input or output descriptions then
an exception is thrown
:param size_range: NeuralNetworkImageSizeRange
A NeuralNetworkImageSizeRange object with the populated image size
range information.
Examples
--------
.. sourcecode:: python
>>> import coremltools
>>> from coremltools.models.neural_network import flexible_shape_utils
>>> spec = coremltools.utils.load_spec('mymodel.mlmodel')
>>> img_size_ranges = flexible_shape_utils.NeuralNetworkImageSizeRange()
>>> img_size_ranges.add_height_range(64, 128)
>>> img_size_ranges.add_width_range(128, -1)
>>> flexible_shape_utils.update_image_size_range(spec, feature_name='my_multiarray_featurename', size_range=img_size_ranges)
:return:
None. The spec object is updated
"""
if not isinstance(size_range, NeuralNetworkImageSizeRange):
raise Exception(
'Shape ranges should be of type NeuralNetworkImageSizeRange')
feature = _get_feature(spec, feature_name)
if feature.type.WhichOneof('Type') != 'imageType':
raise Exception('Trying to add size ranges for '
'a non-image feature type')
feature.type.imageType.ClearField('SizeFlexibility')
feature.type.imageType.imageSizeRange.heightRange.lowerBound = size_range.get_height_range().lowerBound
feature.type.imageType.imageSizeRange.heightRange.upperBound = size_range.get_height_range().upperBound
feature.type.imageType.imageSizeRange.widthRange.lowerBound = size_range.get_width_range().lowerBound
feature.type.imageType.imageSizeRange.widthRange.upperBound = size_range.get_width_range().upperBound
# Bump up specification version
spec.specificationVersion = max(_MINIMUM_FLEXIBLE_SHAPES_SPEC_VERSION,
spec.specificationVersion) | [
"def",
"update_image_size_range",
"(",
"spec",
",",
"feature_name",
",",
"size_range",
")",
":",
"if",
"not",
"isinstance",
"(",
"size_range",
",",
"NeuralNetworkImageSizeRange",
")",
":",
"raise",
"Exception",
"(",
"'Shape ranges should be of type NeuralNetworkImageSizeRange'",
")",
"feature",
"=",
"_get_feature",
"(",
"spec",
",",
"feature_name",
")",
"if",
"feature",
".",
"type",
".",
"WhichOneof",
"(",
"'Type'",
")",
"!=",
"'imageType'",
":",
"raise",
"Exception",
"(",
"'Trying to add size ranges for '",
"'a non-image feature type'",
")",
"feature",
".",
"type",
".",
"imageType",
".",
"ClearField",
"(",
"'SizeFlexibility'",
")",
"feature",
".",
"type",
".",
"imageType",
".",
"imageSizeRange",
".",
"heightRange",
".",
"lowerBound",
"=",
"size_range",
".",
"get_height_range",
"(",
")",
".",
"lowerBound",
"feature",
".",
"type",
".",
"imageType",
".",
"imageSizeRange",
".",
"heightRange",
".",
"upperBound",
"=",
"size_range",
".",
"get_height_range",
"(",
")",
".",
"upperBound",
"feature",
".",
"type",
".",
"imageType",
".",
"imageSizeRange",
".",
"widthRange",
".",
"lowerBound",
"=",
"size_range",
".",
"get_width_range",
"(",
")",
".",
"lowerBound",
"feature",
".",
"type",
".",
"imageType",
".",
"imageSizeRange",
".",
"widthRange",
".",
"upperBound",
"=",
"size_range",
".",
"get_width_range",
"(",
")",
".",
"upperBound",
"# Bump up specification version",
"spec",
".",
"specificationVersion",
"=",
"max",
"(",
"_MINIMUM_FLEXIBLE_SHAPES_SPEC_VERSION",
",",
"spec",
".",
"specificationVersion",
")"
] | Annotate an input or output Image feature in a Neural Network spec to
to accommodate a range of image sizes
:param spec: MLModel
The MLModel spec containing the feature
:param feature_name: str
The name of the Image feature for which to add shape information.
If the feature is not found in the input or output descriptions then
an exception is thrown
:param size_range: NeuralNetworkImageSizeRange
A NeuralNetworkImageSizeRange object with the populated image size
range information.
Examples
--------
.. sourcecode:: python
>>> import coremltools
>>> from coremltools.models.neural_network import flexible_shape_utils
>>> spec = coremltools.utils.load_spec('mymodel.mlmodel')
>>> img_size_ranges = flexible_shape_utils.NeuralNetworkImageSizeRange()
>>> img_size_ranges.add_height_range(64, 128)
>>> img_size_ranges.add_width_range(128, -1)
>>> flexible_shape_utils.update_image_size_range(spec, feature_name='my_multiarray_featurename', size_range=img_size_ranges)
:return:
None. The spec object is updated | [
"Annotate",
"an",
"input",
"or",
"output",
"Image",
"feature",
"in",
"a",
"Neural",
"Network",
"spec",
"to",
"to",
"accommodate",
"a",
"range",
"of",
"image",
"sizes"
] | 74514c3f99e25b46f22c6e02977fe3da69221c2e | https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/external/coremltools_wrap/coremltools/coremltools/models/neural_network/flexible_shape_utils.py#L440-L490 |
28,998 | apple/turicreate | src/external/coremltools_wrap/coremltools/coremltools/models/neural_network/flexible_shape_utils.py | update_multiarray_shape_range | def update_multiarray_shape_range(spec, feature_name, shape_range):
"""
Annotate an input or output MLMultiArray feature in a Neural Network spec
to accommodate a range of shapes
:param spec: MLModel
The MLModel spec containing the feature
:param feature_name: str
The name of the feature for which to add shape range
information. If the feature is not found in the input or output
descriptions then an exception is thrown
:param shape_range: NeuralNetworkMultiArrayShapeRange
A NeuralNetworkMultiArrayShapeRange object with the populated shape
range information. The shape_range object must either contain only
shape information for channel or channel, height and width. If
the object is invalid then an exception is thrown
Examples
--------
.. sourcecode:: python
>>> import coremltools
>>> from coremltools.models.neural_network import flexible_shape_utils
>>> spec = coremltools.utils.load_spec('mymodel.mlmodel')
>>> shape_range = flexible_shape_utils.NeuralNetworkMultiArrayShapeRange()
>>> shape_range.add_channel_range((1, 3))
>>> shape_range.add_width_range((128, 256))
>>> shape_range.add_height_range((128, 256))
>>> flexible_shape_utils.update_multiarray_shape_range(spec, feature_name='my_multiarray_featurename', shape_range=shape_range)
:return:
None. The spec is updated
"""
if not isinstance(shape_range, NeuralNetworkMultiArrayShapeRange):
raise Exception('Shape range should be of type MultiArrayShapeRange')
shape_range.validate_array_shape_range()
feature = _get_feature(spec, feature_name)
if feature.type.WhichOneof('Type') != 'multiArrayType':
raise Exception('Trying to update shape range for '
'a non-multiArray feature type')
# Add channel range
feature.type.multiArrayType.ClearField('ShapeFlexibility')
s = feature.type.multiArrayType.shapeRange.sizeRanges.add()
s.lowerBound = shape_range.get_channel_range().lowerBound
s.upperBound = shape_range.get_channel_range().upperBound
if shape_range.get_shape_range_dims() > 1:
# Add height range
s = feature.type.multiArrayType.shapeRange.sizeRanges.add()
s.lowerBound = shape_range.get_height_range().lowerBound
s.upperBound = shape_range.get_height_range().upperBound
# Add width range
s = feature.type.multiArrayType.shapeRange.sizeRanges.add()
s.lowerBound = shape_range.get_width_range().lowerBound
s.upperBound = shape_range.get_width_range().upperBound
# Bump up specification version
spec.specificationVersion = max(_MINIMUM_FLEXIBLE_SHAPES_SPEC_VERSION,
spec.specificationVersion) | python | def update_multiarray_shape_range(spec, feature_name, shape_range):
"""
Annotate an input or output MLMultiArray feature in a Neural Network spec
to accommodate a range of shapes
:param spec: MLModel
The MLModel spec containing the feature
:param feature_name: str
The name of the feature for which to add shape range
information. If the feature is not found in the input or output
descriptions then an exception is thrown
:param shape_range: NeuralNetworkMultiArrayShapeRange
A NeuralNetworkMultiArrayShapeRange object with the populated shape
range information. The shape_range object must either contain only
shape information for channel or channel, height and width. If
the object is invalid then an exception is thrown
Examples
--------
.. sourcecode:: python
>>> import coremltools
>>> from coremltools.models.neural_network import flexible_shape_utils
>>> spec = coremltools.utils.load_spec('mymodel.mlmodel')
>>> shape_range = flexible_shape_utils.NeuralNetworkMultiArrayShapeRange()
>>> shape_range.add_channel_range((1, 3))
>>> shape_range.add_width_range((128, 256))
>>> shape_range.add_height_range((128, 256))
>>> flexible_shape_utils.update_multiarray_shape_range(spec, feature_name='my_multiarray_featurename', shape_range=shape_range)
:return:
None. The spec is updated
"""
if not isinstance(shape_range, NeuralNetworkMultiArrayShapeRange):
raise Exception('Shape range should be of type MultiArrayShapeRange')
shape_range.validate_array_shape_range()
feature = _get_feature(spec, feature_name)
if feature.type.WhichOneof('Type') != 'multiArrayType':
raise Exception('Trying to update shape range for '
'a non-multiArray feature type')
# Add channel range
feature.type.multiArrayType.ClearField('ShapeFlexibility')
s = feature.type.multiArrayType.shapeRange.sizeRanges.add()
s.lowerBound = shape_range.get_channel_range().lowerBound
s.upperBound = shape_range.get_channel_range().upperBound
if shape_range.get_shape_range_dims() > 1:
# Add height range
s = feature.type.multiArrayType.shapeRange.sizeRanges.add()
s.lowerBound = shape_range.get_height_range().lowerBound
s.upperBound = shape_range.get_height_range().upperBound
# Add width range
s = feature.type.multiArrayType.shapeRange.sizeRanges.add()
s.lowerBound = shape_range.get_width_range().lowerBound
s.upperBound = shape_range.get_width_range().upperBound
# Bump up specification version
spec.specificationVersion = max(_MINIMUM_FLEXIBLE_SHAPES_SPEC_VERSION,
spec.specificationVersion) | [
"def",
"update_multiarray_shape_range",
"(",
"spec",
",",
"feature_name",
",",
"shape_range",
")",
":",
"if",
"not",
"isinstance",
"(",
"shape_range",
",",
"NeuralNetworkMultiArrayShapeRange",
")",
":",
"raise",
"Exception",
"(",
"'Shape range should be of type MultiArrayShapeRange'",
")",
"shape_range",
".",
"validate_array_shape_range",
"(",
")",
"feature",
"=",
"_get_feature",
"(",
"spec",
",",
"feature_name",
")",
"if",
"feature",
".",
"type",
".",
"WhichOneof",
"(",
"'Type'",
")",
"!=",
"'multiArrayType'",
":",
"raise",
"Exception",
"(",
"'Trying to update shape range for '",
"'a non-multiArray feature type'",
")",
"# Add channel range",
"feature",
".",
"type",
".",
"multiArrayType",
".",
"ClearField",
"(",
"'ShapeFlexibility'",
")",
"s",
"=",
"feature",
".",
"type",
".",
"multiArrayType",
".",
"shapeRange",
".",
"sizeRanges",
".",
"add",
"(",
")",
"s",
".",
"lowerBound",
"=",
"shape_range",
".",
"get_channel_range",
"(",
")",
".",
"lowerBound",
"s",
".",
"upperBound",
"=",
"shape_range",
".",
"get_channel_range",
"(",
")",
".",
"upperBound",
"if",
"shape_range",
".",
"get_shape_range_dims",
"(",
")",
">",
"1",
":",
"# Add height range",
"s",
"=",
"feature",
".",
"type",
".",
"multiArrayType",
".",
"shapeRange",
".",
"sizeRanges",
".",
"add",
"(",
")",
"s",
".",
"lowerBound",
"=",
"shape_range",
".",
"get_height_range",
"(",
")",
".",
"lowerBound",
"s",
".",
"upperBound",
"=",
"shape_range",
".",
"get_height_range",
"(",
")",
".",
"upperBound",
"# Add width range",
"s",
"=",
"feature",
".",
"type",
".",
"multiArrayType",
".",
"shapeRange",
".",
"sizeRanges",
".",
"add",
"(",
")",
"s",
".",
"lowerBound",
"=",
"shape_range",
".",
"get_width_range",
"(",
")",
".",
"lowerBound",
"s",
".",
"upperBound",
"=",
"shape_range",
".",
"get_width_range",
"(",
")",
".",
"upperBound",
"# Bump up specification version",
"spec",
".",
"specificationVersion",
"=",
"max",
"(",
"_MINIMUM_FLEXIBLE_SHAPES_SPEC_VERSION",
",",
"spec",
".",
"specificationVersion",
")"
] | Annotate an input or output MLMultiArray feature in a Neural Network spec
to accommodate a range of shapes
:param spec: MLModel
The MLModel spec containing the feature
:param feature_name: str
The name of the feature for which to add shape range
information. If the feature is not found in the input or output
descriptions then an exception is thrown
:param shape_range: NeuralNetworkMultiArrayShapeRange
A NeuralNetworkMultiArrayShapeRange object with the populated shape
range information. The shape_range object must either contain only
shape information for channel or channel, height and width. If
the object is invalid then an exception is thrown
Examples
--------
.. sourcecode:: python
>>> import coremltools
>>> from coremltools.models.neural_network import flexible_shape_utils
>>> spec = coremltools.utils.load_spec('mymodel.mlmodel')
>>> shape_range = flexible_shape_utils.NeuralNetworkMultiArrayShapeRange()
>>> shape_range.add_channel_range((1, 3))
>>> shape_range.add_width_range((128, 256))
>>> shape_range.add_height_range((128, 256))
>>> flexible_shape_utils.update_multiarray_shape_range(spec, feature_name='my_multiarray_featurename', shape_range=shape_range)
:return:
None. The spec is updated | [
"Annotate",
"an",
"input",
"or",
"output",
"MLMultiArray",
"feature",
"in",
"a",
"Neural",
"Network",
"spec",
"to",
"accommodate",
"a",
"range",
"of",
"shapes"
] | 74514c3f99e25b46f22c6e02977fe3da69221c2e | https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/external/coremltools_wrap/coremltools/coremltools/models/neural_network/flexible_shape_utils.py#L493-L556 |
28,999 | apple/turicreate | src/external/coremltools_wrap/coremltools/coremltools/models/neural_network/flexible_shape_utils.py | get_allowed_shape_ranges | def get_allowed_shape_ranges(spec):
"""
For a given model specification, returns a dictionary with a shape range object for each input feature name.
"""
shaper = NeuralNetworkShaper(spec, False)
inputs = _get_input_names(spec)
output = {}
for input in inputs:
output[input] = shaper.shape(input)
return output | python | def get_allowed_shape_ranges(spec):
"""
For a given model specification, returns a dictionary with a shape range object for each input feature name.
"""
shaper = NeuralNetworkShaper(spec, False)
inputs = _get_input_names(spec)
output = {}
for input in inputs:
output[input] = shaper.shape(input)
return output | [
"def",
"get_allowed_shape_ranges",
"(",
"spec",
")",
":",
"shaper",
"=",
"NeuralNetworkShaper",
"(",
"spec",
",",
"False",
")",
"inputs",
"=",
"_get_input_names",
"(",
"spec",
")",
"output",
"=",
"{",
"}",
"for",
"input",
"in",
"inputs",
":",
"output",
"[",
"input",
"]",
"=",
"shaper",
".",
"shape",
"(",
"input",
")",
"return",
"output"
] | For a given model specification, returns a dictionary with a shape range object for each input feature name. | [
"For",
"a",
"given",
"model",
"specification",
"returns",
"a",
"dictionary",
"with",
"a",
"shape",
"range",
"object",
"for",
"each",
"input",
"feature",
"name",
"."
] | 74514c3f99e25b46f22c6e02977fe3da69221c2e | https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/external/coremltools_wrap/coremltools/coremltools/models/neural_network/flexible_shape_utils.py#L559-L571 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.