id int32 0 252k | repo stringlengths 7 55 | path stringlengths 4 127 | func_name stringlengths 1 88 | original_string stringlengths 75 19.8k | language stringclasses 1 value | code stringlengths 75 19.8k | code_tokens list | docstring stringlengths 3 17.3k | docstring_tokens list | sha stringlengths 40 40 | url stringlengths 87 242 |
|---|---|---|---|---|---|---|---|---|---|---|---|
24,000 | apache/incubator-mxnet | python/mxnet/contrib/onnx/onnx2mx/_op_translations.py | global_lppooling | def global_lppooling(attrs, inputs, proto_obj):
"""Performs global lp pooling on the input."""
p_value = attrs.get('p', 2)
new_attrs = translation_utils._add_extra_attributes(attrs, {'global_pool': True,
'kernel': (1, 1),
'pool_type': 'lp',
'p_value': p_value})
new_attrs = translation_utils._remove_attributes(new_attrs, ['p'])
return 'Pooling', new_attrs, inputs | python | def global_lppooling(attrs, inputs, proto_obj):
"""Performs global lp pooling on the input."""
p_value = attrs.get('p', 2)
new_attrs = translation_utils._add_extra_attributes(attrs, {'global_pool': True,
'kernel': (1, 1),
'pool_type': 'lp',
'p_value': p_value})
new_attrs = translation_utils._remove_attributes(new_attrs, ['p'])
return 'Pooling', new_attrs, inputs | [
"def",
"global_lppooling",
"(",
"attrs",
",",
"inputs",
",",
"proto_obj",
")",
":",
"p_value",
"=",
"attrs",
".",
"get",
"(",
"'p'",
",",
"2",
")",
"new_attrs",
"=",
"translation_utils",
".",
"_add_extra_attributes",
"(",
"attrs",
",",
"{",
"'global_pool'",
":",
"True",
",",
"'kernel'",
":",
"(",
"1",
",",
"1",
")",
",",
"'pool_type'",
":",
"'lp'",
",",
"'p_value'",
":",
"p_value",
"}",
")",
"new_attrs",
"=",
"translation_utils",
".",
"_remove_attributes",
"(",
"new_attrs",
",",
"[",
"'p'",
"]",
")",
"return",
"'Pooling'",
",",
"new_attrs",
",",
"inputs"
] | Performs global lp pooling on the input. | [
"Performs",
"global",
"lp",
"pooling",
"on",
"the",
"input",
"."
] | 1af29e9c060a4c7d60eeaacba32afdb9a7775ba7 | https://github.com/apache/incubator-mxnet/blob/1af29e9c060a4c7d60eeaacba32afdb9a7775ba7/python/mxnet/contrib/onnx/onnx2mx/_op_translations.py#L405-L413 |
24,001 | apache/incubator-mxnet | python/mxnet/contrib/onnx/onnx2mx/_op_translations.py | linalg_gemm | def linalg_gemm(attrs, inputs, proto_obj):
"""Performs general matrix multiplication and accumulation"""
trans_a = 0
trans_b = 0
alpha = 1
beta = 1
if 'transA' in attrs:
trans_a = attrs['transA']
if 'transB' in attrs:
trans_b = attrs['transB']
if 'alpha' in attrs:
alpha = attrs['alpha']
if 'beta' in attrs:
beta = attrs['beta']
flatten_a = symbol.flatten(inputs[0])
matmul_op = symbol.linalg_gemm2(A=flatten_a, B=inputs[1],
transpose_a=trans_a, transpose_b=trans_b,
alpha=alpha)
gemm_op = symbol.broadcast_add(matmul_op, beta*inputs[2])
new_attrs = translation_utils._fix_attribute_names(attrs, {'transA': 'transpose_a',
'transB': 'transpose_b'})
new_attrs = translation_utils._remove_attributes(new_attrs, ['broadcast'])
return gemm_op, new_attrs, inputs | python | def linalg_gemm(attrs, inputs, proto_obj):
"""Performs general matrix multiplication and accumulation"""
trans_a = 0
trans_b = 0
alpha = 1
beta = 1
if 'transA' in attrs:
trans_a = attrs['transA']
if 'transB' in attrs:
trans_b = attrs['transB']
if 'alpha' in attrs:
alpha = attrs['alpha']
if 'beta' in attrs:
beta = attrs['beta']
flatten_a = symbol.flatten(inputs[0])
matmul_op = symbol.linalg_gemm2(A=flatten_a, B=inputs[1],
transpose_a=trans_a, transpose_b=trans_b,
alpha=alpha)
gemm_op = symbol.broadcast_add(matmul_op, beta*inputs[2])
new_attrs = translation_utils._fix_attribute_names(attrs, {'transA': 'transpose_a',
'transB': 'transpose_b'})
new_attrs = translation_utils._remove_attributes(new_attrs, ['broadcast'])
return gemm_op, new_attrs, inputs | [
"def",
"linalg_gemm",
"(",
"attrs",
",",
"inputs",
",",
"proto_obj",
")",
":",
"trans_a",
"=",
"0",
"trans_b",
"=",
"0",
"alpha",
"=",
"1",
"beta",
"=",
"1",
"if",
"'transA'",
"in",
"attrs",
":",
"trans_a",
"=",
"attrs",
"[",
"'transA'",
"]",
"if",
"'transB'",
"in",
"attrs",
":",
"trans_b",
"=",
"attrs",
"[",
"'transB'",
"]",
"if",
"'alpha'",
"in",
"attrs",
":",
"alpha",
"=",
"attrs",
"[",
"'alpha'",
"]",
"if",
"'beta'",
"in",
"attrs",
":",
"beta",
"=",
"attrs",
"[",
"'beta'",
"]",
"flatten_a",
"=",
"symbol",
".",
"flatten",
"(",
"inputs",
"[",
"0",
"]",
")",
"matmul_op",
"=",
"symbol",
".",
"linalg_gemm2",
"(",
"A",
"=",
"flatten_a",
",",
"B",
"=",
"inputs",
"[",
"1",
"]",
",",
"transpose_a",
"=",
"trans_a",
",",
"transpose_b",
"=",
"trans_b",
",",
"alpha",
"=",
"alpha",
")",
"gemm_op",
"=",
"symbol",
".",
"broadcast_add",
"(",
"matmul_op",
",",
"beta",
"*",
"inputs",
"[",
"2",
"]",
")",
"new_attrs",
"=",
"translation_utils",
".",
"_fix_attribute_names",
"(",
"attrs",
",",
"{",
"'transA'",
":",
"'transpose_a'",
",",
"'transB'",
":",
"'transpose_b'",
"}",
")",
"new_attrs",
"=",
"translation_utils",
".",
"_remove_attributes",
"(",
"new_attrs",
",",
"[",
"'broadcast'",
"]",
")",
"return",
"gemm_op",
",",
"new_attrs",
",",
"inputs"
] | Performs general matrix multiplication and accumulation | [
"Performs",
"general",
"matrix",
"multiplication",
"and",
"accumulation"
] | 1af29e9c060a4c7d60eeaacba32afdb9a7775ba7 | https://github.com/apache/incubator-mxnet/blob/1af29e9c060a4c7d60eeaacba32afdb9a7775ba7/python/mxnet/contrib/onnx/onnx2mx/_op_translations.py#L415-L437 |
24,002 | apache/incubator-mxnet | python/mxnet/contrib/onnx/onnx2mx/_op_translations.py | local_response_norm | def local_response_norm(attrs, inputs, proto_obj):
"""Local Response Normalization."""
new_attrs = translation_utils._fix_attribute_names(attrs,
{'bias': 'knorm',
'size' : 'nsize'})
return 'LRN', new_attrs, inputs | python | def local_response_norm(attrs, inputs, proto_obj):
"""Local Response Normalization."""
new_attrs = translation_utils._fix_attribute_names(attrs,
{'bias': 'knorm',
'size' : 'nsize'})
return 'LRN', new_attrs, inputs | [
"def",
"local_response_norm",
"(",
"attrs",
",",
"inputs",
",",
"proto_obj",
")",
":",
"new_attrs",
"=",
"translation_utils",
".",
"_fix_attribute_names",
"(",
"attrs",
",",
"{",
"'bias'",
":",
"'knorm'",
",",
"'size'",
":",
"'nsize'",
"}",
")",
"return",
"'LRN'",
",",
"new_attrs",
",",
"inputs"
] | Local Response Normalization. | [
"Local",
"Response",
"Normalization",
"."
] | 1af29e9c060a4c7d60eeaacba32afdb9a7775ba7 | https://github.com/apache/incubator-mxnet/blob/1af29e9c060a4c7d60eeaacba32afdb9a7775ba7/python/mxnet/contrib/onnx/onnx2mx/_op_translations.py#L439-L444 |
24,003 | apache/incubator-mxnet | python/mxnet/contrib/onnx/onnx2mx/_op_translations.py | dropout | def dropout(attrs, inputs, proto_obj):
"""Dropout Regularization."""
mode = 'training'
if 'is_test' in attrs and attrs['is_test'] == 0:
mode = 'always'
new_attrs = translation_utils._fix_attribute_names(attrs,
{'ratio': 'p'})
new_attrs = translation_utils._remove_attributes(new_attrs, ['is_test'])
new_attrs = translation_utils._add_extra_attributes(new_attrs, {'mode': mode})
return 'Dropout', new_attrs, inputs | python | def dropout(attrs, inputs, proto_obj):
"""Dropout Regularization."""
mode = 'training'
if 'is_test' in attrs and attrs['is_test'] == 0:
mode = 'always'
new_attrs = translation_utils._fix_attribute_names(attrs,
{'ratio': 'p'})
new_attrs = translation_utils._remove_attributes(new_attrs, ['is_test'])
new_attrs = translation_utils._add_extra_attributes(new_attrs, {'mode': mode})
return 'Dropout', new_attrs, inputs | [
"def",
"dropout",
"(",
"attrs",
",",
"inputs",
",",
"proto_obj",
")",
":",
"mode",
"=",
"'training'",
"if",
"'is_test'",
"in",
"attrs",
"and",
"attrs",
"[",
"'is_test'",
"]",
"==",
"0",
":",
"mode",
"=",
"'always'",
"new_attrs",
"=",
"translation_utils",
".",
"_fix_attribute_names",
"(",
"attrs",
",",
"{",
"'ratio'",
":",
"'p'",
"}",
")",
"new_attrs",
"=",
"translation_utils",
".",
"_remove_attributes",
"(",
"new_attrs",
",",
"[",
"'is_test'",
"]",
")",
"new_attrs",
"=",
"translation_utils",
".",
"_add_extra_attributes",
"(",
"new_attrs",
",",
"{",
"'mode'",
":",
"mode",
"}",
")",
"return",
"'Dropout'",
",",
"new_attrs",
",",
"inputs"
] | Dropout Regularization. | [
"Dropout",
"Regularization",
"."
] | 1af29e9c060a4c7d60eeaacba32afdb9a7775ba7 | https://github.com/apache/incubator-mxnet/blob/1af29e9c060a4c7d60eeaacba32afdb9a7775ba7/python/mxnet/contrib/onnx/onnx2mx/_op_translations.py#L446-L455 |
24,004 | apache/incubator-mxnet | python/mxnet/contrib/onnx/onnx2mx/_op_translations.py | reshape | def reshape(attrs, inputs, proto_obj):
"""Reshape the given array by the shape attribute."""
if len(inputs) == 1:
return 'reshape', attrs, inputs[0]
reshape_shape = list(proto_obj._params[inputs[1].name].asnumpy())
reshape_shape = [int(i) for i in reshape_shape]
new_attrs = {'shape': reshape_shape}
return 'reshape', new_attrs, inputs[:1] | python | def reshape(attrs, inputs, proto_obj):
"""Reshape the given array by the shape attribute."""
if len(inputs) == 1:
return 'reshape', attrs, inputs[0]
reshape_shape = list(proto_obj._params[inputs[1].name].asnumpy())
reshape_shape = [int(i) for i in reshape_shape]
new_attrs = {'shape': reshape_shape}
return 'reshape', new_attrs, inputs[:1] | [
"def",
"reshape",
"(",
"attrs",
",",
"inputs",
",",
"proto_obj",
")",
":",
"if",
"len",
"(",
"inputs",
")",
"==",
"1",
":",
"return",
"'reshape'",
",",
"attrs",
",",
"inputs",
"[",
"0",
"]",
"reshape_shape",
"=",
"list",
"(",
"proto_obj",
".",
"_params",
"[",
"inputs",
"[",
"1",
"]",
".",
"name",
"]",
".",
"asnumpy",
"(",
")",
")",
"reshape_shape",
"=",
"[",
"int",
"(",
"i",
")",
"for",
"i",
"in",
"reshape_shape",
"]",
"new_attrs",
"=",
"{",
"'shape'",
":",
"reshape_shape",
"}",
"return",
"'reshape'",
",",
"new_attrs",
",",
"inputs",
"[",
":",
"1",
"]"
] | Reshape the given array by the shape attribute. | [
"Reshape",
"the",
"given",
"array",
"by",
"the",
"shape",
"attribute",
"."
] | 1af29e9c060a4c7d60eeaacba32afdb9a7775ba7 | https://github.com/apache/incubator-mxnet/blob/1af29e9c060a4c7d60eeaacba32afdb9a7775ba7/python/mxnet/contrib/onnx/onnx2mx/_op_translations.py#L458-L465 |
24,005 | apache/incubator-mxnet | python/mxnet/contrib/onnx/onnx2mx/_op_translations.py | cast | def cast(attrs, inputs, proto_obj):
""" Cast input to a given dtype"""
try:
from onnx.mapping import TENSOR_TYPE_TO_NP_TYPE
except ImportError:
raise ImportError("Onnx and protobuf need to be installed. "
+ "Instructions to install - https://github.com/onnx/onnx")
new_attrs = translation_utils._fix_attribute_names(attrs, {'to' : 'dtype'})
new_attrs['dtype'] = TENSOR_TYPE_TO_NP_TYPE[int(new_attrs['dtype'])]
return 'cast', new_attrs, inputs | python | def cast(attrs, inputs, proto_obj):
""" Cast input to a given dtype"""
try:
from onnx.mapping import TENSOR_TYPE_TO_NP_TYPE
except ImportError:
raise ImportError("Onnx and protobuf need to be installed. "
+ "Instructions to install - https://github.com/onnx/onnx")
new_attrs = translation_utils._fix_attribute_names(attrs, {'to' : 'dtype'})
new_attrs['dtype'] = TENSOR_TYPE_TO_NP_TYPE[int(new_attrs['dtype'])]
return 'cast', new_attrs, inputs | [
"def",
"cast",
"(",
"attrs",
",",
"inputs",
",",
"proto_obj",
")",
":",
"try",
":",
"from",
"onnx",
".",
"mapping",
"import",
"TENSOR_TYPE_TO_NP_TYPE",
"except",
"ImportError",
":",
"raise",
"ImportError",
"(",
"\"Onnx and protobuf need to be installed. \"",
"+",
"\"Instructions to install - https://github.com/onnx/onnx\"",
")",
"new_attrs",
"=",
"translation_utils",
".",
"_fix_attribute_names",
"(",
"attrs",
",",
"{",
"'to'",
":",
"'dtype'",
"}",
")",
"new_attrs",
"[",
"'dtype'",
"]",
"=",
"TENSOR_TYPE_TO_NP_TYPE",
"[",
"int",
"(",
"new_attrs",
"[",
"'dtype'",
"]",
")",
"]",
"return",
"'cast'",
",",
"new_attrs",
",",
"inputs"
] | Cast input to a given dtype | [
"Cast",
"input",
"to",
"a",
"given",
"dtype"
] | 1af29e9c060a4c7d60eeaacba32afdb9a7775ba7 | https://github.com/apache/incubator-mxnet/blob/1af29e9c060a4c7d60eeaacba32afdb9a7775ba7/python/mxnet/contrib/onnx/onnx2mx/_op_translations.py#L467-L476 |
24,006 | apache/incubator-mxnet | python/mxnet/contrib/onnx/onnx2mx/_op_translations.py | split | def split(attrs, inputs, proto_obj):
"""Splits an array along a particular axis into multiple sub-arrays."""
split_list = attrs.get('split') if 'split' in attrs else []
new_attrs = translation_utils._fix_attribute_names(attrs,
{'split' : 'num_outputs'})
if 'axis' not in attrs:
new_attrs = translation_utils._add_extra_attributes(new_attrs, {'axis': 0})
if not split_list:
num_outputs = len(proto_obj.model_metadata.get('output_tensor_data'))
else:
if len(set(split_list)) == 1:
num_outputs = len(split_list)
else:
raise NotImplementedError("Operator {} in MXNet does not support variable splits."
"Tracking the issue to support variable split here: "
"https://github.com/apache/incubator-mxnet/issues/11594"
.format('split'))
new_attrs['num_outputs'] = num_outputs
return 'split', new_attrs, inputs | python | def split(attrs, inputs, proto_obj):
"""Splits an array along a particular axis into multiple sub-arrays."""
split_list = attrs.get('split') if 'split' in attrs else []
new_attrs = translation_utils._fix_attribute_names(attrs,
{'split' : 'num_outputs'})
if 'axis' not in attrs:
new_attrs = translation_utils._add_extra_attributes(new_attrs, {'axis': 0})
if not split_list:
num_outputs = len(proto_obj.model_metadata.get('output_tensor_data'))
else:
if len(set(split_list)) == 1:
num_outputs = len(split_list)
else:
raise NotImplementedError("Operator {} in MXNet does not support variable splits."
"Tracking the issue to support variable split here: "
"https://github.com/apache/incubator-mxnet/issues/11594"
.format('split'))
new_attrs['num_outputs'] = num_outputs
return 'split', new_attrs, inputs | [
"def",
"split",
"(",
"attrs",
",",
"inputs",
",",
"proto_obj",
")",
":",
"split_list",
"=",
"attrs",
".",
"get",
"(",
"'split'",
")",
"if",
"'split'",
"in",
"attrs",
"else",
"[",
"]",
"new_attrs",
"=",
"translation_utils",
".",
"_fix_attribute_names",
"(",
"attrs",
",",
"{",
"'split'",
":",
"'num_outputs'",
"}",
")",
"if",
"'axis'",
"not",
"in",
"attrs",
":",
"new_attrs",
"=",
"translation_utils",
".",
"_add_extra_attributes",
"(",
"new_attrs",
",",
"{",
"'axis'",
":",
"0",
"}",
")",
"if",
"not",
"split_list",
":",
"num_outputs",
"=",
"len",
"(",
"proto_obj",
".",
"model_metadata",
".",
"get",
"(",
"'output_tensor_data'",
")",
")",
"else",
":",
"if",
"len",
"(",
"set",
"(",
"split_list",
")",
")",
"==",
"1",
":",
"num_outputs",
"=",
"len",
"(",
"split_list",
")",
"else",
":",
"raise",
"NotImplementedError",
"(",
"\"Operator {} in MXNet does not support variable splits.\"",
"\"Tracking the issue to support variable split here: \"",
"\"https://github.com/apache/incubator-mxnet/issues/11594\"",
".",
"format",
"(",
"'split'",
")",
")",
"new_attrs",
"[",
"'num_outputs'",
"]",
"=",
"num_outputs",
"return",
"'split'",
",",
"new_attrs",
",",
"inputs"
] | Splits an array along a particular axis into multiple sub-arrays. | [
"Splits",
"an",
"array",
"along",
"a",
"particular",
"axis",
"into",
"multiple",
"sub",
"-",
"arrays",
"."
] | 1af29e9c060a4c7d60eeaacba32afdb9a7775ba7 | https://github.com/apache/incubator-mxnet/blob/1af29e9c060a4c7d60eeaacba32afdb9a7775ba7/python/mxnet/contrib/onnx/onnx2mx/_op_translations.py#L478-L498 |
24,007 | apache/incubator-mxnet | python/mxnet/contrib/onnx/onnx2mx/_op_translations.py | _slice | def _slice(attrs, inputs, proto_obj):
"""Returns a slice of the input tensor along multiple axes."""
new_attrs = translation_utils._fix_attribute_names(attrs,
{'axes' : 'axis',
'ends' : 'end',
'starts' : 'begin'})
# onnx slice provides slicing on multiple axis. Adding multiple slice_axis operator
# for multiple axes from mxnet
begin = new_attrs.get('begin')
end = new_attrs.get('end')
axes = new_attrs.get('axis', tuple(range(len(begin))))
slice_op = symbol.slice_axis(inputs[0], axis=axes[0], begin=begin[0], end=end[0])
if len(axes) > 1:
for i, axis in enumerate(axes):
slice_op = symbol.slice_axis(slice_op, axis=axis, begin=begin[i], end=end[i])
return slice_op, new_attrs, inputs | python | def _slice(attrs, inputs, proto_obj):
"""Returns a slice of the input tensor along multiple axes."""
new_attrs = translation_utils._fix_attribute_names(attrs,
{'axes' : 'axis',
'ends' : 'end',
'starts' : 'begin'})
# onnx slice provides slicing on multiple axis. Adding multiple slice_axis operator
# for multiple axes from mxnet
begin = new_attrs.get('begin')
end = new_attrs.get('end')
axes = new_attrs.get('axis', tuple(range(len(begin))))
slice_op = symbol.slice_axis(inputs[0], axis=axes[0], begin=begin[0], end=end[0])
if len(axes) > 1:
for i, axis in enumerate(axes):
slice_op = symbol.slice_axis(slice_op, axis=axis, begin=begin[i], end=end[i])
return slice_op, new_attrs, inputs | [
"def",
"_slice",
"(",
"attrs",
",",
"inputs",
",",
"proto_obj",
")",
":",
"new_attrs",
"=",
"translation_utils",
".",
"_fix_attribute_names",
"(",
"attrs",
",",
"{",
"'axes'",
":",
"'axis'",
",",
"'ends'",
":",
"'end'",
",",
"'starts'",
":",
"'begin'",
"}",
")",
"# onnx slice provides slicing on multiple axis. Adding multiple slice_axis operator",
"# for multiple axes from mxnet",
"begin",
"=",
"new_attrs",
".",
"get",
"(",
"'begin'",
")",
"end",
"=",
"new_attrs",
".",
"get",
"(",
"'end'",
")",
"axes",
"=",
"new_attrs",
".",
"get",
"(",
"'axis'",
",",
"tuple",
"(",
"range",
"(",
"len",
"(",
"begin",
")",
")",
")",
")",
"slice_op",
"=",
"symbol",
".",
"slice_axis",
"(",
"inputs",
"[",
"0",
"]",
",",
"axis",
"=",
"axes",
"[",
"0",
"]",
",",
"begin",
"=",
"begin",
"[",
"0",
"]",
",",
"end",
"=",
"end",
"[",
"0",
"]",
")",
"if",
"len",
"(",
"axes",
")",
">",
"1",
":",
"for",
"i",
",",
"axis",
"in",
"enumerate",
"(",
"axes",
")",
":",
"slice_op",
"=",
"symbol",
".",
"slice_axis",
"(",
"slice_op",
",",
"axis",
"=",
"axis",
",",
"begin",
"=",
"begin",
"[",
"i",
"]",
",",
"end",
"=",
"end",
"[",
"i",
"]",
")",
"return",
"slice_op",
",",
"new_attrs",
",",
"inputs"
] | Returns a slice of the input tensor along multiple axes. | [
"Returns",
"a",
"slice",
"of",
"the",
"input",
"tensor",
"along",
"multiple",
"axes",
"."
] | 1af29e9c060a4c7d60eeaacba32afdb9a7775ba7 | https://github.com/apache/incubator-mxnet/blob/1af29e9c060a4c7d60eeaacba32afdb9a7775ba7/python/mxnet/contrib/onnx/onnx2mx/_op_translations.py#L500-L515 |
24,008 | apache/incubator-mxnet | python/mxnet/contrib/onnx/onnx2mx/_op_translations.py | transpose | def transpose(attrs, inputs, proto_obj):
"""Transpose the input array."""
new_attrs = translation_utils._fix_attribute_names(attrs,
{'perm' : 'axes'})
return 'transpose', new_attrs, inputs | python | def transpose(attrs, inputs, proto_obj):
"""Transpose the input array."""
new_attrs = translation_utils._fix_attribute_names(attrs,
{'perm' : 'axes'})
return 'transpose', new_attrs, inputs | [
"def",
"transpose",
"(",
"attrs",
",",
"inputs",
",",
"proto_obj",
")",
":",
"new_attrs",
"=",
"translation_utils",
".",
"_fix_attribute_names",
"(",
"attrs",
",",
"{",
"'perm'",
":",
"'axes'",
"}",
")",
"return",
"'transpose'",
",",
"new_attrs",
",",
"inputs"
] | Transpose the input array. | [
"Transpose",
"the",
"input",
"array",
"."
] | 1af29e9c060a4c7d60eeaacba32afdb9a7775ba7 | https://github.com/apache/incubator-mxnet/blob/1af29e9c060a4c7d60eeaacba32afdb9a7775ba7/python/mxnet/contrib/onnx/onnx2mx/_op_translations.py#L517-L521 |
24,009 | apache/incubator-mxnet | python/mxnet/contrib/onnx/onnx2mx/_op_translations.py | squeeze | def squeeze(attrs, inputs, proto_obj):
"""Remove single-dimensional entries from the shape of a tensor."""
new_attrs = translation_utils._fix_attribute_names(attrs,
{'axes' : 'axis'})
return 'squeeze', new_attrs, inputs | python | def squeeze(attrs, inputs, proto_obj):
"""Remove single-dimensional entries from the shape of a tensor."""
new_attrs = translation_utils._fix_attribute_names(attrs,
{'axes' : 'axis'})
return 'squeeze', new_attrs, inputs | [
"def",
"squeeze",
"(",
"attrs",
",",
"inputs",
",",
"proto_obj",
")",
":",
"new_attrs",
"=",
"translation_utils",
".",
"_fix_attribute_names",
"(",
"attrs",
",",
"{",
"'axes'",
":",
"'axis'",
"}",
")",
"return",
"'squeeze'",
",",
"new_attrs",
",",
"inputs"
] | Remove single-dimensional entries from the shape of a tensor. | [
"Remove",
"single",
"-",
"dimensional",
"entries",
"from",
"the",
"shape",
"of",
"a",
"tensor",
"."
] | 1af29e9c060a4c7d60eeaacba32afdb9a7775ba7 | https://github.com/apache/incubator-mxnet/blob/1af29e9c060a4c7d60eeaacba32afdb9a7775ba7/python/mxnet/contrib/onnx/onnx2mx/_op_translations.py#L523-L527 |
24,010 | apache/incubator-mxnet | python/mxnet/contrib/onnx/onnx2mx/_op_translations.py | unsqueeze | def unsqueeze(attrs, inputs, cls):
"""Inserts a new axis of size 1 into the array shape"""
# MXNet can only add one axis at a time.
mxnet_op = inputs[0]
for axis in attrs["axes"]:
mxnet_op = symbol.expand_dims(mxnet_op, axis=axis)
return mxnet_op, attrs, inputs | python | def unsqueeze(attrs, inputs, cls):
"""Inserts a new axis of size 1 into the array shape"""
# MXNet can only add one axis at a time.
mxnet_op = inputs[0]
for axis in attrs["axes"]:
mxnet_op = symbol.expand_dims(mxnet_op, axis=axis)
return mxnet_op, attrs, inputs | [
"def",
"unsqueeze",
"(",
"attrs",
",",
"inputs",
",",
"cls",
")",
":",
"# MXNet can only add one axis at a time.",
"mxnet_op",
"=",
"inputs",
"[",
"0",
"]",
"for",
"axis",
"in",
"attrs",
"[",
"\"axes\"",
"]",
":",
"mxnet_op",
"=",
"symbol",
".",
"expand_dims",
"(",
"mxnet_op",
",",
"axis",
"=",
"axis",
")",
"return",
"mxnet_op",
",",
"attrs",
",",
"inputs"
] | Inserts a new axis of size 1 into the array shape | [
"Inserts",
"a",
"new",
"axis",
"of",
"size",
"1",
"into",
"the",
"array",
"shape"
] | 1af29e9c060a4c7d60eeaacba32afdb9a7775ba7 | https://github.com/apache/incubator-mxnet/blob/1af29e9c060a4c7d60eeaacba32afdb9a7775ba7/python/mxnet/contrib/onnx/onnx2mx/_op_translations.py#L529-L536 |
24,011 | apache/incubator-mxnet | python/mxnet/contrib/onnx/onnx2mx/_op_translations.py | flatten | def flatten(attrs, inputs, proto_obj):
"""Flattens the input array into a 2-D array by collapsing the higher dimensions."""
#Mxnet does not have axis support. By default uses axis=1
if 'axis' in attrs and attrs['axis'] != 1:
raise RuntimeError("Flatten operator only supports axis=1")
new_attrs = translation_utils._remove_attributes(attrs, ['axis'])
return 'Flatten', new_attrs, inputs | python | def flatten(attrs, inputs, proto_obj):
"""Flattens the input array into a 2-D array by collapsing the higher dimensions."""
#Mxnet does not have axis support. By default uses axis=1
if 'axis' in attrs and attrs['axis'] != 1:
raise RuntimeError("Flatten operator only supports axis=1")
new_attrs = translation_utils._remove_attributes(attrs, ['axis'])
return 'Flatten', new_attrs, inputs | [
"def",
"flatten",
"(",
"attrs",
",",
"inputs",
",",
"proto_obj",
")",
":",
"#Mxnet does not have axis support. By default uses axis=1",
"if",
"'axis'",
"in",
"attrs",
"and",
"attrs",
"[",
"'axis'",
"]",
"!=",
"1",
":",
"raise",
"RuntimeError",
"(",
"\"Flatten operator only supports axis=1\"",
")",
"new_attrs",
"=",
"translation_utils",
".",
"_remove_attributes",
"(",
"attrs",
",",
"[",
"'axis'",
"]",
")",
"return",
"'Flatten'",
",",
"new_attrs",
",",
"inputs"
] | Flattens the input array into a 2-D array by collapsing the higher dimensions. | [
"Flattens",
"the",
"input",
"array",
"into",
"a",
"2",
"-",
"D",
"array",
"by",
"collapsing",
"the",
"higher",
"dimensions",
"."
] | 1af29e9c060a4c7d60eeaacba32afdb9a7775ba7 | https://github.com/apache/incubator-mxnet/blob/1af29e9c060a4c7d60eeaacba32afdb9a7775ba7/python/mxnet/contrib/onnx/onnx2mx/_op_translations.py#L538-L544 |
24,012 | apache/incubator-mxnet | python/mxnet/contrib/onnx/onnx2mx/_op_translations.py | reduce_max | def reduce_max(attrs, inputs, proto_obj):
"""Reduce the array along a given axis by maximum value"""
new_attrs = translation_utils._fix_attribute_names(attrs, {'axes':'axis'})
return 'max', new_attrs, inputs | python | def reduce_max(attrs, inputs, proto_obj):
"""Reduce the array along a given axis by maximum value"""
new_attrs = translation_utils._fix_attribute_names(attrs, {'axes':'axis'})
return 'max', new_attrs, inputs | [
"def",
"reduce_max",
"(",
"attrs",
",",
"inputs",
",",
"proto_obj",
")",
":",
"new_attrs",
"=",
"translation_utils",
".",
"_fix_attribute_names",
"(",
"attrs",
",",
"{",
"'axes'",
":",
"'axis'",
"}",
")",
"return",
"'max'",
",",
"new_attrs",
",",
"inputs"
] | Reduce the array along a given axis by maximum value | [
"Reduce",
"the",
"array",
"along",
"a",
"given",
"axis",
"by",
"maximum",
"value"
] | 1af29e9c060a4c7d60eeaacba32afdb9a7775ba7 | https://github.com/apache/incubator-mxnet/blob/1af29e9c060a4c7d60eeaacba32afdb9a7775ba7/python/mxnet/contrib/onnx/onnx2mx/_op_translations.py#L615-L618 |
24,013 | apache/incubator-mxnet | python/mxnet/contrib/onnx/onnx2mx/_op_translations.py | reduce_mean | def reduce_mean(attrs, inputs, proto_obj):
"""Reduce the array along a given axis by mean value"""
new_attrs = translation_utils._fix_attribute_names(attrs, {'axes':'axis'})
return 'mean', new_attrs, inputs | python | def reduce_mean(attrs, inputs, proto_obj):
"""Reduce the array along a given axis by mean value"""
new_attrs = translation_utils._fix_attribute_names(attrs, {'axes':'axis'})
return 'mean', new_attrs, inputs | [
"def",
"reduce_mean",
"(",
"attrs",
",",
"inputs",
",",
"proto_obj",
")",
":",
"new_attrs",
"=",
"translation_utils",
".",
"_fix_attribute_names",
"(",
"attrs",
",",
"{",
"'axes'",
":",
"'axis'",
"}",
")",
"return",
"'mean'",
",",
"new_attrs",
",",
"inputs"
] | Reduce the array along a given axis by mean value | [
"Reduce",
"the",
"array",
"along",
"a",
"given",
"axis",
"by",
"mean",
"value"
] | 1af29e9c060a4c7d60eeaacba32afdb9a7775ba7 | https://github.com/apache/incubator-mxnet/blob/1af29e9c060a4c7d60eeaacba32afdb9a7775ba7/python/mxnet/contrib/onnx/onnx2mx/_op_translations.py#L620-L623 |
24,014 | apache/incubator-mxnet | python/mxnet/contrib/onnx/onnx2mx/_op_translations.py | reduce_min | def reduce_min(attrs, inputs, proto_obj):
"""Reduce the array along a given axis by minimum value"""
new_attrs = translation_utils._fix_attribute_names(attrs, {'axes':'axis'})
return 'min', new_attrs, inputs | python | def reduce_min(attrs, inputs, proto_obj):
"""Reduce the array along a given axis by minimum value"""
new_attrs = translation_utils._fix_attribute_names(attrs, {'axes':'axis'})
return 'min', new_attrs, inputs | [
"def",
"reduce_min",
"(",
"attrs",
",",
"inputs",
",",
"proto_obj",
")",
":",
"new_attrs",
"=",
"translation_utils",
".",
"_fix_attribute_names",
"(",
"attrs",
",",
"{",
"'axes'",
":",
"'axis'",
"}",
")",
"return",
"'min'",
",",
"new_attrs",
",",
"inputs"
] | Reduce the array along a given axis by minimum value | [
"Reduce",
"the",
"array",
"along",
"a",
"given",
"axis",
"by",
"minimum",
"value"
] | 1af29e9c060a4c7d60eeaacba32afdb9a7775ba7 | https://github.com/apache/incubator-mxnet/blob/1af29e9c060a4c7d60eeaacba32afdb9a7775ba7/python/mxnet/contrib/onnx/onnx2mx/_op_translations.py#L625-L628 |
24,015 | apache/incubator-mxnet | python/mxnet/contrib/onnx/onnx2mx/_op_translations.py | reduce_sum | def reduce_sum(attrs, inputs, proto_obj):
"""Reduce the array along a given axis by sum value"""
new_attrs = translation_utils._fix_attribute_names(attrs, {'axes':'axis'})
return 'sum', new_attrs, inputs | python | def reduce_sum(attrs, inputs, proto_obj):
"""Reduce the array along a given axis by sum value"""
new_attrs = translation_utils._fix_attribute_names(attrs, {'axes':'axis'})
return 'sum', new_attrs, inputs | [
"def",
"reduce_sum",
"(",
"attrs",
",",
"inputs",
",",
"proto_obj",
")",
":",
"new_attrs",
"=",
"translation_utils",
".",
"_fix_attribute_names",
"(",
"attrs",
",",
"{",
"'axes'",
":",
"'axis'",
"}",
")",
"return",
"'sum'",
",",
"new_attrs",
",",
"inputs"
] | Reduce the array along a given axis by sum value | [
"Reduce",
"the",
"array",
"along",
"a",
"given",
"axis",
"by",
"sum",
"value"
] | 1af29e9c060a4c7d60eeaacba32afdb9a7775ba7 | https://github.com/apache/incubator-mxnet/blob/1af29e9c060a4c7d60eeaacba32afdb9a7775ba7/python/mxnet/contrib/onnx/onnx2mx/_op_translations.py#L630-L633 |
24,016 | apache/incubator-mxnet | python/mxnet/contrib/onnx/onnx2mx/_op_translations.py | reduce_prod | def reduce_prod(attrs, inputs, proto_obj):
"""Reduce the array along a given axis by product value"""
new_attrs = translation_utils._fix_attribute_names(attrs, {'axes':'axis'})
return 'prod', new_attrs, inputs | python | def reduce_prod(attrs, inputs, proto_obj):
"""Reduce the array along a given axis by product value"""
new_attrs = translation_utils._fix_attribute_names(attrs, {'axes':'axis'})
return 'prod', new_attrs, inputs | [
"def",
"reduce_prod",
"(",
"attrs",
",",
"inputs",
",",
"proto_obj",
")",
":",
"new_attrs",
"=",
"translation_utils",
".",
"_fix_attribute_names",
"(",
"attrs",
",",
"{",
"'axes'",
":",
"'axis'",
"}",
")",
"return",
"'prod'",
",",
"new_attrs",
",",
"inputs"
] | Reduce the array along a given axis by product value | [
"Reduce",
"the",
"array",
"along",
"a",
"given",
"axis",
"by",
"product",
"value"
] | 1af29e9c060a4c7d60eeaacba32afdb9a7775ba7 | https://github.com/apache/incubator-mxnet/blob/1af29e9c060a4c7d60eeaacba32afdb9a7775ba7/python/mxnet/contrib/onnx/onnx2mx/_op_translations.py#L635-L638 |
24,017 | apache/incubator-mxnet | python/mxnet/contrib/onnx/onnx2mx/_op_translations.py | reduce_log_sum | def reduce_log_sum(attrs, inputs, proto_obj):
"""Reduce the array along a given axis by log sum value"""
keep_dims = True if 'keepdims' not in attrs else attrs.get('keepdims')
sum_op = symbol.sum(inputs[0], axis=attrs.get('axes'),
keepdims=keep_dims)
log_sym = symbol.log(sum_op)
return log_sym, attrs, inputs | python | def reduce_log_sum(attrs, inputs, proto_obj):
"""Reduce the array along a given axis by log sum value"""
keep_dims = True if 'keepdims' not in attrs else attrs.get('keepdims')
sum_op = symbol.sum(inputs[0], axis=attrs.get('axes'),
keepdims=keep_dims)
log_sym = symbol.log(sum_op)
return log_sym, attrs, inputs | [
"def",
"reduce_log_sum",
"(",
"attrs",
",",
"inputs",
",",
"proto_obj",
")",
":",
"keep_dims",
"=",
"True",
"if",
"'keepdims'",
"not",
"in",
"attrs",
"else",
"attrs",
".",
"get",
"(",
"'keepdims'",
")",
"sum_op",
"=",
"symbol",
".",
"sum",
"(",
"inputs",
"[",
"0",
"]",
",",
"axis",
"=",
"attrs",
".",
"get",
"(",
"'axes'",
")",
",",
"keepdims",
"=",
"keep_dims",
")",
"log_sym",
"=",
"symbol",
".",
"log",
"(",
"sum_op",
")",
"return",
"log_sym",
",",
"attrs",
",",
"inputs"
] | Reduce the array along a given axis by log sum value | [
"Reduce",
"the",
"array",
"along",
"a",
"given",
"axis",
"by",
"log",
"sum",
"value"
] | 1af29e9c060a4c7d60eeaacba32afdb9a7775ba7 | https://github.com/apache/incubator-mxnet/blob/1af29e9c060a4c7d60eeaacba32afdb9a7775ba7/python/mxnet/contrib/onnx/onnx2mx/_op_translations.py#L640-L646 |
24,018 | apache/incubator-mxnet | python/mxnet/contrib/onnx/onnx2mx/_op_translations.py | reduce_log_sum_exp | def reduce_log_sum_exp(attrs, inputs, proto_obj):
"""Reduce the array along a given axis by log sum exp value"""
keep_dims = True if 'keepdims' not in attrs else attrs.get('keepdims')
exp_op = symbol.exp(inputs[0])
sum_op = symbol.sum(exp_op, axis=attrs.get('axes'),
keepdims=keep_dims)
log_sym = symbol.log(sum_op)
return log_sym, attrs, inputs | python | def reduce_log_sum_exp(attrs, inputs, proto_obj):
"""Reduce the array along a given axis by log sum exp value"""
keep_dims = True if 'keepdims' not in attrs else attrs.get('keepdims')
exp_op = symbol.exp(inputs[0])
sum_op = symbol.sum(exp_op, axis=attrs.get('axes'),
keepdims=keep_dims)
log_sym = symbol.log(sum_op)
return log_sym, attrs, inputs | [
"def",
"reduce_log_sum_exp",
"(",
"attrs",
",",
"inputs",
",",
"proto_obj",
")",
":",
"keep_dims",
"=",
"True",
"if",
"'keepdims'",
"not",
"in",
"attrs",
"else",
"attrs",
".",
"get",
"(",
"'keepdims'",
")",
"exp_op",
"=",
"symbol",
".",
"exp",
"(",
"inputs",
"[",
"0",
"]",
")",
"sum_op",
"=",
"symbol",
".",
"sum",
"(",
"exp_op",
",",
"axis",
"=",
"attrs",
".",
"get",
"(",
"'axes'",
")",
",",
"keepdims",
"=",
"keep_dims",
")",
"log_sym",
"=",
"symbol",
".",
"log",
"(",
"sum_op",
")",
"return",
"log_sym",
",",
"attrs",
",",
"inputs"
] | Reduce the array along a given axis by log sum exp value | [
"Reduce",
"the",
"array",
"along",
"a",
"given",
"axis",
"by",
"log",
"sum",
"exp",
"value"
] | 1af29e9c060a4c7d60eeaacba32afdb9a7775ba7 | https://github.com/apache/incubator-mxnet/blob/1af29e9c060a4c7d60eeaacba32afdb9a7775ba7/python/mxnet/contrib/onnx/onnx2mx/_op_translations.py#L648-L655 |
24,019 | apache/incubator-mxnet | python/mxnet/contrib/onnx/onnx2mx/_op_translations.py | reduce_sum_square | def reduce_sum_square(attrs, inputs, proto_obj):
"""Reduce the array along a given axis by sum square value"""
square_op = symbol.square(inputs[0])
sum_op = symbol.sum(square_op, axis=attrs.get('axes'),
keepdims=attrs.get('keepdims'))
return sum_op, attrs, inputs | python | def reduce_sum_square(attrs, inputs, proto_obj):
"""Reduce the array along a given axis by sum square value"""
square_op = symbol.square(inputs[0])
sum_op = symbol.sum(square_op, axis=attrs.get('axes'),
keepdims=attrs.get('keepdims'))
return sum_op, attrs, inputs | [
"def",
"reduce_sum_square",
"(",
"attrs",
",",
"inputs",
",",
"proto_obj",
")",
":",
"square_op",
"=",
"symbol",
".",
"square",
"(",
"inputs",
"[",
"0",
"]",
")",
"sum_op",
"=",
"symbol",
".",
"sum",
"(",
"square_op",
",",
"axis",
"=",
"attrs",
".",
"get",
"(",
"'axes'",
")",
",",
"keepdims",
"=",
"attrs",
".",
"get",
"(",
"'keepdims'",
")",
")",
"return",
"sum_op",
",",
"attrs",
",",
"inputs"
] | Reduce the array along a given axis by sum square value | [
"Reduce",
"the",
"array",
"along",
"a",
"given",
"axis",
"by",
"sum",
"square",
"value"
] | 1af29e9c060a4c7d60eeaacba32afdb9a7775ba7 | https://github.com/apache/incubator-mxnet/blob/1af29e9c060a4c7d60eeaacba32afdb9a7775ba7/python/mxnet/contrib/onnx/onnx2mx/_op_translations.py#L657-L662 |
24,020 | apache/incubator-mxnet | python/mxnet/contrib/onnx/onnx2mx/_op_translations.py | reduce_l1 | def reduce_l1(attrs, inputs, proto_obj):
"""Reduce input tensor by l1 normalization."""
new_attrs = translation_utils._fix_attribute_names(attrs, {'axes':'axis'})
new_attrs = translation_utils._add_extra_attributes(new_attrs,
{'ord' : 1})
return 'norm', new_attrs, inputs | python | def reduce_l1(attrs, inputs, proto_obj):
"""Reduce input tensor by l1 normalization."""
new_attrs = translation_utils._fix_attribute_names(attrs, {'axes':'axis'})
new_attrs = translation_utils._add_extra_attributes(new_attrs,
{'ord' : 1})
return 'norm', new_attrs, inputs | [
"def",
"reduce_l1",
"(",
"attrs",
",",
"inputs",
",",
"proto_obj",
")",
":",
"new_attrs",
"=",
"translation_utils",
".",
"_fix_attribute_names",
"(",
"attrs",
",",
"{",
"'axes'",
":",
"'axis'",
"}",
")",
"new_attrs",
"=",
"translation_utils",
".",
"_add_extra_attributes",
"(",
"new_attrs",
",",
"{",
"'ord'",
":",
"1",
"}",
")",
"return",
"'norm'",
",",
"new_attrs",
",",
"inputs"
] | Reduce input tensor by l1 normalization. | [
"Reduce",
"input",
"tensor",
"by",
"l1",
"normalization",
"."
] | 1af29e9c060a4c7d60eeaacba32afdb9a7775ba7 | https://github.com/apache/incubator-mxnet/blob/1af29e9c060a4c7d60eeaacba32afdb9a7775ba7/python/mxnet/contrib/onnx/onnx2mx/_op_translations.py#L664-L669 |
24,021 | apache/incubator-mxnet | python/mxnet/contrib/onnx/onnx2mx/_op_translations.py | reduce_l2 | def reduce_l2(attrs, inputs, proto_obj):
"""Reduce input tensor by l2 normalization."""
new_attrs = translation_utils._fix_attribute_names(attrs, {'axes':'axis'})
return 'norm', new_attrs, inputs | python | def reduce_l2(attrs, inputs, proto_obj):
"""Reduce input tensor by l2 normalization."""
new_attrs = translation_utils._fix_attribute_names(attrs, {'axes':'axis'})
return 'norm', new_attrs, inputs | [
"def",
"reduce_l2",
"(",
"attrs",
",",
"inputs",
",",
"proto_obj",
")",
":",
"new_attrs",
"=",
"translation_utils",
".",
"_fix_attribute_names",
"(",
"attrs",
",",
"{",
"'axes'",
":",
"'axis'",
"}",
")",
"return",
"'norm'",
",",
"new_attrs",
",",
"inputs"
] | Reduce input tensor by l2 normalization. | [
"Reduce",
"input",
"tensor",
"by",
"l2",
"normalization",
"."
] | 1af29e9c060a4c7d60eeaacba32afdb9a7775ba7 | https://github.com/apache/incubator-mxnet/blob/1af29e9c060a4c7d60eeaacba32afdb9a7775ba7/python/mxnet/contrib/onnx/onnx2mx/_op_translations.py#L679-L682 |
24,022 | apache/incubator-mxnet | python/mxnet/contrib/onnx/onnx2mx/_op_translations.py | max_roi_pooling | def max_roi_pooling(attrs, inputs, proto_obj):
"""Max ROI Pooling."""
new_attrs = translation_utils._fix_attribute_names(attrs,
{'pooled_shape': 'pooled_size',
'spatial_scale': 'spatial_scale'
})
return 'ROIPooling', new_attrs, inputs | python | def max_roi_pooling(attrs, inputs, proto_obj):
"""Max ROI Pooling."""
new_attrs = translation_utils._fix_attribute_names(attrs,
{'pooled_shape': 'pooled_size',
'spatial_scale': 'spatial_scale'
})
return 'ROIPooling', new_attrs, inputs | [
"def",
"max_roi_pooling",
"(",
"attrs",
",",
"inputs",
",",
"proto_obj",
")",
":",
"new_attrs",
"=",
"translation_utils",
".",
"_fix_attribute_names",
"(",
"attrs",
",",
"{",
"'pooled_shape'",
":",
"'pooled_size'",
",",
"'spatial_scale'",
":",
"'spatial_scale'",
"}",
")",
"return",
"'ROIPooling'",
",",
"new_attrs",
",",
"inputs"
] | Max ROI Pooling. | [
"Max",
"ROI",
"Pooling",
"."
] | 1af29e9c060a4c7d60eeaacba32afdb9a7775ba7 | https://github.com/apache/incubator-mxnet/blob/1af29e9c060a4c7d60eeaacba32afdb9a7775ba7/python/mxnet/contrib/onnx/onnx2mx/_op_translations.py#L729-L735 |
24,023 | apache/incubator-mxnet | python/mxnet/contrib/onnx/onnx2mx/_op_translations.py | depthtospace | def depthtospace(attrs, inputs, proto_obj):
"""Rearranges data from depth into blocks of spatial data."""
new_attrs = translation_utils._fix_attribute_names(attrs, {'blocksize':'block_size'})
return "depth_to_space", new_attrs, inputs | python | def depthtospace(attrs, inputs, proto_obj):
"""Rearranges data from depth into blocks of spatial data."""
new_attrs = translation_utils._fix_attribute_names(attrs, {'blocksize':'block_size'})
return "depth_to_space", new_attrs, inputs | [
"def",
"depthtospace",
"(",
"attrs",
",",
"inputs",
",",
"proto_obj",
")",
":",
"new_attrs",
"=",
"translation_utils",
".",
"_fix_attribute_names",
"(",
"attrs",
",",
"{",
"'blocksize'",
":",
"'block_size'",
"}",
")",
"return",
"\"depth_to_space\"",
",",
"new_attrs",
",",
"inputs"
] | Rearranges data from depth into blocks of spatial data. | [
"Rearranges",
"data",
"from",
"depth",
"into",
"blocks",
"of",
"spatial",
"data",
"."
] | 1af29e9c060a4c7d60eeaacba32afdb9a7775ba7 | https://github.com/apache/incubator-mxnet/blob/1af29e9c060a4c7d60eeaacba32afdb9a7775ba7/python/mxnet/contrib/onnx/onnx2mx/_op_translations.py#L737-L741 |
24,024 | apache/incubator-mxnet | python/mxnet/contrib/onnx/onnx2mx/_op_translations.py | spacetodepth | def spacetodepth(attrs, inputs, proto_obj):
"""Rearranges blocks of spatial data into depth."""
new_attrs = translation_utils._fix_attribute_names(attrs, {'blocksize':'block_size'})
return "space_to_depth", new_attrs, inputs | python | def spacetodepth(attrs, inputs, proto_obj):
"""Rearranges blocks of spatial data into depth."""
new_attrs = translation_utils._fix_attribute_names(attrs, {'blocksize':'block_size'})
return "space_to_depth", new_attrs, inputs | [
"def",
"spacetodepth",
"(",
"attrs",
",",
"inputs",
",",
"proto_obj",
")",
":",
"new_attrs",
"=",
"translation_utils",
".",
"_fix_attribute_names",
"(",
"attrs",
",",
"{",
"'blocksize'",
":",
"'block_size'",
"}",
")",
"return",
"\"space_to_depth\"",
",",
"new_attrs",
",",
"inputs"
] | Rearranges blocks of spatial data into depth. | [
"Rearranges",
"blocks",
"of",
"spatial",
"data",
"into",
"depth",
"."
] | 1af29e9c060a4c7d60eeaacba32afdb9a7775ba7 | https://github.com/apache/incubator-mxnet/blob/1af29e9c060a4c7d60eeaacba32afdb9a7775ba7/python/mxnet/contrib/onnx/onnx2mx/_op_translations.py#L743-L747 |
24,025 | apache/incubator-mxnet | python/mxnet/contrib/onnx/onnx2mx/_op_translations.py | hardmax | def hardmax(attrs, inputs, proto_obj):
"""Returns batched one-hot vectors."""
input_tensor_data = proto_obj.model_metadata.get('input_tensor_data')[0]
input_shape = input_tensor_data[1]
axis = int(attrs.get('axis', 1))
axis = axis if axis >= 0 else len(input_shape) + axis
if axis == len(input_shape) - 1:
amax = symbol.argmax(inputs[0], axis=-1)
one_hot = symbol.one_hot(amax, depth=input_shape[-1])
return one_hot, attrs, inputs
# since reshape doesn't take a tensor for shape,
# computing with np.prod. This needs to be changed to
# to use mx.sym.prod() when mx.sym.reshape() is fixed.
# (https://github.com/apache/incubator-mxnet/issues/10789)
new_shape = (int(np.prod(input_shape[:axis])),
int(np.prod(input_shape[axis:])))
reshape_op = symbol.reshape(inputs[0], new_shape)
amax = symbol.argmax(reshape_op, axis=-1)
one_hot = symbol.one_hot(amax, depth=new_shape[-1])
hardmax_op = symbol.reshape(one_hot, input_shape)
return hardmax_op, attrs, inputs | python | def hardmax(attrs, inputs, proto_obj):
"""Returns batched one-hot vectors."""
input_tensor_data = proto_obj.model_metadata.get('input_tensor_data')[0]
input_shape = input_tensor_data[1]
axis = int(attrs.get('axis', 1))
axis = axis if axis >= 0 else len(input_shape) + axis
if axis == len(input_shape) - 1:
amax = symbol.argmax(inputs[0], axis=-1)
one_hot = symbol.one_hot(amax, depth=input_shape[-1])
return one_hot, attrs, inputs
# since reshape doesn't take a tensor for shape,
# computing with np.prod. This needs to be changed to
# to use mx.sym.prod() when mx.sym.reshape() is fixed.
# (https://github.com/apache/incubator-mxnet/issues/10789)
new_shape = (int(np.prod(input_shape[:axis])),
int(np.prod(input_shape[axis:])))
reshape_op = symbol.reshape(inputs[0], new_shape)
amax = symbol.argmax(reshape_op, axis=-1)
one_hot = symbol.one_hot(amax, depth=new_shape[-1])
hardmax_op = symbol.reshape(one_hot, input_shape)
return hardmax_op, attrs, inputs | [
"def",
"hardmax",
"(",
"attrs",
",",
"inputs",
",",
"proto_obj",
")",
":",
"input_tensor_data",
"=",
"proto_obj",
".",
"model_metadata",
".",
"get",
"(",
"'input_tensor_data'",
")",
"[",
"0",
"]",
"input_shape",
"=",
"input_tensor_data",
"[",
"1",
"]",
"axis",
"=",
"int",
"(",
"attrs",
".",
"get",
"(",
"'axis'",
",",
"1",
")",
")",
"axis",
"=",
"axis",
"if",
"axis",
">=",
"0",
"else",
"len",
"(",
"input_shape",
")",
"+",
"axis",
"if",
"axis",
"==",
"len",
"(",
"input_shape",
")",
"-",
"1",
":",
"amax",
"=",
"symbol",
".",
"argmax",
"(",
"inputs",
"[",
"0",
"]",
",",
"axis",
"=",
"-",
"1",
")",
"one_hot",
"=",
"symbol",
".",
"one_hot",
"(",
"amax",
",",
"depth",
"=",
"input_shape",
"[",
"-",
"1",
"]",
")",
"return",
"one_hot",
",",
"attrs",
",",
"inputs",
"# since reshape doesn't take a tensor for shape,",
"# computing with np.prod. This needs to be changed to",
"# to use mx.sym.prod() when mx.sym.reshape() is fixed.",
"# (https://github.com/apache/incubator-mxnet/issues/10789)",
"new_shape",
"=",
"(",
"int",
"(",
"np",
".",
"prod",
"(",
"input_shape",
"[",
":",
"axis",
"]",
")",
")",
",",
"int",
"(",
"np",
".",
"prod",
"(",
"input_shape",
"[",
"axis",
":",
"]",
")",
")",
")",
"reshape_op",
"=",
"symbol",
".",
"reshape",
"(",
"inputs",
"[",
"0",
"]",
",",
"new_shape",
")",
"amax",
"=",
"symbol",
".",
"argmax",
"(",
"reshape_op",
",",
"axis",
"=",
"-",
"1",
")",
"one_hot",
"=",
"symbol",
".",
"one_hot",
"(",
"amax",
",",
"depth",
"=",
"new_shape",
"[",
"-",
"1",
"]",
")",
"hardmax_op",
"=",
"symbol",
".",
"reshape",
"(",
"one_hot",
",",
"input_shape",
")",
"return",
"hardmax_op",
",",
"attrs",
",",
"inputs"
] | Returns batched one-hot vectors. | [
"Returns",
"batched",
"one",
"-",
"hot",
"vectors",
"."
] | 1af29e9c060a4c7d60eeaacba32afdb9a7775ba7 | https://github.com/apache/incubator-mxnet/blob/1af29e9c060a4c7d60eeaacba32afdb9a7775ba7/python/mxnet/contrib/onnx/onnx2mx/_op_translations.py#L749-L772 |
24,026 | apache/incubator-mxnet | ci/docker/qemu/runtime_functions.py | run_ut_python3_qemu_internal | def run_ut_python3_qemu_internal():
"""this runs inside the vm"""
pkg = glob.glob('mxnet_dist/*.whl')[0]
logging.info("=== NOW Running inside QEMU ===")
logging.info("PIP Installing %s", pkg)
check_call(['sudo', 'pip3', 'install', pkg])
logging.info("PIP Installing mxnet/test_requirements.txt")
check_call(['sudo', 'pip3', 'install', '-r', 'mxnet/test_requirements.txt'])
logging.info("Running tests in mxnet/tests/python/unittest/")
check_call(['nosetests', '--with-timer', '--with-xunit', '--xunit-file', 'nosetests_unittest.xml', '--verbose', 'mxnet/tests/python/unittest/test_engine.py']) | python | def run_ut_python3_qemu_internal():
"""this runs inside the vm"""
pkg = glob.glob('mxnet_dist/*.whl')[0]
logging.info("=== NOW Running inside QEMU ===")
logging.info("PIP Installing %s", pkg)
check_call(['sudo', 'pip3', 'install', pkg])
logging.info("PIP Installing mxnet/test_requirements.txt")
check_call(['sudo', 'pip3', 'install', '-r', 'mxnet/test_requirements.txt'])
logging.info("Running tests in mxnet/tests/python/unittest/")
check_call(['nosetests', '--with-timer', '--with-xunit', '--xunit-file', 'nosetests_unittest.xml', '--verbose', 'mxnet/tests/python/unittest/test_engine.py']) | [
"def",
"run_ut_python3_qemu_internal",
"(",
")",
":",
"pkg",
"=",
"glob",
".",
"glob",
"(",
"'mxnet_dist/*.whl'",
")",
"[",
"0",
"]",
"logging",
".",
"info",
"(",
"\"=== NOW Running inside QEMU ===\"",
")",
"logging",
".",
"info",
"(",
"\"PIP Installing %s\"",
",",
"pkg",
")",
"check_call",
"(",
"[",
"'sudo'",
",",
"'pip3'",
",",
"'install'",
",",
"pkg",
"]",
")",
"logging",
".",
"info",
"(",
"\"PIP Installing mxnet/test_requirements.txt\"",
")",
"check_call",
"(",
"[",
"'sudo'",
",",
"'pip3'",
",",
"'install'",
",",
"'-r'",
",",
"'mxnet/test_requirements.txt'",
"]",
")",
"logging",
".",
"info",
"(",
"\"Running tests in mxnet/tests/python/unittest/\"",
")",
"check_call",
"(",
"[",
"'nosetests'",
",",
"'--with-timer'",
",",
"'--with-xunit'",
",",
"'--xunit-file'",
",",
"'nosetests_unittest.xml'",
",",
"'--verbose'",
",",
"'mxnet/tests/python/unittest/test_engine.py'",
"]",
")"
] | this runs inside the vm | [
"this",
"runs",
"inside",
"the",
"vm"
] | 1af29e9c060a4c7d60eeaacba32afdb9a7775ba7 | https://github.com/apache/incubator-mxnet/blob/1af29e9c060a4c7d60eeaacba32afdb9a7775ba7/ci/docker/qemu/runtime_functions.py#L74-L83 |
24,027 | apache/incubator-mxnet | python/mxnet/ndarray/ndarray.py | _new_empty_handle | def _new_empty_handle():
"""Returns a new empty handle.
Empty handle can be used to hold a result.
Returns
-------
handle
A new empty `NDArray` handle.
"""
hdl = NDArrayHandle()
check_call(_LIB.MXNDArrayCreateNone(ctypes.byref(hdl)))
return hdl | python | def _new_empty_handle():
"""Returns a new empty handle.
Empty handle can be used to hold a result.
Returns
-------
handle
A new empty `NDArray` handle.
"""
hdl = NDArrayHandle()
check_call(_LIB.MXNDArrayCreateNone(ctypes.byref(hdl)))
return hdl | [
"def",
"_new_empty_handle",
"(",
")",
":",
"hdl",
"=",
"NDArrayHandle",
"(",
")",
"check_call",
"(",
"_LIB",
".",
"MXNDArrayCreateNone",
"(",
"ctypes",
".",
"byref",
"(",
"hdl",
")",
")",
")",
"return",
"hdl"
] | Returns a new empty handle.
Empty handle can be used to hold a result.
Returns
-------
handle
A new empty `NDArray` handle. | [
"Returns",
"a",
"new",
"empty",
"handle",
"."
] | 1af29e9c060a4c7d60eeaacba32afdb9a7775ba7 | https://github.com/apache/incubator-mxnet/blob/1af29e9c060a4c7d60eeaacba32afdb9a7775ba7/python/mxnet/ndarray/ndarray.py#L107-L119 |
24,028 | apache/incubator-mxnet | python/mxnet/ndarray/ndarray.py | _new_alloc_handle | def _new_alloc_handle(shape, ctx, delay_alloc, dtype=mx_real_t):
"""Return a new handle with specified shape and context.
Empty handle is only used to hold results.
Returns
-------
handle
A new empty `NDArray` handle.
"""
hdl = NDArrayHandle()
check_call(_LIB.MXNDArrayCreateEx(
c_array_buf(mx_uint, native_array('I', shape)),
mx_uint(len(shape)),
ctypes.c_int(ctx.device_typeid),
ctypes.c_int(ctx.device_id),
ctypes.c_int(int(delay_alloc)),
ctypes.c_int(int(_DTYPE_NP_TO_MX[np.dtype(dtype).type])),
ctypes.byref(hdl)))
return hdl | python | def _new_alloc_handle(shape, ctx, delay_alloc, dtype=mx_real_t):
"""Return a new handle with specified shape and context.
Empty handle is only used to hold results.
Returns
-------
handle
A new empty `NDArray` handle.
"""
hdl = NDArrayHandle()
check_call(_LIB.MXNDArrayCreateEx(
c_array_buf(mx_uint, native_array('I', shape)),
mx_uint(len(shape)),
ctypes.c_int(ctx.device_typeid),
ctypes.c_int(ctx.device_id),
ctypes.c_int(int(delay_alloc)),
ctypes.c_int(int(_DTYPE_NP_TO_MX[np.dtype(dtype).type])),
ctypes.byref(hdl)))
return hdl | [
"def",
"_new_alloc_handle",
"(",
"shape",
",",
"ctx",
",",
"delay_alloc",
",",
"dtype",
"=",
"mx_real_t",
")",
":",
"hdl",
"=",
"NDArrayHandle",
"(",
")",
"check_call",
"(",
"_LIB",
".",
"MXNDArrayCreateEx",
"(",
"c_array_buf",
"(",
"mx_uint",
",",
"native_array",
"(",
"'I'",
",",
"shape",
")",
")",
",",
"mx_uint",
"(",
"len",
"(",
"shape",
")",
")",
",",
"ctypes",
".",
"c_int",
"(",
"ctx",
".",
"device_typeid",
")",
",",
"ctypes",
".",
"c_int",
"(",
"ctx",
".",
"device_id",
")",
",",
"ctypes",
".",
"c_int",
"(",
"int",
"(",
"delay_alloc",
")",
")",
",",
"ctypes",
".",
"c_int",
"(",
"int",
"(",
"_DTYPE_NP_TO_MX",
"[",
"np",
".",
"dtype",
"(",
"dtype",
")",
".",
"type",
"]",
")",
")",
",",
"ctypes",
".",
"byref",
"(",
"hdl",
")",
")",
")",
"return",
"hdl"
] | Return a new handle with specified shape and context.
Empty handle is only used to hold results.
Returns
-------
handle
A new empty `NDArray` handle. | [
"Return",
"a",
"new",
"handle",
"with",
"specified",
"shape",
"and",
"context",
"."
] | 1af29e9c060a4c7d60eeaacba32afdb9a7775ba7 | https://github.com/apache/incubator-mxnet/blob/1af29e9c060a4c7d60eeaacba32afdb9a7775ba7/python/mxnet/ndarray/ndarray.py#L122-L141 |
24,029 | apache/incubator-mxnet | python/mxnet/ndarray/ndarray.py | _get_indexing_dispatch_code | def _get_indexing_dispatch_code(key):
"""Returns a dispatch code for calling basic or advanced indexing functions."""
if isinstance(key, (NDArray, np.ndarray)):
return _NDARRAY_ADVANCED_INDEXING
elif isinstance(key, list):
# TODO(junwu): Add support for nested lists besides integer list
for i in key:
if not isinstance(i, integer_types):
raise TypeError('Indexing NDArray only supports a list of integers as index'
' when key is of list type, received element=%s of type=%s'
% (str(i), str(type(i))))
return _NDARRAY_ADVANCED_INDEXING
elif isinstance(key, (integer_types, py_slice)):
return _NDARRAY_BASIC_INDEXING
elif isinstance(key, tuple):
for idx in key:
if isinstance(idx, (NDArray, np.ndarray, list, tuple)):
return _NDARRAY_ADVANCED_INDEXING
elif not isinstance(idx, (py_slice, integer_types)):
raise ValueError("NDArray does not support slicing with key %s of type %s."
% (str(idx), str(type(idx))))
return _NDARRAY_BASIC_INDEXING
else:
return _NDARRAY_UNSUPPORTED_INDEXING | python | def _get_indexing_dispatch_code(key):
"""Returns a dispatch code for calling basic or advanced indexing functions."""
if isinstance(key, (NDArray, np.ndarray)):
return _NDARRAY_ADVANCED_INDEXING
elif isinstance(key, list):
# TODO(junwu): Add support for nested lists besides integer list
for i in key:
if not isinstance(i, integer_types):
raise TypeError('Indexing NDArray only supports a list of integers as index'
' when key is of list type, received element=%s of type=%s'
% (str(i), str(type(i))))
return _NDARRAY_ADVANCED_INDEXING
elif isinstance(key, (integer_types, py_slice)):
return _NDARRAY_BASIC_INDEXING
elif isinstance(key, tuple):
for idx in key:
if isinstance(idx, (NDArray, np.ndarray, list, tuple)):
return _NDARRAY_ADVANCED_INDEXING
elif not isinstance(idx, (py_slice, integer_types)):
raise ValueError("NDArray does not support slicing with key %s of type %s."
% (str(idx), str(type(idx))))
return _NDARRAY_BASIC_INDEXING
else:
return _NDARRAY_UNSUPPORTED_INDEXING | [
"def",
"_get_indexing_dispatch_code",
"(",
"key",
")",
":",
"if",
"isinstance",
"(",
"key",
",",
"(",
"NDArray",
",",
"np",
".",
"ndarray",
")",
")",
":",
"return",
"_NDARRAY_ADVANCED_INDEXING",
"elif",
"isinstance",
"(",
"key",
",",
"list",
")",
":",
"# TODO(junwu): Add support for nested lists besides integer list",
"for",
"i",
"in",
"key",
":",
"if",
"not",
"isinstance",
"(",
"i",
",",
"integer_types",
")",
":",
"raise",
"TypeError",
"(",
"'Indexing NDArray only supports a list of integers as index'",
"' when key is of list type, received element=%s of type=%s'",
"%",
"(",
"str",
"(",
"i",
")",
",",
"str",
"(",
"type",
"(",
"i",
")",
")",
")",
")",
"return",
"_NDARRAY_ADVANCED_INDEXING",
"elif",
"isinstance",
"(",
"key",
",",
"(",
"integer_types",
",",
"py_slice",
")",
")",
":",
"return",
"_NDARRAY_BASIC_INDEXING",
"elif",
"isinstance",
"(",
"key",
",",
"tuple",
")",
":",
"for",
"idx",
"in",
"key",
":",
"if",
"isinstance",
"(",
"idx",
",",
"(",
"NDArray",
",",
"np",
".",
"ndarray",
",",
"list",
",",
"tuple",
")",
")",
":",
"return",
"_NDARRAY_ADVANCED_INDEXING",
"elif",
"not",
"isinstance",
"(",
"idx",
",",
"(",
"py_slice",
",",
"integer_types",
")",
")",
":",
"raise",
"ValueError",
"(",
"\"NDArray does not support slicing with key %s of type %s.\"",
"%",
"(",
"str",
"(",
"idx",
")",
",",
"str",
"(",
"type",
"(",
"idx",
")",
")",
")",
")",
"return",
"_NDARRAY_BASIC_INDEXING",
"else",
":",
"return",
"_NDARRAY_UNSUPPORTED_INDEXING"
] | Returns a dispatch code for calling basic or advanced indexing functions. | [
"Returns",
"a",
"dispatch",
"code",
"for",
"calling",
"basic",
"or",
"advanced",
"indexing",
"functions",
"."
] | 1af29e9c060a4c7d60eeaacba32afdb9a7775ba7 | https://github.com/apache/incubator-mxnet/blob/1af29e9c060a4c7d60eeaacba32afdb9a7775ba7/python/mxnet/ndarray/ndarray.py#L2278-L2301 |
24,030 | apache/incubator-mxnet | python/mxnet/ndarray/ndarray.py | _get_oshape_of_gather_nd_op | def _get_oshape_of_gather_nd_op(dshape, ishape):
"""Given data and index shapes, get the output `NDArray` shape.
This basically implements the infer shape logic of op gather_nd."""
assert len(dshape) > 0 and len(ishape) > 0
oshape = list(ishape[1:])
if ishape[0] < len(dshape):
oshape.extend(dshape[ishape[0]:])
return tuple(oshape) | python | def _get_oshape_of_gather_nd_op(dshape, ishape):
"""Given data and index shapes, get the output `NDArray` shape.
This basically implements the infer shape logic of op gather_nd."""
assert len(dshape) > 0 and len(ishape) > 0
oshape = list(ishape[1:])
if ishape[0] < len(dshape):
oshape.extend(dshape[ishape[0]:])
return tuple(oshape) | [
"def",
"_get_oshape_of_gather_nd_op",
"(",
"dshape",
",",
"ishape",
")",
":",
"assert",
"len",
"(",
"dshape",
")",
">",
"0",
"and",
"len",
"(",
"ishape",
")",
">",
"0",
"oshape",
"=",
"list",
"(",
"ishape",
"[",
"1",
":",
"]",
")",
"if",
"ishape",
"[",
"0",
"]",
"<",
"len",
"(",
"dshape",
")",
":",
"oshape",
".",
"extend",
"(",
"dshape",
"[",
"ishape",
"[",
"0",
"]",
":",
"]",
")",
"return",
"tuple",
"(",
"oshape",
")"
] | Given data and index shapes, get the output `NDArray` shape.
This basically implements the infer shape logic of op gather_nd. | [
"Given",
"data",
"and",
"index",
"shapes",
"get",
"the",
"output",
"NDArray",
"shape",
".",
"This",
"basically",
"implements",
"the",
"infer",
"shape",
"logic",
"of",
"op",
"gather_nd",
"."
] | 1af29e9c060a4c7d60eeaacba32afdb9a7775ba7 | https://github.com/apache/incubator-mxnet/blob/1af29e9c060a4c7d60eeaacba32afdb9a7775ba7/python/mxnet/ndarray/ndarray.py#L2347-L2354 |
24,031 | apache/incubator-mxnet | python/mxnet/ndarray/ndarray.py | _get_dim_size | def _get_dim_size(start, stop, step):
"""Given start, stop, and stop, calculate the number of elements
of this slice."""
assert step != 0
if step > 0:
assert start < stop
dim_size = (stop - start - 1) // step + 1
else:
assert stop < start
dim_size = (start - stop - 1) // (-step) + 1
return dim_size | python | def _get_dim_size(start, stop, step):
"""Given start, stop, and stop, calculate the number of elements
of this slice."""
assert step != 0
if step > 0:
assert start < stop
dim_size = (stop - start - 1) // step + 1
else:
assert stop < start
dim_size = (start - stop - 1) // (-step) + 1
return dim_size | [
"def",
"_get_dim_size",
"(",
"start",
",",
"stop",
",",
"step",
")",
":",
"assert",
"step",
"!=",
"0",
"if",
"step",
">",
"0",
":",
"assert",
"start",
"<",
"stop",
"dim_size",
"=",
"(",
"stop",
"-",
"start",
"-",
"1",
")",
"//",
"step",
"+",
"1",
"else",
":",
"assert",
"stop",
"<",
"start",
"dim_size",
"=",
"(",
"start",
"-",
"stop",
"-",
"1",
")",
"//",
"(",
"-",
"step",
")",
"+",
"1",
"return",
"dim_size"
] | Given start, stop, and stop, calculate the number of elements
of this slice. | [
"Given",
"start",
"stop",
"and",
"stop",
"calculate",
"the",
"number",
"of",
"elements",
"of",
"this",
"slice",
"."
] | 1af29e9c060a4c7d60eeaacba32afdb9a7775ba7 | https://github.com/apache/incubator-mxnet/blob/1af29e9c060a4c7d60eeaacba32afdb9a7775ba7/python/mxnet/ndarray/ndarray.py#L2357-L2367 |
24,032 | apache/incubator-mxnet | python/mxnet/ndarray/ndarray.py | _get_broadcast_shape | def _get_broadcast_shape(shape1, shape2):
"""Given two shapes that are not identical, find the shape
that both input shapes can broadcast to."""
if shape1 == shape2:
return shape1
length1 = len(shape1)
length2 = len(shape2)
if length1 > length2:
shape = list(shape1)
else:
shape = list(shape2)
i = max(length1, length2) - 1
for a, b in zip(shape1[::-1], shape2[::-1]):
if a != 1 and b != 1 and a != b:
raise ValueError('shape1=%s is not broadcastable to shape2=%s' % (shape1, shape2))
shape[i] = max(a, b)
i -= 1
return tuple(shape) | python | def _get_broadcast_shape(shape1, shape2):
"""Given two shapes that are not identical, find the shape
that both input shapes can broadcast to."""
if shape1 == shape2:
return shape1
length1 = len(shape1)
length2 = len(shape2)
if length1 > length2:
shape = list(shape1)
else:
shape = list(shape2)
i = max(length1, length2) - 1
for a, b in zip(shape1[::-1], shape2[::-1]):
if a != 1 and b != 1 and a != b:
raise ValueError('shape1=%s is not broadcastable to shape2=%s' % (shape1, shape2))
shape[i] = max(a, b)
i -= 1
return tuple(shape) | [
"def",
"_get_broadcast_shape",
"(",
"shape1",
",",
"shape2",
")",
":",
"if",
"shape1",
"==",
"shape2",
":",
"return",
"shape1",
"length1",
"=",
"len",
"(",
"shape1",
")",
"length2",
"=",
"len",
"(",
"shape2",
")",
"if",
"length1",
">",
"length2",
":",
"shape",
"=",
"list",
"(",
"shape1",
")",
"else",
":",
"shape",
"=",
"list",
"(",
"shape2",
")",
"i",
"=",
"max",
"(",
"length1",
",",
"length2",
")",
"-",
"1",
"for",
"a",
",",
"b",
"in",
"zip",
"(",
"shape1",
"[",
":",
":",
"-",
"1",
"]",
",",
"shape2",
"[",
":",
":",
"-",
"1",
"]",
")",
":",
"if",
"a",
"!=",
"1",
"and",
"b",
"!=",
"1",
"and",
"a",
"!=",
"b",
":",
"raise",
"ValueError",
"(",
"'shape1=%s is not broadcastable to shape2=%s'",
"%",
"(",
"shape1",
",",
"shape2",
")",
")",
"shape",
"[",
"i",
"]",
"=",
"max",
"(",
"a",
",",
"b",
")",
"i",
"-=",
"1",
"return",
"tuple",
"(",
"shape",
")"
] | Given two shapes that are not identical, find the shape
that both input shapes can broadcast to. | [
"Given",
"two",
"shapes",
"that",
"are",
"not",
"identical",
"find",
"the",
"shape",
"that",
"both",
"input",
"shapes",
"can",
"broadcast",
"to",
"."
] | 1af29e9c060a4c7d60eeaacba32afdb9a7775ba7 | https://github.com/apache/incubator-mxnet/blob/1af29e9c060a4c7d60eeaacba32afdb9a7775ba7/python/mxnet/ndarray/ndarray.py#L2370-L2388 |
24,033 | apache/incubator-mxnet | python/mxnet/ndarray/ndarray.py | ones | def ones(shape, ctx=None, dtype=None, **kwargs):
"""Returns a new array filled with all ones, with the given shape and type.
Parameters
----------
shape : int or tuple of int or list of int
The shape of the empty array.
ctx : Context, optional
An optional device context.
Defaults to the current default context (``mxnet.context.current_context()``).
dtype : str or numpy.dtype, optional
An optional value type (default is `float32`).
out : NDArray, optional
The output NDArray (default is `None`).
Returns
-------
NDArray
A new array of the specified shape filled with all ones.
Examples
--------
>>> mx.nd.ones(1).asnumpy()
array([ 1.], dtype=float32)
>>> mx.nd.ones((1,2), mx.gpu(0))
<NDArray 1x2 @gpu(0)>
>>> mx.nd.ones((1,2), dtype='float16').asnumpy()
array([[ 1., 1.]], dtype=float16)
"""
# pylint: disable= unused-argument
if ctx is None:
ctx = current_context()
dtype = mx_real_t if dtype is None else dtype
# pylint: disable= no-member, protected-access
return _internal._ones(shape=shape, ctx=ctx, dtype=dtype, **kwargs) | python | def ones(shape, ctx=None, dtype=None, **kwargs):
"""Returns a new array filled with all ones, with the given shape and type.
Parameters
----------
shape : int or tuple of int or list of int
The shape of the empty array.
ctx : Context, optional
An optional device context.
Defaults to the current default context (``mxnet.context.current_context()``).
dtype : str or numpy.dtype, optional
An optional value type (default is `float32`).
out : NDArray, optional
The output NDArray (default is `None`).
Returns
-------
NDArray
A new array of the specified shape filled with all ones.
Examples
--------
>>> mx.nd.ones(1).asnumpy()
array([ 1.], dtype=float32)
>>> mx.nd.ones((1,2), mx.gpu(0))
<NDArray 1x2 @gpu(0)>
>>> mx.nd.ones((1,2), dtype='float16').asnumpy()
array([[ 1., 1.]], dtype=float16)
"""
# pylint: disable= unused-argument
if ctx is None:
ctx = current_context()
dtype = mx_real_t if dtype is None else dtype
# pylint: disable= no-member, protected-access
return _internal._ones(shape=shape, ctx=ctx, dtype=dtype, **kwargs) | [
"def",
"ones",
"(",
"shape",
",",
"ctx",
"=",
"None",
",",
"dtype",
"=",
"None",
",",
"*",
"*",
"kwargs",
")",
":",
"# pylint: disable= unused-argument",
"if",
"ctx",
"is",
"None",
":",
"ctx",
"=",
"current_context",
"(",
")",
"dtype",
"=",
"mx_real_t",
"if",
"dtype",
"is",
"None",
"else",
"dtype",
"# pylint: disable= no-member, protected-access",
"return",
"_internal",
".",
"_ones",
"(",
"shape",
"=",
"shape",
",",
"ctx",
"=",
"ctx",
",",
"dtype",
"=",
"dtype",
",",
"*",
"*",
"kwargs",
")"
] | Returns a new array filled with all ones, with the given shape and type.
Parameters
----------
shape : int or tuple of int or list of int
The shape of the empty array.
ctx : Context, optional
An optional device context.
Defaults to the current default context (``mxnet.context.current_context()``).
dtype : str or numpy.dtype, optional
An optional value type (default is `float32`).
out : NDArray, optional
The output NDArray (default is `None`).
Returns
-------
NDArray
A new array of the specified shape filled with all ones.
Examples
--------
>>> mx.nd.ones(1).asnumpy()
array([ 1.], dtype=float32)
>>> mx.nd.ones((1,2), mx.gpu(0))
<NDArray 1x2 @gpu(0)>
>>> mx.nd.ones((1,2), dtype='float16').asnumpy()
array([[ 1., 1.]], dtype=float16) | [
"Returns",
"a",
"new",
"array",
"filled",
"with",
"all",
"ones",
"with",
"the",
"given",
"shape",
"and",
"type",
"."
] | 1af29e9c060a4c7d60eeaacba32afdb9a7775ba7 | https://github.com/apache/incubator-mxnet/blob/1af29e9c060a4c7d60eeaacba32afdb9a7775ba7/python/mxnet/ndarray/ndarray.py#L2402-L2436 |
24,034 | apache/incubator-mxnet | python/mxnet/ndarray/ndarray.py | moveaxis | def moveaxis(tensor, source, destination):
"""Moves the `source` axis into the `destination` position
while leaving the other axes in their original order
Parameters
----------
tensor : mx.nd.array
The array which axes should be reordered
source : int or sequence of int
Original position of the axes to move. Can be negative but must be unique.
destination : int or sequence of int
Destination position for each of the original axes. Can be negative but must be unique.
Returns
-------
result : mx.nd.array
Array with moved axes.
Examples
--------
>>> X = mx.nd.array([[1, 2, 3], [4, 5, 6]])
>>> mx.nd.moveaxis(X, 0, 1).shape
(3L, 2L)
>>> X = mx.nd.zeros((3, 4, 5))
>>> mx.nd.moveaxis(X, [0, 1], [-1, -2]).shape
(5, 4, 3)
"""
try:
source = np.core.numeric.normalize_axis_tuple(source, tensor.ndim)
except IndexError:
raise ValueError('Source should verify 0 <= source < tensor.ndim'
'Got %d' % source)
try:
destination = np.core.numeric.normalize_axis_tuple(destination, tensor.ndim)
except IndexError:
raise ValueError('Destination should verify 0 <= destination < tensor.ndim (%d).'
% tensor.ndim, 'Got %d' % destination)
if len(source) != len(destination):
raise ValueError('`source` and `destination` arguments must have '
'the same number of elements')
order = [n for n in range(tensor.ndim) if n not in source]
for dest, src in sorted(zip(destination, source)):
order.insert(dest, src)
return op.transpose(tensor, order) | python | def moveaxis(tensor, source, destination):
"""Moves the `source` axis into the `destination` position
while leaving the other axes in their original order
Parameters
----------
tensor : mx.nd.array
The array which axes should be reordered
source : int or sequence of int
Original position of the axes to move. Can be negative but must be unique.
destination : int or sequence of int
Destination position for each of the original axes. Can be negative but must be unique.
Returns
-------
result : mx.nd.array
Array with moved axes.
Examples
--------
>>> X = mx.nd.array([[1, 2, 3], [4, 5, 6]])
>>> mx.nd.moveaxis(X, 0, 1).shape
(3L, 2L)
>>> X = mx.nd.zeros((3, 4, 5))
>>> mx.nd.moveaxis(X, [0, 1], [-1, -2]).shape
(5, 4, 3)
"""
try:
source = np.core.numeric.normalize_axis_tuple(source, tensor.ndim)
except IndexError:
raise ValueError('Source should verify 0 <= source < tensor.ndim'
'Got %d' % source)
try:
destination = np.core.numeric.normalize_axis_tuple(destination, tensor.ndim)
except IndexError:
raise ValueError('Destination should verify 0 <= destination < tensor.ndim (%d).'
% tensor.ndim, 'Got %d' % destination)
if len(source) != len(destination):
raise ValueError('`source` and `destination` arguments must have '
'the same number of elements')
order = [n for n in range(tensor.ndim) if n not in source]
for dest, src in sorted(zip(destination, source)):
order.insert(dest, src)
return op.transpose(tensor, order) | [
"def",
"moveaxis",
"(",
"tensor",
",",
"source",
",",
"destination",
")",
":",
"try",
":",
"source",
"=",
"np",
".",
"core",
".",
"numeric",
".",
"normalize_axis_tuple",
"(",
"source",
",",
"tensor",
".",
"ndim",
")",
"except",
"IndexError",
":",
"raise",
"ValueError",
"(",
"'Source should verify 0 <= source < tensor.ndim'",
"'Got %d'",
"%",
"source",
")",
"try",
":",
"destination",
"=",
"np",
".",
"core",
".",
"numeric",
".",
"normalize_axis_tuple",
"(",
"destination",
",",
"tensor",
".",
"ndim",
")",
"except",
"IndexError",
":",
"raise",
"ValueError",
"(",
"'Destination should verify 0 <= destination < tensor.ndim (%d).'",
"%",
"tensor",
".",
"ndim",
",",
"'Got %d'",
"%",
"destination",
")",
"if",
"len",
"(",
"source",
")",
"!=",
"len",
"(",
"destination",
")",
":",
"raise",
"ValueError",
"(",
"'`source` and `destination` arguments must have '",
"'the same number of elements'",
")",
"order",
"=",
"[",
"n",
"for",
"n",
"in",
"range",
"(",
"tensor",
".",
"ndim",
")",
"if",
"n",
"not",
"in",
"source",
"]",
"for",
"dest",
",",
"src",
"in",
"sorted",
"(",
"zip",
"(",
"destination",
",",
"source",
")",
")",
":",
"order",
".",
"insert",
"(",
"dest",
",",
"src",
")",
"return",
"op",
".",
"transpose",
"(",
"tensor",
",",
"order",
")"
] | Moves the `source` axis into the `destination` position
while leaving the other axes in their original order
Parameters
----------
tensor : mx.nd.array
The array which axes should be reordered
source : int or sequence of int
Original position of the axes to move. Can be negative but must be unique.
destination : int or sequence of int
Destination position for each of the original axes. Can be negative but must be unique.
Returns
-------
result : mx.nd.array
Array with moved axes.
Examples
--------
>>> X = mx.nd.array([[1, 2, 3], [4, 5, 6]])
>>> mx.nd.moveaxis(X, 0, 1).shape
(3L, 2L)
>>> X = mx.nd.zeros((3, 4, 5))
>>> mx.nd.moveaxis(X, [0, 1], [-1, -2]).shape
(5, 4, 3) | [
"Moves",
"the",
"source",
"axis",
"into",
"the",
"destination",
"position",
"while",
"leaving",
"the",
"other",
"axes",
"in",
"their",
"original",
"order"
] | 1af29e9c060a4c7d60eeaacba32afdb9a7775ba7 | https://github.com/apache/incubator-mxnet/blob/1af29e9c060a4c7d60eeaacba32afdb9a7775ba7/python/mxnet/ndarray/ndarray.py#L2508-L2556 |
24,035 | apache/incubator-mxnet | python/mxnet/ndarray/ndarray.py | _ufunc_helper | def _ufunc_helper(lhs, rhs, fn_array, fn_scalar, lfn_scalar, rfn_scalar=None):
""" Helper function for element-wise operation.
The function will perform numpy-like broadcasting if needed and call different functions.
Parameters
--------
lhs : NDArray or numeric value
Left-hand side operand.
rhs : NDArray or numeric value
Right-hand operand,
fn_array : function
Function to be called if both lhs and rhs are of ``NDArray`` type.
fn_scalar : function
Function to be called if both lhs and rhs are numeric values.
lfn_scalar : function
Function to be called if lhs is ``NDArray`` while rhs is numeric value
rfn_scalar : function
Function to be called if lhs is numeric value while rhs is ``NDArray``;
if none is provided, then the function is commutative, so rfn_scalar is equal to lfn_scalar
Returns
--------
NDArray
result array
"""
if isinstance(lhs, numeric_types):
if isinstance(rhs, numeric_types):
return fn_scalar(lhs, rhs)
else:
if rfn_scalar is None:
# commutative function
return lfn_scalar(rhs, float(lhs))
else:
return rfn_scalar(rhs, float(lhs))
elif isinstance(rhs, numeric_types):
return lfn_scalar(lhs, float(rhs))
elif isinstance(rhs, NDArray):
return fn_array(lhs, rhs)
else:
raise TypeError('type %s not supported' % str(type(rhs))) | python | def _ufunc_helper(lhs, rhs, fn_array, fn_scalar, lfn_scalar, rfn_scalar=None):
""" Helper function for element-wise operation.
The function will perform numpy-like broadcasting if needed and call different functions.
Parameters
--------
lhs : NDArray or numeric value
Left-hand side operand.
rhs : NDArray or numeric value
Right-hand operand,
fn_array : function
Function to be called if both lhs and rhs are of ``NDArray`` type.
fn_scalar : function
Function to be called if both lhs and rhs are numeric values.
lfn_scalar : function
Function to be called if lhs is ``NDArray`` while rhs is numeric value
rfn_scalar : function
Function to be called if lhs is numeric value while rhs is ``NDArray``;
if none is provided, then the function is commutative, so rfn_scalar is equal to lfn_scalar
Returns
--------
NDArray
result array
"""
if isinstance(lhs, numeric_types):
if isinstance(rhs, numeric_types):
return fn_scalar(lhs, rhs)
else:
if rfn_scalar is None:
# commutative function
return lfn_scalar(rhs, float(lhs))
else:
return rfn_scalar(rhs, float(lhs))
elif isinstance(rhs, numeric_types):
return lfn_scalar(lhs, float(rhs))
elif isinstance(rhs, NDArray):
return fn_array(lhs, rhs)
else:
raise TypeError('type %s not supported' % str(type(rhs))) | [
"def",
"_ufunc_helper",
"(",
"lhs",
",",
"rhs",
",",
"fn_array",
",",
"fn_scalar",
",",
"lfn_scalar",
",",
"rfn_scalar",
"=",
"None",
")",
":",
"if",
"isinstance",
"(",
"lhs",
",",
"numeric_types",
")",
":",
"if",
"isinstance",
"(",
"rhs",
",",
"numeric_types",
")",
":",
"return",
"fn_scalar",
"(",
"lhs",
",",
"rhs",
")",
"else",
":",
"if",
"rfn_scalar",
"is",
"None",
":",
"# commutative function",
"return",
"lfn_scalar",
"(",
"rhs",
",",
"float",
"(",
"lhs",
")",
")",
"else",
":",
"return",
"rfn_scalar",
"(",
"rhs",
",",
"float",
"(",
"lhs",
")",
")",
"elif",
"isinstance",
"(",
"rhs",
",",
"numeric_types",
")",
":",
"return",
"lfn_scalar",
"(",
"lhs",
",",
"float",
"(",
"rhs",
")",
")",
"elif",
"isinstance",
"(",
"rhs",
",",
"NDArray",
")",
":",
"return",
"fn_array",
"(",
"lhs",
",",
"rhs",
")",
"else",
":",
"raise",
"TypeError",
"(",
"'type %s not supported'",
"%",
"str",
"(",
"type",
"(",
"rhs",
")",
")",
")"
] | Helper function for element-wise operation.
The function will perform numpy-like broadcasting if needed and call different functions.
Parameters
--------
lhs : NDArray or numeric value
Left-hand side operand.
rhs : NDArray or numeric value
Right-hand operand,
fn_array : function
Function to be called if both lhs and rhs are of ``NDArray`` type.
fn_scalar : function
Function to be called if both lhs and rhs are numeric values.
lfn_scalar : function
Function to be called if lhs is ``NDArray`` while rhs is numeric value
rfn_scalar : function
Function to be called if lhs is numeric value while rhs is ``NDArray``;
if none is provided, then the function is commutative, so rfn_scalar is equal to lfn_scalar
Returns
--------
NDArray
result array | [
"Helper",
"function",
"for",
"element",
"-",
"wise",
"operation",
".",
"The",
"function",
"will",
"perform",
"numpy",
"-",
"like",
"broadcasting",
"if",
"needed",
"and",
"call",
"different",
"functions",
"."
] | 1af29e9c060a4c7d60eeaacba32afdb9a7775ba7 | https://github.com/apache/incubator-mxnet/blob/1af29e9c060a4c7d60eeaacba32afdb9a7775ba7/python/mxnet/ndarray/ndarray.py#L2615-L2659 |
24,036 | apache/incubator-mxnet | python/mxnet/ndarray/ndarray.py | modulo | def modulo(lhs, rhs):
"""Returns element-wise modulo of the input arrays with broadcasting.
Equivalent to ``lhs % rhs`` and ``mx.nd.broadcast_mod(lhs, rhs)``.
.. note::
If the corresponding dimensions of two arrays have the same size or one of them has size 1,
then the arrays are broadcastable to a common shape.
Parameters
----------
lhs : scalar or mxnet.ndarray.array
First array in modulo.
rhs : scalar or mxnet.ndarray.array
Second array in modulo.
The arrays to be taken modulo. If ``lhs.shape != rhs.shape``, they must be
broadcastable to a common shape.
Returns
-------
NDArray
The element-wise modulo of the input arrays.
Examples
--------
>>> x = mx.nd.ones((2,3))*6
>>> y = mx.nd.ones((2,1))*4
>>> x.asnumpy()
array([[ 6., 6., 6.],
[ 6., 6., 6.]], dtype=float32)
>>> y.asnumpy()
array([[ 4.],
[ 4.]], dtype=float32)
>>> x%5
<NDArray 2x3 @cpu(0)>
>>> (x%5).asnumpy()
array([[ 1., 1., 1.],
[ 1., 1., 1.]], dtype=float32)
>>> (x%y).asnumpy()
array([[ 2., 2., 2.],
[ 2., 2., 2.]], dtype=float32)
>>> mx.nd.modulo(x,y).asnumpy()
array([[ 2., 2., 2.],
[ 2., 2., 2.]], dtype=float32)
"""
# pylint: disable= no-member, protected-access
return _ufunc_helper(
lhs,
rhs,
op.broadcast_mod,
operator.mod,
_internal._mod_scalar,
_internal._rmod_scalar) | python | def modulo(lhs, rhs):
"""Returns element-wise modulo of the input arrays with broadcasting.
Equivalent to ``lhs % rhs`` and ``mx.nd.broadcast_mod(lhs, rhs)``.
.. note::
If the corresponding dimensions of two arrays have the same size or one of them has size 1,
then the arrays are broadcastable to a common shape.
Parameters
----------
lhs : scalar or mxnet.ndarray.array
First array in modulo.
rhs : scalar or mxnet.ndarray.array
Second array in modulo.
The arrays to be taken modulo. If ``lhs.shape != rhs.shape``, they must be
broadcastable to a common shape.
Returns
-------
NDArray
The element-wise modulo of the input arrays.
Examples
--------
>>> x = mx.nd.ones((2,3))*6
>>> y = mx.nd.ones((2,1))*4
>>> x.asnumpy()
array([[ 6., 6., 6.],
[ 6., 6., 6.]], dtype=float32)
>>> y.asnumpy()
array([[ 4.],
[ 4.]], dtype=float32)
>>> x%5
<NDArray 2x3 @cpu(0)>
>>> (x%5).asnumpy()
array([[ 1., 1., 1.],
[ 1., 1., 1.]], dtype=float32)
>>> (x%y).asnumpy()
array([[ 2., 2., 2.],
[ 2., 2., 2.]], dtype=float32)
>>> mx.nd.modulo(x,y).asnumpy()
array([[ 2., 2., 2.],
[ 2., 2., 2.]], dtype=float32)
"""
# pylint: disable= no-member, protected-access
return _ufunc_helper(
lhs,
rhs,
op.broadcast_mod,
operator.mod,
_internal._mod_scalar,
_internal._rmod_scalar) | [
"def",
"modulo",
"(",
"lhs",
",",
"rhs",
")",
":",
"# pylint: disable= no-member, protected-access",
"return",
"_ufunc_helper",
"(",
"lhs",
",",
"rhs",
",",
"op",
".",
"broadcast_mod",
",",
"operator",
".",
"mod",
",",
"_internal",
".",
"_mod_scalar",
",",
"_internal",
".",
"_rmod_scalar",
")"
] | Returns element-wise modulo of the input arrays with broadcasting.
Equivalent to ``lhs % rhs`` and ``mx.nd.broadcast_mod(lhs, rhs)``.
.. note::
If the corresponding dimensions of two arrays have the same size or one of them has size 1,
then the arrays are broadcastable to a common shape.
Parameters
----------
lhs : scalar or mxnet.ndarray.array
First array in modulo.
rhs : scalar or mxnet.ndarray.array
Second array in modulo.
The arrays to be taken modulo. If ``lhs.shape != rhs.shape``, they must be
broadcastable to a common shape.
Returns
-------
NDArray
The element-wise modulo of the input arrays.
Examples
--------
>>> x = mx.nd.ones((2,3))*6
>>> y = mx.nd.ones((2,1))*4
>>> x.asnumpy()
array([[ 6., 6., 6.],
[ 6., 6., 6.]], dtype=float32)
>>> y.asnumpy()
array([[ 4.],
[ 4.]], dtype=float32)
>>> x%5
<NDArray 2x3 @cpu(0)>
>>> (x%5).asnumpy()
array([[ 1., 1., 1.],
[ 1., 1., 1.]], dtype=float32)
>>> (x%y).asnumpy()
array([[ 2., 2., 2.],
[ 2., 2., 2.]], dtype=float32)
>>> mx.nd.modulo(x,y).asnumpy()
array([[ 2., 2., 2.],
[ 2., 2., 2.]], dtype=float32) | [
"Returns",
"element",
"-",
"wise",
"modulo",
"of",
"the",
"input",
"arrays",
"with",
"broadcasting",
"."
] | 1af29e9c060a4c7d60eeaacba32afdb9a7775ba7 | https://github.com/apache/incubator-mxnet/blob/1af29e9c060a4c7d60eeaacba32afdb9a7775ba7/python/mxnet/ndarray/ndarray.py#L2905-L2958 |
24,037 | apache/incubator-mxnet | python/mxnet/ndarray/ndarray.py | power | def power(base, exp):
"""Returns result of first array elements raised to powers from second array, element-wise
with broadcasting.
Equivalent to ``base ** exp`` and ``mx.nd.broadcast_power(lhs, rhs)``.
.. note::
If the corresponding dimensions of two arrays have the same size or one of them has size 1,
then the arrays are broadcastable to a common shape.
Parameters
----------
base : scalar or NDArray
The base array
exp : scalar or NDArray
The exponent array. If ``base.shape != exp.shape``, they must be
broadcastable to a common shape.
Returns
--------
NDArray
The bases in x raised to the exponents in y.
Examples
--------
>>> x = mx.nd.ones((2,3))*2
>>> y = mx.nd.arange(1,3).reshape((2,1))
>>> z = mx.nd.arange(1,3).reshape((2,1))
>>> x.asnumpy()
array([[ 2., 2., 2.],
[ 2., 2., 2.]], dtype=float32)
>>> y.asnumpy()
array([[ 1.],
[ 2.]], dtype=float32)
>>> z.asnumpy()
array([[ 1.],
[ 2.]], dtype=float32)
>>> (x**2).asnumpy()
array([[ 4., 4., 4.],
[ 4., 4., 4.]], dtype=float32)
>>> (x**y).asnumpy()
array([[ 2., 2., 2.],
[ 4., 4., 4.]], dtype=float32)
>>> mx.nd.power(x,y).asnumpy()
array([[ 2., 2., 2.],
[ 4., 4., 4.]], dtype=float32)
>>> (z**y).asnumpy()
array([[ 1.],
[ 4.]], dtype=float32)
"""
# pylint: disable= no-member, protected-access
return _ufunc_helper(
base,
exp,
op.broadcast_power,
operator.pow,
_internal._power_scalar,
_internal._rpower_scalar) | python | def power(base, exp):
"""Returns result of first array elements raised to powers from second array, element-wise
with broadcasting.
Equivalent to ``base ** exp`` and ``mx.nd.broadcast_power(lhs, rhs)``.
.. note::
If the corresponding dimensions of two arrays have the same size or one of them has size 1,
then the arrays are broadcastable to a common shape.
Parameters
----------
base : scalar or NDArray
The base array
exp : scalar or NDArray
The exponent array. If ``base.shape != exp.shape``, they must be
broadcastable to a common shape.
Returns
--------
NDArray
The bases in x raised to the exponents in y.
Examples
--------
>>> x = mx.nd.ones((2,3))*2
>>> y = mx.nd.arange(1,3).reshape((2,1))
>>> z = mx.nd.arange(1,3).reshape((2,1))
>>> x.asnumpy()
array([[ 2., 2., 2.],
[ 2., 2., 2.]], dtype=float32)
>>> y.asnumpy()
array([[ 1.],
[ 2.]], dtype=float32)
>>> z.asnumpy()
array([[ 1.],
[ 2.]], dtype=float32)
>>> (x**2).asnumpy()
array([[ 4., 4., 4.],
[ 4., 4., 4.]], dtype=float32)
>>> (x**y).asnumpy()
array([[ 2., 2., 2.],
[ 4., 4., 4.]], dtype=float32)
>>> mx.nd.power(x,y).asnumpy()
array([[ 2., 2., 2.],
[ 4., 4., 4.]], dtype=float32)
>>> (z**y).asnumpy()
array([[ 1.],
[ 4.]], dtype=float32)
"""
# pylint: disable= no-member, protected-access
return _ufunc_helper(
base,
exp,
op.broadcast_power,
operator.pow,
_internal._power_scalar,
_internal._rpower_scalar) | [
"def",
"power",
"(",
"base",
",",
"exp",
")",
":",
"# pylint: disable= no-member, protected-access",
"return",
"_ufunc_helper",
"(",
"base",
",",
"exp",
",",
"op",
".",
"broadcast_power",
",",
"operator",
".",
"pow",
",",
"_internal",
".",
"_power_scalar",
",",
"_internal",
".",
"_rpower_scalar",
")"
] | Returns result of first array elements raised to powers from second array, element-wise
with broadcasting.
Equivalent to ``base ** exp`` and ``mx.nd.broadcast_power(lhs, rhs)``.
.. note::
If the corresponding dimensions of two arrays have the same size or one of them has size 1,
then the arrays are broadcastable to a common shape.
Parameters
----------
base : scalar or NDArray
The base array
exp : scalar or NDArray
The exponent array. If ``base.shape != exp.shape``, they must be
broadcastable to a common shape.
Returns
--------
NDArray
The bases in x raised to the exponents in y.
Examples
--------
>>> x = mx.nd.ones((2,3))*2
>>> y = mx.nd.arange(1,3).reshape((2,1))
>>> z = mx.nd.arange(1,3).reshape((2,1))
>>> x.asnumpy()
array([[ 2., 2., 2.],
[ 2., 2., 2.]], dtype=float32)
>>> y.asnumpy()
array([[ 1.],
[ 2.]], dtype=float32)
>>> z.asnumpy()
array([[ 1.],
[ 2.]], dtype=float32)
>>> (x**2).asnumpy()
array([[ 4., 4., 4.],
[ 4., 4., 4.]], dtype=float32)
>>> (x**y).asnumpy()
array([[ 2., 2., 2.],
[ 4., 4., 4.]], dtype=float32)
>>> mx.nd.power(x,y).asnumpy()
array([[ 2., 2., 2.],
[ 4., 4., 4.]], dtype=float32)
>>> (z**y).asnumpy()
array([[ 1.],
[ 4.]], dtype=float32) | [
"Returns",
"result",
"of",
"first",
"array",
"elements",
"raised",
"to",
"powers",
"from",
"second",
"array",
"element",
"-",
"wise",
"with",
"broadcasting",
"."
] | 1af29e9c060a4c7d60eeaacba32afdb9a7775ba7 | https://github.com/apache/incubator-mxnet/blob/1af29e9c060a4c7d60eeaacba32afdb9a7775ba7/python/mxnet/ndarray/ndarray.py#L2962-L3020 |
24,038 | apache/incubator-mxnet | python/mxnet/ndarray/ndarray.py | maximum | def maximum(lhs, rhs):
"""Returns element-wise maximum of the input arrays with broadcasting.
Equivalent to ``mx.nd.broadcast_maximum(lhs, rhs)``.
.. note::
If the corresponding dimensions of two arrays have the same size or one of them has size 1,
then the arrays are broadcastable to a common shape.
Parameters
----------
lhs : scalar or mxnet.ndarray.array
First array to be compared.
rhs : scalar or mxnet.ndarray.array
Second array to be compared. If ``lhs.shape != rhs.shape``, they must be
broadcastable to a common shape.
Returns
-------
NDArray
The element-wise maximum of the input arrays.
Examples
--------
>>> x = mx.nd.ones((2,3))
>>> y = mx.nd.arange(2).reshape((2,1))
>>> z = mx.nd.arange(2).reshape((1,2))
>>> x.asnumpy()
array([[ 1., 1., 1.],
[ 1., 1., 1.]], dtype=float32)
>>> y.asnumpy()
array([[ 0.],
[ 1.]], dtype=float32)
>>> z.asnumpy()
array([[ 0., 1.]], dtype=float32)
>>> mx.nd.maximum(x, 2).asnumpy()
array([[ 2., 2., 2.],
[ 2., 2., 2.]], dtype=float32)
>>> mx.nd.maximum(x, y).asnumpy()
array([[ 1., 1., 1.],
[ 1., 1., 1.]], dtype=float32)
>>> mx.nd.maximum(y, z).asnumpy()
array([[ 0., 1.],
[ 1., 1.]], dtype=float32)
"""
# pylint: disable= no-member, protected-access
return _ufunc_helper(
lhs,
rhs,
op.broadcast_maximum,
lambda x, y: x if x > y else y,
_internal._maximum_scalar,
None) | python | def maximum(lhs, rhs):
"""Returns element-wise maximum of the input arrays with broadcasting.
Equivalent to ``mx.nd.broadcast_maximum(lhs, rhs)``.
.. note::
If the corresponding dimensions of two arrays have the same size or one of them has size 1,
then the arrays are broadcastable to a common shape.
Parameters
----------
lhs : scalar or mxnet.ndarray.array
First array to be compared.
rhs : scalar or mxnet.ndarray.array
Second array to be compared. If ``lhs.shape != rhs.shape``, they must be
broadcastable to a common shape.
Returns
-------
NDArray
The element-wise maximum of the input arrays.
Examples
--------
>>> x = mx.nd.ones((2,3))
>>> y = mx.nd.arange(2).reshape((2,1))
>>> z = mx.nd.arange(2).reshape((1,2))
>>> x.asnumpy()
array([[ 1., 1., 1.],
[ 1., 1., 1.]], dtype=float32)
>>> y.asnumpy()
array([[ 0.],
[ 1.]], dtype=float32)
>>> z.asnumpy()
array([[ 0., 1.]], dtype=float32)
>>> mx.nd.maximum(x, 2).asnumpy()
array([[ 2., 2., 2.],
[ 2., 2., 2.]], dtype=float32)
>>> mx.nd.maximum(x, y).asnumpy()
array([[ 1., 1., 1.],
[ 1., 1., 1.]], dtype=float32)
>>> mx.nd.maximum(y, z).asnumpy()
array([[ 0., 1.],
[ 1., 1.]], dtype=float32)
"""
# pylint: disable= no-member, protected-access
return _ufunc_helper(
lhs,
rhs,
op.broadcast_maximum,
lambda x, y: x if x > y else y,
_internal._maximum_scalar,
None) | [
"def",
"maximum",
"(",
"lhs",
",",
"rhs",
")",
":",
"# pylint: disable= no-member, protected-access",
"return",
"_ufunc_helper",
"(",
"lhs",
",",
"rhs",
",",
"op",
".",
"broadcast_maximum",
",",
"lambda",
"x",
",",
"y",
":",
"x",
"if",
"x",
">",
"y",
"else",
"y",
",",
"_internal",
".",
"_maximum_scalar",
",",
"None",
")"
] | Returns element-wise maximum of the input arrays with broadcasting.
Equivalent to ``mx.nd.broadcast_maximum(lhs, rhs)``.
.. note::
If the corresponding dimensions of two arrays have the same size or one of them has size 1,
then the arrays are broadcastable to a common shape.
Parameters
----------
lhs : scalar or mxnet.ndarray.array
First array to be compared.
rhs : scalar or mxnet.ndarray.array
Second array to be compared. If ``lhs.shape != rhs.shape``, they must be
broadcastable to a common shape.
Returns
-------
NDArray
The element-wise maximum of the input arrays.
Examples
--------
>>> x = mx.nd.ones((2,3))
>>> y = mx.nd.arange(2).reshape((2,1))
>>> z = mx.nd.arange(2).reshape((1,2))
>>> x.asnumpy()
array([[ 1., 1., 1.],
[ 1., 1., 1.]], dtype=float32)
>>> y.asnumpy()
array([[ 0.],
[ 1.]], dtype=float32)
>>> z.asnumpy()
array([[ 0., 1.]], dtype=float32)
>>> mx.nd.maximum(x, 2).asnumpy()
array([[ 2., 2., 2.],
[ 2., 2., 2.]], dtype=float32)
>>> mx.nd.maximum(x, y).asnumpy()
array([[ 1., 1., 1.],
[ 1., 1., 1.]], dtype=float32)
>>> mx.nd.maximum(y, z).asnumpy()
array([[ 0., 1.],
[ 1., 1.]], dtype=float32) | [
"Returns",
"element",
"-",
"wise",
"maximum",
"of",
"the",
"input",
"arrays",
"with",
"broadcasting",
"."
] | 1af29e9c060a4c7d60eeaacba32afdb9a7775ba7 | https://github.com/apache/incubator-mxnet/blob/1af29e9c060a4c7d60eeaacba32afdb9a7775ba7/python/mxnet/ndarray/ndarray.py#L3024-L3077 |
24,039 | apache/incubator-mxnet | python/mxnet/ndarray/ndarray.py | minimum | def minimum(lhs, rhs):
"""Returns element-wise minimum of the input arrays with broadcasting.
Equivalent to ``mx.nd.broadcast_minimum(lhs, rhs)``.
.. note::
If the corresponding dimensions of two arrays have the same size or one of them has size 1,
then the arrays are broadcastable to a common shape.
Parameters
----------
lhs : scalar or mxnet.ndarray.array
First array to be compared.
rhs : scalar or mxnet.ndarray.array
Second array to be compared. If ``lhs.shape != rhs.shape``, they must be
broadcastable to a common shape.
Returns
-------
NDArray
The element-wise minimum of the input arrays.
Examples
--------
>>> x = mx.nd.ones((2,3))
>>> y = mx.nd.arange(2).reshape((2,1))
>>> z = mx.nd.arange(2).reshape((1,2))
>>> x.asnumpy()
array([[ 1., 1., 1.],
[ 1., 1., 1.]], dtype=float32)
>>> y.asnumpy()
array([[ 0.],
[ 1.]], dtype=float32)
>>> z.asnumpy()
array([[ 0., 1.]], dtype=float32)
>>> mx.nd.minimum(x, 2).asnumpy()
array([[ 1., 1., 1.],
[ 1., 1., 1.]], dtype=float32)
>>> mx.nd.minimum(x, y).asnumpy()
array([[ 0., 0., 0.],
[ 1., 1., 1.]], dtype=float32)
>>> mx.nd.minimum(z, y).asnumpy()
array([[ 0., 0.],
[ 0., 1.]], dtype=float32)
"""
# pylint: disable= no-member, protected-access
return _ufunc_helper(
lhs,
rhs,
op.broadcast_minimum,
lambda x, y: x if x < y else y,
_internal._minimum_scalar,
None) | python | def minimum(lhs, rhs):
"""Returns element-wise minimum of the input arrays with broadcasting.
Equivalent to ``mx.nd.broadcast_minimum(lhs, rhs)``.
.. note::
If the corresponding dimensions of two arrays have the same size or one of them has size 1,
then the arrays are broadcastable to a common shape.
Parameters
----------
lhs : scalar or mxnet.ndarray.array
First array to be compared.
rhs : scalar or mxnet.ndarray.array
Second array to be compared. If ``lhs.shape != rhs.shape``, they must be
broadcastable to a common shape.
Returns
-------
NDArray
The element-wise minimum of the input arrays.
Examples
--------
>>> x = mx.nd.ones((2,3))
>>> y = mx.nd.arange(2).reshape((2,1))
>>> z = mx.nd.arange(2).reshape((1,2))
>>> x.asnumpy()
array([[ 1., 1., 1.],
[ 1., 1., 1.]], dtype=float32)
>>> y.asnumpy()
array([[ 0.],
[ 1.]], dtype=float32)
>>> z.asnumpy()
array([[ 0., 1.]], dtype=float32)
>>> mx.nd.minimum(x, 2).asnumpy()
array([[ 1., 1., 1.],
[ 1., 1., 1.]], dtype=float32)
>>> mx.nd.minimum(x, y).asnumpy()
array([[ 0., 0., 0.],
[ 1., 1., 1.]], dtype=float32)
>>> mx.nd.minimum(z, y).asnumpy()
array([[ 0., 0.],
[ 0., 1.]], dtype=float32)
"""
# pylint: disable= no-member, protected-access
return _ufunc_helper(
lhs,
rhs,
op.broadcast_minimum,
lambda x, y: x if x < y else y,
_internal._minimum_scalar,
None) | [
"def",
"minimum",
"(",
"lhs",
",",
"rhs",
")",
":",
"# pylint: disable= no-member, protected-access",
"return",
"_ufunc_helper",
"(",
"lhs",
",",
"rhs",
",",
"op",
".",
"broadcast_minimum",
",",
"lambda",
"x",
",",
"y",
":",
"x",
"if",
"x",
"<",
"y",
"else",
"y",
",",
"_internal",
".",
"_minimum_scalar",
",",
"None",
")"
] | Returns element-wise minimum of the input arrays with broadcasting.
Equivalent to ``mx.nd.broadcast_minimum(lhs, rhs)``.
.. note::
If the corresponding dimensions of two arrays have the same size or one of them has size 1,
then the arrays are broadcastable to a common shape.
Parameters
----------
lhs : scalar or mxnet.ndarray.array
First array to be compared.
rhs : scalar or mxnet.ndarray.array
Second array to be compared. If ``lhs.shape != rhs.shape``, they must be
broadcastable to a common shape.
Returns
-------
NDArray
The element-wise minimum of the input arrays.
Examples
--------
>>> x = mx.nd.ones((2,3))
>>> y = mx.nd.arange(2).reshape((2,1))
>>> z = mx.nd.arange(2).reshape((1,2))
>>> x.asnumpy()
array([[ 1., 1., 1.],
[ 1., 1., 1.]], dtype=float32)
>>> y.asnumpy()
array([[ 0.],
[ 1.]], dtype=float32)
>>> z.asnumpy()
array([[ 0., 1.]], dtype=float32)
>>> mx.nd.minimum(x, 2).asnumpy()
array([[ 1., 1., 1.],
[ 1., 1., 1.]], dtype=float32)
>>> mx.nd.minimum(x, y).asnumpy()
array([[ 0., 0., 0.],
[ 1., 1., 1.]], dtype=float32)
>>> mx.nd.minimum(z, y).asnumpy()
array([[ 0., 0.],
[ 0., 1.]], dtype=float32) | [
"Returns",
"element",
"-",
"wise",
"minimum",
"of",
"the",
"input",
"arrays",
"with",
"broadcasting",
"."
] | 1af29e9c060a4c7d60eeaacba32afdb9a7775ba7 | https://github.com/apache/incubator-mxnet/blob/1af29e9c060a4c7d60eeaacba32afdb9a7775ba7/python/mxnet/ndarray/ndarray.py#L3081-L3134 |
24,040 | apache/incubator-mxnet | python/mxnet/ndarray/ndarray.py | concatenate | def concatenate(arrays, axis=0, always_copy=True):
"""DEPRECATED, use ``concat`` instead
Parameters
----------
arrays : list of `NDArray`
Arrays to be concatenate. They must have identical shape except
the first dimension. They also must have the same data type.
axis : int
The axis along which to concatenate.
always_copy : bool
Default `True`. When not `True`, if the arrays only contain one
`NDArray`, that element will be returned directly, avoid copying.
Returns
-------
NDArray
An `NDArray` that lives on the same context as `arrays[0].context`.
"""
assert isinstance(arrays, list)
assert len(arrays) > 0
assert isinstance(arrays[0], NDArray)
if not always_copy and len(arrays) == 1:
return arrays[0]
shape_axis = arrays[0].shape[axis]
shape_rest1 = arrays[0].shape[0:axis]
shape_rest2 = arrays[0].shape[axis+1:]
dtype = arrays[0].dtype
for arr in arrays[1:]:
shape_axis += arr.shape[axis]
assert shape_rest1 == arr.shape[0:axis]
assert shape_rest2 == arr.shape[axis+1:]
assert dtype == arr.dtype
ret_shape = shape_rest1 + (shape_axis,) + shape_rest2
ret = empty(ret_shape, ctx=arrays[0].context, dtype=dtype)
idx = 0
begin = [0 for _ in ret_shape]
end = list(ret_shape)
for arr in arrays:
if axis == 0:
ret[idx:idx+arr.shape[0]] = arr
else:
begin[axis] = idx
end[axis] = idx+arr.shape[axis]
# pylint: disable=no-member,protected-access
_internal._crop_assign(ret, arr, out=ret,
begin=tuple(begin),
end=tuple(end))
# pylint: enable=no-member,protected-access
idx += arr.shape[axis]
return ret | python | def concatenate(arrays, axis=0, always_copy=True):
"""DEPRECATED, use ``concat`` instead
Parameters
----------
arrays : list of `NDArray`
Arrays to be concatenate. They must have identical shape except
the first dimension. They also must have the same data type.
axis : int
The axis along which to concatenate.
always_copy : bool
Default `True`. When not `True`, if the arrays only contain one
`NDArray`, that element will be returned directly, avoid copying.
Returns
-------
NDArray
An `NDArray` that lives on the same context as `arrays[0].context`.
"""
assert isinstance(arrays, list)
assert len(arrays) > 0
assert isinstance(arrays[0], NDArray)
if not always_copy and len(arrays) == 1:
return arrays[0]
shape_axis = arrays[0].shape[axis]
shape_rest1 = arrays[0].shape[0:axis]
shape_rest2 = arrays[0].shape[axis+1:]
dtype = arrays[0].dtype
for arr in arrays[1:]:
shape_axis += arr.shape[axis]
assert shape_rest1 == arr.shape[0:axis]
assert shape_rest2 == arr.shape[axis+1:]
assert dtype == arr.dtype
ret_shape = shape_rest1 + (shape_axis,) + shape_rest2
ret = empty(ret_shape, ctx=arrays[0].context, dtype=dtype)
idx = 0
begin = [0 for _ in ret_shape]
end = list(ret_shape)
for arr in arrays:
if axis == 0:
ret[idx:idx+arr.shape[0]] = arr
else:
begin[axis] = idx
end[axis] = idx+arr.shape[axis]
# pylint: disable=no-member,protected-access
_internal._crop_assign(ret, arr, out=ret,
begin=tuple(begin),
end=tuple(end))
# pylint: enable=no-member,protected-access
idx += arr.shape[axis]
return ret | [
"def",
"concatenate",
"(",
"arrays",
",",
"axis",
"=",
"0",
",",
"always_copy",
"=",
"True",
")",
":",
"assert",
"isinstance",
"(",
"arrays",
",",
"list",
")",
"assert",
"len",
"(",
"arrays",
")",
">",
"0",
"assert",
"isinstance",
"(",
"arrays",
"[",
"0",
"]",
",",
"NDArray",
")",
"if",
"not",
"always_copy",
"and",
"len",
"(",
"arrays",
")",
"==",
"1",
":",
"return",
"arrays",
"[",
"0",
"]",
"shape_axis",
"=",
"arrays",
"[",
"0",
"]",
".",
"shape",
"[",
"axis",
"]",
"shape_rest1",
"=",
"arrays",
"[",
"0",
"]",
".",
"shape",
"[",
"0",
":",
"axis",
"]",
"shape_rest2",
"=",
"arrays",
"[",
"0",
"]",
".",
"shape",
"[",
"axis",
"+",
"1",
":",
"]",
"dtype",
"=",
"arrays",
"[",
"0",
"]",
".",
"dtype",
"for",
"arr",
"in",
"arrays",
"[",
"1",
":",
"]",
":",
"shape_axis",
"+=",
"arr",
".",
"shape",
"[",
"axis",
"]",
"assert",
"shape_rest1",
"==",
"arr",
".",
"shape",
"[",
"0",
":",
"axis",
"]",
"assert",
"shape_rest2",
"==",
"arr",
".",
"shape",
"[",
"axis",
"+",
"1",
":",
"]",
"assert",
"dtype",
"==",
"arr",
".",
"dtype",
"ret_shape",
"=",
"shape_rest1",
"+",
"(",
"shape_axis",
",",
")",
"+",
"shape_rest2",
"ret",
"=",
"empty",
"(",
"ret_shape",
",",
"ctx",
"=",
"arrays",
"[",
"0",
"]",
".",
"context",
",",
"dtype",
"=",
"dtype",
")",
"idx",
"=",
"0",
"begin",
"=",
"[",
"0",
"for",
"_",
"in",
"ret_shape",
"]",
"end",
"=",
"list",
"(",
"ret_shape",
")",
"for",
"arr",
"in",
"arrays",
":",
"if",
"axis",
"==",
"0",
":",
"ret",
"[",
"idx",
":",
"idx",
"+",
"arr",
".",
"shape",
"[",
"0",
"]",
"]",
"=",
"arr",
"else",
":",
"begin",
"[",
"axis",
"]",
"=",
"idx",
"end",
"[",
"axis",
"]",
"=",
"idx",
"+",
"arr",
".",
"shape",
"[",
"axis",
"]",
"# pylint: disable=no-member,protected-access",
"_internal",
".",
"_crop_assign",
"(",
"ret",
",",
"arr",
",",
"out",
"=",
"ret",
",",
"begin",
"=",
"tuple",
"(",
"begin",
")",
",",
"end",
"=",
"tuple",
"(",
"end",
")",
")",
"# pylint: enable=no-member,protected-access",
"idx",
"+=",
"arr",
".",
"shape",
"[",
"axis",
"]",
"return",
"ret"
] | DEPRECATED, use ``concat`` instead
Parameters
----------
arrays : list of `NDArray`
Arrays to be concatenate. They must have identical shape except
the first dimension. They also must have the same data type.
axis : int
The axis along which to concatenate.
always_copy : bool
Default `True`. When not `True`, if the arrays only contain one
`NDArray`, that element will be returned directly, avoid copying.
Returns
-------
NDArray
An `NDArray` that lives on the same context as `arrays[0].context`. | [
"DEPRECATED",
"use",
"concat",
"instead"
] | 1af29e9c060a4c7d60eeaacba32afdb9a7775ba7 | https://github.com/apache/incubator-mxnet/blob/1af29e9c060a4c7d60eeaacba32afdb9a7775ba7/python/mxnet/ndarray/ndarray.py#L3705-L3759 |
24,041 | apache/incubator-mxnet | python/mxnet/ndarray/ndarray.py | imdecode | def imdecode(str_img, clip_rect=(0, 0, 0, 0), out=None, index=0, channels=3, mean=None):
"""DEPRECATED, use mx.img instead
Parameters
----------
str_img : str
Binary image data
clip_rect : iterable of 4 int
Clip decoded image to rectangle (x0, y0, x1, y1).
out : NDArray
Output buffer. Can be 3 dimensional (c, h, w) or 4 dimensional (n, c, h, w).
index : int
Output decoded image to i-th slice of 4 dimensional buffer.
channels : int
Number of channels to output. Decode to grey scale when channels = 1.
mean : NDArray
Subtract mean from decode image before outputing.
"""
# pylint: disable= no-member, protected-access, too-many-arguments
if mean is None:
mean = NDArray(_new_empty_handle())
if out is None:
return _internal._imdecode(mean, index,
clip_rect[0],
clip_rect[1],
clip_rect[2],
clip_rect[3],
channels,
len(str_img),
str_img=str_img)
else:
return _internal._imdecode(mean, index,
clip_rect[0],
clip_rect[1],
clip_rect[2],
clip_rect[3],
channels,
len(str_img),
str_img=str_img,
out=out) | python | def imdecode(str_img, clip_rect=(0, 0, 0, 0), out=None, index=0, channels=3, mean=None):
"""DEPRECATED, use mx.img instead
Parameters
----------
str_img : str
Binary image data
clip_rect : iterable of 4 int
Clip decoded image to rectangle (x0, y0, x1, y1).
out : NDArray
Output buffer. Can be 3 dimensional (c, h, w) or 4 dimensional (n, c, h, w).
index : int
Output decoded image to i-th slice of 4 dimensional buffer.
channels : int
Number of channels to output. Decode to grey scale when channels = 1.
mean : NDArray
Subtract mean from decode image before outputing.
"""
# pylint: disable= no-member, protected-access, too-many-arguments
if mean is None:
mean = NDArray(_new_empty_handle())
if out is None:
return _internal._imdecode(mean, index,
clip_rect[0],
clip_rect[1],
clip_rect[2],
clip_rect[3],
channels,
len(str_img),
str_img=str_img)
else:
return _internal._imdecode(mean, index,
clip_rect[0],
clip_rect[1],
clip_rect[2],
clip_rect[3],
channels,
len(str_img),
str_img=str_img,
out=out) | [
"def",
"imdecode",
"(",
"str_img",
",",
"clip_rect",
"=",
"(",
"0",
",",
"0",
",",
"0",
",",
"0",
")",
",",
"out",
"=",
"None",
",",
"index",
"=",
"0",
",",
"channels",
"=",
"3",
",",
"mean",
"=",
"None",
")",
":",
"# pylint: disable= no-member, protected-access, too-many-arguments",
"if",
"mean",
"is",
"None",
":",
"mean",
"=",
"NDArray",
"(",
"_new_empty_handle",
"(",
")",
")",
"if",
"out",
"is",
"None",
":",
"return",
"_internal",
".",
"_imdecode",
"(",
"mean",
",",
"index",
",",
"clip_rect",
"[",
"0",
"]",
",",
"clip_rect",
"[",
"1",
"]",
",",
"clip_rect",
"[",
"2",
"]",
",",
"clip_rect",
"[",
"3",
"]",
",",
"channels",
",",
"len",
"(",
"str_img",
")",
",",
"str_img",
"=",
"str_img",
")",
"else",
":",
"return",
"_internal",
".",
"_imdecode",
"(",
"mean",
",",
"index",
",",
"clip_rect",
"[",
"0",
"]",
",",
"clip_rect",
"[",
"1",
"]",
",",
"clip_rect",
"[",
"2",
"]",
",",
"clip_rect",
"[",
"3",
"]",
",",
"channels",
",",
"len",
"(",
"str_img",
")",
",",
"str_img",
"=",
"str_img",
",",
"out",
"=",
"out",
")"
] | DEPRECATED, use mx.img instead
Parameters
----------
str_img : str
Binary image data
clip_rect : iterable of 4 int
Clip decoded image to rectangle (x0, y0, x1, y1).
out : NDArray
Output buffer. Can be 3 dimensional (c, h, w) or 4 dimensional (n, c, h, w).
index : int
Output decoded image to i-th slice of 4 dimensional buffer.
channels : int
Number of channels to output. Decode to grey scale when channels = 1.
mean : NDArray
Subtract mean from decode image before outputing. | [
"DEPRECATED",
"use",
"mx",
".",
"img",
"instead"
] | 1af29e9c060a4c7d60eeaacba32afdb9a7775ba7 | https://github.com/apache/incubator-mxnet/blob/1af29e9c060a4c7d60eeaacba32afdb9a7775ba7/python/mxnet/ndarray/ndarray.py#L3763-L3802 |
24,042 | apache/incubator-mxnet | python/mxnet/ndarray/ndarray.py | zeros | def zeros(shape, ctx=None, dtype=None, **kwargs):
"""Returns a new array filled with all zeros, with the given shape and type.
Parameters
----------
shape : int or tuple of int
The shape of the empty array.
ctx : Context, optional
An optional device context (default is the current default context).
dtype : str or numpy.dtype, optional
An optional value type (default is `float32`).
out : NDArray, optional
The output NDArray (default is `None`).
Returns
-------
NDArray
A created array
Examples
--------
>>> mx.nd.zeros(1).asnumpy()
array([ 0.], dtype=float32)
>>> mx.nd.zeros((1,2), mx.gpu(0))
<NDArray 1x2 @gpu(0)>
>>> mx.nd.zeros((1,2), mx.gpu(0), 'float16').asnumpy()
array([[ 0., 0.]], dtype=float16)
"""
# pylint: disable= unused-argument
if ctx is None:
ctx = current_context()
dtype = mx_real_t if dtype is None else dtype
# pylint: disable= no-member, protected-access
return _internal._zeros(shape=shape, ctx=ctx, dtype=dtype, **kwargs) | python | def zeros(shape, ctx=None, dtype=None, **kwargs):
"""Returns a new array filled with all zeros, with the given shape and type.
Parameters
----------
shape : int or tuple of int
The shape of the empty array.
ctx : Context, optional
An optional device context (default is the current default context).
dtype : str or numpy.dtype, optional
An optional value type (default is `float32`).
out : NDArray, optional
The output NDArray (default is `None`).
Returns
-------
NDArray
A created array
Examples
--------
>>> mx.nd.zeros(1).asnumpy()
array([ 0.], dtype=float32)
>>> mx.nd.zeros((1,2), mx.gpu(0))
<NDArray 1x2 @gpu(0)>
>>> mx.nd.zeros((1,2), mx.gpu(0), 'float16').asnumpy()
array([[ 0., 0.]], dtype=float16)
"""
# pylint: disable= unused-argument
if ctx is None:
ctx = current_context()
dtype = mx_real_t if dtype is None else dtype
# pylint: disable= no-member, protected-access
return _internal._zeros(shape=shape, ctx=ctx, dtype=dtype, **kwargs) | [
"def",
"zeros",
"(",
"shape",
",",
"ctx",
"=",
"None",
",",
"dtype",
"=",
"None",
",",
"*",
"*",
"kwargs",
")",
":",
"# pylint: disable= unused-argument",
"if",
"ctx",
"is",
"None",
":",
"ctx",
"=",
"current_context",
"(",
")",
"dtype",
"=",
"mx_real_t",
"if",
"dtype",
"is",
"None",
"else",
"dtype",
"# pylint: disable= no-member, protected-access",
"return",
"_internal",
".",
"_zeros",
"(",
"shape",
"=",
"shape",
",",
"ctx",
"=",
"ctx",
",",
"dtype",
"=",
"dtype",
",",
"*",
"*",
"kwargs",
")"
] | Returns a new array filled with all zeros, with the given shape and type.
Parameters
----------
shape : int or tuple of int
The shape of the empty array.
ctx : Context, optional
An optional device context (default is the current default context).
dtype : str or numpy.dtype, optional
An optional value type (default is `float32`).
out : NDArray, optional
The output NDArray (default is `None`).
Returns
-------
NDArray
A created array
Examples
--------
>>> mx.nd.zeros(1).asnumpy()
array([ 0.], dtype=float32)
>>> mx.nd.zeros((1,2), mx.gpu(0))
<NDArray 1x2 @gpu(0)>
>>> mx.nd.zeros((1,2), mx.gpu(0), 'float16').asnumpy()
array([[ 0., 0.]], dtype=float16) | [
"Returns",
"a",
"new",
"array",
"filled",
"with",
"all",
"zeros",
"with",
"the",
"given",
"shape",
"and",
"type",
"."
] | 1af29e9c060a4c7d60eeaacba32afdb9a7775ba7 | https://github.com/apache/incubator-mxnet/blob/1af29e9c060a4c7d60eeaacba32afdb9a7775ba7/python/mxnet/ndarray/ndarray.py#L3805-L3838 |
24,043 | apache/incubator-mxnet | python/mxnet/ndarray/ndarray.py | eye | def eye(N, M=0, k=0, ctx=None, dtype=None, **kwargs):
"""Return a 2-D array with ones on the diagonal and zeros elsewhere.
Parameters
----------
N: int
Number of rows in the output.
M: int, optional
Number of columns in the output. If 0, defaults to N.
k: int, optional
Index of the diagonal: 0 (the default) refers to the main diagonal,
a positive value refers to an upper diagonal,
and a negative value to a lower diagonal.
ctx: Context, optional
An optional device context (default is the current default context)
dtype: str or numpy.dtype, optional
An optional value type (default is `float32`)
Returns
-------
NDArray
A created array
Examples
--------
>>> mx.nd.eye(2)
[[ 1. 0.]
[ 0. 1.]]
<NDArray 2x2 @cpu(0)>
>>> mx.nd.eye(2, 3, 1)
[[ 0. 1. 0.]
[ 0. 0. 1.]]
<NDArray 2x3 @cpu(0)>
"""
# pylint: disable= unused-argument
if ctx is None:
ctx = current_context()
dtype = mx_real_t if dtype is None else dtype
# pylint: disable= no-member, protected-access
return _internal._eye(N=N, M=M, k=k, ctx=ctx, dtype=dtype, **kwargs) | python | def eye(N, M=0, k=0, ctx=None, dtype=None, **kwargs):
"""Return a 2-D array with ones on the diagonal and zeros elsewhere.
Parameters
----------
N: int
Number of rows in the output.
M: int, optional
Number of columns in the output. If 0, defaults to N.
k: int, optional
Index of the diagonal: 0 (the default) refers to the main diagonal,
a positive value refers to an upper diagonal,
and a negative value to a lower diagonal.
ctx: Context, optional
An optional device context (default is the current default context)
dtype: str or numpy.dtype, optional
An optional value type (default is `float32`)
Returns
-------
NDArray
A created array
Examples
--------
>>> mx.nd.eye(2)
[[ 1. 0.]
[ 0. 1.]]
<NDArray 2x2 @cpu(0)>
>>> mx.nd.eye(2, 3, 1)
[[ 0. 1. 0.]
[ 0. 0. 1.]]
<NDArray 2x3 @cpu(0)>
"""
# pylint: disable= unused-argument
if ctx is None:
ctx = current_context()
dtype = mx_real_t if dtype is None else dtype
# pylint: disable= no-member, protected-access
return _internal._eye(N=N, M=M, k=k, ctx=ctx, dtype=dtype, **kwargs) | [
"def",
"eye",
"(",
"N",
",",
"M",
"=",
"0",
",",
"k",
"=",
"0",
",",
"ctx",
"=",
"None",
",",
"dtype",
"=",
"None",
",",
"*",
"*",
"kwargs",
")",
":",
"# pylint: disable= unused-argument",
"if",
"ctx",
"is",
"None",
":",
"ctx",
"=",
"current_context",
"(",
")",
"dtype",
"=",
"mx_real_t",
"if",
"dtype",
"is",
"None",
"else",
"dtype",
"# pylint: disable= no-member, protected-access",
"return",
"_internal",
".",
"_eye",
"(",
"N",
"=",
"N",
",",
"M",
"=",
"M",
",",
"k",
"=",
"k",
",",
"ctx",
"=",
"ctx",
",",
"dtype",
"=",
"dtype",
",",
"*",
"*",
"kwargs",
")"
] | Return a 2-D array with ones on the diagonal and zeros elsewhere.
Parameters
----------
N: int
Number of rows in the output.
M: int, optional
Number of columns in the output. If 0, defaults to N.
k: int, optional
Index of the diagonal: 0 (the default) refers to the main diagonal,
a positive value refers to an upper diagonal,
and a negative value to a lower diagonal.
ctx: Context, optional
An optional device context (default is the current default context)
dtype: str or numpy.dtype, optional
An optional value type (default is `float32`)
Returns
-------
NDArray
A created array
Examples
--------
>>> mx.nd.eye(2)
[[ 1. 0.]
[ 0. 1.]]
<NDArray 2x2 @cpu(0)>
>>> mx.nd.eye(2, 3, 1)
[[ 0. 1. 0.]
[ 0. 0. 1.]]
<NDArray 2x3 @cpu(0)> | [
"Return",
"a",
"2",
"-",
"D",
"array",
"with",
"ones",
"on",
"the",
"diagonal",
"and",
"zeros",
"elsewhere",
"."
] | 1af29e9c060a4c7d60eeaacba32afdb9a7775ba7 | https://github.com/apache/incubator-mxnet/blob/1af29e9c060a4c7d60eeaacba32afdb9a7775ba7/python/mxnet/ndarray/ndarray.py#L3841-L3880 |
24,044 | apache/incubator-mxnet | python/mxnet/ndarray/ndarray.py | to_dlpack_for_read | def to_dlpack_for_read(data):
"""Returns a reference view of NDArray that represents as DLManagedTensor until
all previous write operations on the current array are finished.
Parameters
----------
data: NDArray
input data.
Returns
-------
PyCapsule (the pointer of DLManagedTensor)
a reference view of NDArray that represents as DLManagedTensor.
Examples
--------
>>> x = mx.nd.ones((2,3))
>>> y = mx.nd.to_dlpack_for_read(x)
>>> type(y)
<class 'PyCapsule'>
>>> z = mx.nd.from_dlpack(y)
>>> z
[[1. 1. 1.]
[1. 1. 1.]]
<NDArray 2x3 @cpu(0)>
"""
data.wait_to_read()
dlpack = DLPackHandle()
check_call(_LIB.MXNDArrayToDLPack(data.handle, ctypes.byref(dlpack)))
return ctypes.pythonapi.PyCapsule_New(dlpack, _c_str_dltensor, _c_dlpack_deleter) | python | def to_dlpack_for_read(data):
"""Returns a reference view of NDArray that represents as DLManagedTensor until
all previous write operations on the current array are finished.
Parameters
----------
data: NDArray
input data.
Returns
-------
PyCapsule (the pointer of DLManagedTensor)
a reference view of NDArray that represents as DLManagedTensor.
Examples
--------
>>> x = mx.nd.ones((2,3))
>>> y = mx.nd.to_dlpack_for_read(x)
>>> type(y)
<class 'PyCapsule'>
>>> z = mx.nd.from_dlpack(y)
>>> z
[[1. 1. 1.]
[1. 1. 1.]]
<NDArray 2x3 @cpu(0)>
"""
data.wait_to_read()
dlpack = DLPackHandle()
check_call(_LIB.MXNDArrayToDLPack(data.handle, ctypes.byref(dlpack)))
return ctypes.pythonapi.PyCapsule_New(dlpack, _c_str_dltensor, _c_dlpack_deleter) | [
"def",
"to_dlpack_for_read",
"(",
"data",
")",
":",
"data",
".",
"wait_to_read",
"(",
")",
"dlpack",
"=",
"DLPackHandle",
"(",
")",
"check_call",
"(",
"_LIB",
".",
"MXNDArrayToDLPack",
"(",
"data",
".",
"handle",
",",
"ctypes",
".",
"byref",
"(",
"dlpack",
")",
")",
")",
"return",
"ctypes",
".",
"pythonapi",
".",
"PyCapsule_New",
"(",
"dlpack",
",",
"_c_str_dltensor",
",",
"_c_dlpack_deleter",
")"
] | Returns a reference view of NDArray that represents as DLManagedTensor until
all previous write operations on the current array are finished.
Parameters
----------
data: NDArray
input data.
Returns
-------
PyCapsule (the pointer of DLManagedTensor)
a reference view of NDArray that represents as DLManagedTensor.
Examples
--------
>>> x = mx.nd.ones((2,3))
>>> y = mx.nd.to_dlpack_for_read(x)
>>> type(y)
<class 'PyCapsule'>
>>> z = mx.nd.from_dlpack(y)
>>> z
[[1. 1. 1.]
[1. 1. 1.]]
<NDArray 2x3 @cpu(0)> | [
"Returns",
"a",
"reference",
"view",
"of",
"NDArray",
"that",
"represents",
"as",
"DLManagedTensor",
"until",
"all",
"previous",
"write",
"operations",
"on",
"the",
"current",
"array",
"are",
"finished",
"."
] | 1af29e9c060a4c7d60eeaacba32afdb9a7775ba7 | https://github.com/apache/incubator-mxnet/blob/1af29e9c060a4c7d60eeaacba32afdb9a7775ba7/python/mxnet/ndarray/ndarray.py#L4007-L4036 |
24,045 | apache/incubator-mxnet | python/mxnet/ndarray/ndarray.py | from_dlpack | def from_dlpack(dlpack):
"""Returns a NDArray backed by a dlpack tensor.
Parameters
----------
dlpack: PyCapsule (the pointer of DLManagedTensor)
input data
Returns
-------
NDArray
a NDArray backed by a dlpack tensor
Examples
--------
>>> x = mx.nd.ones((2,3))
>>> y = mx.nd.to_dlpack_for_read(x)
>>> type(y)
<class 'PyCapsule'>
>>> z = mx.nd.from_dlpack(y)
>>> type(z)
<class 'mxnet.ndarray.ndarray.NDArray'>
>>> z
[[ 1. 1. 1.]
[ 1. 1. 1.]]
<NDArray 2x3 @cpu(0)>
>>> w = mx.nd.to_dlpack_for_write(x)
>>> type(w)
<class 'PyCapsule'>
>>> u = mx.nd.from_dlpack(w)
>>> u += 1
>>> x
[[2. 2. 2.]
[2. 2. 2.]]
<NDArray 2x3 @cpu(0)>
"""
handle = NDArrayHandle()
dlpack = ctypes.py_object(dlpack)
assert ctypes.pythonapi.PyCapsule_IsValid(dlpack, _c_str_dltensor), ValueError(
'Invalid DLPack Tensor. DLTensor capsules can be consumed only once.')
dlpack_handle = ctypes.c_void_p(ctypes.pythonapi.PyCapsule_GetPointer(dlpack, _c_str_dltensor))
check_call(_LIB.MXNDArrayFromDLPack(dlpack_handle, ctypes.byref(handle)))
# Rename PyCapsule (DLPack)
ctypes.pythonapi.PyCapsule_SetName(dlpack, _c_str_used_dltensor)
# delete the deleter of the old dlpack
ctypes.pythonapi.PyCapsule_SetDestructor(dlpack, None)
return NDArray(handle=handle) | python | def from_dlpack(dlpack):
"""Returns a NDArray backed by a dlpack tensor.
Parameters
----------
dlpack: PyCapsule (the pointer of DLManagedTensor)
input data
Returns
-------
NDArray
a NDArray backed by a dlpack tensor
Examples
--------
>>> x = mx.nd.ones((2,3))
>>> y = mx.nd.to_dlpack_for_read(x)
>>> type(y)
<class 'PyCapsule'>
>>> z = mx.nd.from_dlpack(y)
>>> type(z)
<class 'mxnet.ndarray.ndarray.NDArray'>
>>> z
[[ 1. 1. 1.]
[ 1. 1. 1.]]
<NDArray 2x3 @cpu(0)>
>>> w = mx.nd.to_dlpack_for_write(x)
>>> type(w)
<class 'PyCapsule'>
>>> u = mx.nd.from_dlpack(w)
>>> u += 1
>>> x
[[2. 2. 2.]
[2. 2. 2.]]
<NDArray 2x3 @cpu(0)>
"""
handle = NDArrayHandle()
dlpack = ctypes.py_object(dlpack)
assert ctypes.pythonapi.PyCapsule_IsValid(dlpack, _c_str_dltensor), ValueError(
'Invalid DLPack Tensor. DLTensor capsules can be consumed only once.')
dlpack_handle = ctypes.c_void_p(ctypes.pythonapi.PyCapsule_GetPointer(dlpack, _c_str_dltensor))
check_call(_LIB.MXNDArrayFromDLPack(dlpack_handle, ctypes.byref(handle)))
# Rename PyCapsule (DLPack)
ctypes.pythonapi.PyCapsule_SetName(dlpack, _c_str_used_dltensor)
# delete the deleter of the old dlpack
ctypes.pythonapi.PyCapsule_SetDestructor(dlpack, None)
return NDArray(handle=handle) | [
"def",
"from_dlpack",
"(",
"dlpack",
")",
":",
"handle",
"=",
"NDArrayHandle",
"(",
")",
"dlpack",
"=",
"ctypes",
".",
"py_object",
"(",
"dlpack",
")",
"assert",
"ctypes",
".",
"pythonapi",
".",
"PyCapsule_IsValid",
"(",
"dlpack",
",",
"_c_str_dltensor",
")",
",",
"ValueError",
"(",
"'Invalid DLPack Tensor. DLTensor capsules can be consumed only once.'",
")",
"dlpack_handle",
"=",
"ctypes",
".",
"c_void_p",
"(",
"ctypes",
".",
"pythonapi",
".",
"PyCapsule_GetPointer",
"(",
"dlpack",
",",
"_c_str_dltensor",
")",
")",
"check_call",
"(",
"_LIB",
".",
"MXNDArrayFromDLPack",
"(",
"dlpack_handle",
",",
"ctypes",
".",
"byref",
"(",
"handle",
")",
")",
")",
"# Rename PyCapsule (DLPack)",
"ctypes",
".",
"pythonapi",
".",
"PyCapsule_SetName",
"(",
"dlpack",
",",
"_c_str_used_dltensor",
")",
"# delete the deleter of the old dlpack",
"ctypes",
".",
"pythonapi",
".",
"PyCapsule_SetDestructor",
"(",
"dlpack",
",",
"None",
")",
"return",
"NDArray",
"(",
"handle",
"=",
"handle",
")"
] | Returns a NDArray backed by a dlpack tensor.
Parameters
----------
dlpack: PyCapsule (the pointer of DLManagedTensor)
input data
Returns
-------
NDArray
a NDArray backed by a dlpack tensor
Examples
--------
>>> x = mx.nd.ones((2,3))
>>> y = mx.nd.to_dlpack_for_read(x)
>>> type(y)
<class 'PyCapsule'>
>>> z = mx.nd.from_dlpack(y)
>>> type(z)
<class 'mxnet.ndarray.ndarray.NDArray'>
>>> z
[[ 1. 1. 1.]
[ 1. 1. 1.]]
<NDArray 2x3 @cpu(0)>
>>> w = mx.nd.to_dlpack_for_write(x)
>>> type(w)
<class 'PyCapsule'>
>>> u = mx.nd.from_dlpack(w)
>>> u += 1
>>> x
[[2. 2. 2.]
[2. 2. 2.]]
<NDArray 2x3 @cpu(0)> | [
"Returns",
"a",
"NDArray",
"backed",
"by",
"a",
"dlpack",
"tensor",
"."
] | 1af29e9c060a4c7d60eeaacba32afdb9a7775ba7 | https://github.com/apache/incubator-mxnet/blob/1af29e9c060a4c7d60eeaacba32afdb9a7775ba7/python/mxnet/ndarray/ndarray.py#L4070-L4117 |
24,046 | apache/incubator-mxnet | python/mxnet/ndarray/ndarray.py | from_numpy | def from_numpy(ndarray, zero_copy=True):
"""Returns an MXNet's NDArray backed by Numpy's ndarray.
Parameters
----------
ndarray: numpy.ndarray
input data
zero_copy: bool
Whether we use DLPack's zero-copy conversion to convert to MXNet's NDArray.
This is only available for c-contiguous arrays, i.e. array.flags[C_CONTIGUOUS] == True.
Returns
-------
NDArray
a NDArray backed by a dlpack tensor
"""
def _make_manager_ctx(obj):
pyobj = ctypes.py_object(obj)
void_p = ctypes.c_void_p.from_buffer(pyobj)
ctypes.pythonapi.Py_IncRef(pyobj)
return void_p
def _make_dl_tensor(array):
if str(array.dtype) not in DLDataType.TYPE_MAP:
raise ValueError(str(array.dtype) + " is not supported.")
dl_tensor = DLTensor()
dl_tensor.data = array.ctypes.data_as(ctypes.c_void_p)
dl_tensor.ctx = DLContext(1, 0)
dl_tensor.ndim = array.ndim
dl_tensor.dtype = DLDataType.TYPE_MAP[str(array.dtype)]
dl_tensor.shape = array.ctypes.shape_as(ctypes.c_int64)
dl_tensor.strides = None
dl_tensor.byte_offset = 0
return dl_tensor
def _make_dl_managed_tensor(array):
c_obj = DLManagedTensor()
c_obj.dl_tensor = _make_dl_tensor(array)
c_obj.manager_ctx = _make_manager_ctx(array)
c_obj.deleter = dl_managed_tensor_deleter
return c_obj
if not zero_copy:
return array(ndarray, dtype=ndarray.dtype)
if not ndarray.flags['C_CONTIGUOUS']:
raise ValueError("Only c-contiguous arrays are supported for zero-copy")
c_obj = _make_dl_managed_tensor(ndarray)
address = ctypes.addressof(c_obj)
address = ctypes.cast(address, ctypes.c_void_p)
handle = NDArrayHandle()
check_call(_LIB.MXNDArrayFromDLPack(address, ctypes.byref(handle)))
return NDArray(handle=handle) | python | def from_numpy(ndarray, zero_copy=True):
"""Returns an MXNet's NDArray backed by Numpy's ndarray.
Parameters
----------
ndarray: numpy.ndarray
input data
zero_copy: bool
Whether we use DLPack's zero-copy conversion to convert to MXNet's NDArray.
This is only available for c-contiguous arrays, i.e. array.flags[C_CONTIGUOUS] == True.
Returns
-------
NDArray
a NDArray backed by a dlpack tensor
"""
def _make_manager_ctx(obj):
pyobj = ctypes.py_object(obj)
void_p = ctypes.c_void_p.from_buffer(pyobj)
ctypes.pythonapi.Py_IncRef(pyobj)
return void_p
def _make_dl_tensor(array):
if str(array.dtype) not in DLDataType.TYPE_MAP:
raise ValueError(str(array.dtype) + " is not supported.")
dl_tensor = DLTensor()
dl_tensor.data = array.ctypes.data_as(ctypes.c_void_p)
dl_tensor.ctx = DLContext(1, 0)
dl_tensor.ndim = array.ndim
dl_tensor.dtype = DLDataType.TYPE_MAP[str(array.dtype)]
dl_tensor.shape = array.ctypes.shape_as(ctypes.c_int64)
dl_tensor.strides = None
dl_tensor.byte_offset = 0
return dl_tensor
def _make_dl_managed_tensor(array):
c_obj = DLManagedTensor()
c_obj.dl_tensor = _make_dl_tensor(array)
c_obj.manager_ctx = _make_manager_ctx(array)
c_obj.deleter = dl_managed_tensor_deleter
return c_obj
if not zero_copy:
return array(ndarray, dtype=ndarray.dtype)
if not ndarray.flags['C_CONTIGUOUS']:
raise ValueError("Only c-contiguous arrays are supported for zero-copy")
c_obj = _make_dl_managed_tensor(ndarray)
address = ctypes.addressof(c_obj)
address = ctypes.cast(address, ctypes.c_void_p)
handle = NDArrayHandle()
check_call(_LIB.MXNDArrayFromDLPack(address, ctypes.byref(handle)))
return NDArray(handle=handle) | [
"def",
"from_numpy",
"(",
"ndarray",
",",
"zero_copy",
"=",
"True",
")",
":",
"def",
"_make_manager_ctx",
"(",
"obj",
")",
":",
"pyobj",
"=",
"ctypes",
".",
"py_object",
"(",
"obj",
")",
"void_p",
"=",
"ctypes",
".",
"c_void_p",
".",
"from_buffer",
"(",
"pyobj",
")",
"ctypes",
".",
"pythonapi",
".",
"Py_IncRef",
"(",
"pyobj",
")",
"return",
"void_p",
"def",
"_make_dl_tensor",
"(",
"array",
")",
":",
"if",
"str",
"(",
"array",
".",
"dtype",
")",
"not",
"in",
"DLDataType",
".",
"TYPE_MAP",
":",
"raise",
"ValueError",
"(",
"str",
"(",
"array",
".",
"dtype",
")",
"+",
"\" is not supported.\"",
")",
"dl_tensor",
"=",
"DLTensor",
"(",
")",
"dl_tensor",
".",
"data",
"=",
"array",
".",
"ctypes",
".",
"data_as",
"(",
"ctypes",
".",
"c_void_p",
")",
"dl_tensor",
".",
"ctx",
"=",
"DLContext",
"(",
"1",
",",
"0",
")",
"dl_tensor",
".",
"ndim",
"=",
"array",
".",
"ndim",
"dl_tensor",
".",
"dtype",
"=",
"DLDataType",
".",
"TYPE_MAP",
"[",
"str",
"(",
"array",
".",
"dtype",
")",
"]",
"dl_tensor",
".",
"shape",
"=",
"array",
".",
"ctypes",
".",
"shape_as",
"(",
"ctypes",
".",
"c_int64",
")",
"dl_tensor",
".",
"strides",
"=",
"None",
"dl_tensor",
".",
"byte_offset",
"=",
"0",
"return",
"dl_tensor",
"def",
"_make_dl_managed_tensor",
"(",
"array",
")",
":",
"c_obj",
"=",
"DLManagedTensor",
"(",
")",
"c_obj",
".",
"dl_tensor",
"=",
"_make_dl_tensor",
"(",
"array",
")",
"c_obj",
".",
"manager_ctx",
"=",
"_make_manager_ctx",
"(",
"array",
")",
"c_obj",
".",
"deleter",
"=",
"dl_managed_tensor_deleter",
"return",
"c_obj",
"if",
"not",
"zero_copy",
":",
"return",
"array",
"(",
"ndarray",
",",
"dtype",
"=",
"ndarray",
".",
"dtype",
")",
"if",
"not",
"ndarray",
".",
"flags",
"[",
"'C_CONTIGUOUS'",
"]",
":",
"raise",
"ValueError",
"(",
"\"Only c-contiguous arrays are supported for zero-copy\"",
")",
"c_obj",
"=",
"_make_dl_managed_tensor",
"(",
"ndarray",
")",
"address",
"=",
"ctypes",
".",
"addressof",
"(",
"c_obj",
")",
"address",
"=",
"ctypes",
".",
"cast",
"(",
"address",
",",
"ctypes",
".",
"c_void_p",
")",
"handle",
"=",
"NDArrayHandle",
"(",
")",
"check_call",
"(",
"_LIB",
".",
"MXNDArrayFromDLPack",
"(",
"address",
",",
"ctypes",
".",
"byref",
"(",
"handle",
")",
")",
")",
"return",
"NDArray",
"(",
"handle",
"=",
"handle",
")"
] | Returns an MXNet's NDArray backed by Numpy's ndarray.
Parameters
----------
ndarray: numpy.ndarray
input data
zero_copy: bool
Whether we use DLPack's zero-copy conversion to convert to MXNet's NDArray.
This is only available for c-contiguous arrays, i.e. array.flags[C_CONTIGUOUS] == True.
Returns
-------
NDArray
a NDArray backed by a dlpack tensor | [
"Returns",
"an",
"MXNet",
"s",
"NDArray",
"backed",
"by",
"Numpy",
"s",
"ndarray",
"."
] | 1af29e9c060a4c7d60eeaacba32afdb9a7775ba7 | https://github.com/apache/incubator-mxnet/blob/1af29e9c060a4c7d60eeaacba32afdb9a7775ba7/python/mxnet/ndarray/ndarray.py#L4167-L4222 |
24,047 | apache/incubator-mxnet | python/mxnet/ndarray/ndarray.py | NDArray._prepare_value_nd | def _prepare_value_nd(self, value, vshape):
"""Given value and vshape, create an `NDArray` from value with the same
context and dtype as the current one and broadcast it to vshape."""
if isinstance(value, numeric_types):
value_nd = full(shape=vshape, val=value, ctx=self.context, dtype=self.dtype)
elif isinstance(value, NDArray):
value_nd = value.as_in_context(self.context)
if value_nd.dtype != self.dtype:
value_nd = value_nd.astype(self.dtype)
else:
try:
value_nd = array(value, ctx=self.context, dtype=self.dtype)
except:
raise TypeError('NDArray does not support assignment with non-array-like'
' object %s of type %s' % (str(value), str(type(value))))
if value_nd.shape != vshape:
value_nd = value_nd.broadcast_to(vshape)
return value_nd | python | def _prepare_value_nd(self, value, vshape):
"""Given value and vshape, create an `NDArray` from value with the same
context and dtype as the current one and broadcast it to vshape."""
if isinstance(value, numeric_types):
value_nd = full(shape=vshape, val=value, ctx=self.context, dtype=self.dtype)
elif isinstance(value, NDArray):
value_nd = value.as_in_context(self.context)
if value_nd.dtype != self.dtype:
value_nd = value_nd.astype(self.dtype)
else:
try:
value_nd = array(value, ctx=self.context, dtype=self.dtype)
except:
raise TypeError('NDArray does not support assignment with non-array-like'
' object %s of type %s' % (str(value), str(type(value))))
if value_nd.shape != vshape:
value_nd = value_nd.broadcast_to(vshape)
return value_nd | [
"def",
"_prepare_value_nd",
"(",
"self",
",",
"value",
",",
"vshape",
")",
":",
"if",
"isinstance",
"(",
"value",
",",
"numeric_types",
")",
":",
"value_nd",
"=",
"full",
"(",
"shape",
"=",
"vshape",
",",
"val",
"=",
"value",
",",
"ctx",
"=",
"self",
".",
"context",
",",
"dtype",
"=",
"self",
".",
"dtype",
")",
"elif",
"isinstance",
"(",
"value",
",",
"NDArray",
")",
":",
"value_nd",
"=",
"value",
".",
"as_in_context",
"(",
"self",
".",
"context",
")",
"if",
"value_nd",
".",
"dtype",
"!=",
"self",
".",
"dtype",
":",
"value_nd",
"=",
"value_nd",
".",
"astype",
"(",
"self",
".",
"dtype",
")",
"else",
":",
"try",
":",
"value_nd",
"=",
"array",
"(",
"value",
",",
"ctx",
"=",
"self",
".",
"context",
",",
"dtype",
"=",
"self",
".",
"dtype",
")",
"except",
":",
"raise",
"TypeError",
"(",
"'NDArray does not support assignment with non-array-like'",
"' object %s of type %s'",
"%",
"(",
"str",
"(",
"value",
")",
",",
"str",
"(",
"type",
"(",
"value",
")",
")",
")",
")",
"if",
"value_nd",
".",
"shape",
"!=",
"vshape",
":",
"value_nd",
"=",
"value_nd",
".",
"broadcast_to",
"(",
"vshape",
")",
"return",
"value_nd"
] | Given value and vshape, create an `NDArray` from value with the same
context and dtype as the current one and broadcast it to vshape. | [
"Given",
"value",
"and",
"vshape",
"create",
"an",
"NDArray",
"from",
"value",
"with",
"the",
"same",
"context",
"and",
"dtype",
"as",
"the",
"current",
"one",
"and",
"broadcast",
"it",
"to",
"vshape",
"."
] | 1af29e9c060a4c7d60eeaacba32afdb9a7775ba7 | https://github.com/apache/incubator-mxnet/blob/1af29e9c060a4c7d60eeaacba32afdb9a7775ba7/python/mxnet/ndarray/ndarray.py#L664-L681 |
24,048 | apache/incubator-mxnet | python/mxnet/ndarray/ndarray.py | NDArray.broadcast_to | def broadcast_to(self, shape):
"""Broadcasts the input array to a new shape.
Broadcasting is only allowed on axes with size 1. The new shape cannot change
the number of dimensions.
For example, you could broadcast from shape (2, 1) to (2, 3), but not from
shape (2, 3) to (2, 3, 3).
Parameters
----------
shape : tuple of int
The shape of the desired array.
Returns
-------
NDArray
A NDArray with the desired shape that is not sharing data with this
array, even if the new shape is the same as ``self.shape``.
Examples
--------
>>> x = mx.nd.arange(0,3).reshape((1,3,1))
>>> x.asnumpy()
array([[[ 0.],
[ 1.],
[ 2.]]], dtype=float32)
>>> y = x.broadcast_to((2,3,3))
>>> y.asnumpy()
array([[[ 0., 0., 0.],
[ 1., 1., 1.],
[ 2., 2., 2.]],
<BLANKLINE>
[[ 0., 0., 0.],
[ 1., 1., 1.],
[ 2., 2., 2.]]], dtype=float32)
"""
cur_shape = self.shape
err_str = 'operands could not be broadcast together with remapped shapes' \
'[original->remapped]: {} and requested shape {}'.format(cur_shape, shape)
if len(shape) < len(cur_shape):
raise ValueError(err_str)
cur_shape = (1,) * (len(shape) - len(cur_shape)) + cur_shape
cur_shape_arr = np.array(cur_shape)
broadcasting_axes = np.nonzero(cur_shape_arr != np.array(shape))
if (cur_shape_arr[broadcasting_axes] != 1).any():
raise ValueError(err_str)
if cur_shape != self.shape:
return op.broadcast_to(self.reshape(cur_shape), shape=shape)
else:
return op.broadcast_to(self, shape=tuple(shape)) | python | def broadcast_to(self, shape):
"""Broadcasts the input array to a new shape.
Broadcasting is only allowed on axes with size 1. The new shape cannot change
the number of dimensions.
For example, you could broadcast from shape (2, 1) to (2, 3), but not from
shape (2, 3) to (2, 3, 3).
Parameters
----------
shape : tuple of int
The shape of the desired array.
Returns
-------
NDArray
A NDArray with the desired shape that is not sharing data with this
array, even if the new shape is the same as ``self.shape``.
Examples
--------
>>> x = mx.nd.arange(0,3).reshape((1,3,1))
>>> x.asnumpy()
array([[[ 0.],
[ 1.],
[ 2.]]], dtype=float32)
>>> y = x.broadcast_to((2,3,3))
>>> y.asnumpy()
array([[[ 0., 0., 0.],
[ 1., 1., 1.],
[ 2., 2., 2.]],
<BLANKLINE>
[[ 0., 0., 0.],
[ 1., 1., 1.],
[ 2., 2., 2.]]], dtype=float32)
"""
cur_shape = self.shape
err_str = 'operands could not be broadcast together with remapped shapes' \
'[original->remapped]: {} and requested shape {}'.format(cur_shape, shape)
if len(shape) < len(cur_shape):
raise ValueError(err_str)
cur_shape = (1,) * (len(shape) - len(cur_shape)) + cur_shape
cur_shape_arr = np.array(cur_shape)
broadcasting_axes = np.nonzero(cur_shape_arr != np.array(shape))
if (cur_shape_arr[broadcasting_axes] != 1).any():
raise ValueError(err_str)
if cur_shape != self.shape:
return op.broadcast_to(self.reshape(cur_shape), shape=shape)
else:
return op.broadcast_to(self, shape=tuple(shape)) | [
"def",
"broadcast_to",
"(",
"self",
",",
"shape",
")",
":",
"cur_shape",
"=",
"self",
".",
"shape",
"err_str",
"=",
"'operands could not be broadcast together with remapped shapes'",
"'[original->remapped]: {} and requested shape {}'",
".",
"format",
"(",
"cur_shape",
",",
"shape",
")",
"if",
"len",
"(",
"shape",
")",
"<",
"len",
"(",
"cur_shape",
")",
":",
"raise",
"ValueError",
"(",
"err_str",
")",
"cur_shape",
"=",
"(",
"1",
",",
")",
"*",
"(",
"len",
"(",
"shape",
")",
"-",
"len",
"(",
"cur_shape",
")",
")",
"+",
"cur_shape",
"cur_shape_arr",
"=",
"np",
".",
"array",
"(",
"cur_shape",
")",
"broadcasting_axes",
"=",
"np",
".",
"nonzero",
"(",
"cur_shape_arr",
"!=",
"np",
".",
"array",
"(",
"shape",
")",
")",
"if",
"(",
"cur_shape_arr",
"[",
"broadcasting_axes",
"]",
"!=",
"1",
")",
".",
"any",
"(",
")",
":",
"raise",
"ValueError",
"(",
"err_str",
")",
"if",
"cur_shape",
"!=",
"self",
".",
"shape",
":",
"return",
"op",
".",
"broadcast_to",
"(",
"self",
".",
"reshape",
"(",
"cur_shape",
")",
",",
"shape",
"=",
"shape",
")",
"else",
":",
"return",
"op",
".",
"broadcast_to",
"(",
"self",
",",
"shape",
"=",
"tuple",
"(",
"shape",
")",
")"
] | Broadcasts the input array to a new shape.
Broadcasting is only allowed on axes with size 1. The new shape cannot change
the number of dimensions.
For example, you could broadcast from shape (2, 1) to (2, 3), but not from
shape (2, 3) to (2, 3, 3).
Parameters
----------
shape : tuple of int
The shape of the desired array.
Returns
-------
NDArray
A NDArray with the desired shape that is not sharing data with this
array, even if the new shape is the same as ``self.shape``.
Examples
--------
>>> x = mx.nd.arange(0,3).reshape((1,3,1))
>>> x.asnumpy()
array([[[ 0.],
[ 1.],
[ 2.]]], dtype=float32)
>>> y = x.broadcast_to((2,3,3))
>>> y.asnumpy()
array([[[ 0., 0., 0.],
[ 1., 1., 1.],
[ 2., 2., 2.]],
<BLANKLINE>
[[ 0., 0., 0.],
[ 1., 1., 1.],
[ 2., 2., 2.]]], dtype=float32) | [
"Broadcasts",
"the",
"input",
"array",
"to",
"a",
"new",
"shape",
"."
] | 1af29e9c060a4c7d60eeaacba32afdb9a7775ba7 | https://github.com/apache/incubator-mxnet/blob/1af29e9c060a4c7d60eeaacba32afdb9a7775ba7/python/mxnet/ndarray/ndarray.py#L1710-L1759 |
24,049 | apache/incubator-mxnet | python/mxnet/ndarray/ndarray.py | NDArray.shape | def shape(self):
"""Tuple of array dimensions.
Examples
--------
>>> x = mx.nd.array([1, 2, 3, 4])
>>> x.shape
(4L,)
>>> y = mx.nd.zeros((2, 3, 4))
>>> y.shape
(2L, 3L, 4L)
"""
ndim = mx_int()
pdata = ctypes.POINTER(mx_int)()
check_call(_LIB.MXNDArrayGetShapeEx(
self.handle, ctypes.byref(ndim), ctypes.byref(pdata)))
if ndim.value == -1:
return None
else:
return tuple(pdata[:ndim.value]) | python | def shape(self):
"""Tuple of array dimensions.
Examples
--------
>>> x = mx.nd.array([1, 2, 3, 4])
>>> x.shape
(4L,)
>>> y = mx.nd.zeros((2, 3, 4))
>>> y.shape
(2L, 3L, 4L)
"""
ndim = mx_int()
pdata = ctypes.POINTER(mx_int)()
check_call(_LIB.MXNDArrayGetShapeEx(
self.handle, ctypes.byref(ndim), ctypes.byref(pdata)))
if ndim.value == -1:
return None
else:
return tuple(pdata[:ndim.value]) | [
"def",
"shape",
"(",
"self",
")",
":",
"ndim",
"=",
"mx_int",
"(",
")",
"pdata",
"=",
"ctypes",
".",
"POINTER",
"(",
"mx_int",
")",
"(",
")",
"check_call",
"(",
"_LIB",
".",
"MXNDArrayGetShapeEx",
"(",
"self",
".",
"handle",
",",
"ctypes",
".",
"byref",
"(",
"ndim",
")",
",",
"ctypes",
".",
"byref",
"(",
"pdata",
")",
")",
")",
"if",
"ndim",
".",
"value",
"==",
"-",
"1",
":",
"return",
"None",
"else",
":",
"return",
"tuple",
"(",
"pdata",
"[",
":",
"ndim",
".",
"value",
"]",
")"
] | Tuple of array dimensions.
Examples
--------
>>> x = mx.nd.array([1, 2, 3, 4])
>>> x.shape
(4L,)
>>> y = mx.nd.zeros((2, 3, 4))
>>> y.shape
(2L, 3L, 4L) | [
"Tuple",
"of",
"array",
"dimensions",
"."
] | 1af29e9c060a4c7d60eeaacba32afdb9a7775ba7 | https://github.com/apache/incubator-mxnet/blob/1af29e9c060a4c7d60eeaacba32afdb9a7775ba7/python/mxnet/ndarray/ndarray.py#L1836-L1855 |
24,050 | apache/incubator-mxnet | python/mxnet/ndarray/ndarray.py | NDArray.context | def context(self):
"""Device context of the array.
Examples
--------
>>> x = mx.nd.array([1, 2, 3, 4])
>>> x.context
cpu(0)
>>> type(x.context)
<class 'mxnet.context.Context'>
>>> y = mx.nd.zeros((2,3), mx.gpu(0))
>>> y.context
gpu(0)
"""
dev_typeid = ctypes.c_int()
dev_id = ctypes.c_int()
check_call(_LIB.MXNDArrayGetContext(
self.handle, ctypes.byref(dev_typeid), ctypes.byref(dev_id)))
return Context(Context.devtype2str[dev_typeid.value], dev_id.value) | python | def context(self):
"""Device context of the array.
Examples
--------
>>> x = mx.nd.array([1, 2, 3, 4])
>>> x.context
cpu(0)
>>> type(x.context)
<class 'mxnet.context.Context'>
>>> y = mx.nd.zeros((2,3), mx.gpu(0))
>>> y.context
gpu(0)
"""
dev_typeid = ctypes.c_int()
dev_id = ctypes.c_int()
check_call(_LIB.MXNDArrayGetContext(
self.handle, ctypes.byref(dev_typeid), ctypes.byref(dev_id)))
return Context(Context.devtype2str[dev_typeid.value], dev_id.value) | [
"def",
"context",
"(",
"self",
")",
":",
"dev_typeid",
"=",
"ctypes",
".",
"c_int",
"(",
")",
"dev_id",
"=",
"ctypes",
".",
"c_int",
"(",
")",
"check_call",
"(",
"_LIB",
".",
"MXNDArrayGetContext",
"(",
"self",
".",
"handle",
",",
"ctypes",
".",
"byref",
"(",
"dev_typeid",
")",
",",
"ctypes",
".",
"byref",
"(",
"dev_id",
")",
")",
")",
"return",
"Context",
"(",
"Context",
".",
"devtype2str",
"[",
"dev_typeid",
".",
"value",
"]",
",",
"dev_id",
".",
"value",
")"
] | Device context of the array.
Examples
--------
>>> x = mx.nd.array([1, 2, 3, 4])
>>> x.context
cpu(0)
>>> type(x.context)
<class 'mxnet.context.Context'>
>>> y = mx.nd.zeros((2,3), mx.gpu(0))
>>> y.context
gpu(0) | [
"Device",
"context",
"of",
"the",
"array",
"."
] | 1af29e9c060a4c7d60eeaacba32afdb9a7775ba7 | https://github.com/apache/incubator-mxnet/blob/1af29e9c060a4c7d60eeaacba32afdb9a7775ba7/python/mxnet/ndarray/ndarray.py#L1879-L1897 |
24,051 | apache/incubator-mxnet | python/mxnet/ndarray/ndarray.py | NDArray.dtype | def dtype(self):
"""Data-type of the array's elements.
Returns
-------
numpy.dtype
This NDArray's data type.
Examples
--------
>>> x = mx.nd.zeros((2,3))
>>> x.dtype
<type 'numpy.float32'>
>>> y = mx.nd.zeros((2,3), dtype='int32')
>>> y.dtype
<type 'numpy.int32'>
"""
mx_dtype = ctypes.c_int()
check_call(_LIB.MXNDArrayGetDType(
self.handle, ctypes.byref(mx_dtype)))
return _DTYPE_MX_TO_NP[mx_dtype.value] | python | def dtype(self):
"""Data-type of the array's elements.
Returns
-------
numpy.dtype
This NDArray's data type.
Examples
--------
>>> x = mx.nd.zeros((2,3))
>>> x.dtype
<type 'numpy.float32'>
>>> y = mx.nd.zeros((2,3), dtype='int32')
>>> y.dtype
<type 'numpy.int32'>
"""
mx_dtype = ctypes.c_int()
check_call(_LIB.MXNDArrayGetDType(
self.handle, ctypes.byref(mx_dtype)))
return _DTYPE_MX_TO_NP[mx_dtype.value] | [
"def",
"dtype",
"(",
"self",
")",
":",
"mx_dtype",
"=",
"ctypes",
".",
"c_int",
"(",
")",
"check_call",
"(",
"_LIB",
".",
"MXNDArrayGetDType",
"(",
"self",
".",
"handle",
",",
"ctypes",
".",
"byref",
"(",
"mx_dtype",
")",
")",
")",
"return",
"_DTYPE_MX_TO_NP",
"[",
"mx_dtype",
".",
"value",
"]"
] | Data-type of the array's elements.
Returns
-------
numpy.dtype
This NDArray's data type.
Examples
--------
>>> x = mx.nd.zeros((2,3))
>>> x.dtype
<type 'numpy.float32'>
>>> y = mx.nd.zeros((2,3), dtype='int32')
>>> y.dtype
<type 'numpy.int32'> | [
"Data",
"-",
"type",
"of",
"the",
"array",
"s",
"elements",
"."
] | 1af29e9c060a4c7d60eeaacba32afdb9a7775ba7 | https://github.com/apache/incubator-mxnet/blob/1af29e9c060a4c7d60eeaacba32afdb9a7775ba7/python/mxnet/ndarray/ndarray.py#L1900-L1920 |
24,052 | apache/incubator-mxnet | python/mxnet/ndarray/ndarray.py | NDArray.asnumpy | def asnumpy(self):
"""Returns a ``numpy.ndarray`` object with value copied from this array.
Examples
--------
>>> x = mx.nd.ones((2,3))
>>> y = x.asnumpy()
>>> type(y)
<type 'numpy.ndarray'>
>>> y
array([[ 1., 1., 1.],
[ 1., 1., 1.]], dtype=float32)
>>> z = mx.nd.ones((2,3), dtype='int32')
>>> z.asnumpy()
array([[1, 1, 1],
[1, 1, 1]], dtype=int32)
"""
data = np.empty(self.shape, dtype=self.dtype)
check_call(_LIB.MXNDArraySyncCopyToCPU(
self.handle,
data.ctypes.data_as(ctypes.c_void_p),
ctypes.c_size_t(data.size)))
return data | python | def asnumpy(self):
"""Returns a ``numpy.ndarray`` object with value copied from this array.
Examples
--------
>>> x = mx.nd.ones((2,3))
>>> y = x.asnumpy()
>>> type(y)
<type 'numpy.ndarray'>
>>> y
array([[ 1., 1., 1.],
[ 1., 1., 1.]], dtype=float32)
>>> z = mx.nd.ones((2,3), dtype='int32')
>>> z.asnumpy()
array([[1, 1, 1],
[1, 1, 1]], dtype=int32)
"""
data = np.empty(self.shape, dtype=self.dtype)
check_call(_LIB.MXNDArraySyncCopyToCPU(
self.handle,
data.ctypes.data_as(ctypes.c_void_p),
ctypes.c_size_t(data.size)))
return data | [
"def",
"asnumpy",
"(",
"self",
")",
":",
"data",
"=",
"np",
".",
"empty",
"(",
"self",
".",
"shape",
",",
"dtype",
"=",
"self",
".",
"dtype",
")",
"check_call",
"(",
"_LIB",
".",
"MXNDArraySyncCopyToCPU",
"(",
"self",
".",
"handle",
",",
"data",
".",
"ctypes",
".",
"data_as",
"(",
"ctypes",
".",
"c_void_p",
")",
",",
"ctypes",
".",
"c_size_t",
"(",
"data",
".",
"size",
")",
")",
")",
"return",
"data"
] | Returns a ``numpy.ndarray`` object with value copied from this array.
Examples
--------
>>> x = mx.nd.ones((2,3))
>>> y = x.asnumpy()
>>> type(y)
<type 'numpy.ndarray'>
>>> y
array([[ 1., 1., 1.],
[ 1., 1., 1.]], dtype=float32)
>>> z = mx.nd.ones((2,3), dtype='int32')
>>> z.asnumpy()
array([[1, 1, 1],
[1, 1, 1]], dtype=int32) | [
"Returns",
"a",
"numpy",
".",
"ndarray",
"object",
"with",
"value",
"copied",
"from",
"this",
"array",
"."
] | 1af29e9c060a4c7d60eeaacba32afdb9a7775ba7 | https://github.com/apache/incubator-mxnet/blob/1af29e9c060a4c7d60eeaacba32afdb9a7775ba7/python/mxnet/ndarray/ndarray.py#L1974-L1996 |
24,053 | apache/incubator-mxnet | python/mxnet/ndarray/ndarray.py | NDArray.astype | def astype(self, dtype, copy=True):
"""Returns a copy of the array after casting to a specified type.
Parameters
----------
dtype : numpy.dtype or str
The type of the returned array.
copy : bool
Default `True`. By default, astype always returns a newly
allocated ndarray on the same context. If this is set to
`False`, and the dtype requested is the same as the ndarray's
dtype, the ndarray is returned instead of a copy.
Returns
-------
NDArray, CSRNDArray or RowSparseNDArray
The copied array after casting to the specified type, or
the same array if copy=False and dtype is the same as the input
array.
Examples
--------
>>> x = mx.nd.zeros((2,3), dtype='float32')
>>> y = x.astype('int32')
>>> y.dtype
<type 'numpy.int32'>
"""
if not copy and np.dtype(dtype) == self.dtype:
return self
res = empty(self.shape, ctx=self.context, dtype=dtype)
self.copyto(res)
return res | python | def astype(self, dtype, copy=True):
"""Returns a copy of the array after casting to a specified type.
Parameters
----------
dtype : numpy.dtype or str
The type of the returned array.
copy : bool
Default `True`. By default, astype always returns a newly
allocated ndarray on the same context. If this is set to
`False`, and the dtype requested is the same as the ndarray's
dtype, the ndarray is returned instead of a copy.
Returns
-------
NDArray, CSRNDArray or RowSparseNDArray
The copied array after casting to the specified type, or
the same array if copy=False and dtype is the same as the input
array.
Examples
--------
>>> x = mx.nd.zeros((2,3), dtype='float32')
>>> y = x.astype('int32')
>>> y.dtype
<type 'numpy.int32'>
"""
if not copy and np.dtype(dtype) == self.dtype:
return self
res = empty(self.shape, ctx=self.context, dtype=dtype)
self.copyto(res)
return res | [
"def",
"astype",
"(",
"self",
",",
"dtype",
",",
"copy",
"=",
"True",
")",
":",
"if",
"not",
"copy",
"and",
"np",
".",
"dtype",
"(",
"dtype",
")",
"==",
"self",
".",
"dtype",
":",
"return",
"self",
"res",
"=",
"empty",
"(",
"self",
".",
"shape",
",",
"ctx",
"=",
"self",
".",
"context",
",",
"dtype",
"=",
"dtype",
")",
"self",
".",
"copyto",
"(",
"res",
")",
"return",
"res"
] | Returns a copy of the array after casting to a specified type.
Parameters
----------
dtype : numpy.dtype or str
The type of the returned array.
copy : bool
Default `True`. By default, astype always returns a newly
allocated ndarray on the same context. If this is set to
`False`, and the dtype requested is the same as the ndarray's
dtype, the ndarray is returned instead of a copy.
Returns
-------
NDArray, CSRNDArray or RowSparseNDArray
The copied array after casting to the specified type, or
the same array if copy=False and dtype is the same as the input
array.
Examples
--------
>>> x = mx.nd.zeros((2,3), dtype='float32')
>>> y = x.astype('int32')
>>> y.dtype
<type 'numpy.int32'> | [
"Returns",
"a",
"copy",
"of",
"the",
"array",
"after",
"casting",
"to",
"a",
"specified",
"type",
"."
] | 1af29e9c060a4c7d60eeaacba32afdb9a7775ba7 | https://github.com/apache/incubator-mxnet/blob/1af29e9c060a4c7d60eeaacba32afdb9a7775ba7/python/mxnet/ndarray/ndarray.py#L2015-L2048 |
24,054 | apache/incubator-mxnet | python/mxnet/ndarray/ndarray.py | NDArray.as_in_context | def as_in_context(self, context):
"""Returns an array on the target device with the same value as this array.
If the target context is the same as ``self.context``, then ``self`` is
returned. Otherwise, a copy is made.
Parameters
----------
context : Context
The target context.
Returns
-------
NDArray, CSRNDArray or RowSparseNDArray
The target array.
Examples
--------
>>> x = mx.nd.ones((2,3))
>>> y = x.as_in_context(mx.cpu())
>>> y is x
True
>>> z = x.as_in_context(mx.gpu(0))
>>> z is x
False
"""
if self.context == context:
return self
return self.copyto(context) | python | def as_in_context(self, context):
"""Returns an array on the target device with the same value as this array.
If the target context is the same as ``self.context``, then ``self`` is
returned. Otherwise, a copy is made.
Parameters
----------
context : Context
The target context.
Returns
-------
NDArray, CSRNDArray or RowSparseNDArray
The target array.
Examples
--------
>>> x = mx.nd.ones((2,3))
>>> y = x.as_in_context(mx.cpu())
>>> y is x
True
>>> z = x.as_in_context(mx.gpu(0))
>>> z is x
False
"""
if self.context == context:
return self
return self.copyto(context) | [
"def",
"as_in_context",
"(",
"self",
",",
"context",
")",
":",
"if",
"self",
".",
"context",
"==",
"context",
":",
"return",
"self",
"return",
"self",
".",
"copyto",
"(",
"context",
")"
] | Returns an array on the target device with the same value as this array.
If the target context is the same as ``self.context``, then ``self`` is
returned. Otherwise, a copy is made.
Parameters
----------
context : Context
The target context.
Returns
-------
NDArray, CSRNDArray or RowSparseNDArray
The target array.
Examples
--------
>>> x = mx.nd.ones((2,3))
>>> y = x.as_in_context(mx.cpu())
>>> y is x
True
>>> z = x.as_in_context(mx.gpu(0))
>>> z is x
False | [
"Returns",
"an",
"array",
"on",
"the",
"target",
"device",
"with",
"the",
"same",
"value",
"as",
"this",
"array",
"."
] | 1af29e9c060a4c7d60eeaacba32afdb9a7775ba7 | https://github.com/apache/incubator-mxnet/blob/1af29e9c060a4c7d60eeaacba32afdb9a7775ba7/python/mxnet/ndarray/ndarray.py#L2114-L2143 |
24,055 | apache/incubator-mxnet | python/mxnet/ndarray/ndarray.py | NDArray.attach_grad | def attach_grad(self, grad_req='write', stype=None):
"""Attach a gradient buffer to this NDArray, so that `backward`
can compute gradient with respect to it.
Parameters
----------
grad_req : {'write', 'add', 'null'}
How gradient will be accumulated.
- 'write': gradient will be overwritten on every backward.
- 'add': gradient will be added to existing value on every backward.
- 'null': do not compute gradient for this NDArray.
stype : str, optional
The storage type of the gradient array. Defaults to the same stype of this NDArray.
"""
from . import zeros as _zeros
if stype is not None:
grad = _zeros(self.shape, stype=stype)
else:
grad = op.zeros_like(self) # pylint: disable=undefined-variable
grad_req = _GRAD_REQ_MAP[grad_req]
check_call(_LIB.MXAutogradMarkVariables(
1, ctypes.pointer(self.handle),
ctypes.pointer(mx_uint(grad_req)),
ctypes.pointer(grad.handle))) | python | def attach_grad(self, grad_req='write', stype=None):
"""Attach a gradient buffer to this NDArray, so that `backward`
can compute gradient with respect to it.
Parameters
----------
grad_req : {'write', 'add', 'null'}
How gradient will be accumulated.
- 'write': gradient will be overwritten on every backward.
- 'add': gradient will be added to existing value on every backward.
- 'null': do not compute gradient for this NDArray.
stype : str, optional
The storage type of the gradient array. Defaults to the same stype of this NDArray.
"""
from . import zeros as _zeros
if stype is not None:
grad = _zeros(self.shape, stype=stype)
else:
grad = op.zeros_like(self) # pylint: disable=undefined-variable
grad_req = _GRAD_REQ_MAP[grad_req]
check_call(_LIB.MXAutogradMarkVariables(
1, ctypes.pointer(self.handle),
ctypes.pointer(mx_uint(grad_req)),
ctypes.pointer(grad.handle))) | [
"def",
"attach_grad",
"(",
"self",
",",
"grad_req",
"=",
"'write'",
",",
"stype",
"=",
"None",
")",
":",
"from",
".",
"import",
"zeros",
"as",
"_zeros",
"if",
"stype",
"is",
"not",
"None",
":",
"grad",
"=",
"_zeros",
"(",
"self",
".",
"shape",
",",
"stype",
"=",
"stype",
")",
"else",
":",
"grad",
"=",
"op",
".",
"zeros_like",
"(",
"self",
")",
"# pylint: disable=undefined-variable",
"grad_req",
"=",
"_GRAD_REQ_MAP",
"[",
"grad_req",
"]",
"check_call",
"(",
"_LIB",
".",
"MXAutogradMarkVariables",
"(",
"1",
",",
"ctypes",
".",
"pointer",
"(",
"self",
".",
"handle",
")",
",",
"ctypes",
".",
"pointer",
"(",
"mx_uint",
"(",
"grad_req",
")",
")",
",",
"ctypes",
".",
"pointer",
"(",
"grad",
".",
"handle",
")",
")",
")"
] | Attach a gradient buffer to this NDArray, so that `backward`
can compute gradient with respect to it.
Parameters
----------
grad_req : {'write', 'add', 'null'}
How gradient will be accumulated.
- 'write': gradient will be overwritten on every backward.
- 'add': gradient will be added to existing value on every backward.
- 'null': do not compute gradient for this NDArray.
stype : str, optional
The storage type of the gradient array. Defaults to the same stype of this NDArray. | [
"Attach",
"a",
"gradient",
"buffer",
"to",
"this",
"NDArray",
"so",
"that",
"backward",
"can",
"compute",
"gradient",
"with",
"respect",
"to",
"it",
"."
] | 1af29e9c060a4c7d60eeaacba32afdb9a7775ba7 | https://github.com/apache/incubator-mxnet/blob/1af29e9c060a4c7d60eeaacba32afdb9a7775ba7/python/mxnet/ndarray/ndarray.py#L2145-L2168 |
24,056 | apache/incubator-mxnet | python/mxnet/ndarray/ndarray.py | NDArray.grad | def grad(self):
"""Returns gradient buffer attached to this NDArray."""
from . import _ndarray_cls
hdl = NDArrayHandle()
check_call(_LIB.MXNDArrayGetGrad(self.handle, ctypes.byref(hdl)))
if hdl.value is None:
return None
return _ndarray_cls(hdl) | python | def grad(self):
"""Returns gradient buffer attached to this NDArray."""
from . import _ndarray_cls
hdl = NDArrayHandle()
check_call(_LIB.MXNDArrayGetGrad(self.handle, ctypes.byref(hdl)))
if hdl.value is None:
return None
return _ndarray_cls(hdl) | [
"def",
"grad",
"(",
"self",
")",
":",
"from",
".",
"import",
"_ndarray_cls",
"hdl",
"=",
"NDArrayHandle",
"(",
")",
"check_call",
"(",
"_LIB",
".",
"MXNDArrayGetGrad",
"(",
"self",
".",
"handle",
",",
"ctypes",
".",
"byref",
"(",
"hdl",
")",
")",
")",
"if",
"hdl",
".",
"value",
"is",
"None",
":",
"return",
"None",
"return",
"_ndarray_cls",
"(",
"hdl",
")"
] | Returns gradient buffer attached to this NDArray. | [
"Returns",
"gradient",
"buffer",
"attached",
"to",
"this",
"NDArray",
"."
] | 1af29e9c060a4c7d60eeaacba32afdb9a7775ba7 | https://github.com/apache/incubator-mxnet/blob/1af29e9c060a4c7d60eeaacba32afdb9a7775ba7/python/mxnet/ndarray/ndarray.py#L2171-L2178 |
24,057 | apache/incubator-mxnet | python/mxnet/ndarray/ndarray.py | NDArray.detach | def detach(self):
"""Returns a new NDArray, detached from the current graph."""
from . import _ndarray_cls
hdl = NDArrayHandle()
check_call(_LIB.MXNDArrayDetach(self.handle, ctypes.byref(hdl)))
return _ndarray_cls(hdl) | python | def detach(self):
"""Returns a new NDArray, detached from the current graph."""
from . import _ndarray_cls
hdl = NDArrayHandle()
check_call(_LIB.MXNDArrayDetach(self.handle, ctypes.byref(hdl)))
return _ndarray_cls(hdl) | [
"def",
"detach",
"(",
"self",
")",
":",
"from",
".",
"import",
"_ndarray_cls",
"hdl",
"=",
"NDArrayHandle",
"(",
")",
"check_call",
"(",
"_LIB",
".",
"MXNDArrayDetach",
"(",
"self",
".",
"handle",
",",
"ctypes",
".",
"byref",
"(",
"hdl",
")",
")",
")",
"return",
"_ndarray_cls",
"(",
"hdl",
")"
] | Returns a new NDArray, detached from the current graph. | [
"Returns",
"a",
"new",
"NDArray",
"detached",
"from",
"the",
"current",
"graph",
"."
] | 1af29e9c060a4c7d60eeaacba32afdb9a7775ba7 | https://github.com/apache/incubator-mxnet/blob/1af29e9c060a4c7d60eeaacba32afdb9a7775ba7/python/mxnet/ndarray/ndarray.py#L2180-L2185 |
24,058 | apache/incubator-mxnet | python/mxnet/ndarray/ndarray.py | NDArray.backward | def backward(self, out_grad=None, retain_graph=False, train_mode=True):
"""Compute the gradients of this NDArray w.r.t variables.
Parameters
----------
out_grad : NDArray, optional
Gradient with respect to head.
retain_graph : bool, optional
Whether to retain the computaion graph for another backward
pass on the same graph. By default the computaion history
is cleared.
train_mode : bool, optional
Whether to compute gradient for training or inference.
"""
if out_grad is None:
ograd_handles = [NDArrayHandle(0)]
else:
ograd_handles = [out_grad.handle]
check_call(_LIB.MXAutogradBackwardEx(
1, c_handle_array([self]),
c_array(NDArrayHandle, ograd_handles),
0,
ctypes.c_void_p(0),
ctypes.c_int(retain_graph),
ctypes.c_int(0),
ctypes.c_int(train_mode),
ctypes.c_void_p(0),
ctypes.c_void_p(0))) | python | def backward(self, out_grad=None, retain_graph=False, train_mode=True):
"""Compute the gradients of this NDArray w.r.t variables.
Parameters
----------
out_grad : NDArray, optional
Gradient with respect to head.
retain_graph : bool, optional
Whether to retain the computaion graph for another backward
pass on the same graph. By default the computaion history
is cleared.
train_mode : bool, optional
Whether to compute gradient for training or inference.
"""
if out_grad is None:
ograd_handles = [NDArrayHandle(0)]
else:
ograd_handles = [out_grad.handle]
check_call(_LIB.MXAutogradBackwardEx(
1, c_handle_array([self]),
c_array(NDArrayHandle, ograd_handles),
0,
ctypes.c_void_p(0),
ctypes.c_int(retain_graph),
ctypes.c_int(0),
ctypes.c_int(train_mode),
ctypes.c_void_p(0),
ctypes.c_void_p(0))) | [
"def",
"backward",
"(",
"self",
",",
"out_grad",
"=",
"None",
",",
"retain_graph",
"=",
"False",
",",
"train_mode",
"=",
"True",
")",
":",
"if",
"out_grad",
"is",
"None",
":",
"ograd_handles",
"=",
"[",
"NDArrayHandle",
"(",
"0",
")",
"]",
"else",
":",
"ograd_handles",
"=",
"[",
"out_grad",
".",
"handle",
"]",
"check_call",
"(",
"_LIB",
".",
"MXAutogradBackwardEx",
"(",
"1",
",",
"c_handle_array",
"(",
"[",
"self",
"]",
")",
",",
"c_array",
"(",
"NDArrayHandle",
",",
"ograd_handles",
")",
",",
"0",
",",
"ctypes",
".",
"c_void_p",
"(",
"0",
")",
",",
"ctypes",
".",
"c_int",
"(",
"retain_graph",
")",
",",
"ctypes",
".",
"c_int",
"(",
"0",
")",
",",
"ctypes",
".",
"c_int",
"(",
"train_mode",
")",
",",
"ctypes",
".",
"c_void_p",
"(",
"0",
")",
",",
"ctypes",
".",
"c_void_p",
"(",
"0",
")",
")",
")"
] | Compute the gradients of this NDArray w.r.t variables.
Parameters
----------
out_grad : NDArray, optional
Gradient with respect to head.
retain_graph : bool, optional
Whether to retain the computaion graph for another backward
pass on the same graph. By default the computaion history
is cleared.
train_mode : bool, optional
Whether to compute gradient for training or inference. | [
"Compute",
"the",
"gradients",
"of",
"this",
"NDArray",
"w",
".",
"r",
".",
"t",
"variables",
"."
] | 1af29e9c060a4c7d60eeaacba32afdb9a7775ba7 | https://github.com/apache/incubator-mxnet/blob/1af29e9c060a4c7d60eeaacba32afdb9a7775ba7/python/mxnet/ndarray/ndarray.py#L2187-L2215 |
24,059 | apache/incubator-mxnet | example/gluon/lipnet/utils/align.py | Align.build | def build(self, align_path):
"""
Build the align array
"""
file = open(align_path, 'r')
lines = file.readlines()
file.close()
# words: list([op, ed, word])
words = []
for line in lines:
_op, _ed, word = line.strip().split(' ')
if word not in Align.skip_list:
words.append((int(_op), int(_ed), word))
self.words = words
self.n_words = len(words)
self.sentence_str = " ".join([w[2] for w in self.words])
self.sentence_length = len(self.sentence_str) | python | def build(self, align_path):
"""
Build the align array
"""
file = open(align_path, 'r')
lines = file.readlines()
file.close()
# words: list([op, ed, word])
words = []
for line in lines:
_op, _ed, word = line.strip().split(' ')
if word not in Align.skip_list:
words.append((int(_op), int(_ed), word))
self.words = words
self.n_words = len(words)
self.sentence_str = " ".join([w[2] for w in self.words])
self.sentence_length = len(self.sentence_str) | [
"def",
"build",
"(",
"self",
",",
"align_path",
")",
":",
"file",
"=",
"open",
"(",
"align_path",
",",
"'r'",
")",
"lines",
"=",
"file",
".",
"readlines",
"(",
")",
"file",
".",
"close",
"(",
")",
"# words: list([op, ed, word])",
"words",
"=",
"[",
"]",
"for",
"line",
"in",
"lines",
":",
"_op",
",",
"_ed",
",",
"word",
"=",
"line",
".",
"strip",
"(",
")",
".",
"split",
"(",
"' '",
")",
"if",
"word",
"not",
"in",
"Align",
".",
"skip_list",
":",
"words",
".",
"append",
"(",
"(",
"int",
"(",
"_op",
")",
",",
"int",
"(",
"_ed",
")",
",",
"word",
")",
")",
"self",
".",
"words",
"=",
"words",
"self",
".",
"n_words",
"=",
"len",
"(",
"words",
")",
"self",
".",
"sentence_str",
"=",
"\" \"",
".",
"join",
"(",
"[",
"w",
"[",
"2",
"]",
"for",
"w",
"in",
"self",
".",
"words",
"]",
")",
"self",
".",
"sentence_length",
"=",
"len",
"(",
"self",
".",
"sentence_str",
")"
] | Build the align array | [
"Build",
"the",
"align",
"array"
] | 1af29e9c060a4c7d60eeaacba32afdb9a7775ba7 | https://github.com/apache/incubator-mxnet/blob/1af29e9c060a4c7d60eeaacba32afdb9a7775ba7/example/gluon/lipnet/utils/align.py#L36-L52 |
24,060 | apache/incubator-mxnet | example/gluon/lipnet/utils/align.py | Align.word_frame_pos | def word_frame_pos(self, _id):
"""
Get the position of words
"""
left = int(self.words[_id][0]/1000)
right = max(left+1, int(self.words[_id][1]/1000))
return (left, right) | python | def word_frame_pos(self, _id):
"""
Get the position of words
"""
left = int(self.words[_id][0]/1000)
right = max(left+1, int(self.words[_id][1]/1000))
return (left, right) | [
"def",
"word_frame_pos",
"(",
"self",
",",
"_id",
")",
":",
"left",
"=",
"int",
"(",
"self",
".",
"words",
"[",
"_id",
"]",
"[",
"0",
"]",
"/",
"1000",
")",
"right",
"=",
"max",
"(",
"left",
"+",
"1",
",",
"int",
"(",
"self",
".",
"words",
"[",
"_id",
"]",
"[",
"1",
"]",
"/",
"1000",
")",
")",
"return",
"(",
"left",
",",
"right",
")"
] | Get the position of words | [
"Get",
"the",
"position",
"of",
"words"
] | 1af29e9c060a4c7d60eeaacba32afdb9a7775ba7 | https://github.com/apache/incubator-mxnet/blob/1af29e9c060a4c7d60eeaacba32afdb9a7775ba7/example/gluon/lipnet/utils/align.py#L77-L83 |
24,061 | apache/incubator-mxnet | example/rnn/large_word_lm/custom_module.py | CustomModule.prepare_sparse_params | def prepare_sparse_params(self, param_rowids):
'''Prepares the module for processing a data batch by pulling row_sparse
parameters from kvstore to all devices based on rowids.
Parameters
----------
param_rowids : dict of str to NDArray of list of NDArrays
'''
if not self._kvstore:
return
assert(isinstance(param_rowids, dict))
for param_name, rowids in param_rowids.items():
if isinstance(rowids, (tuple, list)):
rowids_1d = []
for r in rowids:
rowids_1d.append(r.reshape((-1,)).astype(np.int64))
rowid = mx.nd.concat(*rowids_1d, dim=0)
else:
rowid = rowids
param_idx = self._exec_group.param_names.index(param_name)
param_val = self._exec_group.param_arrays[param_idx]
self._kvstore.row_sparse_pull(param_name, param_val, row_ids=rowid,
priority=-param_idx) | python | def prepare_sparse_params(self, param_rowids):
'''Prepares the module for processing a data batch by pulling row_sparse
parameters from kvstore to all devices based on rowids.
Parameters
----------
param_rowids : dict of str to NDArray of list of NDArrays
'''
if not self._kvstore:
return
assert(isinstance(param_rowids, dict))
for param_name, rowids in param_rowids.items():
if isinstance(rowids, (tuple, list)):
rowids_1d = []
for r in rowids:
rowids_1d.append(r.reshape((-1,)).astype(np.int64))
rowid = mx.nd.concat(*rowids_1d, dim=0)
else:
rowid = rowids
param_idx = self._exec_group.param_names.index(param_name)
param_val = self._exec_group.param_arrays[param_idx]
self._kvstore.row_sparse_pull(param_name, param_val, row_ids=rowid,
priority=-param_idx) | [
"def",
"prepare_sparse_params",
"(",
"self",
",",
"param_rowids",
")",
":",
"if",
"not",
"self",
".",
"_kvstore",
":",
"return",
"assert",
"(",
"isinstance",
"(",
"param_rowids",
",",
"dict",
")",
")",
"for",
"param_name",
",",
"rowids",
"in",
"param_rowids",
".",
"items",
"(",
")",
":",
"if",
"isinstance",
"(",
"rowids",
",",
"(",
"tuple",
",",
"list",
")",
")",
":",
"rowids_1d",
"=",
"[",
"]",
"for",
"r",
"in",
"rowids",
":",
"rowids_1d",
".",
"append",
"(",
"r",
".",
"reshape",
"(",
"(",
"-",
"1",
",",
")",
")",
".",
"astype",
"(",
"np",
".",
"int64",
")",
")",
"rowid",
"=",
"mx",
".",
"nd",
".",
"concat",
"(",
"*",
"rowids_1d",
",",
"dim",
"=",
"0",
")",
"else",
":",
"rowid",
"=",
"rowids",
"param_idx",
"=",
"self",
".",
"_exec_group",
".",
"param_names",
".",
"index",
"(",
"param_name",
")",
"param_val",
"=",
"self",
".",
"_exec_group",
".",
"param_arrays",
"[",
"param_idx",
"]",
"self",
".",
"_kvstore",
".",
"row_sparse_pull",
"(",
"param_name",
",",
"param_val",
",",
"row_ids",
"=",
"rowid",
",",
"priority",
"=",
"-",
"param_idx",
")"
] | Prepares the module for processing a data batch by pulling row_sparse
parameters from kvstore to all devices based on rowids.
Parameters
----------
param_rowids : dict of str to NDArray of list of NDArrays | [
"Prepares",
"the",
"module",
"for",
"processing",
"a",
"data",
"batch",
"by",
"pulling",
"row_sparse",
"parameters",
"from",
"kvstore",
"to",
"all",
"devices",
"based",
"on",
"rowids",
"."
] | 1af29e9c060a4c7d60eeaacba32afdb9a7775ba7 | https://github.com/apache/incubator-mxnet/blob/1af29e9c060a4c7d60eeaacba32afdb9a7775ba7/example/rnn/large_word_lm/custom_module.py#L38-L60 |
24,062 | apache/incubator-mxnet | example/rnn/large_word_lm/custom_module.py | CustomModule.rescale_grad | def rescale_grad(self, scale=None, param_name=None):
""" Rescale the gradient of provided parameters by a certain scale """
if scale is None or param_name is None:
return
param_idx = self._exec_group.param_names.index(param_name)
grad_vals = self._exec_group.grad_arrays[param_idx]
for grad in grad_vals:
grad[:] *= scale | python | def rescale_grad(self, scale=None, param_name=None):
""" Rescale the gradient of provided parameters by a certain scale """
if scale is None or param_name is None:
return
param_idx = self._exec_group.param_names.index(param_name)
grad_vals = self._exec_group.grad_arrays[param_idx]
for grad in grad_vals:
grad[:] *= scale | [
"def",
"rescale_grad",
"(",
"self",
",",
"scale",
"=",
"None",
",",
"param_name",
"=",
"None",
")",
":",
"if",
"scale",
"is",
"None",
"or",
"param_name",
"is",
"None",
":",
"return",
"param_idx",
"=",
"self",
".",
"_exec_group",
".",
"param_names",
".",
"index",
"(",
"param_name",
")",
"grad_vals",
"=",
"self",
".",
"_exec_group",
".",
"grad_arrays",
"[",
"param_idx",
"]",
"for",
"grad",
"in",
"grad_vals",
":",
"grad",
"[",
":",
"]",
"*=",
"scale"
] | Rescale the gradient of provided parameters by a certain scale | [
"Rescale",
"the",
"gradient",
"of",
"provided",
"parameters",
"by",
"a",
"certain",
"scale"
] | 1af29e9c060a4c7d60eeaacba32afdb9a7775ba7 | https://github.com/apache/incubator-mxnet/blob/1af29e9c060a4c7d60eeaacba32afdb9a7775ba7/example/rnn/large_word_lm/custom_module.py#L176-L183 |
def _build_doc(func_name,
               desc,
               arg_names,
               arg_types,
               arg_desc,
               key_var_num_args=None,
               ret_type=None):
    """Build the docstring for a symbolic function.

    Joins the operator description, the generated parameter section and a
    fixed Returns section, then appends the docstring of any ``SymbolDoc``
    subclass named ``<func_name>Doc`` (with its leading indentation
    stripped).  ``NDArray-or-Symbol`` mentions are rewritten to ``Symbol``.
    """
    param_str = _build_param_doc(arg_names, arg_types, arg_desc)
    if key_var_num_args:
        desc += '\nThis function support variable length of positional input.'
    template = ('%s\n\n' +
                '%s\n' +
                'name : string, optional.\n' +
                '    Name of the resulting symbol.\n\n' +
                'Returns\n' +
                '-------\n' +
                'Symbol\n' +
                '    The result symbol.')
    doc_str = template % (desc, param_str)
    # Collect example docs contributed by a matching '<func>Doc' subclass.
    extras = [cls.__doc__ for cls in type.__subclasses__(SymbolDoc)
              if cls.__name__ == '%sDoc' % func_name]
    extra_doc = "\n" + '\n'.join(extras)
    doc_str += _re.sub(_re.compile("    "), "", extra_doc)
    doc_str = _re.sub('NDArray-or-Symbol', 'Symbol', doc_str)
    return doc_str
"def",
"_build_doc",
"(",
"func_name",
",",
"desc",
",",
"arg_names",
",",
"arg_types",
",",
"arg_desc",
",",
"key_var_num_args",
"=",
"None",
",",
"ret_type",
"=",
"None",
")",
":",
"param_str",
"=",
"_build_param_doc",
"(",
"arg_names",
",",
"arg_types",
",",
"arg_desc",
")",
"if",
"key_var_num_args",
":",
"desc",
"+=",
"'\\nThis function support variable length of positional input.'",
"doc_str",
"=",
"(",
"'%s\\n\\n'",
"+",
"'%s\\n'",
"+",
"'name : string, optional.\\n'",
"+",
"' Name of the resulting symbol.\\n\\n'",
"+",
"'Returns\\n'",
"+",
"'-------\\n'",
"+",
"'Symbol\\n'",
"+",
"' The result symbol.'",
")",
"doc_str",
"=",
"doc_str",
"%",
"(",
"desc",
",",
"param_str",
")",
"extra_doc",
"=",
"\"\\n\"",
"+",
"'\\n'",
".",
"join",
"(",
"[",
"x",
".",
"__doc__",
"for",
"x",
"in",
"type",
".",
"__subclasses__",
"(",
"SymbolDoc",
")",
"if",
"x",
".",
"__name__",
"==",
"'%sDoc'",
"%",
"func_name",
"]",
")",
"doc_str",
"+=",
"_re",
".",
"sub",
"(",
"_re",
".",
"compile",
"(",
"\" \"",
")",
",",
"\"\"",
",",
"extra_doc",
")",
"doc_str",
"=",
"_re",
".",
"sub",
"(",
"'NDArray-or-Symbol'",
",",
"'Symbol'",
",",
"doc_str",
")",
"return",
"doc_str"
] | Build docstring for symbolic functions. | [
"Build",
"docstring",
"for",
"symbolic",
"functions",
"."
] | 1af29e9c060a4c7d60eeaacba32afdb9a7775ba7 | https://github.com/apache/incubator-mxnet/blob/1af29e9c060a4c7d60eeaacba32afdb9a7775ba7/python/mxnet/symbol_doc.py#L212-L236 |
def get_output_shape(sym, **input_shapes):
    """Return a dict mapping each output name of ``sym`` to its inferred shape."""
    out_names = sym.list_outputs()
    _, out_shapes, _ = sym.infer_shape(**input_shapes)
    return {name: shape for name, shape in zip(out_names, out_shapes)}
"def",
"get_output_shape",
"(",
"sym",
",",
"*",
"*",
"input_shapes",
")",
":",
"_",
",",
"s_outputs",
",",
"_",
"=",
"sym",
".",
"infer_shape",
"(",
"*",
"*",
"input_shapes",
")",
"return",
"dict",
"(",
"zip",
"(",
"sym",
".",
"list_outputs",
"(",
")",
",",
"s_outputs",
")",
")"
] | Get user friendly information of the output shapes. | [
"Get",
"user",
"friendly",
"information",
"of",
"the",
"output",
"shapes",
"."
] | 1af29e9c060a4c7d60eeaacba32afdb9a7775ba7 | https://github.com/apache/incubator-mxnet/blob/1af29e9c060a4c7d60eeaacba32afdb9a7775ba7/python/mxnet/symbol_doc.py#L56-L59 |
def num_gpus():
    """Query CUDA for the number of GPUs present.

    Returns
    -------
    count : int
        The number of GPUs.

    Raises
    ------
    Will raise an exception on any CUDA error.
    """
    gpu_count = ctypes.c_int()
    check_call(_LIB.MXGetGPUCount(ctypes.byref(gpu_count)))
    return gpu_count.value
"def",
"num_gpus",
"(",
")",
":",
"count",
"=",
"ctypes",
".",
"c_int",
"(",
")",
"check_call",
"(",
"_LIB",
".",
"MXGetGPUCount",
"(",
"ctypes",
".",
"byref",
"(",
"count",
")",
")",
")",
"return",
"count",
".",
"value"
] | Query CUDA for the number of GPUs present.
Raises
------
Will raise an exception on any CUDA error.
Returns
-------
count : int
The number of GPUs. | [
"Query",
"CUDA",
"for",
"the",
"number",
"of",
"GPUs",
"present",
"."
] | 1af29e9c060a4c7d60eeaacba32afdb9a7775ba7 | https://github.com/apache/incubator-mxnet/blob/1af29e9c060a4c7d60eeaacba32afdb9a7775ba7/python/mxnet/context.py#L244-L259 |
def gpu_memory_info(device_id=0):
    """Query CUDA for the free and total bytes of GPU global memory.

    Parameters
    ----------
    device_id : int, optional
        The device id of the GPU device.

    Returns
    -------
    (free, total) : (int, int)
        Free and total global memory in bytes.

    Raises
    ------
    Will raise an exception on any CUDA error.
    """
    free_bytes = ctypes.c_uint64()
    total_bytes = ctypes.c_uint64()
    check_call(_LIB.MXGetGPUMemoryInformation64(
        ctypes.c_int(device_id), ctypes.byref(free_bytes), ctypes.byref(total_bytes)))
    return (free_bytes.value, total_bytes.value)
"def",
"gpu_memory_info",
"(",
"device_id",
"=",
"0",
")",
":",
"free",
"=",
"ctypes",
".",
"c_uint64",
"(",
")",
"total",
"=",
"ctypes",
".",
"c_uint64",
"(",
")",
"dev_id",
"=",
"ctypes",
".",
"c_int",
"(",
"device_id",
")",
"check_call",
"(",
"_LIB",
".",
"MXGetGPUMemoryInformation64",
"(",
"dev_id",
",",
"ctypes",
".",
"byref",
"(",
"free",
")",
",",
"ctypes",
".",
"byref",
"(",
"total",
")",
")",
")",
"return",
"(",
"free",
".",
"value",
",",
"total",
".",
"value",
")"
] | Query CUDA for the free and total bytes of GPU global memory.
Parameters
----------
device_id : int, optional
The device id of the GPU device.
Raises
------
Will raise an exception on any CUDA error.
Returns
-------
(free, total) : (int, int)
The number of GPUs. | [
"Query",
"CUDA",
"for",
"the",
"free",
"and",
"total",
"bytes",
"of",
"GPU",
"global",
"memory",
"."
] | 1af29e9c060a4c7d60eeaacba32afdb9a7775ba7 | https://github.com/apache/incubator-mxnet/blob/1af29e9c060a4c7d60eeaacba32afdb9a7775ba7/python/mxnet/context.py#L261-L283 |
def current_context():
    """Returns the current context.

    By default, `mx.cpu()` is used for all the computations
    and it can be overridden by using `with mx.Context(x)` statement where
    x can be cpu(device_id) or gpu(device_id).

    Examples
    -------
    >>> mx.current_context()
    cpu(0)
    >>> with mx.Context('gpu', 1):  # Context changed in `with` block.
    ...    mx.current_context()  # Computation done here will be on gpu(1).
    ...
    gpu(1)
    >>> mx.current_context()  # Back to default context.
    cpu(0)

    Returns
    -------
    default_ctx : Context
    """
    thread_state = Context._default_ctx
    # Lazily create the per-thread default the first time it is requested.
    if not hasattr(thread_state, "value"):
        thread_state.value = Context('cpu', 0)
    return thread_state.value
"def",
"current_context",
"(",
")",
":",
"if",
"not",
"hasattr",
"(",
"Context",
".",
"_default_ctx",
",",
"\"value\"",
")",
":",
"Context",
".",
"_default_ctx",
".",
"value",
"=",
"Context",
"(",
"'cpu'",
",",
"0",
")",
"return",
"Context",
".",
"_default_ctx",
".",
"value"
] | Returns the current context.
By default, `mx.cpu()` is used for all the computations
and it can be overridden by using `with mx.Context(x)` statement where
x can be cpu(device_id) or gpu(device_id).
Examples
-------
>>> mx.current_context()
cpu(0)
>>> with mx.Context('gpu', 1): # Context changed in `with` block.
... mx.current_context() # Computation done here will be on gpu(1).
...
gpu(1)
>>> mx.current_context() # Back to default context.
cpu(0)
Returns
-------
default_ctx : Context | [
"Returns",
"the",
"current",
"context",
"."
] | 1af29e9c060a4c7d60eeaacba32afdb9a7775ba7 | https://github.com/apache/incubator-mxnet/blob/1af29e9c060a4c7d60eeaacba32afdb9a7775ba7/python/mxnet/context.py#L285-L309 |
def config_cython():
    """Try to configure cython and return cython configuration.

    Returns
    -------
    list
        A list of cythonized ``Extension`` objects, or an empty list when
        cython support is disabled, unsupported (Windows), or cython is not
        installed.
    """
    if not with_cython:
        return []
    # pylint: disable=unreachable
    if os.name == 'nt':
        print("WARNING: Cython is not supported on Windows, will compile without cython module")
        return []
    try:
        from Cython.Build import cythonize
        # from setuptools.extension import Extension
        if sys.version_info >= (3, 0):
            subdir = "_cy3"
        else:
            subdir = "_cy2"
        ret = []
        path = "mxnet/cython"
        # NOTE: the previous Windows-specific library_dirs/libraries branch
        # here was unreachable -- os.name == 'nt' already returned above --
        # so the platform-neutral values are used directly.
        for fn in os.listdir(path):
            if not fn.endswith(".pyx"):
                continue
            ret.append(Extension(
                "mxnet/%s/.%s" % (subdir, fn[:-4]),
                ["mxnet/cython/%s" % fn],
                include_dirs=["../include/", "../3rdparty/tvm/nnvm/include"],
                library_dirs=None,
                libraries=None,
                language="c++"))
        return cythonize(ret)
    except ImportError:
        print("WARNING: Cython is not installed, will compile without cython module")
        return []
"def",
"config_cython",
"(",
")",
":",
"if",
"not",
"with_cython",
":",
"return",
"[",
"]",
"# pylint: disable=unreachable",
"if",
"os",
".",
"name",
"==",
"'nt'",
":",
"print",
"(",
"\"WARNING: Cython is not supported on Windows, will compile without cython module\"",
")",
"return",
"[",
"]",
"try",
":",
"from",
"Cython",
".",
"Build",
"import",
"cythonize",
"# from setuptools.extension import Extension",
"if",
"sys",
".",
"version_info",
">=",
"(",
"3",
",",
"0",
")",
":",
"subdir",
"=",
"\"_cy3\"",
"else",
":",
"subdir",
"=",
"\"_cy2\"",
"ret",
"=",
"[",
"]",
"path",
"=",
"\"mxnet/cython\"",
"if",
"os",
".",
"name",
"==",
"'nt'",
":",
"library_dirs",
"=",
"[",
"'mxnet'",
",",
"'../build/Release'",
",",
"'../build'",
"]",
"libraries",
"=",
"[",
"'libmxnet'",
"]",
"else",
":",
"library_dirs",
"=",
"None",
"libraries",
"=",
"None",
"for",
"fn",
"in",
"os",
".",
"listdir",
"(",
"path",
")",
":",
"if",
"not",
"fn",
".",
"endswith",
"(",
"\".pyx\"",
")",
":",
"continue",
"ret",
".",
"append",
"(",
"Extension",
"(",
"\"mxnet/%s/.%s\"",
"%",
"(",
"subdir",
",",
"fn",
"[",
":",
"-",
"4",
"]",
")",
",",
"[",
"\"mxnet/cython/%s\"",
"%",
"fn",
"]",
",",
"include_dirs",
"=",
"[",
"\"../include/\"",
",",
"\"../3rdparty/tvm/nnvm/include\"",
"]",
",",
"library_dirs",
"=",
"library_dirs",
",",
"libraries",
"=",
"libraries",
",",
"language",
"=",
"\"c++\"",
")",
")",
"return",
"cythonize",
"(",
"ret",
")",
"except",
"ImportError",
":",
"print",
"(",
"\"WARNING: Cython is not installed, will compile without cython module\"",
")",
"return",
"[",
"]"
] | Try to configure cython and return cython configuration | [
"Try",
"to",
"configure",
"cython",
"and",
"return",
"cython",
"configuration"
] | 1af29e9c060a4c7d60eeaacba32afdb9a7775ba7 | https://github.com/apache/incubator-mxnet/blob/1af29e9c060a4c7d60eeaacba32afdb9a7775ba7/python/setup.py#L62-L100 |
def _compose(self, *args, **kwargs):
    """Compose symbol on inputs.

    This call mutates the current symbol.

    Parameters
    ----------
    args:
        provide positional arguments (Symbols)
    kwargs:
        provide keyword arguments (Symbols); ``name`` is reserved for the
        name of the resulting symbol

    Returns
    -------
    the resulting symbol
    """
    name = kwargs.pop('name', None)
    if name:
        name = c_str(name)
    if len(args) != 0 and len(kwargs) != 0:
        raise TypeError('compose only accept input Symbols \
            either as positional or keyword arguments, not both')
    for arg in args:
        if not isinstance(arg, SymbolBase):
            raise TypeError('Compose expect `Symbol` as arguments')
    for val in kwargs.values():
        if not isinstance(val, SymbolBase):
            raise TypeError('Compose expect `Symbol` as arguments')

    num_args = len(args) + len(kwargs)
    if len(kwargs) != 0:
        keys = c_str_array(kwargs.keys())
        args = c_handle_array(kwargs.values())
    else:
        keys = None
        # BUG FIX: this branch previously converted the (empty) kwargs
        # instead of the positional args, silently dropping all inputs.
        args = c_handle_array(args)
    check_call(_LIB.NNSymbolCompose(
        self.handle, name, num_args, keys, args))
"def",
"_compose",
"(",
"self",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"name",
"=",
"kwargs",
".",
"pop",
"(",
"'name'",
",",
"None",
")",
"if",
"name",
":",
"name",
"=",
"c_str",
"(",
"name",
")",
"if",
"len",
"(",
"args",
")",
"!=",
"0",
"and",
"len",
"(",
"kwargs",
")",
"!=",
"0",
":",
"raise",
"TypeError",
"(",
"'compose only accept input Symbols \\\n either as positional or keyword arguments, not both'",
")",
"for",
"arg",
"in",
"args",
":",
"if",
"not",
"isinstance",
"(",
"arg",
",",
"SymbolBase",
")",
":",
"raise",
"TypeError",
"(",
"'Compose expect `Symbol` as arguments'",
")",
"for",
"val",
"in",
"kwargs",
".",
"values",
"(",
")",
":",
"if",
"not",
"isinstance",
"(",
"val",
",",
"SymbolBase",
")",
":",
"raise",
"TypeError",
"(",
"'Compose expect `Symbol` as arguments'",
")",
"num_args",
"=",
"len",
"(",
"args",
")",
"+",
"len",
"(",
"kwargs",
")",
"if",
"len",
"(",
"kwargs",
")",
"!=",
"0",
":",
"keys",
"=",
"c_str_array",
"(",
"kwargs",
".",
"keys",
"(",
")",
")",
"args",
"=",
"c_handle_array",
"(",
"kwargs",
".",
"values",
"(",
")",
")",
"else",
":",
"keys",
"=",
"None",
"args",
"=",
"c_handle_array",
"(",
"kwargs",
".",
"values",
"(",
")",
")",
"check_call",
"(",
"_LIB",
".",
"NNSymbolCompose",
"(",
"self",
".",
"handle",
",",
"name",
",",
"num_args",
",",
"keys",
",",
"args",
")",
")"
] | Compose symbol on inputs.
This call mutates the current symbol.
Parameters
----------
args:
provide positional arguments
kwargs:
provide keyword arguments
Returns
-------
the resulting symbol | [
"Compose",
"symbol",
"on",
"inputs",
"."
] | 1af29e9c060a4c7d60eeaacba32afdb9a7775ba7 | https://github.com/apache/incubator-mxnet/blob/1af29e9c060a4c7d60eeaacba32afdb9a7775ba7/python/mxnet/_ctypes/symbol.py#L48-L88 |
def _set_attr(self, **kwargs):
    """Set the attribute of the symbol.

    Parameters
    ----------
    **kwargs
        The attributes to set
    """
    attr_keys = c_str_array(kwargs.keys())
    # Attribute values are stringified before crossing the C boundary.
    attr_vals = c_str_array([str(v) for v in kwargs.values()])
    check_call(_LIB.MXSymbolSetAttrs(
        self.handle, mx_uint(len(kwargs)), attr_keys, attr_vals))
"def",
"_set_attr",
"(",
"self",
",",
"*",
"*",
"kwargs",
")",
":",
"keys",
"=",
"c_str_array",
"(",
"kwargs",
".",
"keys",
"(",
")",
")",
"vals",
"=",
"c_str_array",
"(",
"[",
"str",
"(",
"s",
")",
"for",
"s",
"in",
"kwargs",
".",
"values",
"(",
")",
"]",
")",
"num_args",
"=",
"mx_uint",
"(",
"len",
"(",
"kwargs",
")",
")",
"check_call",
"(",
"_LIB",
".",
"MXSymbolSetAttrs",
"(",
"self",
".",
"handle",
",",
"num_args",
",",
"keys",
",",
"vals",
")",
")"
] | Set the attribute of the symbol.
Parameters
----------
**kwargs
The attributes to set | [
"Set",
"the",
"attribute",
"of",
"the",
"symbol",
"."
] | 1af29e9c060a4c7d60eeaacba32afdb9a7775ba7 | https://github.com/apache/incubator-mxnet/blob/1af29e9c060a4c7d60eeaacba32afdb9a7775ba7/python/mxnet/_ctypes/symbol.py#L90-L102 |
def get_symbol_train(network, data_shape, **kwargs):
    """Wrapper for get symbol for train.

    Parameters
    ----------
    network : str
        name for the base network symbol
    data_shape : int
        input shape
    kwargs : dict
        see symbol_builder.get_symbol_train for more details

    Returns
    -------
    the training symbol produced by ``symbol_builder.get_symbol_train``
    """
    if network.startswith('legacy'):
        # Old-style models ship their own builder module.
        # FIX: logging.warn is a deprecated alias of logging.warning.
        logging.warning('Using legacy model.')
        return symbol_builder.import_module(network).get_symbol_train(**kwargs)
    config = get_config(network, data_shape, **kwargs).copy()
    # Explicit kwargs take precedence over the preset configuration.
    config.update(kwargs)
    return symbol_builder.get_symbol_train(**config)
"def",
"get_symbol_train",
"(",
"network",
",",
"data_shape",
",",
"*",
"*",
"kwargs",
")",
":",
"if",
"network",
".",
"startswith",
"(",
"'legacy'",
")",
":",
"logging",
".",
"warn",
"(",
"'Using legacy model.'",
")",
"return",
"symbol_builder",
".",
"import_module",
"(",
"network",
")",
".",
"get_symbol_train",
"(",
"*",
"*",
"kwargs",
")",
"config",
"=",
"get_config",
"(",
"network",
",",
"data_shape",
",",
"*",
"*",
"kwargs",
")",
".",
"copy",
"(",
")",
"config",
".",
"update",
"(",
"kwargs",
")",
"return",
"symbol_builder",
".",
"get_symbol_train",
"(",
"*",
"*",
"config",
")"
] | Wrapper for get symbol for train
Parameters
----------
network : str
name for the base network symbol
data_shape : int
input shape
kwargs : dict
see symbol_builder.get_symbol_train for more details | [
"Wrapper",
"for",
"get",
"symbol",
"for",
"train"
] | 1af29e9c060a4c7d60eeaacba32afdb9a7775ba7 | https://github.com/apache/incubator-mxnet/blob/1af29e9c060a4c7d60eeaacba32afdb9a7775ba7/example/ssd/symbol/symbol_factory.py#L103-L120 |
24,072 | apache/incubator-mxnet | python/mxnet/gluon/parameter.py | Parameter._set_trainer | def _set_trainer(self, trainer):
""" Set the trainer this parameter is associated with. """
# trainer cannot be replaced for sparse params
if self._stype != 'default' and self._trainer and trainer and self._trainer is not trainer:
raise RuntimeError(
"Failed to set the trainer for Parameter '%s' because it was already set. " \
"More than one trainers for a %s Parameter is not supported." \
%(self.name, self._stype))
self._trainer = trainer | python | def _set_trainer(self, trainer):
""" Set the trainer this parameter is associated with. """
# trainer cannot be replaced for sparse params
if self._stype != 'default' and self._trainer and trainer and self._trainer is not trainer:
raise RuntimeError(
"Failed to set the trainer for Parameter '%s' because it was already set. " \
"More than one trainers for a %s Parameter is not supported." \
%(self.name, self._stype))
self._trainer = trainer | [
"def",
"_set_trainer",
"(",
"self",
",",
"trainer",
")",
":",
"# trainer cannot be replaced for sparse params",
"if",
"self",
".",
"_stype",
"!=",
"'default'",
"and",
"self",
".",
"_trainer",
"and",
"trainer",
"and",
"self",
".",
"_trainer",
"is",
"not",
"trainer",
":",
"raise",
"RuntimeError",
"(",
"\"Failed to set the trainer for Parameter '%s' because it was already set. \"",
"\"More than one trainers for a %s Parameter is not supported.\"",
"%",
"(",
"self",
".",
"name",
",",
"self",
".",
"_stype",
")",
")",
"self",
".",
"_trainer",
"=",
"trainer"
] | Set the trainer this parameter is associated with. | [
"Set",
"the",
"trainer",
"this",
"parameter",
"is",
"associated",
"with",
"."
] | 1af29e9c060a4c7d60eeaacba32afdb9a7775ba7 | https://github.com/apache/incubator-mxnet/blob/1af29e9c060a4c7d60eeaacba32afdb9a7775ba7/python/mxnet/gluon/parameter.py#L174-L182 |
def _get_row_sparse(self, arr_list, ctx, row_id):
    """Get row_sparse data from row_sparse parameters based on row_id."""
    if not isinstance(row_id, ndarray.NDArray):
        raise TypeError("row_id must have NDArray type, but %s is given"%(type(row_id)))
    if not self._trainer:
        raise RuntimeError("Cannot get row_sparse data for Parameter '%s' when no " \
                           "Trainer is created with it."%self.name)
    rows = self._check_and_get(arr_list, ctx)
    # Ask the associated trainer to populate the requested rows.
    self._trainer._row_sparse_pull(self, rows, row_id)
    return rows
"def",
"_get_row_sparse",
"(",
"self",
",",
"arr_list",
",",
"ctx",
",",
"row_id",
")",
":",
"# get row sparse params based on row ids",
"if",
"not",
"isinstance",
"(",
"row_id",
",",
"ndarray",
".",
"NDArray",
")",
":",
"raise",
"TypeError",
"(",
"\"row_id must have NDArray type, but %s is given\"",
"%",
"(",
"type",
"(",
"row_id",
")",
")",
")",
"if",
"not",
"self",
".",
"_trainer",
":",
"raise",
"RuntimeError",
"(",
"\"Cannot get row_sparse data for Parameter '%s' when no \"",
"\"Trainer is created with it.\"",
"%",
"self",
".",
"name",
")",
"results",
"=",
"self",
".",
"_check_and_get",
"(",
"arr_list",
",",
"ctx",
")",
"# fetch row sparse params from the trainer",
"self",
".",
"_trainer",
".",
"_row_sparse_pull",
"(",
"self",
",",
"results",
",",
"row_id",
")",
"return",
"results"
] | Get row_sparse data from row_sparse parameters based on row_id. | [
"Get",
"row_sparse",
"data",
"from",
"row_sparse",
"parameters",
"based",
"on",
"row_id",
"."
] | 1af29e9c060a4c7d60eeaacba32afdb9a7775ba7 | https://github.com/apache/incubator-mxnet/blob/1af29e9c060a4c7d60eeaacba32afdb9a7775ba7/python/mxnet/gluon/parameter.py#L216-L228 |
def _finish_deferred_init(self):
    """Finishes deferred initialization."""
    if not self._deferred_init:
        return
    init, ctx_list, default_init, init_data = self._deferred_init
    self._deferred_init = ()
    assert self.shape is not None and np.prod(self.shape) > 0, \
        "Cannot initialize Parameter '%s' because it has " \
        "invalid shape: %s. Please specify in_units, " \
        "in_channels, etc for `Block`s."%(
            self.name, str(self.shape))
    # Initialization must not be recorded by autograd.
    with autograd.pause():
        if init_data is None:
            # No preset data: allocate on CPU and run the initializer.
            init_data = ndarray.zeros(shape=self.shape, dtype=self.dtype,
                                      ctx=context.cpu(), stype=self._stype)
            initializer.create(default_init)(
                initializer.InitDesc(self.name, {'__init__': init}), init_data)
        self._init_impl(init_data, ctx_list)
"def",
"_finish_deferred_init",
"(",
"self",
")",
":",
"if",
"not",
"self",
".",
"_deferred_init",
":",
"return",
"init",
",",
"ctx",
",",
"default_init",
",",
"data",
"=",
"self",
".",
"_deferred_init",
"self",
".",
"_deferred_init",
"=",
"(",
")",
"assert",
"self",
".",
"shape",
"is",
"not",
"None",
"and",
"np",
".",
"prod",
"(",
"self",
".",
"shape",
")",
">",
"0",
",",
"\"Cannot initialize Parameter '%s' because it has \"",
"\"invalid shape: %s. Please specify in_units, \"",
"\"in_channels, etc for `Block`s.\"",
"%",
"(",
"self",
".",
"name",
",",
"str",
"(",
"self",
".",
"shape",
")",
")",
"with",
"autograd",
".",
"pause",
"(",
")",
":",
"if",
"data",
"is",
"None",
":",
"data",
"=",
"ndarray",
".",
"zeros",
"(",
"shape",
"=",
"self",
".",
"shape",
",",
"dtype",
"=",
"self",
".",
"dtype",
",",
"ctx",
"=",
"context",
".",
"cpu",
"(",
")",
",",
"stype",
"=",
"self",
".",
"_stype",
")",
"initializer",
".",
"create",
"(",
"default_init",
")",
"(",
"initializer",
".",
"InitDesc",
"(",
"self",
".",
"name",
",",
"{",
"'__init__'",
":",
"init",
"}",
")",
",",
"data",
")",
"self",
".",
"_init_impl",
"(",
"data",
",",
"ctx",
")"
] | Finishes deferred initialization. | [
"Finishes",
"deferred",
"initialization",
"."
] | 1af29e9c060a4c7d60eeaacba32afdb9a7775ba7 | https://github.com/apache/incubator-mxnet/blob/1af29e9c060a4c7d60eeaacba32afdb9a7775ba7/python/mxnet/gluon/parameter.py#L266-L285 |
24,075 | apache/incubator-mxnet | python/mxnet/gluon/parameter.py | Parameter._init_impl | def _init_impl(self, data, ctx_list):
"""Sets data and grad."""
self._ctx_list = list(ctx_list)
self._ctx_map = [[], []]
for i, ctx in enumerate(self._ctx_list):
dev_list = self._ctx_map[ctx.device_typeid&1]
while len(dev_list) <= ctx.device_id:
dev_list.append(None)
dev_list[ctx.device_id] = i
self._data = [data.copyto(ctx) for ctx in self._ctx_list]
self._init_grad() | python | def _init_impl(self, data, ctx_list):
"""Sets data and grad."""
self._ctx_list = list(ctx_list)
self._ctx_map = [[], []]
for i, ctx in enumerate(self._ctx_list):
dev_list = self._ctx_map[ctx.device_typeid&1]
while len(dev_list) <= ctx.device_id:
dev_list.append(None)
dev_list[ctx.device_id] = i
self._data = [data.copyto(ctx) for ctx in self._ctx_list]
self._init_grad() | [
"def",
"_init_impl",
"(",
"self",
",",
"data",
",",
"ctx_list",
")",
":",
"self",
".",
"_ctx_list",
"=",
"list",
"(",
"ctx_list",
")",
"self",
".",
"_ctx_map",
"=",
"[",
"[",
"]",
",",
"[",
"]",
"]",
"for",
"i",
",",
"ctx",
"in",
"enumerate",
"(",
"self",
".",
"_ctx_list",
")",
":",
"dev_list",
"=",
"self",
".",
"_ctx_map",
"[",
"ctx",
".",
"device_typeid",
"&",
"1",
"]",
"while",
"len",
"(",
"dev_list",
")",
"<=",
"ctx",
".",
"device_id",
":",
"dev_list",
".",
"append",
"(",
"None",
")",
"dev_list",
"[",
"ctx",
".",
"device_id",
"]",
"=",
"i",
"self",
".",
"_data",
"=",
"[",
"data",
".",
"copyto",
"(",
"ctx",
")",
"for",
"ctx",
"in",
"self",
".",
"_ctx_list",
"]",
"self",
".",
"_init_grad",
"(",
")"
] | Sets data and grad. | [
"Sets",
"data",
"and",
"grad",
"."
] | 1af29e9c060a4c7d60eeaacba32afdb9a7775ba7 | https://github.com/apache/incubator-mxnet/blob/1af29e9c060a4c7d60eeaacba32afdb9a7775ba7/python/mxnet/gluon/parameter.py#L287-L298 |
24,076 | apache/incubator-mxnet | python/mxnet/gluon/parameter.py | Parameter._init_grad | def _init_grad(self):
"""Initialize grad buffers."""
if self.grad_req == 'null':
self._grad = None
return
self._grad = [ndarray.zeros(shape=i.shape, dtype=i.dtype, ctx=i.context,
stype=self._grad_stype) for i in self._data]
autograd.mark_variables(self._check_and_get(self._data, list),
self._grad, self.grad_req) | python | def _init_grad(self):
"""Initialize grad buffers."""
if self.grad_req == 'null':
self._grad = None
return
self._grad = [ndarray.zeros(shape=i.shape, dtype=i.dtype, ctx=i.context,
stype=self._grad_stype) for i in self._data]
autograd.mark_variables(self._check_and_get(self._data, list),
self._grad, self.grad_req) | [
"def",
"_init_grad",
"(",
"self",
")",
":",
"if",
"self",
".",
"grad_req",
"==",
"'null'",
":",
"self",
".",
"_grad",
"=",
"None",
"return",
"self",
".",
"_grad",
"=",
"[",
"ndarray",
".",
"zeros",
"(",
"shape",
"=",
"i",
".",
"shape",
",",
"dtype",
"=",
"i",
".",
"dtype",
",",
"ctx",
"=",
"i",
".",
"context",
",",
"stype",
"=",
"self",
".",
"_grad_stype",
")",
"for",
"i",
"in",
"self",
".",
"_data",
"]",
"autograd",
".",
"mark_variables",
"(",
"self",
".",
"_check_and_get",
"(",
"self",
".",
"_data",
",",
"list",
")",
",",
"self",
".",
"_grad",
",",
"self",
".",
"grad_req",
")"
] | Initialize grad buffers. | [
"Initialize",
"grad",
"buffers",
"."
] | 1af29e9c060a4c7d60eeaacba32afdb9a7775ba7 | https://github.com/apache/incubator-mxnet/blob/1af29e9c060a4c7d60eeaacba32afdb9a7775ba7/python/mxnet/gluon/parameter.py#L300-L310 |
def _reduce(self):
    """Reduce data from multiple context to cpu."""
    ctx = context.cpu()
    if self._stype == 'default':
        # Average the per-device copies on CPU.
        copies = self.list_data()
        data = ndarray.add_n(*(arr.copyto(ctx) for arr in copies)) / len(copies)
    else:
        # Row-sparse parameters live on the trainer/kvstore; pull every row.
        all_row_ids = ndarray.arange(0, self.shape[0], dtype='int64', ctx=ctx)
        data = ndarray.zeros(self.shape, stype='row_sparse', ctx=ctx)
        self._trainer._row_sparse_pull(self, data, all_row_ids, full_idx=True)
    return data
"def",
"_reduce",
"(",
"self",
")",
":",
"ctx",
"=",
"context",
".",
"cpu",
"(",
")",
"if",
"self",
".",
"_stype",
"==",
"'default'",
":",
"block",
"=",
"self",
".",
"list_data",
"(",
")",
"data",
"=",
"ndarray",
".",
"add_n",
"(",
"*",
"(",
"w",
".",
"copyto",
"(",
"ctx",
")",
"for",
"w",
"in",
"block",
")",
")",
"/",
"len",
"(",
"block",
")",
"else",
":",
"# fetch all rows for 'row_sparse' param",
"all_row_ids",
"=",
"ndarray",
".",
"arange",
"(",
"0",
",",
"self",
".",
"shape",
"[",
"0",
"]",
",",
"dtype",
"=",
"'int64'",
",",
"ctx",
"=",
"ctx",
")",
"data",
"=",
"ndarray",
".",
"zeros",
"(",
"self",
".",
"shape",
",",
"stype",
"=",
"'row_sparse'",
",",
"ctx",
"=",
"ctx",
")",
"self",
".",
"_trainer",
".",
"_row_sparse_pull",
"(",
"self",
",",
"data",
",",
"all_row_ids",
",",
"full_idx",
"=",
"True",
")",
"return",
"data"
] | Reduce data from multiple context to cpu. | [
"Reduce",
"data",
"from",
"multiple",
"context",
"to",
"cpu",
"."
] | 1af29e9c060a4c7d60eeaacba32afdb9a7775ba7 | https://github.com/apache/incubator-mxnet/blob/1af29e9c060a4c7d60eeaacba32afdb9a7775ba7/python/mxnet/gluon/parameter.py#L312-L323 |
24,078 | apache/incubator-mxnet | python/mxnet/gluon/parameter.py | Parameter.reset_ctx | def reset_ctx(self, ctx):
"""Re-assign Parameter to other contexts.
Parameters
----------
ctx : Context or list of Context, default ``context.current_context()``.
Assign Parameter to given context. If ctx is a list of Context, a
copy will be made for each context.
"""
if ctx is None:
ctx = [context.current_context()]
if isinstance(ctx, Context):
ctx = [ctx]
if self._data:
data = self._reduce()
with autograd.pause():
self._init_impl(data, ctx)
elif self._deferred_init:
init, _, default_init, data = self._deferred_init
self._deferred_init = (init, ctx, default_init, data)
else:
raise ValueError("Cannot reset context for Parameter '%s' because it "
"has not been initialized."%self.name) | python | def reset_ctx(self, ctx):
"""Re-assign Parameter to other contexts.
Parameters
----------
ctx : Context or list of Context, default ``context.current_context()``.
Assign Parameter to given context. If ctx is a list of Context, a
copy will be made for each context.
"""
if ctx is None:
ctx = [context.current_context()]
if isinstance(ctx, Context):
ctx = [ctx]
if self._data:
data = self._reduce()
with autograd.pause():
self._init_impl(data, ctx)
elif self._deferred_init:
init, _, default_init, data = self._deferred_init
self._deferred_init = (init, ctx, default_init, data)
else:
raise ValueError("Cannot reset context for Parameter '%s' because it "
"has not been initialized."%self.name) | [
"def",
"reset_ctx",
"(",
"self",
",",
"ctx",
")",
":",
"if",
"ctx",
"is",
"None",
":",
"ctx",
"=",
"[",
"context",
".",
"current_context",
"(",
")",
"]",
"if",
"isinstance",
"(",
"ctx",
",",
"Context",
")",
":",
"ctx",
"=",
"[",
"ctx",
"]",
"if",
"self",
".",
"_data",
":",
"data",
"=",
"self",
".",
"_reduce",
"(",
")",
"with",
"autograd",
".",
"pause",
"(",
")",
":",
"self",
".",
"_init_impl",
"(",
"data",
",",
"ctx",
")",
"elif",
"self",
".",
"_deferred_init",
":",
"init",
",",
"_",
",",
"default_init",
",",
"data",
"=",
"self",
".",
"_deferred_init",
"self",
".",
"_deferred_init",
"=",
"(",
"init",
",",
"ctx",
",",
"default_init",
",",
"data",
")",
"else",
":",
"raise",
"ValueError",
"(",
"\"Cannot reset context for Parameter '%s' because it \"",
"\"has not been initialized.\"",
"%",
"self",
".",
"name",
")"
] | Re-assign Parameter to other contexts.
Parameters
----------
ctx : Context or list of Context, default ``context.current_context()``.
Assign Parameter to given context. If ctx is a list of Context, a
copy will be made for each context. | [
"Re",
"-",
"assign",
"Parameter",
"to",
"other",
"contexts",
"."
] | 1af29e9c060a4c7d60eeaacba32afdb9a7775ba7 | https://github.com/apache/incubator-mxnet/blob/1af29e9c060a4c7d60eeaacba32afdb9a7775ba7/python/mxnet/gluon/parameter.py#L393-L415 |
24,079 | apache/incubator-mxnet | python/mxnet/gluon/parameter.py | Parameter.set_data | def set_data(self, data):
"""Sets this parameter's value on all contexts."""
self.shape = data.shape
if self._data is None:
assert self._deferred_init, \
"Parameter '%s' has not been initialized"%self.name
self._deferred_init = self._deferred_init[:3] + (data,)
return
# if update_on_kvstore, we need to make sure the copy stored in kvstore is in sync
if self._trainer and self._trainer._kv_initialized and self._trainer._update_on_kvstore:
if self not in self._trainer._params_to_init:
self._trainer._reset_kvstore()
for arr in self._check_and_get(self._data, list):
arr[:] = data | python | def set_data(self, data):
"""Sets this parameter's value on all contexts."""
self.shape = data.shape
if self._data is None:
assert self._deferred_init, \
"Parameter '%s' has not been initialized"%self.name
self._deferred_init = self._deferred_init[:3] + (data,)
return
# if update_on_kvstore, we need to make sure the copy stored in kvstore is in sync
if self._trainer and self._trainer._kv_initialized and self._trainer._update_on_kvstore:
if self not in self._trainer._params_to_init:
self._trainer._reset_kvstore()
for arr in self._check_and_get(self._data, list):
arr[:] = data | [
"def",
"set_data",
"(",
"self",
",",
"data",
")",
":",
"self",
".",
"shape",
"=",
"data",
".",
"shape",
"if",
"self",
".",
"_data",
"is",
"None",
":",
"assert",
"self",
".",
"_deferred_init",
",",
"\"Parameter '%s' has not been initialized\"",
"%",
"self",
".",
"name",
"self",
".",
"_deferred_init",
"=",
"self",
".",
"_deferred_init",
"[",
":",
"3",
"]",
"+",
"(",
"data",
",",
")",
"return",
"# if update_on_kvstore, we need to make sure the copy stored in kvstore is in sync",
"if",
"self",
".",
"_trainer",
"and",
"self",
".",
"_trainer",
".",
"_kv_initialized",
"and",
"self",
".",
"_trainer",
".",
"_update_on_kvstore",
":",
"if",
"self",
"not",
"in",
"self",
".",
"_trainer",
".",
"_params_to_init",
":",
"self",
".",
"_trainer",
".",
"_reset_kvstore",
"(",
")",
"for",
"arr",
"in",
"self",
".",
"_check_and_get",
"(",
"self",
".",
"_data",
",",
"list",
")",
":",
"arr",
"[",
":",
"]",
"=",
"data"
] | Sets this parameter's value on all contexts. | [
"Sets",
"this",
"parameter",
"s",
"value",
"on",
"all",
"contexts",
"."
] | 1af29e9c060a4c7d60eeaacba32afdb9a7775ba7 | https://github.com/apache/incubator-mxnet/blob/1af29e9c060a4c7d60eeaacba32afdb9a7775ba7/python/mxnet/gluon/parameter.py#L418-L434 |
24,080 | apache/incubator-mxnet | python/mxnet/gluon/parameter.py | Parameter.row_sparse_data | def row_sparse_data(self, row_id):
"""Returns a copy of the 'row_sparse' parameter on the same context as row_id's.
The copy only retains rows whose ids occur in provided row ids.
The parameter must have been initialized on this context before.
Parameters
----------
row_id: NDArray
Row ids to retain for the 'row_sparse' parameter.
Returns
-------
NDArray on row_id's context
"""
if self._stype != 'row_sparse':
raise RuntimeError("Cannot return a copy of Parameter %s via row_sparse_data() " \
"because its storage type is %s. Please use data() instead." \
%(self.name, self._stype))
return self._get_row_sparse(self._data, row_id.context, row_id) | python | def row_sparse_data(self, row_id):
"""Returns a copy of the 'row_sparse' parameter on the same context as row_id's.
The copy only retains rows whose ids occur in provided row ids.
The parameter must have been initialized on this context before.
Parameters
----------
row_id: NDArray
Row ids to retain for the 'row_sparse' parameter.
Returns
-------
NDArray on row_id's context
"""
if self._stype != 'row_sparse':
raise RuntimeError("Cannot return a copy of Parameter %s via row_sparse_data() " \
"because its storage type is %s. Please use data() instead." \
%(self.name, self._stype))
return self._get_row_sparse(self._data, row_id.context, row_id) | [
"def",
"row_sparse_data",
"(",
"self",
",",
"row_id",
")",
":",
"if",
"self",
".",
"_stype",
"!=",
"'row_sparse'",
":",
"raise",
"RuntimeError",
"(",
"\"Cannot return a copy of Parameter %s via row_sparse_data() \"",
"\"because its storage type is %s. Please use data() instead.\"",
"%",
"(",
"self",
".",
"name",
",",
"self",
".",
"_stype",
")",
")",
"return",
"self",
".",
"_get_row_sparse",
"(",
"self",
".",
"_data",
",",
"row_id",
".",
"context",
",",
"row_id",
")"
] | Returns a copy of the 'row_sparse' parameter on the same context as row_id's.
The copy only retains rows whose ids occur in provided row ids.
The parameter must have been initialized on this context before.
Parameters
----------
row_id: NDArray
Row ids to retain for the 'row_sparse' parameter.
Returns
-------
NDArray on row_id's context | [
"Returns",
"a",
"copy",
"of",
"the",
"row_sparse",
"parameter",
"on",
"the",
"same",
"context",
"as",
"row_id",
"s",
".",
"The",
"copy",
"only",
"retains",
"rows",
"whose",
"ids",
"occur",
"in",
"provided",
"row",
"ids",
".",
"The",
"parameter",
"must",
"have",
"been",
"initialized",
"on",
"this",
"context",
"before",
"."
] | 1af29e9c060a4c7d60eeaacba32afdb9a7775ba7 | https://github.com/apache/incubator-mxnet/blob/1af29e9c060a4c7d60eeaacba32afdb9a7775ba7/python/mxnet/gluon/parameter.py#L436-L454 |
24,081 | apache/incubator-mxnet | python/mxnet/gluon/parameter.py | Parameter.list_row_sparse_data | def list_row_sparse_data(self, row_id):
"""Returns copies of the 'row_sparse' parameter on all contexts, in the same order
as creation. The copy only retains rows whose ids occur in provided row ids.
The parameter must have been initialized before.
Parameters
----------
row_id: NDArray
Row ids to retain for the 'row_sparse' parameter.
Returns
-------
list of NDArrays
"""
if self._stype != 'row_sparse':
raise RuntimeError("Cannot return copies of Parameter '%s' on all contexts via " \
"list_row_sparse_data() because its storage type is %s. Please " \
"use data() instead." % (self.name, self._stype))
return self._get_row_sparse(self._data, list, row_id) | python | def list_row_sparse_data(self, row_id):
"""Returns copies of the 'row_sparse' parameter on all contexts, in the same order
as creation. The copy only retains rows whose ids occur in provided row ids.
The parameter must have been initialized before.
Parameters
----------
row_id: NDArray
Row ids to retain for the 'row_sparse' parameter.
Returns
-------
list of NDArrays
"""
if self._stype != 'row_sparse':
raise RuntimeError("Cannot return copies of Parameter '%s' on all contexts via " \
"list_row_sparse_data() because its storage type is %s. Please " \
"use data() instead." % (self.name, self._stype))
return self._get_row_sparse(self._data, list, row_id) | [
"def",
"list_row_sparse_data",
"(",
"self",
",",
"row_id",
")",
":",
"if",
"self",
".",
"_stype",
"!=",
"'row_sparse'",
":",
"raise",
"RuntimeError",
"(",
"\"Cannot return copies of Parameter '%s' on all contexts via \"",
"\"list_row_sparse_data() because its storage type is %s. Please \"",
"\"use data() instead.\"",
"%",
"(",
"self",
".",
"name",
",",
"self",
".",
"_stype",
")",
")",
"return",
"self",
".",
"_get_row_sparse",
"(",
"self",
".",
"_data",
",",
"list",
",",
"row_id",
")"
] | Returns copies of the 'row_sparse' parameter on all contexts, in the same order
as creation. The copy only retains rows whose ids occur in provided row ids.
The parameter must have been initialized before.
Parameters
----------
row_id: NDArray
Row ids to retain for the 'row_sparse' parameter.
Returns
-------
list of NDArrays | [
"Returns",
"copies",
"of",
"the",
"row_sparse",
"parameter",
"on",
"all",
"contexts",
"in",
"the",
"same",
"order",
"as",
"creation",
".",
"The",
"copy",
"only",
"retains",
"rows",
"whose",
"ids",
"occur",
"in",
"provided",
"row",
"ids",
".",
"The",
"parameter",
"must",
"have",
"been",
"initialized",
"before",
"."
] | 1af29e9c060a4c7d60eeaacba32afdb9a7775ba7 | https://github.com/apache/incubator-mxnet/blob/1af29e9c060a4c7d60eeaacba32afdb9a7775ba7/python/mxnet/gluon/parameter.py#L456-L474 |
24,082 | apache/incubator-mxnet | python/mxnet/gluon/parameter.py | Parameter.grad | def grad(self, ctx=None):
"""Returns a gradient buffer for this parameter on one context.
Parameters
----------
ctx : Context
Desired context.
"""
if self._data is not None and self._grad is None:
raise RuntimeError(
"Cannot get gradient array for Parameter '%s' " \
"because grad_req='null'"%(self.name))
return self._check_and_get(self._grad, ctx) | python | def grad(self, ctx=None):
"""Returns a gradient buffer for this parameter on one context.
Parameters
----------
ctx : Context
Desired context.
"""
if self._data is not None and self._grad is None:
raise RuntimeError(
"Cannot get gradient array for Parameter '%s' " \
"because grad_req='null'"%(self.name))
return self._check_and_get(self._grad, ctx) | [
"def",
"grad",
"(",
"self",
",",
"ctx",
"=",
"None",
")",
":",
"if",
"self",
".",
"_data",
"is",
"not",
"None",
"and",
"self",
".",
"_grad",
"is",
"None",
":",
"raise",
"RuntimeError",
"(",
"\"Cannot get gradient array for Parameter '%s' \"",
"\"because grad_req='null'\"",
"%",
"(",
"self",
".",
"name",
")",
")",
"return",
"self",
".",
"_check_and_get",
"(",
"self",
".",
"_grad",
",",
"ctx",
")"
] | Returns a gradient buffer for this parameter on one context.
Parameters
----------
ctx : Context
Desired context. | [
"Returns",
"a",
"gradient",
"buffer",
"for",
"this",
"parameter",
"on",
"one",
"context",
"."
] | 1af29e9c060a4c7d60eeaacba32afdb9a7775ba7 | https://github.com/apache/incubator-mxnet/blob/1af29e9c060a4c7d60eeaacba32afdb9a7775ba7/python/mxnet/gluon/parameter.py#L511-L523 |
24,083 | apache/incubator-mxnet | python/mxnet/gluon/parameter.py | Parameter.list_ctx | def list_ctx(self):
"""Returns a list of contexts this parameter is initialized on."""
if self._data is None:
if self._deferred_init:
return self._deferred_init[1]
raise RuntimeError("Parameter '%s' has not been initialized"%self.name)
return self._ctx_list | python | def list_ctx(self):
"""Returns a list of contexts this parameter is initialized on."""
if self._data is None:
if self._deferred_init:
return self._deferred_init[1]
raise RuntimeError("Parameter '%s' has not been initialized"%self.name)
return self._ctx_list | [
"def",
"list_ctx",
"(",
"self",
")",
":",
"if",
"self",
".",
"_data",
"is",
"None",
":",
"if",
"self",
".",
"_deferred_init",
":",
"return",
"self",
".",
"_deferred_init",
"[",
"1",
"]",
"raise",
"RuntimeError",
"(",
"\"Parameter '%s' has not been initialized\"",
"%",
"self",
".",
"name",
")",
"return",
"self",
".",
"_ctx_list"
] | Returns a list of contexts this parameter is initialized on. | [
"Returns",
"a",
"list",
"of",
"contexts",
"this",
"parameter",
"is",
"initialized",
"on",
"."
] | 1af29e9c060a4c7d60eeaacba32afdb9a7775ba7 | https://github.com/apache/incubator-mxnet/blob/1af29e9c060a4c7d60eeaacba32afdb9a7775ba7/python/mxnet/gluon/parameter.py#L534-L540 |
24,084 | apache/incubator-mxnet | python/mxnet/gluon/parameter.py | Parameter.zero_grad | def zero_grad(self):
"""Sets gradient buffer on all contexts to 0. No action is taken if
parameter is uninitialized or doesn't require gradient."""
if self._grad is None:
return
for i in self._grad:
ndarray.zeros_like(i, out=i) | python | def zero_grad(self):
"""Sets gradient buffer on all contexts to 0. No action is taken if
parameter is uninitialized or doesn't require gradient."""
if self._grad is None:
return
for i in self._grad:
ndarray.zeros_like(i, out=i) | [
"def",
"zero_grad",
"(",
"self",
")",
":",
"if",
"self",
".",
"_grad",
"is",
"None",
":",
"return",
"for",
"i",
"in",
"self",
".",
"_grad",
":",
"ndarray",
".",
"zeros_like",
"(",
"i",
",",
"out",
"=",
"i",
")"
] | Sets gradient buffer on all contexts to 0. No action is taken if
parameter is uninitialized or doesn't require gradient. | [
"Sets",
"gradient",
"buffer",
"on",
"all",
"contexts",
"to",
"0",
".",
"No",
"action",
"is",
"taken",
"if",
"parameter",
"is",
"uninitialized",
"or",
"doesn",
"t",
"require",
"gradient",
"."
] | 1af29e9c060a4c7d60eeaacba32afdb9a7775ba7 | https://github.com/apache/incubator-mxnet/blob/1af29e9c060a4c7d60eeaacba32afdb9a7775ba7/python/mxnet/gluon/parameter.py#L542-L548 |
24,085 | apache/incubator-mxnet | python/mxnet/gluon/parameter.py | Parameter.var | def var(self):
"""Returns a symbol representing this parameter."""
if self._var is None:
self._var = symbol.var(self.name, shape=self.shape, dtype=self.dtype,
lr_mult=self.lr_mult, wd_mult=self.wd_mult,
init=self.init, stype=self._stype)
return self._var | python | def var(self):
"""Returns a symbol representing this parameter."""
if self._var is None:
self._var = symbol.var(self.name, shape=self.shape, dtype=self.dtype,
lr_mult=self.lr_mult, wd_mult=self.wd_mult,
init=self.init, stype=self._stype)
return self._var | [
"def",
"var",
"(",
"self",
")",
":",
"if",
"self",
".",
"_var",
"is",
"None",
":",
"self",
".",
"_var",
"=",
"symbol",
".",
"var",
"(",
"self",
".",
"name",
",",
"shape",
"=",
"self",
".",
"shape",
",",
"dtype",
"=",
"self",
".",
"dtype",
",",
"lr_mult",
"=",
"self",
".",
"lr_mult",
",",
"wd_mult",
"=",
"self",
".",
"wd_mult",
",",
"init",
"=",
"self",
".",
"init",
",",
"stype",
"=",
"self",
".",
"_stype",
")",
"return",
"self",
".",
"_var"
] | Returns a symbol representing this parameter. | [
"Returns",
"a",
"symbol",
"representing",
"this",
"parameter",
"."
] | 1af29e9c060a4c7d60eeaacba32afdb9a7775ba7 | https://github.com/apache/incubator-mxnet/blob/1af29e9c060a4c7d60eeaacba32afdb9a7775ba7/python/mxnet/gluon/parameter.py#L550-L556 |
24,086 | apache/incubator-mxnet | python/mxnet/gluon/parameter.py | Parameter.cast | def cast(self, dtype):
"""Cast data and gradient of this Parameter to a new data type.
Parameters
----------
dtype : str or numpy.dtype
The new data type.
"""
self.dtype = dtype
if self._data is None:
return
with autograd.pause():
self._data = [i.astype(dtype) for i in self._data]
if self._grad is None:
return
self._grad = [i.astype(dtype) for i in self._grad]
autograd.mark_variables(self._data, self._grad, self.grad_req) | python | def cast(self, dtype):
"""Cast data and gradient of this Parameter to a new data type.
Parameters
----------
dtype : str or numpy.dtype
The new data type.
"""
self.dtype = dtype
if self._data is None:
return
with autograd.pause():
self._data = [i.astype(dtype) for i in self._data]
if self._grad is None:
return
self._grad = [i.astype(dtype) for i in self._grad]
autograd.mark_variables(self._data, self._grad, self.grad_req) | [
"def",
"cast",
"(",
"self",
",",
"dtype",
")",
":",
"self",
".",
"dtype",
"=",
"dtype",
"if",
"self",
".",
"_data",
"is",
"None",
":",
"return",
"with",
"autograd",
".",
"pause",
"(",
")",
":",
"self",
".",
"_data",
"=",
"[",
"i",
".",
"astype",
"(",
"dtype",
")",
"for",
"i",
"in",
"self",
".",
"_data",
"]",
"if",
"self",
".",
"_grad",
"is",
"None",
":",
"return",
"self",
".",
"_grad",
"=",
"[",
"i",
".",
"astype",
"(",
"dtype",
")",
"for",
"i",
"in",
"self",
".",
"_grad",
"]",
"autograd",
".",
"mark_variables",
"(",
"self",
".",
"_data",
",",
"self",
".",
"_grad",
",",
"self",
".",
"grad_req",
")"
] | Cast data and gradient of this Parameter to a new data type.
Parameters
----------
dtype : str or numpy.dtype
The new data type. | [
"Cast",
"data",
"and",
"gradient",
"of",
"this",
"Parameter",
"to",
"a",
"new",
"data",
"type",
"."
] | 1af29e9c060a4c7d60eeaacba32afdb9a7775ba7 | https://github.com/apache/incubator-mxnet/blob/1af29e9c060a4c7d60eeaacba32afdb9a7775ba7/python/mxnet/gluon/parameter.py#L558-L574 |
24,087 | apache/incubator-mxnet | python/mxnet/gluon/parameter.py | ParameterDict.update | def update(self, other):
"""Copies all Parameters in ``other`` to self."""
for k, v in other.items():
if k in self._params:
assert self._params[k] is v, \
"Cannot update self with other because they have different " \
"Parameters with the same name '%s'"%k
for k, v in other.items():
self._params[k] = v | python | def update(self, other):
"""Copies all Parameters in ``other`` to self."""
for k, v in other.items():
if k in self._params:
assert self._params[k] is v, \
"Cannot update self with other because they have different " \
"Parameters with the same name '%s'"%k
for k, v in other.items():
self._params[k] = v | [
"def",
"update",
"(",
"self",
",",
"other",
")",
":",
"for",
"k",
",",
"v",
"in",
"other",
".",
"items",
"(",
")",
":",
"if",
"k",
"in",
"self",
".",
"_params",
":",
"assert",
"self",
".",
"_params",
"[",
"k",
"]",
"is",
"v",
",",
"\"Cannot update self with other because they have different \"",
"\"Parameters with the same name '%s'\"",
"%",
"k",
"for",
"k",
",",
"v",
"in",
"other",
".",
"items",
"(",
")",
":",
"self",
".",
"_params",
"[",
"k",
"]",
"=",
"v"
] | Copies all Parameters in ``other`` to self. | [
"Copies",
"all",
"Parameters",
"in",
"other",
"to",
"self",
"."
] | 1af29e9c060a4c7d60eeaacba32afdb9a7775ba7 | https://github.com/apache/incubator-mxnet/blob/1af29e9c060a4c7d60eeaacba32afdb9a7775ba7/python/mxnet/gluon/parameter.py#L782-L791 |
24,088 | apache/incubator-mxnet | python/mxnet/gluon/parameter.py | ParameterDict.setattr | def setattr(self, name, value):
"""Set an attribute to a new value for all Parameters.
For example, set grad_req to null if you don't need gradient w.r.t a
model's Parameters::
model.collect_params().setattr('grad_req', 'null')
or change the learning rate multiplier::
model.collect_params().setattr('lr_mult', 0.5)
Parameters
----------
name : str
Name of the attribute.
value : valid type for attribute name
The new value for the attribute.
"""
for i in self.values():
setattr(i, name, value) | python | def setattr(self, name, value):
"""Set an attribute to a new value for all Parameters.
For example, set grad_req to null if you don't need gradient w.r.t a
model's Parameters::
model.collect_params().setattr('grad_req', 'null')
or change the learning rate multiplier::
model.collect_params().setattr('lr_mult', 0.5)
Parameters
----------
name : str
Name of the attribute.
value : valid type for attribute name
The new value for the attribute.
"""
for i in self.values():
setattr(i, name, value) | [
"def",
"setattr",
"(",
"self",
",",
"name",
",",
"value",
")",
":",
"for",
"i",
"in",
"self",
".",
"values",
"(",
")",
":",
"setattr",
"(",
"i",
",",
"name",
",",
"value",
")"
] | Set an attribute to a new value for all Parameters.
For example, set grad_req to null if you don't need gradient w.r.t a
model's Parameters::
model.collect_params().setattr('grad_req', 'null')
or change the learning rate multiplier::
model.collect_params().setattr('lr_mult', 0.5)
Parameters
----------
name : str
Name of the attribute.
value : valid type for attribute name
The new value for the attribute. | [
"Set",
"an",
"attribute",
"to",
"a",
"new",
"value",
"for",
"all",
"Parameters",
"."
] | 1af29e9c060a4c7d60eeaacba32afdb9a7775ba7 | https://github.com/apache/incubator-mxnet/blob/1af29e9c060a4c7d60eeaacba32afdb9a7775ba7/python/mxnet/gluon/parameter.py#L832-L852 |
24,089 | apache/incubator-mxnet | python/mxnet/gluon/parameter.py | ParameterDict.save | def save(self, filename, strip_prefix=''):
"""Save parameters to file.
Parameters
----------
filename : str
Path to parameter file.
strip_prefix : str, default ''
Strip prefix from parameter names before saving.
"""
arg_dict = {}
for param in self.values():
weight = param._reduce()
if not param.name.startswith(strip_prefix):
raise ValueError(
"Prefix '%s' is to be striped before saving, but Parameter's "
"name '%s' does not start with '%s'. "
"this may be due to your Block shares parameters from other "
"Blocks or you forgot to use 'with name_scope()' when creating "
"child blocks. For more info on naming, please see "
"http://mxnet.incubator.apache.org/tutorials/basic/naming.html"%(
strip_prefix, param.name, strip_prefix))
arg_dict[param.name[len(strip_prefix):]] = weight
ndarray.save(filename, arg_dict) | python | def save(self, filename, strip_prefix=''):
"""Save parameters to file.
Parameters
----------
filename : str
Path to parameter file.
strip_prefix : str, default ''
Strip prefix from parameter names before saving.
"""
arg_dict = {}
for param in self.values():
weight = param._reduce()
if not param.name.startswith(strip_prefix):
raise ValueError(
"Prefix '%s' is to be striped before saving, but Parameter's "
"name '%s' does not start with '%s'. "
"this may be due to your Block shares parameters from other "
"Blocks or you forgot to use 'with name_scope()' when creating "
"child blocks. For more info on naming, please see "
"http://mxnet.incubator.apache.org/tutorials/basic/naming.html"%(
strip_prefix, param.name, strip_prefix))
arg_dict[param.name[len(strip_prefix):]] = weight
ndarray.save(filename, arg_dict) | [
"def",
"save",
"(",
"self",
",",
"filename",
",",
"strip_prefix",
"=",
"''",
")",
":",
"arg_dict",
"=",
"{",
"}",
"for",
"param",
"in",
"self",
".",
"values",
"(",
")",
":",
"weight",
"=",
"param",
".",
"_reduce",
"(",
")",
"if",
"not",
"param",
".",
"name",
".",
"startswith",
"(",
"strip_prefix",
")",
":",
"raise",
"ValueError",
"(",
"\"Prefix '%s' is to be striped before saving, but Parameter's \"",
"\"name '%s' does not start with '%s'. \"",
"\"this may be due to your Block shares parameters from other \"",
"\"Blocks or you forgot to use 'with name_scope()' when creating \"",
"\"child blocks. For more info on naming, please see \"",
"\"http://mxnet.incubator.apache.org/tutorials/basic/naming.html\"",
"%",
"(",
"strip_prefix",
",",
"param",
".",
"name",
",",
"strip_prefix",
")",
")",
"arg_dict",
"[",
"param",
".",
"name",
"[",
"len",
"(",
"strip_prefix",
")",
":",
"]",
"]",
"=",
"weight",
"ndarray",
".",
"save",
"(",
"filename",
",",
"arg_dict",
")"
] | Save parameters to file.
Parameters
----------
filename : str
Path to parameter file.
strip_prefix : str, default ''
Strip prefix from parameter names before saving. | [
"Save",
"parameters",
"to",
"file",
"."
] | 1af29e9c060a4c7d60eeaacba32afdb9a7775ba7 | https://github.com/apache/incubator-mxnet/blob/1af29e9c060a4c7d60eeaacba32afdb9a7775ba7/python/mxnet/gluon/parameter.py#L854-L877 |
24,090 | apache/incubator-mxnet | python/mxnet/torch.py | _init_torch_module | def _init_torch_module():
"""List and add all the torch backed ndarray functions to current module."""
plist = ctypes.POINTER(FunctionHandle)()
size = ctypes.c_uint()
check_call(_LIB.MXListFunctions(ctypes.byref(size),
ctypes.byref(plist)))
module_obj = sys.modules[__name__]
for i in range(size.value):
hdl = FunctionHandle(plist[i])
function = _make_torch_function(hdl)
# if function name starts with underscore, register as static method of NDArray
if function is not None:
setattr(module_obj, function.__name__, function) | python | def _init_torch_module():
"""List and add all the torch backed ndarray functions to current module."""
plist = ctypes.POINTER(FunctionHandle)()
size = ctypes.c_uint()
check_call(_LIB.MXListFunctions(ctypes.byref(size),
ctypes.byref(plist)))
module_obj = sys.modules[__name__]
for i in range(size.value):
hdl = FunctionHandle(plist[i])
function = _make_torch_function(hdl)
# if function name starts with underscore, register as static method of NDArray
if function is not None:
setattr(module_obj, function.__name__, function) | [
"def",
"_init_torch_module",
"(",
")",
":",
"plist",
"=",
"ctypes",
".",
"POINTER",
"(",
"FunctionHandle",
")",
"(",
")",
"size",
"=",
"ctypes",
".",
"c_uint",
"(",
")",
"check_call",
"(",
"_LIB",
".",
"MXListFunctions",
"(",
"ctypes",
".",
"byref",
"(",
"size",
")",
",",
"ctypes",
".",
"byref",
"(",
"plist",
")",
")",
")",
"module_obj",
"=",
"sys",
".",
"modules",
"[",
"__name__",
"]",
"for",
"i",
"in",
"range",
"(",
"size",
".",
"value",
")",
":",
"hdl",
"=",
"FunctionHandle",
"(",
"plist",
"[",
"i",
"]",
")",
"function",
"=",
"_make_torch_function",
"(",
"hdl",
")",
"# if function name starts with underscore, register as static method of NDArray",
"if",
"function",
"is",
"not",
"None",
":",
"setattr",
"(",
"module_obj",
",",
"function",
".",
"__name__",
",",
"function",
")"
] | List and add all the torch backed ndarray functions to current module. | [
"List",
"and",
"add",
"all",
"the",
"torch",
"backed",
"ndarray",
"functions",
"to",
"current",
"module",
"."
] | 1af29e9c060a4c7d60eeaacba32afdb9a7775ba7 | https://github.com/apache/incubator-mxnet/blob/1af29e9c060a4c7d60eeaacba32afdb9a7775ba7/python/mxnet/torch.py#L167-L180 |
24,091 | apache/incubator-mxnet | python/mxnet/recordio.py | pack | def pack(header, s):
"""Pack a string into MXImageRecord.
Parameters
----------
header : IRHeader
Header of the image record.
``header.label`` can be a number or an array. See more detail in ``IRHeader``.
s : str
Raw image string to be packed.
Returns
-------
s : str
The packed string.
Examples
--------
>>> label = 4 # label can also be a 1-D array, for example: label = [1,2,3]
>>> id = 2574
>>> header = mx.recordio.IRHeader(0, label, id, 0)
>>> with open(path, 'r') as file:
... s = file.read()
>>> packed_s = mx.recordio.pack(header, s)
"""
header = IRHeader(*header)
if isinstance(header.label, numbers.Number):
header = header._replace(flag=0)
else:
label = np.asarray(header.label, dtype=np.float32)
header = header._replace(flag=label.size, label=0)
s = label.tostring() + s
s = struct.pack(_IR_FORMAT, *header) + s
return s | python | def pack(header, s):
"""Pack a string into MXImageRecord.
Parameters
----------
header : IRHeader
Header of the image record.
``header.label`` can be a number or an array. See more detail in ``IRHeader``.
s : str
Raw image string to be packed.
Returns
-------
s : str
The packed string.
Examples
--------
>>> label = 4 # label can also be a 1-D array, for example: label = [1,2,3]
>>> id = 2574
>>> header = mx.recordio.IRHeader(0, label, id, 0)
>>> with open(path, 'r') as file:
... s = file.read()
>>> packed_s = mx.recordio.pack(header, s)
"""
header = IRHeader(*header)
if isinstance(header.label, numbers.Number):
header = header._replace(flag=0)
else:
label = np.asarray(header.label, dtype=np.float32)
header = header._replace(flag=label.size, label=0)
s = label.tostring() + s
s = struct.pack(_IR_FORMAT, *header) + s
return s | [
"def",
"pack",
"(",
"header",
",",
"s",
")",
":",
"header",
"=",
"IRHeader",
"(",
"*",
"header",
")",
"if",
"isinstance",
"(",
"header",
".",
"label",
",",
"numbers",
".",
"Number",
")",
":",
"header",
"=",
"header",
".",
"_replace",
"(",
"flag",
"=",
"0",
")",
"else",
":",
"label",
"=",
"np",
".",
"asarray",
"(",
"header",
".",
"label",
",",
"dtype",
"=",
"np",
".",
"float32",
")",
"header",
"=",
"header",
".",
"_replace",
"(",
"flag",
"=",
"label",
".",
"size",
",",
"label",
"=",
"0",
")",
"s",
"=",
"label",
".",
"tostring",
"(",
")",
"+",
"s",
"s",
"=",
"struct",
".",
"pack",
"(",
"_IR_FORMAT",
",",
"*",
"header",
")",
"+",
"s",
"return",
"s"
] | Pack a string into MXImageRecord.
Parameters
----------
header : IRHeader
Header of the image record.
``header.label`` can be a number or an array. See more detail in ``IRHeader``.
s : str
Raw image string to be packed.
Returns
-------
s : str
The packed string.
Examples
--------
>>> label = 4 # label can also be a 1-D array, for example: label = [1,2,3]
>>> id = 2574
>>> header = mx.recordio.IRHeader(0, label, id, 0)
>>> with open(path, 'r') as file:
... s = file.read()
>>> packed_s = mx.recordio.pack(header, s) | [
"Pack",
"a",
"string",
"into",
"MXImageRecord",
"."
] | 1af29e9c060a4c7d60eeaacba32afdb9a7775ba7 | https://github.com/apache/incubator-mxnet/blob/1af29e9c060a4c7d60eeaacba32afdb9a7775ba7/python/mxnet/recordio.py#L358-L391 |
24,092 | apache/incubator-mxnet | python/mxnet/recordio.py | unpack | def unpack(s):
"""Unpack a MXImageRecord to string.
Parameters
----------
s : str
String buffer from ``MXRecordIO.read``.
Returns
-------
header : IRHeader
Header of the image record.
s : str
Unpacked string.
Examples
--------
>>> record = mx.recordio.MXRecordIO('test.rec', 'r')
>>> item = record.read()
>>> header, s = mx.recordio.unpack(item)
>>> header
HEADER(flag=0, label=14.0, id=20129312, id2=0)
"""
header = IRHeader(*struct.unpack(_IR_FORMAT, s[:_IR_SIZE]))
s = s[_IR_SIZE:]
if header.flag > 0:
header = header._replace(label=np.frombuffer(s, np.float32, header.flag))
s = s[header.flag*4:]
return header, s | python | def unpack(s):
"""Unpack a MXImageRecord to string.
Parameters
----------
s : str
String buffer from ``MXRecordIO.read``.
Returns
-------
header : IRHeader
Header of the image record.
s : str
Unpacked string.
Examples
--------
>>> record = mx.recordio.MXRecordIO('test.rec', 'r')
>>> item = record.read()
>>> header, s = mx.recordio.unpack(item)
>>> header
HEADER(flag=0, label=14.0, id=20129312, id2=0)
"""
header = IRHeader(*struct.unpack(_IR_FORMAT, s[:_IR_SIZE]))
s = s[_IR_SIZE:]
if header.flag > 0:
header = header._replace(label=np.frombuffer(s, np.float32, header.flag))
s = s[header.flag*4:]
return header, s | [
"def",
"unpack",
"(",
"s",
")",
":",
"header",
"=",
"IRHeader",
"(",
"*",
"struct",
".",
"unpack",
"(",
"_IR_FORMAT",
",",
"s",
"[",
":",
"_IR_SIZE",
"]",
")",
")",
"s",
"=",
"s",
"[",
"_IR_SIZE",
":",
"]",
"if",
"header",
".",
"flag",
">",
"0",
":",
"header",
"=",
"header",
".",
"_replace",
"(",
"label",
"=",
"np",
".",
"frombuffer",
"(",
"s",
",",
"np",
".",
"float32",
",",
"header",
".",
"flag",
")",
")",
"s",
"=",
"s",
"[",
"header",
".",
"flag",
"*",
"4",
":",
"]",
"return",
"header",
",",
"s"
] | Unpack a MXImageRecord to string.
Parameters
----------
s : str
String buffer from ``MXRecordIO.read``.
Returns
-------
header : IRHeader
Header of the image record.
s : str
Unpacked string.
Examples
--------
>>> record = mx.recordio.MXRecordIO('test.rec', 'r')
>>> item = record.read()
>>> header, s = mx.recordio.unpack(item)
>>> header
HEADER(flag=0, label=14.0, id=20129312, id2=0) | [
"Unpack",
"a",
"MXImageRecord",
"to",
"string",
"."
] | 1af29e9c060a4c7d60eeaacba32afdb9a7775ba7 | https://github.com/apache/incubator-mxnet/blob/1af29e9c060a4c7d60eeaacba32afdb9a7775ba7/python/mxnet/recordio.py#L393-L421 |
24,093 | apache/incubator-mxnet | python/mxnet/recordio.py | unpack_img | def unpack_img(s, iscolor=-1):
"""Unpack a MXImageRecord to image.
Parameters
----------
s : str
String buffer from ``MXRecordIO.read``.
iscolor : int
Image format option for ``cv2.imdecode``.
Returns
-------
header : IRHeader
Header of the image record.
img : numpy.ndarray
Unpacked image.
Examples
--------
>>> record = mx.recordio.MXRecordIO('test.rec', 'r')
>>> item = record.read()
>>> header, img = mx.recordio.unpack_img(item)
>>> header
HEADER(flag=0, label=14.0, id=20129312, id2=0)
>>> img
array([[[ 23, 27, 45],
[ 28, 32, 50],
...,
[ 36, 40, 59],
[ 35, 39, 58]],
...,
[[ 91, 92, 113],
[ 97, 98, 119],
...,
[168, 169, 167],
[166, 167, 165]]], dtype=uint8)
"""
header, s = unpack(s)
img = np.frombuffer(s, dtype=np.uint8)
assert cv2 is not None
img = cv2.imdecode(img, iscolor)
return header, img | python | def unpack_img(s, iscolor=-1):
"""Unpack a MXImageRecord to image.
Parameters
----------
s : str
String buffer from ``MXRecordIO.read``.
iscolor : int
Image format option for ``cv2.imdecode``.
Returns
-------
header : IRHeader
Header of the image record.
img : numpy.ndarray
Unpacked image.
Examples
--------
>>> record = mx.recordio.MXRecordIO('test.rec', 'r')
>>> item = record.read()
>>> header, img = mx.recordio.unpack_img(item)
>>> header
HEADER(flag=0, label=14.0, id=20129312, id2=0)
>>> img
array([[[ 23, 27, 45],
[ 28, 32, 50],
...,
[ 36, 40, 59],
[ 35, 39, 58]],
...,
[[ 91, 92, 113],
[ 97, 98, 119],
...,
[168, 169, 167],
[166, 167, 165]]], dtype=uint8)
"""
header, s = unpack(s)
img = np.frombuffer(s, dtype=np.uint8)
assert cv2 is not None
img = cv2.imdecode(img, iscolor)
return header, img | [
"def",
"unpack_img",
"(",
"s",
",",
"iscolor",
"=",
"-",
"1",
")",
":",
"header",
",",
"s",
"=",
"unpack",
"(",
"s",
")",
"img",
"=",
"np",
".",
"frombuffer",
"(",
"s",
",",
"dtype",
"=",
"np",
".",
"uint8",
")",
"assert",
"cv2",
"is",
"not",
"None",
"img",
"=",
"cv2",
".",
"imdecode",
"(",
"img",
",",
"iscolor",
")",
"return",
"header",
",",
"img"
] | Unpack a MXImageRecord to image.
Parameters
----------
s : str
String buffer from ``MXRecordIO.read``.
iscolor : int
Image format option for ``cv2.imdecode``.
Returns
-------
header : IRHeader
Header of the image record.
img : numpy.ndarray
Unpacked image.
Examples
--------
>>> record = mx.recordio.MXRecordIO('test.rec', 'r')
>>> item = record.read()
>>> header, img = mx.recordio.unpack_img(item)
>>> header
HEADER(flag=0, label=14.0, id=20129312, id2=0)
>>> img
array([[[ 23, 27, 45],
[ 28, 32, 50],
...,
[ 36, 40, 59],
[ 35, 39, 58]],
...,
[[ 91, 92, 113],
[ 97, 98, 119],
...,
[168, 169, 167],
[166, 167, 165]]], dtype=uint8) | [
"Unpack",
"a",
"MXImageRecord",
"to",
"image",
"."
] | 1af29e9c060a4c7d60eeaacba32afdb9a7775ba7 | https://github.com/apache/incubator-mxnet/blob/1af29e9c060a4c7d60eeaacba32afdb9a7775ba7/python/mxnet/recordio.py#L423-L464 |
24,094 | apache/incubator-mxnet | python/mxnet/recordio.py | pack_img | def pack_img(header, img, quality=95, img_fmt='.jpg'):
"""Pack an image into ``MXImageRecord``.
Parameters
----------
header : IRHeader
Header of the image record.
``header.label`` can be a number or an array. See more detail in ``IRHeader``.
img : numpy.ndarray
Image to be packed.
quality : int
Quality for JPEG encoding in range 1-100, or compression for PNG encoding in range 1-9.
img_fmt : str
Encoding of the image (.jpg for JPEG, .png for PNG).
Returns
-------
s : str
The packed string.
Examples
--------
>>> label = 4 # label can also be a 1-D array, for example: label = [1,2,3]
>>> id = 2574
>>> header = mx.recordio.IRHeader(0, label, id, 0)
>>> img = cv2.imread('test.jpg')
>>> packed_s = mx.recordio.pack_img(header, img)
"""
assert cv2 is not None
jpg_formats = ['.JPG', '.JPEG']
png_formats = ['.PNG']
encode_params = None
if img_fmt.upper() in jpg_formats:
encode_params = [cv2.IMWRITE_JPEG_QUALITY, quality]
elif img_fmt.upper() in png_formats:
encode_params = [cv2.IMWRITE_PNG_COMPRESSION, quality]
ret, buf = cv2.imencode(img_fmt, img, encode_params)
assert ret, 'failed to encode image'
return pack(header, buf.tostring()) | python | def pack_img(header, img, quality=95, img_fmt='.jpg'):
"""Pack an image into ``MXImageRecord``.
Parameters
----------
header : IRHeader
Header of the image record.
``header.label`` can be a number or an array. See more detail in ``IRHeader``.
img : numpy.ndarray
Image to be packed.
quality : int
Quality for JPEG encoding in range 1-100, or compression for PNG encoding in range 1-9.
img_fmt : str
Encoding of the image (.jpg for JPEG, .png for PNG).
Returns
-------
s : str
The packed string.
Examples
--------
>>> label = 4 # label can also be a 1-D array, for example: label = [1,2,3]
>>> id = 2574
>>> header = mx.recordio.IRHeader(0, label, id, 0)
>>> img = cv2.imread('test.jpg')
>>> packed_s = mx.recordio.pack_img(header, img)
"""
assert cv2 is not None
jpg_formats = ['.JPG', '.JPEG']
png_formats = ['.PNG']
encode_params = None
if img_fmt.upper() in jpg_formats:
encode_params = [cv2.IMWRITE_JPEG_QUALITY, quality]
elif img_fmt.upper() in png_formats:
encode_params = [cv2.IMWRITE_PNG_COMPRESSION, quality]
ret, buf = cv2.imencode(img_fmt, img, encode_params)
assert ret, 'failed to encode image'
return pack(header, buf.tostring()) | [
"def",
"pack_img",
"(",
"header",
",",
"img",
",",
"quality",
"=",
"95",
",",
"img_fmt",
"=",
"'.jpg'",
")",
":",
"assert",
"cv2",
"is",
"not",
"None",
"jpg_formats",
"=",
"[",
"'.JPG'",
",",
"'.JPEG'",
"]",
"png_formats",
"=",
"[",
"'.PNG'",
"]",
"encode_params",
"=",
"None",
"if",
"img_fmt",
".",
"upper",
"(",
")",
"in",
"jpg_formats",
":",
"encode_params",
"=",
"[",
"cv2",
".",
"IMWRITE_JPEG_QUALITY",
",",
"quality",
"]",
"elif",
"img_fmt",
".",
"upper",
"(",
")",
"in",
"png_formats",
":",
"encode_params",
"=",
"[",
"cv2",
".",
"IMWRITE_PNG_COMPRESSION",
",",
"quality",
"]",
"ret",
",",
"buf",
"=",
"cv2",
".",
"imencode",
"(",
"img_fmt",
",",
"img",
",",
"encode_params",
")",
"assert",
"ret",
",",
"'failed to encode image'",
"return",
"pack",
"(",
"header",
",",
"buf",
".",
"tostring",
"(",
")",
")"
] | Pack an image into ``MXImageRecord``.
Parameters
----------
header : IRHeader
Header of the image record.
``header.label`` can be a number or an array. See more detail in ``IRHeader``.
img : numpy.ndarray
Image to be packed.
quality : int
Quality for JPEG encoding in range 1-100, or compression for PNG encoding in range 1-9.
img_fmt : str
Encoding of the image (.jpg for JPEG, .png for PNG).
Returns
-------
s : str
The packed string.
Examples
--------
>>> label = 4 # label can also be a 1-D array, for example: label = [1,2,3]
>>> id = 2574
>>> header = mx.recordio.IRHeader(0, label, id, 0)
>>> img = cv2.imread('test.jpg')
>>> packed_s = mx.recordio.pack_img(header, img) | [
"Pack",
"an",
"image",
"into",
"MXImageRecord",
"."
] | 1af29e9c060a4c7d60eeaacba32afdb9a7775ba7 | https://github.com/apache/incubator-mxnet/blob/1af29e9c060a4c7d60eeaacba32afdb9a7775ba7/python/mxnet/recordio.py#L466-L505 |
24,095 | apache/incubator-mxnet | python/mxnet/recordio.py | MXRecordIO.open | def open(self):
"""Opens the record file."""
if self.flag == "w":
check_call(_LIB.MXRecordIOWriterCreate(self.uri, ctypes.byref(self.handle)))
self.writable = True
elif self.flag == "r":
check_call(_LIB.MXRecordIOReaderCreate(self.uri, ctypes.byref(self.handle)))
self.writable = False
else:
raise ValueError("Invalid flag %s"%self.flag)
self.pid = current_process().pid
self.is_open = True | python | def open(self):
"""Opens the record file."""
if self.flag == "w":
check_call(_LIB.MXRecordIOWriterCreate(self.uri, ctypes.byref(self.handle)))
self.writable = True
elif self.flag == "r":
check_call(_LIB.MXRecordIOReaderCreate(self.uri, ctypes.byref(self.handle)))
self.writable = False
else:
raise ValueError("Invalid flag %s"%self.flag)
self.pid = current_process().pid
self.is_open = True | [
"def",
"open",
"(",
"self",
")",
":",
"if",
"self",
".",
"flag",
"==",
"\"w\"",
":",
"check_call",
"(",
"_LIB",
".",
"MXRecordIOWriterCreate",
"(",
"self",
".",
"uri",
",",
"ctypes",
".",
"byref",
"(",
"self",
".",
"handle",
")",
")",
")",
"self",
".",
"writable",
"=",
"True",
"elif",
"self",
".",
"flag",
"==",
"\"r\"",
":",
"check_call",
"(",
"_LIB",
".",
"MXRecordIOReaderCreate",
"(",
"self",
".",
"uri",
",",
"ctypes",
".",
"byref",
"(",
"self",
".",
"handle",
")",
")",
")",
"self",
".",
"writable",
"=",
"False",
"else",
":",
"raise",
"ValueError",
"(",
"\"Invalid flag %s\"",
"%",
"self",
".",
"flag",
")",
"self",
".",
"pid",
"=",
"current_process",
"(",
")",
".",
"pid",
"self",
".",
"is_open",
"=",
"True"
] | Opens the record file. | [
"Opens",
"the",
"record",
"file",
"."
] | 1af29e9c060a4c7d60eeaacba32afdb9a7775ba7 | https://github.com/apache/incubator-mxnet/blob/1af29e9c060a4c7d60eeaacba32afdb9a7775ba7/python/mxnet/recordio.py#L73-L84 |
24,096 | apache/incubator-mxnet | python/mxnet/recordio.py | MXRecordIO._check_pid | def _check_pid(self, allow_reset=False):
"""Check process id to ensure integrity, reset if in new process."""
if not self.pid == current_process().pid:
if allow_reset:
self.reset()
else:
raise RuntimeError("Forbidden operation in multiple processes") | python | def _check_pid(self, allow_reset=False):
"""Check process id to ensure integrity, reset if in new process."""
if not self.pid == current_process().pid:
if allow_reset:
self.reset()
else:
raise RuntimeError("Forbidden operation in multiple processes") | [
"def",
"_check_pid",
"(",
"self",
",",
"allow_reset",
"=",
"False",
")",
":",
"if",
"not",
"self",
".",
"pid",
"==",
"current_process",
"(",
")",
".",
"pid",
":",
"if",
"allow_reset",
":",
"self",
".",
"reset",
"(",
")",
"else",
":",
"raise",
"RuntimeError",
"(",
"\"Forbidden operation in multiple processes\"",
")"
] | Check process id to ensure integrity, reset if in new process. | [
"Check",
"process",
"id",
"to",
"ensure",
"integrity",
"reset",
"if",
"in",
"new",
"process",
"."
] | 1af29e9c060a4c7d60eeaacba32afdb9a7775ba7 | https://github.com/apache/incubator-mxnet/blob/1af29e9c060a4c7d60eeaacba32afdb9a7775ba7/python/mxnet/recordio.py#L115-L121 |
24,097 | apache/incubator-mxnet | python/mxnet/recordio.py | MXRecordIO.write | def write(self, buf):
"""Inserts a string buffer as a record.
Examples
---------
>>> record = mx.recordio.MXRecordIO('tmp.rec', 'w')
>>> for i in range(5):
... record.write('record_%d'%i)
>>> record.close()
Parameters
----------
buf : string (python2), bytes (python3)
Buffer to write.
"""
assert self.writable
self._check_pid(allow_reset=False)
check_call(_LIB.MXRecordIOWriterWriteRecord(self.handle,
ctypes.c_char_p(buf),
ctypes.c_size_t(len(buf)))) | python | def write(self, buf):
"""Inserts a string buffer as a record.
Examples
---------
>>> record = mx.recordio.MXRecordIO('tmp.rec', 'w')
>>> for i in range(5):
... record.write('record_%d'%i)
>>> record.close()
Parameters
----------
buf : string (python2), bytes (python3)
Buffer to write.
"""
assert self.writable
self._check_pid(allow_reset=False)
check_call(_LIB.MXRecordIOWriterWriteRecord(self.handle,
ctypes.c_char_p(buf),
ctypes.c_size_t(len(buf)))) | [
"def",
"write",
"(",
"self",
",",
"buf",
")",
":",
"assert",
"self",
".",
"writable",
"self",
".",
"_check_pid",
"(",
"allow_reset",
"=",
"False",
")",
"check_call",
"(",
"_LIB",
".",
"MXRecordIOWriterWriteRecord",
"(",
"self",
".",
"handle",
",",
"ctypes",
".",
"c_char_p",
"(",
"buf",
")",
",",
"ctypes",
".",
"c_size_t",
"(",
"len",
"(",
"buf",
")",
")",
")",
")"
] | Inserts a string buffer as a record.
Examples
---------
>>> record = mx.recordio.MXRecordIO('tmp.rec', 'w')
>>> for i in range(5):
... record.write('record_%d'%i)
>>> record.close()
Parameters
----------
buf : string (python2), bytes (python3)
Buffer to write. | [
"Inserts",
"a",
"string",
"buffer",
"as",
"a",
"record",
"."
] | 1af29e9c060a4c7d60eeaacba32afdb9a7775ba7 | https://github.com/apache/incubator-mxnet/blob/1af29e9c060a4c7d60eeaacba32afdb9a7775ba7/python/mxnet/recordio.py#L155-L174 |
24,098 | apache/incubator-mxnet | python/mxnet/recordio.py | MXRecordIO.read | def read(self):
"""Returns record as a string.
Examples
---------
>>> record = mx.recordio.MXRecordIO('tmp.rec', 'r')
>>> for i in range(5):
... item = record.read()
... print(item)
record_0
record_1
record_2
record_3
record_4
>>> record.close()
Returns
----------
buf : string
Buffer read.
"""
assert not self.writable
# trying to implicitly read from multiple processes is forbidden,
# there's no elegant way to handle unless lock is introduced
self._check_pid(allow_reset=False)
buf = ctypes.c_char_p()
size = ctypes.c_size_t()
check_call(_LIB.MXRecordIOReaderReadRecord(self.handle,
ctypes.byref(buf),
ctypes.byref(size)))
if buf:
buf = ctypes.cast(buf, ctypes.POINTER(ctypes.c_char*size.value))
return buf.contents.raw
else:
return None | python | def read(self):
"""Returns record as a string.
Examples
---------
>>> record = mx.recordio.MXRecordIO('tmp.rec', 'r')
>>> for i in range(5):
... item = record.read()
... print(item)
record_0
record_1
record_2
record_3
record_4
>>> record.close()
Returns
----------
buf : string
Buffer read.
"""
assert not self.writable
# trying to implicitly read from multiple processes is forbidden,
# there's no elegant way to handle unless lock is introduced
self._check_pid(allow_reset=False)
buf = ctypes.c_char_p()
size = ctypes.c_size_t()
check_call(_LIB.MXRecordIOReaderReadRecord(self.handle,
ctypes.byref(buf),
ctypes.byref(size)))
if buf:
buf = ctypes.cast(buf, ctypes.POINTER(ctypes.c_char*size.value))
return buf.contents.raw
else:
return None | [
"def",
"read",
"(",
"self",
")",
":",
"assert",
"not",
"self",
".",
"writable",
"# trying to implicitly read from multiple processes is forbidden,",
"# there's no elegant way to handle unless lock is introduced",
"self",
".",
"_check_pid",
"(",
"allow_reset",
"=",
"False",
")",
"buf",
"=",
"ctypes",
".",
"c_char_p",
"(",
")",
"size",
"=",
"ctypes",
".",
"c_size_t",
"(",
")",
"check_call",
"(",
"_LIB",
".",
"MXRecordIOReaderReadRecord",
"(",
"self",
".",
"handle",
",",
"ctypes",
".",
"byref",
"(",
"buf",
")",
",",
"ctypes",
".",
"byref",
"(",
"size",
")",
")",
")",
"if",
"buf",
":",
"buf",
"=",
"ctypes",
".",
"cast",
"(",
"buf",
",",
"ctypes",
".",
"POINTER",
"(",
"ctypes",
".",
"c_char",
"*",
"size",
".",
"value",
")",
")",
"return",
"buf",
".",
"contents",
".",
"raw",
"else",
":",
"return",
"None"
] | Returns record as a string.
Examples
---------
>>> record = mx.recordio.MXRecordIO('tmp.rec', 'r')
>>> for i in range(5):
... item = record.read()
... print(item)
record_0
record_1
record_2
record_3
record_4
>>> record.close()
Returns
----------
buf : string
Buffer read. | [
"Returns",
"record",
"as",
"a",
"string",
"."
] | 1af29e9c060a4c7d60eeaacba32afdb9a7775ba7 | https://github.com/apache/incubator-mxnet/blob/1af29e9c060a4c7d60eeaacba32afdb9a7775ba7/python/mxnet/recordio.py#L176-L210 |
24,099 | apache/incubator-mxnet | python/mxnet/recordio.py | MXIndexedRecordIO.seek | def seek(self, idx):
"""Sets the current read pointer position.
This function is internally called by `read_idx(idx)` to find the current
reader pointer position. It doesn't return anything."""
assert not self.writable
self._check_pid(allow_reset=True)
pos = ctypes.c_size_t(self.idx[idx])
check_call(_LIB.MXRecordIOReaderSeek(self.handle, pos)) | python | def seek(self, idx):
"""Sets the current read pointer position.
This function is internally called by `read_idx(idx)` to find the current
reader pointer position. It doesn't return anything."""
assert not self.writable
self._check_pid(allow_reset=True)
pos = ctypes.c_size_t(self.idx[idx])
check_call(_LIB.MXRecordIOReaderSeek(self.handle, pos)) | [
"def",
"seek",
"(",
"self",
",",
"idx",
")",
":",
"assert",
"not",
"self",
".",
"writable",
"self",
".",
"_check_pid",
"(",
"allow_reset",
"=",
"True",
")",
"pos",
"=",
"ctypes",
".",
"c_size_t",
"(",
"self",
".",
"idx",
"[",
"idx",
"]",
")",
"check_call",
"(",
"_LIB",
".",
"MXRecordIOReaderSeek",
"(",
"self",
".",
"handle",
",",
"pos",
")",
")"
] | Sets the current read pointer position.
This function is internally called by `read_idx(idx)` to find the current
reader pointer position. It doesn't return anything. | [
"Sets",
"the",
"current",
"read",
"pointer",
"position",
"."
] | 1af29e9c060a4c7d60eeaacba32afdb9a7775ba7 | https://github.com/apache/incubator-mxnet/blob/1af29e9c060a4c7d60eeaacba32afdb9a7775ba7/python/mxnet/recordio.py#L268-L276 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.