partition (string, 3 values) | func_name (string, 1-134 chars) | docstring (string, 1-46.9k chars) | path (string, 4-223 chars) | original_string (string, 75-104k chars) | code (string, 75-104k chars) | docstring_tokens (list, 1-1.97k items) | repo (string, 7-55 chars) | language (string, 1 value) | url (string, 87-315 chars) | code_tokens (list, 19-28.4k items) | sha (string, 40 chars)
|---|---|---|---|---|---|---|---|---|---|---|---|
train | DataFrameWriter.options | Adds output options for the underlying data source.
You can set the following option(s) for writing files:
* ``timeZone``: sets the string that indicates a timezone to be used to format
timestamps in the JSON/CSV datasources or partition values.
If it isn't set, it uses the default value, session local timezone. | python/pyspark/sql/readwriter.py | def options(self, **options):
"""Adds output options for the underlying data source.
You can set the following option(s) for writing files:
* ``timeZone``: sets the string that indicates a timezone to be used to format
timestamps in the JSON/CSV datasources or partition values.
If it isn't set, it uses the default value, session local timezone.
"""
for k in options:
self._jwrite = self._jwrite.option(k, to_str(options[k]))
return self | def options(self, **options):
"""Adds output options for the underlying data source.
You can set the following option(s) for writing files:
* ``timeZone``: sets the string that indicates a timezone to be used to format
timestamps in the JSON/CSV datasources or partition values.
If it isn't set, it uses the default value, session local timezone.
"""
for k in options:
self._jwrite = self._jwrite.option(k, to_str(options[k]))
return self | [
"Adds",
"output",
"options",
"for",
"the",
"underlying",
"data",
"source",
"."
] | apache/spark | python | https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/sql/readwriter.py#L632-L642 | [
"def",
"options",
"(",
"self",
",",
"*",
"*",
"options",
")",
":",
"for",
"k",
"in",
"options",
":",
"self",
".",
"_jwrite",
"=",
"self",
".",
"_jwrite",
".",
"option",
"(",
"k",
",",
"to_str",
"(",
"options",
"[",
"k",
"]",
")",
")",
"return",
"self"
] | 618d6bff71073c8c93501ab7392c3cc579730f0b |
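The row above documents ``DataFrameWriter.options``. A minimal usage sketch follows; the session setup, sample frame, and output path are illustrative assumptions, not part of the dataset row.

```python
import os
import tempfile

from pyspark.sql import SparkSession

spark = SparkSession.builder.getOrCreate()
df = spark.createDataFrame([(1, "a")], ["id", "value"])

# options() only accumulates key/value pairs on the underlying JVM writer
# and returns self, so it chains with the other builder-style calls.
out = os.path.join(tempfile.mkdtemp(), "data")
df.write.options(timeZone="UTC").csv(out)
```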
train | DataFrameWriter.partitionBy | Partitions the output by the given columns on the file system.
If specified, the output is laid out on the file system similar
to Hive's partitioning scheme.
:param cols: name of columns
>>> df.write.partitionBy('year', 'month').parquet(os.path.join(tempfile.mkdtemp(), 'data')) | python/pyspark/sql/readwriter.py | def partitionBy(self, *cols):
"""Partitions the output by the given columns on the file system.
If specified, the output is laid out on the file system similar
to Hive's partitioning scheme.
:param cols: name of columns
>>> df.write.partitionBy('year', 'month').parquet(os.path.join(tempfile.mkdtemp(), 'data'))
"""
if len(cols) == 1 and isinstance(cols[0], (list, tuple)):
cols = cols[0]
self._jwrite = self._jwrite.partitionBy(_to_seq(self._spark._sc, cols))
return self | def partitionBy(self, *cols):
"""Partitions the output by the given columns on the file system.
If specified, the output is laid out on the file system similar
to Hive's partitioning scheme.
:param cols: name of columns
>>> df.write.partitionBy('year', 'month').parquet(os.path.join(tempfile.mkdtemp(), 'data'))
"""
if len(cols) == 1 and isinstance(cols[0], (list, tuple)):
cols = cols[0]
self._jwrite = self._jwrite.partitionBy(_to_seq(self._spark._sc, cols))
return self | [
"Partitions",
"the",
"output",
"by",
"the",
"given",
"columns",
"on",
"the",
"file",
"system",
"."
] | apache/spark | python | https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/sql/readwriter.py#L645-L658 | [
"def",
"partitionBy",
"(",
"self",
",",
"*",
"cols",
")",
":",
"if",
"len",
"(",
"cols",
")",
"==",
"1",
"and",
"isinstance",
"(",
"cols",
"[",
"0",
"]",
",",
"(",
"list",
",",
"tuple",
")",
")",
":",
"cols",
"=",
"cols",
"[",
"0",
"]",
"self",
".",
"_jwrite",
"=",
"self",
".",
"_jwrite",
".",
"partitionBy",
"(",
"_to_seq",
"(",
"self",
".",
"_spark",
".",
"_sc",
",",
"cols",
")",
")",
"return",
"self"
] | 618d6bff71073c8c93501ab7392c3cc579730f0b |
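A short sketch of the ``partitionBy`` row above; the column names and data are made up for illustration.

```python
import os
import tempfile

from pyspark.sql import SparkSession

spark = SparkSession.builder.getOrCreate()
df = spark.createDataFrame(
    [(2014, 1, "x"), (2014, 2, "y")], ["year", "month", "value"])

# Each distinct (year, month) pair becomes a directory such as
# year=2014/month=1/ under the output path, mirroring Hive's layout.
df.write.partitionBy("year", "month").parquet(
    os.path.join(tempfile.mkdtemp(), "data"))
```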
train | DataFrameWriter.sortBy | Sorts the output in each bucket by the given columns on the file system.
:param col: a name of a column, or a list of names.
:param cols: additional names (optional). If `col` is a list it should be empty.
>>> (df.write.format('parquet') # doctest: +SKIP
... .bucketBy(100, 'year', 'month')
... .sortBy('day')
... .mode("overwrite")
... .saveAsTable('sorted_bucketed_table')) | python/pyspark/sql/readwriter.py | def sortBy(self, col, *cols):
"""Sorts the output in each bucket by the given columns on the file system.
:param col: a name of a column, or a list of names.
:param cols: additional names (optional). If `col` is a list it should be empty.
>>> (df.write.format('parquet') # doctest: +SKIP
... .bucketBy(100, 'year', 'month')
... .sortBy('day')
... .mode("overwrite")
... .saveAsTable('sorted_bucketed_table'))
"""
if isinstance(col, (list, tuple)):
if cols:
raise ValueError("col is a {0} but cols are not empty".format(type(col)))
col, cols = col[0], col[1:]
if not all(isinstance(c, basestring) for c in cols) or not(isinstance(col, basestring)):
raise TypeError("all names should be `str`")
self._jwrite = self._jwrite.sortBy(col, _to_seq(self._spark._sc, cols))
return self | def sortBy(self, col, *cols):
"""Sorts the output in each bucket by the given columns on the file system.
:param col: a name of a column, or a list of names.
:param cols: additional names (optional). If `col` is a list it should be empty.
>>> (df.write.format('parquet') # doctest: +SKIP
... .bucketBy(100, 'year', 'month')
... .sortBy('day')
... .mode("overwrite")
... .saveAsTable('sorted_bucketed_table'))
"""
if isinstance(col, (list, tuple)):
if cols:
raise ValueError("col is a {0} but cols are not empty".format(type(col)))
col, cols = col[0], col[1:]
if not all(isinstance(c, basestring) for c in cols) or not(isinstance(col, basestring)):
raise TypeError("all names should be `str`")
self._jwrite = self._jwrite.sortBy(col, _to_seq(self._spark._sc, cols))
return self | [
"Sorts",
"the",
"output",
"in",
"each",
"bucket",
"by",
"the",
"given",
"columns",
"on",
"the",
"file",
"system",
"."
] | apache/spark | python | https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/sql/readwriter.py#L693-L715 | [
"def",
"sortBy",
"(",
"self",
",",
"col",
",",
"*",
"cols",
")",
":",
"if",
"isinstance",
"(",
"col",
",",
"(",
"list",
",",
"tuple",
")",
")",
":",
"if",
"cols",
":",
"raise",
"ValueError",
"(",
"\"col is a {0} but cols are not empty\"",
".",
"format",
"(",
"type",
"(",
"col",
")",
")",
")",
"col",
",",
"cols",
"=",
"col",
"[",
"0",
"]",
",",
"col",
"[",
"1",
":",
"]",
"if",
"not",
"all",
"(",
"isinstance",
"(",
"c",
",",
"basestring",
")",
"for",
"c",
"in",
"cols",
")",
"or",
"not",
"(",
"isinstance",
"(",
"col",
",",
"basestring",
")",
")",
":",
"raise",
"TypeError",
"(",
"\"all names should be `str`\"",
")",
"self",
".",
"_jwrite",
"=",
"self",
".",
"_jwrite",
".",
"sortBy",
"(",
"col",
",",
"_to_seq",
"(",
"self",
".",
"_spark",
".",
"_sc",
",",
"cols",
")",
")",
"return",
"self"
] | 618d6bff71073c8c93501ab7392c3cc579730f0b |
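The ``sortBy`` row above only applies together with ``bucketBy``; a hedged sketch, assuming a session with a writable default warehouse and a hypothetical table name:

```python
from pyspark.sql import SparkSession

spark = SparkSession.builder.getOrCreate()
df = spark.createDataFrame(
    [(2014, 1, 5), (2014, 1, 3)], ["year", "month", "day"])

# sortBy is only valid on a bucketed write, and a bucketed write must go
# through saveAsTable rather than a path-based save.
(df.write.format("parquet")
    .bucketBy(4, "year", "month")
    .sortBy("day")
    .mode("overwrite")
    .saveAsTable("sorted_bucketed_table"))
```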
train | DataFrameWriter.save | Saves the contents of the :class:`DataFrame` to a data source.
The data source is specified by the ``format`` and a set of ``options``.
If ``format`` is not specified, the default data source configured by
``spark.sql.sources.default`` will be used.
:param path: the path in a Hadoop supported file system
:param format: the format used to save
:param mode: specifies the behavior of the save operation when data already exists.
* ``append``: Append contents of this :class:`DataFrame` to existing data.
* ``overwrite``: Overwrite existing data.
* ``ignore``: Silently ignore this operation if data already exists.
* ``error`` or ``errorifexists`` (default case): Throw an exception if data already \
exists.
:param partitionBy: names of partitioning columns
:param options: all other string options
>>> df.write.mode('append').parquet(os.path.join(tempfile.mkdtemp(), 'data')) | python/pyspark/sql/readwriter.py | def save(self, path=None, format=None, mode=None, partitionBy=None, **options):
"""Saves the contents of the :class:`DataFrame` to a data source.
The data source is specified by the ``format`` and a set of ``options``.
If ``format`` is not specified, the default data source configured by
``spark.sql.sources.default`` will be used.
:param path: the path in a Hadoop supported file system
:param format: the format used to save
:param mode: specifies the behavior of the save operation when data already exists.
* ``append``: Append contents of this :class:`DataFrame` to existing data.
* ``overwrite``: Overwrite existing data.
* ``ignore``: Silently ignore this operation if data already exists.
* ``error`` or ``errorifexists`` (default case): Throw an exception if data already \
exists.
:param partitionBy: names of partitioning columns
:param options: all other string options
>>> df.write.mode('append').parquet(os.path.join(tempfile.mkdtemp(), 'data'))
"""
self.mode(mode).options(**options)
if partitionBy is not None:
self.partitionBy(partitionBy)
if format is not None:
self.format(format)
if path is None:
self._jwrite.save()
else:
self._jwrite.save(path) | def save(self, path=None, format=None, mode=None, partitionBy=None, **options):
"""Saves the contents of the :class:`DataFrame` to a data source.
The data source is specified by the ``format`` and a set of ``options``.
If ``format`` is not specified, the default data source configured by
``spark.sql.sources.default`` will be used.
:param path: the path in a Hadoop supported file system
:param format: the format used to save
:param mode: specifies the behavior of the save operation when data already exists.
* ``append``: Append contents of this :class:`DataFrame` to existing data.
* ``overwrite``: Overwrite existing data.
* ``ignore``: Silently ignore this operation if data already exists.
* ``error`` or ``errorifexists`` (default case): Throw an exception if data already \
exists.
:param partitionBy: names of partitioning columns
:param options: all other string options
>>> df.write.mode('append').parquet(os.path.join(tempfile.mkdtemp(), 'data'))
"""
self.mode(mode).options(**options)
if partitionBy is not None:
self.partitionBy(partitionBy)
if format is not None:
self.format(format)
if path is None:
self._jwrite.save()
else:
self._jwrite.save(path) | [
"Saves",
"the",
"contents",
"of",
"the",
":",
"class",
":",
"DataFrame",
"to",
"a",
"data",
"source",
"."
] | apache/spark | python | https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/sql/readwriter.py#L718-L747 | [
"def",
"save",
"(",
"self",
",",
"path",
"=",
"None",
",",
"format",
"=",
"None",
",",
"mode",
"=",
"None",
",",
"partitionBy",
"=",
"None",
",",
"*",
"*",
"options",
")",
":",
"self",
".",
"mode",
"(",
"mode",
")",
".",
"options",
"(",
"*",
"*",
"options",
")",
"if",
"partitionBy",
"is",
"not",
"None",
":",
"self",
".",
"partitionBy",
"(",
"partitionBy",
")",
"if",
"format",
"is",
"not",
"None",
":",
"self",
".",
"format",
"(",
"format",
")",
"if",
"path",
"is",
"None",
":",
"self",
".",
"_jwrite",
".",
"save",
"(",
")",
"else",
":",
"self",
".",
"_jwrite",
".",
"save",
"(",
"path",
")"
] | 618d6bff71073c8c93501ab7392c3cc579730f0b |
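A sketch of the generic ``save`` entry point from the row above; the path, format, and columns are illustrative.

```python
import os
import tempfile

from pyspark.sql import SparkSession

spark = SparkSession.builder.getOrCreate()
df = spark.createDataFrame([(1, "a")], ["id", "value"])

# save() folds mode, format, partitioning, and free-form options into
# the writer before delegating to the JVM side.
df.write.save(
    path=os.path.join(tempfile.mkdtemp(), "data"),
    format="json",
    mode="append",
    partitionBy=["id"])
```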
train | DataFrameWriter.insertInto | Inserts the content of the :class:`DataFrame` to the specified table.
It requires that the schema of the :class:`DataFrame` is the same as the
schema of the table.
Existing data can optionally be overwritten. | python/pyspark/sql/readwriter.py | def insertInto(self, tableName, overwrite=False):
"""Inserts the content of the :class:`DataFrame` to the specified table.
It requires that the schema of the :class:`DataFrame` is the same as the
schema of the table.
Existing data can optionally be overwritten.
"""
self._jwrite.mode("overwrite" if overwrite else "append").insertInto(tableName) | def insertInto(self, tableName, overwrite=False):
"""Inserts the content of the :class:`DataFrame` to the specified table.
It requires that the schema of the :class:`DataFrame` is the same as the
schema of the table.
Existing data can optionally be overwritten.
"""
self._jwrite.mode("overwrite" if overwrite else "append").insertInto(tableName) | [
"Inserts",
"the",
"content",
"of",
"the",
":",
"class",
":",
"DataFrame",
"to",
"the",
"specified",
"table",
"."
] | apache/spark | python | https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/sql/readwriter.py#L750-L758 | [
"def",
"insertInto",
"(",
"self",
",",
"tableName",
",",
"overwrite",
"=",
"False",
")",
":",
"self",
".",
"_jwrite",
".",
"mode",
"(",
"\"overwrite\"",
"if",
"overwrite",
"else",
"\"append\"",
")",
".",
"insertInto",
"(",
"tableName",
")"
] | 618d6bff71073c8c93501ab7392c3cc579730f0b |
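A sketch of ``insertInto`` from the row above, assuming a session catalog and the hypothetical table name ``target_table``.

```python
from pyspark.sql import SparkSession

spark = SparkSession.builder.getOrCreate()
spark.createDataFrame([(1, "a")], ["id", "value"]).write.saveAsTable("target_table")

# insertInto resolves columns by position, so the DataFrame schema must
# line up with the existing table's schema.
more = spark.createDataFrame([(2, "b")], ["id", "value"])
more.write.insertInto("target_table")                    # append (default)
more.write.insertInto("target_table", overwrite=True)    # replace contents
```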
train | DataFrameWriter.saveAsTable | Saves the content of the :class:`DataFrame` as the specified table.
If the table already exists, the behavior of this function depends on the
save mode, specified by the `mode` function (defaulting to throwing an exception).
When `mode` is `Overwrite`, the schema of the :class:`DataFrame` does not need to be
the same as that of the existing table.
* `append`: Append contents of this :class:`DataFrame` to existing data.
* `overwrite`: Overwrite existing data.
* `error` or `errorifexists`: Throw an exception if data already exists.
* `ignore`: Silently ignore this operation if data already exists.
:param name: the table name
:param format: the format used to save
:param mode: one of `append`, `overwrite`, `error`, `errorifexists`, `ignore` \
(default: error)
:param partitionBy: names of partitioning columns
:param options: all other string options | python/pyspark/sql/readwriter.py | def saveAsTable(self, name, format=None, mode=None, partitionBy=None, **options):
"""Saves the content of the :class:`DataFrame` as the specified table.
If the table already exists, the behavior of this function depends on the
save mode, specified by the `mode` function (defaulting to throwing an exception).
When `mode` is `Overwrite`, the schema of the :class:`DataFrame` does not need to be
the same as that of the existing table.
* `append`: Append contents of this :class:`DataFrame` to existing data.
* `overwrite`: Overwrite existing data.
* `error` or `errorifexists`: Throw an exception if data already exists.
* `ignore`: Silently ignore this operation if data already exists.
:param name: the table name
:param format: the format used to save
:param mode: one of `append`, `overwrite`, `error`, `errorifexists`, `ignore` \
(default: error)
:param partitionBy: names of partitioning columns
:param options: all other string options
"""
self.mode(mode).options(**options)
if partitionBy is not None:
self.partitionBy(partitionBy)
if format is not None:
self.format(format)
self._jwrite.saveAsTable(name) | def saveAsTable(self, name, format=None, mode=None, partitionBy=None, **options):
"""Saves the content of the :class:`DataFrame` as the specified table.
If the table already exists, the behavior of this function depends on the
save mode, specified by the `mode` function (defaulting to throwing an exception).
When `mode` is `Overwrite`, the schema of the :class:`DataFrame` does not need to be
the same as that of the existing table.
* `append`: Append contents of this :class:`DataFrame` to existing data.
* `overwrite`: Overwrite existing data.
* `error` or `errorifexists`: Throw an exception if data already exists.
* `ignore`: Silently ignore this operation if data already exists.
:param name: the table name
:param format: the format used to save
:param mode: one of `append`, `overwrite`, `error`, `errorifexists`, `ignore` \
(default: error)
:param partitionBy: names of partitioning columns
:param options: all other string options
"""
self.mode(mode).options(**options)
if partitionBy is not None:
self.partitionBy(partitionBy)
if format is not None:
self.format(format)
self._jwrite.saveAsTable(name) | [
"Saves",
"the",
"content",
"of",
"the",
":",
"class",
":",
"DataFrame",
"as",
"the",
"specified",
"table",
"."
] | apache/spark | python | https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/sql/readwriter.py#L761-L786 | [
"def",
"saveAsTable",
"(",
"self",
",",
"name",
",",
"format",
"=",
"None",
",",
"mode",
"=",
"None",
",",
"partitionBy",
"=",
"None",
",",
"*",
"*",
"options",
")",
":",
"self",
".",
"mode",
"(",
"mode",
")",
".",
"options",
"(",
"*",
"*",
"options",
")",
"if",
"partitionBy",
"is",
"not",
"None",
":",
"self",
".",
"partitionBy",
"(",
"partitionBy",
")",
"if",
"format",
"is",
"not",
"None",
":",
"self",
".",
"format",
"(",
"format",
")",
"self",
".",
"_jwrite",
".",
"saveAsTable",
"(",
"name",
")"
] | 618d6bff71073c8c93501ab7392c3cc579730f0b |
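A sketch of ``saveAsTable`` from the row above; the table name and columns are placeholders.

```python
from pyspark.sql import SparkSession

spark = SparkSession.builder.getOrCreate()
df = spark.createDataFrame([(2014, "a")], ["year", "value"])

# Unlike insertInto, saveAsTable can create the table when it does not
# exist yet, and overwrite mode may change the stored schema.
df.write.saveAsTable(
    "events", format="parquet", mode="overwrite", partitionBy=["year"])
```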
train | DataFrameWriter.json | Saves the content of the :class:`DataFrame` in JSON format
(`JSON Lines text format or newline-delimited JSON <http://jsonlines.org/>`_) at the
specified path.
:param path: the path in any Hadoop supported file system
:param mode: specifies the behavior of the save operation when data already exists.
* ``append``: Append contents of this :class:`DataFrame` to existing data.
* ``overwrite``: Overwrite existing data.
* ``ignore``: Silently ignore this operation if data already exists.
* ``error`` or ``errorifexists`` (default case): Throw an exception if data already \
exists.
:param compression: compression codec to use when saving to file. This can be one of the
known case-insensitive shortened names (none, bzip2, gzip, lz4,
snappy and deflate).
:param dateFormat: sets the string that indicates a date format. Custom date formats
follow the formats at ``java.time.format.DateTimeFormatter``. This
applies to date type. If None is set, it uses the
default value, ``yyyy-MM-dd``.
:param timestampFormat: sets the string that indicates a timestamp format.
Custom date formats follow the formats at
``java.time.format.DateTimeFormatter``.
This applies to timestamp type. If None is set, it uses the
default value, ``yyyy-MM-dd'T'HH:mm:ss.SSSXXX``.
:param encoding: specifies encoding (charset) of saved json files. If None is set,
the default UTF-8 charset will be used.
:param lineSep: defines the line separator that should be used for writing. If None is
set, it uses the default value, ``\\n``.
>>> df.write.json(os.path.join(tempfile.mkdtemp(), 'data')) | python/pyspark/sql/readwriter.py | def json(self, path, mode=None, compression=None, dateFormat=None, timestampFormat=None,
lineSep=None, encoding=None):
"""Saves the content of the :class:`DataFrame` in JSON format
(`JSON Lines text format or newline-delimited JSON <http://jsonlines.org/>`_) at the
specified path.
:param path: the path in any Hadoop supported file system
:param mode: specifies the behavior of the save operation when data already exists.
* ``append``: Append contents of this :class:`DataFrame` to existing data.
* ``overwrite``: Overwrite existing data.
* ``ignore``: Silently ignore this operation if data already exists.
* ``error`` or ``errorifexists`` (default case): Throw an exception if data already \
exists.
:param compression: compression codec to use when saving to file. This can be one of the
known case-insensitive shortened names (none, bzip2, gzip, lz4,
snappy and deflate).
:param dateFormat: sets the string that indicates a date format. Custom date formats
follow the formats at ``java.time.format.DateTimeFormatter``. This
applies to date type. If None is set, it uses the
default value, ``yyyy-MM-dd``.
:param timestampFormat: sets the string that indicates a timestamp format.
Custom date formats follow the formats at
``java.time.format.DateTimeFormatter``.
This applies to timestamp type. If None is set, it uses the
default value, ``yyyy-MM-dd'T'HH:mm:ss.SSSXXX``.
:param encoding: specifies encoding (charset) of saved json files. If None is set,
the default UTF-8 charset will be used.
:param lineSep: defines the line separator that should be used for writing. If None is
set, it uses the default value, ``\\n``.
>>> df.write.json(os.path.join(tempfile.mkdtemp(), 'data'))
"""
self.mode(mode)
self._set_opts(
compression=compression, dateFormat=dateFormat, timestampFormat=timestampFormat,
lineSep=lineSep, encoding=encoding)
self._jwrite.json(path) | def json(self, path, mode=None, compression=None, dateFormat=None, timestampFormat=None,
lineSep=None, encoding=None):
"""Saves the content of the :class:`DataFrame` in JSON format
(`JSON Lines text format or newline-delimited JSON <http://jsonlines.org/>`_) at the
specified path.
:param path: the path in any Hadoop supported file system
:param mode: specifies the behavior of the save operation when data already exists.
* ``append``: Append contents of this :class:`DataFrame` to existing data.
* ``overwrite``: Overwrite existing data.
* ``ignore``: Silently ignore this operation if data already exists.
* ``error`` or ``errorifexists`` (default case): Throw an exception if data already \
exists.
:param compression: compression codec to use when saving to file. This can be one of the
known case-insensitive shortened names (none, bzip2, gzip, lz4,
snappy and deflate).
:param dateFormat: sets the string that indicates a date format. Custom date formats
follow the formats at ``java.time.format.DateTimeFormatter``. This
applies to date type. If None is set, it uses the
default value, ``yyyy-MM-dd``.
:param timestampFormat: sets the string that indicates a timestamp format.
Custom date formats follow the formats at
``java.time.format.DateTimeFormatter``.
This applies to timestamp type. If None is set, it uses the
default value, ``yyyy-MM-dd'T'HH:mm:ss.SSSXXX``.
:param encoding: specifies encoding (charset) of saved json files. If None is set,
the default UTF-8 charset will be used.
:param lineSep: defines the line separator that should be used for writing. If None is
set, it uses the default value, ``\\n``.
>>> df.write.json(os.path.join(tempfile.mkdtemp(), 'data'))
"""
self.mode(mode)
self._set_opts(
compression=compression, dateFormat=dateFormat, timestampFormat=timestampFormat,
lineSep=lineSep, encoding=encoding)
self._jwrite.json(path) | [
"Saves",
"the",
"content",
"of",
"the",
":",
"class",
":",
"DataFrame",
"in",
"JSON",
"format",
"(",
"JSON",
"Lines",
"text",
"format",
"or",
"newline",
"-",
"delimited",
"JSON",
"<http",
":",
"//",
"jsonlines",
".",
"org",
"/",
">",
"_",
")",
"at",
"the",
"specified",
"path",
"."
] | apache/spark | python | https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/sql/readwriter.py#L789-L826 | [
"def",
"json",
"(",
"self",
",",
"path",
",",
"mode",
"=",
"None",
",",
"compression",
"=",
"None",
",",
"dateFormat",
"=",
"None",
",",
"timestampFormat",
"=",
"None",
",",
"lineSep",
"=",
"None",
",",
"encoding",
"=",
"None",
")",
":",
"self",
".",
"mode",
"(",
"mode",
")",
"self",
".",
"_set_opts",
"(",
"compression",
"=",
"compression",
",",
"dateFormat",
"=",
"dateFormat",
",",
"timestampFormat",
"=",
"timestampFormat",
",",
"lineSep",
"=",
"lineSep",
",",
"encoding",
"=",
"encoding",
")",
"self",
".",
"_jwrite",
".",
"json",
"(",
"path",
")"
] | 618d6bff71073c8c93501ab7392c3cc579730f0b |
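A sketch of the ``json`` writer from the row above, exercising a few of the documented options; the data and path are illustrative.

```python
import datetime
import os
import tempfile

from pyspark.sql import SparkSession

spark = SparkSession.builder.getOrCreate()
df = spark.createDataFrame([(datetime.datetime(2014, 1, 1),)], ["ts"])

# One JSON object per line (JSON Lines); the timestamp pattern follows
# java.time.format.DateTimeFormatter.
df.write.json(
    os.path.join(tempfile.mkdtemp(), "data"),
    mode="overwrite",
    compression="gzip",
    timestampFormat="yyyy-MM-dd'T'HH:mm:ss")
```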
train | DataFrameWriter.parquet | Saves the content of the :class:`DataFrame` in Parquet format at the specified path.
:param path: the path in any Hadoop supported file system
:param mode: specifies the behavior of the save operation when data already exists.
* ``append``: Append contents of this :class:`DataFrame` to existing data.
* ``overwrite``: Overwrite existing data.
* ``ignore``: Silently ignore this operation if data already exists.
* ``error`` or ``errorifexists`` (default case): Throw an exception if data already \
exists.
:param partitionBy: names of partitioning columns
:param compression: compression codec to use when saving to file. This can be one of the
known case-insensitive shortened names (none, uncompressed, snappy, gzip,
lzo, brotli, lz4, and zstd). This will override
``spark.sql.parquet.compression.codec``. If None is set, it uses the
value specified in ``spark.sql.parquet.compression.codec``.
>>> df.write.parquet(os.path.join(tempfile.mkdtemp(), 'data')) | python/pyspark/sql/readwriter.py | def parquet(self, path, mode=None, partitionBy=None, compression=None):
"""Saves the content of the :class:`DataFrame` in Parquet format at the specified path.
:param path: the path in any Hadoop supported file system
:param mode: specifies the behavior of the save operation when data already exists.
* ``append``: Append contents of this :class:`DataFrame` to existing data.
* ``overwrite``: Overwrite existing data.
* ``ignore``: Silently ignore this operation if data already exists.
* ``error`` or ``errorifexists`` (default case): Throw an exception if data already \
exists.
:param partitionBy: names of partitioning columns
:param compression: compression codec to use when saving to file. This can be one of the
known case-insensitive shortened names (none, uncompressed, snappy, gzip,
lzo, brotli, lz4, and zstd). This will override
``spark.sql.parquet.compression.codec``. If None is set, it uses the
value specified in ``spark.sql.parquet.compression.codec``.
>>> df.write.parquet(os.path.join(tempfile.mkdtemp(), 'data'))
"""
self.mode(mode)
if partitionBy is not None:
self.partitionBy(partitionBy)
self._set_opts(compression=compression)
self._jwrite.parquet(path) | def parquet(self, path, mode=None, partitionBy=None, compression=None):
"""Saves the content of the :class:`DataFrame` in Parquet format at the specified path.
:param path: the path in any Hadoop supported file system
:param mode: specifies the behavior of the save operation when data already exists.
* ``append``: Append contents of this :class:`DataFrame` to existing data.
* ``overwrite``: Overwrite existing data.
* ``ignore``: Silently ignore this operation if data already exists.
* ``error`` or ``errorifexists`` (default case): Throw an exception if data already \
exists.
:param partitionBy: names of partitioning columns
:param compression: compression codec to use when saving to file. This can be one of the
known case-insensitive shortened names (none, uncompressed, snappy, gzip,
lzo, brotli, lz4, and zstd). This will override
``spark.sql.parquet.compression.codec``. If None is set, it uses the
value specified in ``spark.sql.parquet.compression.codec``.
>>> df.write.parquet(os.path.join(tempfile.mkdtemp(), 'data'))
"""
self.mode(mode)
if partitionBy is not None:
self.partitionBy(partitionBy)
self._set_opts(compression=compression)
self._jwrite.parquet(path) | [
"Saves",
"the",
"content",
"of",
"the",
":",
"class",
":",
"DataFrame",
"in",
"Parquet",
"format",
"at",
"the",
"specified",
"path",
"."
] | apache/spark | python | https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/sql/readwriter.py#L829-L853 | [
"def",
"parquet",
"(",
"self",
",",
"path",
",",
"mode",
"=",
"None",
",",
"partitionBy",
"=",
"None",
",",
"compression",
"=",
"None",
")",
":",
"self",
".",
"mode",
"(",
"mode",
")",
"if",
"partitionBy",
"is",
"not",
"None",
":",
"self",
".",
"partitionBy",
"(",
"partitionBy",
")",
"self",
".",
"_set_opts",
"(",
"compression",
"=",
"compression",
")",
"self",
".",
"_jwrite",
".",
"parquet",
"(",
"path",
")"
] | 618d6bff71073c8c93501ab7392c3cc579730f0b |
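A sketch of the ``parquet`` writer from the row above; the column names and path are illustrative.

```python
import os
import tempfile

from pyspark.sql import SparkSession

spark = SparkSession.builder.getOrCreate()
df = spark.createDataFrame([(2014, "a")], ["year", "value"])

# An explicit codec overrides spark.sql.parquet.compression.codec;
# leaving compression=None falls back to that session setting.
df.write.parquet(
    os.path.join(tempfile.mkdtemp(), "data"),
    mode="overwrite",
    partitionBy="year",
    compression="snappy")
```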
train | DataFrameWriter.text | Saves the content of the DataFrame in a text file at the specified path.
The text files will be encoded as UTF-8.
:param path: the path in any Hadoop supported file system
:param compression: compression codec to use when saving to file. This can be one of the
known case-insensitive shortened names (none, bzip2, gzip, lz4,
snappy and deflate).
:param lineSep: defines the line separator that should be used for writing. If None is
set, it uses the default value, ``\\n``.
The DataFrame must have only one column that is of string type.
Each row becomes a new line in the output file. | python/pyspark/sql/readwriter.py | def text(self, path, compression=None, lineSep=None):
"""Saves the content of the DataFrame in a text file at the specified path.
The text files will be encoded as UTF-8.
:param path: the path in any Hadoop supported file system
:param compression: compression codec to use when saving to file. This can be one of the
known case-insensitive shortened names (none, bzip2, gzip, lz4,
snappy and deflate).
:param lineSep: defines the line separator that should be used for writing. If None is
set, it uses the default value, ``\\n``.
The DataFrame must have only one column that is of string type.
Each row becomes a new line in the output file.
"""
self._set_opts(compression=compression, lineSep=lineSep)
self._jwrite.text(path) | def text(self, path, compression=None, lineSep=None):
"""Saves the content of the DataFrame in a text file at the specified path.
The text files will be encoded as UTF-8.
:param path: the path in any Hadoop supported file system
:param compression: compression codec to use when saving to file. This can be one of the
known case-insensitive shortened names (none, bzip2, gzip, lz4,
snappy and deflate).
:param lineSep: defines the line separator that should be used for writing. If None is
set, it uses the default value, ``\\n``.
The DataFrame must have only one column that is of string type.
Each row becomes a new line in the output file.
"""
self._set_opts(compression=compression, lineSep=lineSep)
self._jwrite.text(path) | [
"Saves",
"the",
"content",
"of",
"the",
"DataFrame",
"in",
"a",
"text",
"file",
"at",
"the",
"specified",
"path",
".",
"The",
"text",
"files",
"will",
"be",
"encoded",
"as",
"UTF",
"-",
"8",
"."
] | apache/spark | python | https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/sql/readwriter.py#L856-L871 | [
"def",
"text",
"(",
"self",
",",
"path",
",",
"compression",
"=",
"None",
",",
"lineSep",
"=",
"None",
")",
":",
"self",
".",
"_set_opts",
"(",
"compression",
"=",
"compression",
",",
"lineSep",
"=",
"lineSep",
")",
"self",
".",
"_jwrite",
".",
"text",
"(",
"path",
")"
] | 618d6bff71073c8c93501ab7392c3cc579730f0b |
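A sketch of the ``text`` writer from the row above; the data and path are illustrative.

```python
import os
import tempfile

from pyspark.sql import SparkSession

spark = SparkSession.builder.getOrCreate()

# text() requires exactly one string column; each row becomes one line.
df = spark.createDataFrame([("hello",), ("world",)], ["value"])
df.write.text(os.path.join(tempfile.mkdtemp(), "data"), compression="bzip2")
```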
train | DataFrameWriter.csv | r"""Saves the content of the :class:`DataFrame` in CSV format at the specified path.
:param path: the path in any Hadoop supported file system
:param mode: specifies the behavior of the save operation when data already exists.
* ``append``: Append contents of this :class:`DataFrame` to existing data.
* ``overwrite``: Overwrite existing data.
* ``ignore``: Silently ignore this operation if data already exists.
* ``error`` or ``errorifexists`` (default case): Throw an exception if data already \
exists.
:param compression: compression codec to use when saving to file. This can be one of the
known case-insensitive shortened names (none, bzip2, gzip, lz4,
snappy and deflate).
:param sep: sets a single character as a separator for each field and value. If None is
set, it uses the default value, ``,``.
:param quote: sets a single character used for escaping quoted values where the
separator can be part of the value. If None is set, it uses the default
value, ``"``. If an empty string is set, it uses ``u0000`` (null character).
:param escape: sets a single character used for escaping quotes inside an already
quoted value. If None is set, it uses the default value, ``\``
:param escapeQuotes: a flag indicating whether values containing quotes should always
be enclosed in quotes. If None is set, it uses the default value
``true``, escaping all values containing a quote character.
:param quoteAll: a flag indicating whether all values should always be enclosed in
quotes. If None is set, it uses the default value ``false``,
only escaping values containing a quote character.
:param header: writes the names of columns as the first line. If None is set, it uses
the default value, ``false``.
:param nullValue: sets the string representation of a null value. If None is set, it uses
the default value, empty string.
:param dateFormat: sets the string that indicates a date format. Custom date formats
follow the formats at ``java.time.format.DateTimeFormatter``. This
applies to date type. If None is set, it uses the
default value, ``yyyy-MM-dd``.
:param timestampFormat: sets the string that indicates a timestamp format.
Custom date formats follow the formats at
``java.time.format.DateTimeFormatter``.
This applies to timestamp type. If None is set, it uses the
default value, ``yyyy-MM-dd'T'HH:mm:ss.SSSXXX``.
:param ignoreLeadingWhiteSpace: a flag indicating whether or not leading whitespaces from
values being written should be skipped. If None is set, it
uses the default value, ``true``.
:param ignoreTrailingWhiteSpace: a flag indicating whether or not trailing whitespaces from
values being written should be skipped. If None is set, it
uses the default value, ``true``.
:param charToEscapeQuoteEscaping: sets a single character used for escaping the escape for
the quote character. If None is set, the default value is
escape character when escape and quote characters are
different, ``\0`` otherwise.
:param encoding: sets the encoding (charset) of saved csv files. If None is set,
the default UTF-8 charset will be used.
:param emptyValue: sets the string representation of an empty value. If None is set, it uses
the default value, ``""``.
:param lineSep: defines the line separator that should be used for writing. If None is
set, it uses the default value, ``\\n``. Maximum length is 1 character.
>>> df.write.csv(os.path.join(tempfile.mkdtemp(), 'data')) | python/pyspark/sql/readwriter.py | def csv(self, path, mode=None, compression=None, sep=None, quote=None, escape=None,
header=None, nullValue=None, escapeQuotes=None, quoteAll=None, dateFormat=None,
timestampFormat=None, ignoreLeadingWhiteSpace=None, ignoreTrailingWhiteSpace=None,
charToEscapeQuoteEscaping=None, encoding=None, emptyValue=None, lineSep=None):
r"""Saves the content of the :class:`DataFrame` in CSV format at the specified path.
:param path: the path in any Hadoop supported file system
:param mode: specifies the behavior of the save operation when data already exists.
* ``append``: Append contents of this :class:`DataFrame` to existing data.
* ``overwrite``: Overwrite existing data.
* ``ignore``: Silently ignore this operation if data already exists.
* ``error`` or ``errorifexists`` (default case): Throw an exception if data already \
exists.
:param compression: compression codec to use when saving to file. This can be one of the
known case-insensitive shortened names (none, bzip2, gzip, lz4,
snappy and deflate).
:param sep: sets a single character as a separator for each field and value. If None is
set, it uses the default value, ``,``.
:param quote: sets a single character used for escaping quoted values where the
separator can be part of the value. If None is set, it uses the default
value, ``"``. If an empty string is set, it uses ``u0000`` (null character).
:param escape: sets a single character used for escaping quotes inside an already
quoted value. If None is set, it uses the default value, ``\``
:param escapeQuotes: a flag indicating whether values containing quotes should always
be enclosed in quotes. If None is set, it uses the default value
``true``, escaping all values containing a quote character.
:param quoteAll: a flag indicating whether all values should always be enclosed in
quotes. If None is set, it uses the default value ``false``,
only escaping values containing a quote character.
:param header: writes the names of columns as the first line. If None is set, it uses
the default value, ``false``.
:param nullValue: sets the string representation of a null value. If None is set, it uses
the default value, empty string.
:param dateFormat: sets the string that indicates a date format. Custom date formats
follow the formats at ``java.time.format.DateTimeFormatter``. This
applies to date type. If None is set, it uses the
default value, ``yyyy-MM-dd``.
:param timestampFormat: sets the string that indicates a timestamp format.
Custom date formats follow the formats at
``java.time.format.DateTimeFormatter``.
This applies to timestamp type. If None is set, it uses the
default value, ``yyyy-MM-dd'T'HH:mm:ss.SSSXXX``.
:param ignoreLeadingWhiteSpace: a flag indicating whether or not leading whitespaces from
values being written should be skipped. If None is set, it
uses the default value, ``true``.
:param ignoreTrailingWhiteSpace: a flag indicating whether or not trailing whitespaces from
values being written should be skipped. If None is set, it
uses the default value, ``true``.
:param charToEscapeQuoteEscaping: sets a single character used for escaping the escape for
the quote character. If None is set, the default value is
escape character when escape and quote characters are
different, ``\0`` otherwise.
:param encoding: sets the encoding (charset) of saved csv files. If None is set,
the default UTF-8 charset will be used.
:param emptyValue: sets the string representation of an empty value. If None is set, it uses
the default value, ``""``.
:param lineSep: defines the line separator that should be used for writing. If None is
set, it uses the default value, ``\\n``. Maximum length is 1 character.
>>> df.write.csv(os.path.join(tempfile.mkdtemp(), 'data'))
"""
self.mode(mode)
self._set_opts(compression=compression, sep=sep, quote=quote, escape=escape, header=header,
nullValue=nullValue, escapeQuotes=escapeQuotes, quoteAll=quoteAll,
dateFormat=dateFormat, timestampFormat=timestampFormat,
ignoreLeadingWhiteSpace=ignoreLeadingWhiteSpace,
ignoreTrailingWhiteSpace=ignoreTrailingWhiteSpace,
charToEscapeQuoteEscaping=charToEscapeQuoteEscaping,
encoding=encoding, emptyValue=emptyValue, lineSep=lineSep)
self._jwrite.csv(path) | def csv(self, path, mode=None, compression=None, sep=None, quote=None, escape=None,
header=None, nullValue=None, escapeQuotes=None, quoteAll=None, dateFormat=None,
timestampFormat=None, ignoreLeadingWhiteSpace=None, ignoreTrailingWhiteSpace=None,
charToEscapeQuoteEscaping=None, encoding=None, emptyValue=None, lineSep=None):
r"""Saves the content of the :class:`DataFrame` in CSV format at the specified path.
:param path: the path in any Hadoop supported file system
:param mode: specifies the behavior of the save operation when data already exists.
* ``append``: Append contents of this :class:`DataFrame` to existing data.
* ``overwrite``: Overwrite existing data.
* ``ignore``: Silently ignore this operation if data already exists.
* ``error`` or ``errorifexists`` (default case): Throw an exception if data already \
exists.
:param compression: compression codec to use when saving to file. This can be one of the
known case-insensitive shortened names (none, bzip2, gzip, lz4,
snappy and deflate).
:param sep: sets a single character as a separator for each field and value. If None is
set, it uses the default value, ``,``.
:param quote: sets a single character used for escaping quoted values where the
separator can be part of the value. If None is set, it uses the default
value, ``"``. If an empty string is set, it uses ``u0000`` (null character).
:param escape: sets a single character used for escaping quotes inside an already
quoted value. If None is set, it uses the default value, ``\``
:param escapeQuotes: a flag indicating whether values containing quotes should always
be enclosed in quotes. If None is set, it uses the default value
``true``, escaping all values containing a quote character.
:param quoteAll: a flag indicating whether all values should always be enclosed in
quotes. If None is set, it uses the default value ``false``,
only escaping values containing a quote character.
:param header: writes the names of columns as the first line. If None is set, it uses
the default value, ``false``.
:param nullValue: sets the string representation of a null value. If None is set, it uses
the default value, empty string.
:param dateFormat: sets the string that indicates a date format. Custom date formats
follow the formats at ``java.time.format.DateTimeFormatter``. This
applies to date type. If None is set, it uses the
default value, ``yyyy-MM-dd``.
:param timestampFormat: sets the string that indicates a timestamp format.
Custom date formats follow the formats at
``java.time.format.DateTimeFormatter``.
This applies to timestamp type. If None is set, it uses the
default value, ``yyyy-MM-dd'T'HH:mm:ss.SSSXXX``.
:param ignoreLeadingWhiteSpace: a flag indicating whether or not leading whitespaces from
values being written should be skipped. If None is set, it
uses the default value, ``true``.
:param ignoreTrailingWhiteSpace: a flag indicating whether or not trailing whitespaces from
values being written should be skipped. If None is set, it
uses the default value, ``true``.
:param charToEscapeQuoteEscaping: sets a single character used for escaping the escape for
the quote character. If None is set, the default value is
escape character when escape and quote characters are
different, ``\0`` otherwise.
:param encoding: sets the encoding (charset) of saved csv files. If None is set,
the default UTF-8 charset will be used.
:param emptyValue: sets the string representation of an empty value. If None is set, it uses
the default value, ``""``.
:param lineSep: defines the line separator that should be used for writing. If None is
set, it uses the default value, ``\\n``. Maximum length is 1 character.
>>> df.write.csv(os.path.join(tempfile.mkdtemp(), 'data'))
"""
self.mode(mode)
self._set_opts(compression=compression, sep=sep, quote=quote, escape=escape, header=header,
nullValue=nullValue, escapeQuotes=escapeQuotes, quoteAll=quoteAll,
dateFormat=dateFormat, timestampFormat=timestampFormat,
ignoreLeadingWhiteSpace=ignoreLeadingWhiteSpace,
ignoreTrailingWhiteSpace=ignoreTrailingWhiteSpace,
charToEscapeQuoteEscaping=charToEscapeQuoteEscaping,
encoding=encoding, emptyValue=emptyValue, lineSep=lineSep)
self._jwrite.csv(path) | [
"r",
"Saves",
"the",
"content",
"of",
"the",
":",
"class",
":",
"DataFrame",
"in",
"CSV",
"format",
"at",
"the",
"specified",
"path",
"."
] | apache/spark | python | https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/sql/readwriter.py#L874-L945 | [
"def",
"csv",
"(",
"self",
",",
"path",
",",
"mode",
"=",
"None",
",",
"compression",
"=",
"None",
",",
"sep",
"=",
"None",
",",
"quote",
"=",
"None",
",",
"escape",
"=",
"None",
",",
"header",
"=",
"None",
",",
"nullValue",
"=",
"None",
",",
"escapeQuotes",
"=",
"None",
",",
"quoteAll",
"=",
"None",
",",
"dateFormat",
"=",
"None",
",",
"timestampFormat",
"=",
"None",
",",
"ignoreLeadingWhiteSpace",
"=",
"None",
",",
"ignoreTrailingWhiteSpace",
"=",
"None",
",",
"charToEscapeQuoteEscaping",
"=",
"None",
",",
"encoding",
"=",
"None",
",",
"emptyValue",
"=",
"None",
",",
"lineSep",
"=",
"None",
")",
":",
"self",
".",
"mode",
"(",
"mode",
")",
"self",
".",
"_set_opts",
"(",
"compression",
"=",
"compression",
",",
"sep",
"=",
"sep",
",",
"quote",
"=",
"quote",
",",
"escape",
"=",
"escape",
",",
"header",
"=",
"header",
",",
"nullValue",
"=",
"nullValue",
",",
"escapeQuotes",
"=",
"escapeQuotes",
",",
"quoteAll",
"=",
"quoteAll",
",",
"dateFormat",
"=",
"dateFormat",
",",
"timestampFormat",
"=",
"timestampFormat",
",",
"ignoreLeadingWhiteSpace",
"=",
"ignoreLeadingWhiteSpace",
",",
"ignoreTrailingWhiteSpace",
"=",
"ignoreTrailingWhiteSpace",
",",
"charToEscapeQuoteEscaping",
"=",
"charToEscapeQuoteEscaping",
",",
"encoding",
"=",
"encoding",
",",
"emptyValue",
"=",
"emptyValue",
",",
"lineSep",
"=",
"lineSep",
")",
"self",
".",
"_jwrite",
".",
"csv",
"(",
"path",
")"
] | 618d6bff71073c8c93501ab7392c3cc579730f0b |
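A sketch of the ``csv`` writer from the row above, touching a few of the many documented options; the data and path are illustrative.

```python
import os
import tempfile

from pyspark.sql import SparkSession

spark = SparkSession.builder.getOrCreate()
df = spark.createDataFrame([(1, "a,b"), (2, None)], ["id", "value"])

# Fields containing the separator are quoted, and nulls are rendered
# with the nullValue string rather than as empty fields.
df.write.csv(
    os.path.join(tempfile.mkdtemp(), "data"),
    mode="overwrite",
    header=True,
    nullValue="NULL",
    compression="gzip")
```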
train | DataFrameWriter.orc | Saves the content of the :class:`DataFrame` in ORC format at the specified path.
:param path: the path in any Hadoop supported file system
:param mode: specifies the behavior of the save operation when data already exists.
* ``append``: Append contents of this :class:`DataFrame` to existing data.
* ``overwrite``: Overwrite existing data.
* ``ignore``: Silently ignore this operation if data already exists.
* ``error`` or ``errorifexists`` (default case): Throw an exception if data already \
exists.
:param partitionBy: names of partitioning columns
:param compression: compression codec to use when saving to file. This can be one of the
known case-insensitive shortened names (none, snappy, zlib, and lzo).
This will override ``orc.compress`` and
``spark.sql.orc.compression.codec``. If None is set, it uses the value
specified in ``spark.sql.orc.compression.codec``.
>>> orc_df = spark.read.orc('python/test_support/sql/orc_partitioned')
>>> orc_df.write.orc(os.path.join(tempfile.mkdtemp(), 'data')) | python/pyspark/sql/readwriter.py | def orc(self, path, mode=None, partitionBy=None, compression=None):
"""Saves the content of the :class:`DataFrame` in ORC format at the specified path.
:param path: the path in any Hadoop supported file system
:param mode: specifies the behavior of the save operation when data already exists.
* ``append``: Append contents of this :class:`DataFrame` to existing data.
* ``overwrite``: Overwrite existing data.
* ``ignore``: Silently ignore this operation if data already exists.
* ``error`` or ``errorifexists`` (default case): Throw an exception if data already \
exists.
:param partitionBy: names of partitioning columns
:param compression: compression codec to use when saving to file. This can be one of the
known case-insensitive shortened names (none, snappy, zlib, and lzo).
This will override ``orc.compress`` and
``spark.sql.orc.compression.codec``. If None is set, it uses the value
specified in ``spark.sql.orc.compression.codec``.
>>> orc_df = spark.read.orc('python/test_support/sql/orc_partitioned')
>>> orc_df.write.orc(os.path.join(tempfile.mkdtemp(), 'data'))
"""
self.mode(mode)
if partitionBy is not None:
self.partitionBy(partitionBy)
self._set_opts(compression=compression)
self._jwrite.orc(path) | def orc(self, path, mode=None, partitionBy=None, compression=None):
"""Saves the content of the :class:`DataFrame` in ORC format at the specified path.
:param path: the path in any Hadoop supported file system
:param mode: specifies the behavior of the save operation when data already exists.
* ``append``: Append contents of this :class:`DataFrame` to existing data.
* ``overwrite``: Overwrite existing data.
* ``ignore``: Silently ignore this operation if data already exists.
* ``error`` or ``errorifexists`` (default case): Throw an exception if data already \
exists.
:param partitionBy: names of partitioning columns
:param compression: compression codec to use when saving to file. This can be one of the
known case-insensitive shortened names (none, snappy, zlib, and lzo).
This will override ``orc.compress`` and
``spark.sql.orc.compression.codec``. If None is set, it uses the value
specified in ``spark.sql.orc.compression.codec``.
>>> orc_df = spark.read.orc('python/test_support/sql/orc_partitioned')
>>> orc_df.write.orc(os.path.join(tempfile.mkdtemp(), 'data'))
"""
self.mode(mode)
if partitionBy is not None:
self.partitionBy(partitionBy)
self._set_opts(compression=compression)
self._jwrite.orc(path) | [
"Saves",
"the",
"content",
"of",
"the",
":",
"class",
":",
"DataFrame",
"in",
"ORC",
"format",
"at",
"the",
"specified",
"path",
"."
] | apache/spark | python | https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/sql/readwriter.py#L948-L973 | [
"def",
"orc",
"(",
"self",
",",
"path",
",",
"mode",
"=",
"None",
",",
"partitionBy",
"=",
"None",
",",
"compression",
"=",
"None",
")",
":",
"self",
".",
"mode",
"(",
"mode",
")",
"if",
"partitionBy",
"is",
"not",
"None",
":",
"self",
".",
"partitionBy",
"(",
"partitionBy",
")",
"self",
".",
"_set_opts",
"(",
"compression",
"=",
"compression",
")",
"self",
".",
"_jwrite",
".",
"orc",
"(",
"path",
")"
] | 618d6bff71073c8c93501ab7392c3cc579730f0b |
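A sketch of the ``orc`` writer from the row above; the column names and path are illustrative.

```python
import os
import tempfile

from pyspark.sql import SparkSession

spark = SparkSession.builder.getOrCreate()
df = spark.createDataFrame([(2014, "a")], ["year", "value"])

# An explicit codec overrides both orc.compress and
# spark.sql.orc.compression.codec for this write.
df.write.orc(
    os.path.join(tempfile.mkdtemp(), "data"),
    mode="overwrite",
    partitionBy="year",
    compression="zlib")
```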
train | DataFrameWriter.jdbc | Saves the content of the :class:`DataFrame` to an external database table via JDBC.
.. note:: Don't create too many partitions in parallel on a large cluster;
otherwise Spark might crash your external database systems.
:param url: a JDBC URL of the form ``jdbc:subprotocol:subname``
:param table: Name of the table in the external database.
:param mode: specifies the behavior of the save operation when data already exists.
* ``append``: Append contents of this :class:`DataFrame` to existing data.
* ``overwrite``: Overwrite existing data.
* ``ignore``: Silently ignore this operation if data already exists.
* ``error`` or ``errorifexists`` (default case): Throw an exception if data already \
exists.
:param properties: a dictionary of JDBC database connection arguments. Normally at
least properties "user" and "password" with their corresponding values.
For example { 'user' : 'SYSTEM', 'password' : 'mypassword' } | python/pyspark/sql/readwriter.py | def jdbc(self, url, table, mode=None, properties=None):
"""Saves the content of the :class:`DataFrame` to an external database table via JDBC.
.. note:: Don't create too many partitions in parallel on a large cluster;
otherwise Spark might crash your external database systems.
:param url: a JDBC URL of the form ``jdbc:subprotocol:subname``
:param table: Name of the table in the external database.
:param mode: specifies the behavior of the save operation when data already exists.
* ``append``: Append contents of this :class:`DataFrame` to existing data.
* ``overwrite``: Overwrite existing data.
* ``ignore``: Silently ignore this operation if data already exists.
* ``error`` or ``errorifexists`` (default case): Throw an exception if data already \
exists.
:param properties: a dictionary of JDBC database connection arguments. Normally at
least properties "user" and "password" with their corresponding values.
For example { 'user' : 'SYSTEM', 'password' : 'mypassword' }
"""
if properties is None:
properties = dict()
jprop = JavaClass("java.util.Properties", self._spark._sc._gateway._gateway_client)()
for k in properties:
jprop.setProperty(k, properties[k])
self.mode(mode)._jwrite.jdbc(url, table, jprop) | def jdbc(self, url, table, mode=None, properties=None):
"""Saves the content of the :class:`DataFrame` to an external database table via JDBC.
.. note:: Don't create too many partitions in parallel on a large cluster;
otherwise Spark might crash your external database systems.
:param url: a JDBC URL of the form ``jdbc:subprotocol:subname``
:param table: Name of the table in the external database.
:param mode: specifies the behavior of the save operation when data already exists.
* ``append``: Append contents of this :class:`DataFrame` to existing data.
* ``overwrite``: Overwrite existing data.
* ``ignore``: Silently ignore this operation if data already exists.
* ``error`` or ``errorifexists`` (default case): Throw an exception if data already \
exists.
:param properties: a dictionary of JDBC database connection arguments. Normally at
least properties "user" and "password" with their corresponding values.
For example { 'user' : 'SYSTEM', 'password' : 'mypassword' }
"""
if properties is None:
properties = dict()
jprop = JavaClass("java.util.Properties", self._spark._sc._gateway._gateway_client)()
for k in properties:
jprop.setProperty(k, properties[k])
self.mode(mode)._jwrite.jdbc(url, table, jprop) | [
"Saves",
"the",
"content",
"of",
"the",
":",
"class",
":",
"DataFrame",
"to",
"an",
"external",
"database",
"table",
"via",
"JDBC",
"."
] | apache/spark | python | https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/sql/readwriter.py#L976-L1000 | [
"def",
"jdbc",
"(",
"self",
",",
"url",
",",
"table",
",",
"mode",
"=",
"None",
",",
"properties",
"=",
"None",
")",
":",
"if",
"properties",
"is",
"None",
":",
"properties",
"=",
"dict",
"(",
")",
"jprop",
"=",
"JavaClass",
"(",
"\"java.util.Properties\"",
",",
"self",
".",
"_spark",
".",
"_sc",
".",
"_gateway",
".",
"_gateway_client",
")",
"(",
")",
"for",
"k",
"in",
"properties",
":",
"jprop",
".",
"setProperty",
"(",
"k",
",",
"properties",
"[",
"k",
"]",
")",
"self",
".",
"mode",
"(",
"mode",
")",
".",
"_jwrite",
".",
"jdbc",
"(",
"url",
",",
"table",
",",
"jprop",
")"
] | 618d6bff71073c8c93501ab7392c3cc579730f0b |
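A sketch of the ``jdbc`` writer from the row above, assuming a reachable PostgreSQL server with its JDBC driver on the Spark classpath; the URL, table name, and credentials are placeholders.

```python
from pyspark.sql import SparkSession

spark = SparkSession.builder.getOrCreate()
df = spark.createDataFrame([(1, "a")], ["id", "value"])

# Credentials travel in the properties dict; each DataFrame partition
# opens its own JDBC connection, hence the note about parallelism.
df.write.jdbc(
    url="jdbc:postgresql://localhost:5432/mydb",
    table="public.events",
    mode="append",
    properties={"user": "SYSTEM", "password": "mypassword",
                "driver": "org.postgresql.Driver"})
```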
train | KinesisUtils.createStream | Create an input stream that pulls messages from a Kinesis stream. This uses the
Kinesis Client Library (KCL) to pull messages from Kinesis.
.. note:: The given AWS credentials will get saved in DStream checkpoints if checkpointing
is enabled. Make sure that your checkpoint directory is secure.
:param ssc: StreamingContext object
:param kinesisAppName: Kinesis application name used by the Kinesis Client Library (KCL) to
update DynamoDB
:param streamName: Kinesis stream name
:param endpointUrl: Url of Kinesis service (e.g., https://kinesis.us-east-1.amazonaws.com)
:param regionName: Name of region used by the Kinesis Client Library (KCL) to update
DynamoDB (lease coordination and checkpointing) and CloudWatch (metrics)
:param initialPositionInStream: In the absence of Kinesis checkpoint info, this is the
worker's initial starting position in the stream. The
values are either the beginning of the stream per Kinesis'
limit of 24 hours (InitialPositionInStream.TRIM_HORIZON) or
the tip of the stream (InitialPositionInStream.LATEST).
:param checkpointInterval: Checkpoint interval for Kinesis checkpointing. See the Kinesis
Spark Streaming documentation for more details on the different
types of checkpoints.
:param storageLevel: Storage level to use for storing the received objects (default is
StorageLevel.MEMORY_AND_DISK_2)
:param awsAccessKeyId: AWS AccessKeyId (default is None. If None, will use
DefaultAWSCredentialsProviderChain)
:param awsSecretKey: AWS SecretKey (default is None. If None, will use
DefaultAWSCredentialsProviderChain)
:param decoder: A function used to decode value (default is utf8_decoder)
:param stsAssumeRoleArn: ARN of IAM role to assume when using STS sessions to read from
the Kinesis stream (default is None).
:param stsSessionName: Name to uniquely identify STS sessions used to read from Kinesis
stream, if STS is being used (default is None).
:param stsExternalId: External ID that can be used to validate against the assumed IAM
role's trust policy, if STS is being used (default is None).
:return: A DStream object | python/pyspark/streaming/kinesis.py | def createStream(ssc, kinesisAppName, streamName, endpointUrl, regionName,
initialPositionInStream, checkpointInterval,
storageLevel=StorageLevel.MEMORY_AND_DISK_2,
awsAccessKeyId=None, awsSecretKey=None, decoder=utf8_decoder,
stsAssumeRoleArn=None, stsSessionName=None, stsExternalId=None):
"""
Create an input stream that pulls messages from a Kinesis stream. This uses the
Kinesis Client Library (KCL) to pull messages from Kinesis.
.. note:: The given AWS credentials will get saved in DStream checkpoints if checkpointing
is enabled. Make sure that your checkpoint directory is secure.
:param ssc: StreamingContext object
:param kinesisAppName: Kinesis application name used by the Kinesis Client Library (KCL) to
update DynamoDB
:param streamName: Kinesis stream name
:param endpointUrl: Url of Kinesis service (e.g., https://kinesis.us-east-1.amazonaws.com)
:param regionName: Name of region used by the Kinesis Client Library (KCL) to update
DynamoDB (lease coordination and checkpointing) and CloudWatch (metrics)
:param initialPositionInStream: In the absence of Kinesis checkpoint info, this is the
worker's initial starting position in the stream. The
values are either the beginning of the stream per Kinesis'
limit of 24 hours (InitialPositionInStream.TRIM_HORIZON) or
the tip of the stream (InitialPositionInStream.LATEST).
:param checkpointInterval: Checkpoint interval for Kinesis checkpointing. See the Kinesis
Spark Streaming documentation for more details on the different
types of checkpoints.
:param storageLevel: Storage level to use for storing the received objects (default is
StorageLevel.MEMORY_AND_DISK_2)
:param awsAccessKeyId: AWS AccessKeyId (default is None. If None, will use
DefaultAWSCredentialsProviderChain)
:param awsSecretKey: AWS SecretKey (default is None. If None, will use
DefaultAWSCredentialsProviderChain)
:param decoder: A function used to decode value (default is utf8_decoder)
:param stsAssumeRoleArn: ARN of IAM role to assume when using STS sessions to read from
the Kinesis stream (default is None).
:param stsSessionName: Name to uniquely identify STS sessions used to read from Kinesis
stream, if STS is being used (default is None).
:param stsExternalId: External ID that can be used to validate against the assumed IAM
role's trust policy, if STS is being used (default is None).
:return: A DStream object
"""
jlevel = ssc._sc._getJavaStorageLevel(storageLevel)
jduration = ssc._jduration(checkpointInterval)
try:
# Use KinesisUtilsPythonHelper to access Scala's KinesisUtils
helper = ssc._jvm.org.apache.spark.streaming.kinesis.KinesisUtilsPythonHelper()
except TypeError as e:
if str(e) == "'JavaPackage' object is not callable":
_print_missing_jar(
"Streaming's Kinesis",
"streaming-kinesis-asl",
"streaming-kinesis-asl-assembly",
ssc.sparkContext.version)
raise
jstream = helper.createStream(ssc._jssc, kinesisAppName, streamName, endpointUrl,
regionName, initialPositionInStream, jduration, jlevel,
awsAccessKeyId, awsSecretKey, stsAssumeRoleArn,
stsSessionName, stsExternalId)
stream = DStream(jstream, ssc, NoOpSerializer())
return stream.map(lambda v: decoder(v)) | def createStream(ssc, kinesisAppName, streamName, endpointUrl, regionName,
initialPositionInStream, checkpointInterval,
storageLevel=StorageLevel.MEMORY_AND_DISK_2,
awsAccessKeyId=None, awsSecretKey=None, decoder=utf8_decoder,
stsAssumeRoleArn=None, stsSessionName=None, stsExternalId=None):
"""
Create an input stream that pulls messages from a Kinesis stream. This uses the
Kinesis Client Library (KCL) to pull messages from Kinesis.
.. note:: The given AWS credentials will get saved in DStream checkpoints if checkpointing
is enabled. Make sure that your checkpoint directory is secure.
:param ssc: StreamingContext object
:param kinesisAppName: Kinesis application name used by the Kinesis Client Library (KCL) to
update DynamoDB
:param streamName: Kinesis stream name
:param endpointUrl: Url of Kinesis service (e.g., https://kinesis.us-east-1.amazonaws.com)
:param regionName: Name of region used by the Kinesis Client Library (KCL) to update
DynamoDB (lease coordination and checkpointing) and CloudWatch (metrics)
:param initialPositionInStream: In the absence of Kinesis checkpoint info, this is the
worker's initial starting position in the stream. The
values are either the beginning of the stream per Kinesis'
limit of 24 hours (InitialPositionInStream.TRIM_HORIZON) or
the tip of the stream (InitialPositionInStream.LATEST).
:param checkpointInterval: Checkpoint interval for Kinesis checkpointing. See the Kinesis
Spark Streaming documentation for more details on the different
types of checkpoints.
:param storageLevel: Storage level to use for storing the received objects (default is
StorageLevel.MEMORY_AND_DISK_2)
:param awsAccessKeyId: AWS AccessKeyId (default is None. If None, will use
DefaultAWSCredentialsProviderChain)
:param awsSecretKey: AWS SecretKey (default is None. If None, will use
DefaultAWSCredentialsProviderChain)
:param decoder: A function used to decode value (default is utf8_decoder)
:param stsAssumeRoleArn: ARN of IAM role to assume when using STS sessions to read from
the Kinesis stream (default is None).
:param stsSessionName: Name to uniquely identify STS sessions used to read from Kinesis
stream, if STS is being used (default is None).
:param stsExternalId: External ID that can be used to validate against the assumed IAM
role's trust policy, if STS is being used (default is None).
:return: A DStream object
"""
jlevel = ssc._sc._getJavaStorageLevel(storageLevel)
jduration = ssc._jduration(checkpointInterval)
try:
# Use KinesisUtilsPythonHelper to access Scala's KinesisUtils
helper = ssc._jvm.org.apache.spark.streaming.kinesis.KinesisUtilsPythonHelper()
except TypeError as e:
if str(e) == "'JavaPackage' object is not callable":
_print_missing_jar(
"Streaming's Kinesis",
"streaming-kinesis-asl",
"streaming-kinesis-asl-assembly",
ssc.sparkContext.version)
raise
jstream = helper.createStream(ssc._jssc, kinesisAppName, streamName, endpointUrl,
regionName, initialPositionInStream, jduration, jlevel,
awsAccessKeyId, awsSecretKey, stsAssumeRoleArn,
stsSessionName, stsExternalId)
stream = DStream(jstream, ssc, NoOpSerializer())
return stream.map(lambda v: decoder(v)) | [
"Create",
"an",
"input",
"stream",
"that",
"pulls",
"messages",
"from",
"a",
"Kinesis",
"stream",
".",
"This",
"uses",
"the",
"Kinesis",
"Client",
"Library",
"(",
"KCL",
")",
"to",
"pull",
"messages",
"from",
"Kinesis",
"."
] | apache/spark | python | https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/streaming/kinesis.py#L37-L98 | [
"def",
"createStream",
"(",
"ssc",
",",
"kinesisAppName",
",",
"streamName",
",",
"endpointUrl",
",",
"regionName",
",",
"initialPositionInStream",
",",
"checkpointInterval",
",",
"storageLevel",
"=",
"StorageLevel",
".",
"MEMORY_AND_DISK_2",
",",
"awsAccessKeyId",
"=",
"None",
",",
"awsSecretKey",
"=",
"None",
",",
"decoder",
"=",
"utf8_decoder",
",",
"stsAssumeRoleArn",
"=",
"None",
",",
"stsSessionName",
"=",
"None",
",",
"stsExternalId",
"=",
"None",
")",
":",
"jlevel",
"=",
"ssc",
".",
"_sc",
".",
"_getJavaStorageLevel",
"(",
"storageLevel",
")",
"jduration",
"=",
"ssc",
".",
"_jduration",
"(",
"checkpointInterval",
")",
"try",
":",
"# Use KinesisUtilsPythonHelper to access Scala's KinesisUtils",
"helper",
"=",
"ssc",
".",
"_jvm",
".",
"org",
".",
"apache",
".",
"spark",
".",
"streaming",
".",
"kinesis",
".",
"KinesisUtilsPythonHelper",
"(",
")",
"except",
"TypeError",
"as",
"e",
":",
"if",
"str",
"(",
"e",
")",
"==",
"\"'JavaPackage' object is not callable\"",
":",
"_print_missing_jar",
"(",
"\"Streaming's Kinesis\"",
",",
"\"streaming-kinesis-asl\"",
",",
"\"streaming-kinesis-asl-assembly\"",
",",
"ssc",
".",
"sparkContext",
".",
"version",
")",
"raise",
"jstream",
"=",
"helper",
".",
"createStream",
"(",
"ssc",
".",
"_jssc",
",",
"kinesisAppName",
",",
"streamName",
",",
"endpointUrl",
",",
"regionName",
",",
"initialPositionInStream",
",",
"jduration",
",",
"jlevel",
",",
"awsAccessKeyId",
",",
"awsSecretKey",
",",
"stsAssumeRoleArn",
",",
"stsSessionName",
",",
"stsExternalId",
")",
"stream",
"=",
"DStream",
"(",
"jstream",
",",
"ssc",
",",
"NoOpSerializer",
"(",
")",
")",
"return",
"stream",
".",
"map",
"(",
"lambda",
"v",
":",
"decoder",
"(",
"v",
")",
")"
] | 618d6bff71073c8c93501ab7392c3cc579730f0b |
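A minimal usage sketch for the createStream record above, assuming a live StreamingContext and an existing Kinesis stream; the app name, stream name, endpoint, and region below are placeholders, not values from the record. Credentials fall back to DefaultAWSCredentialsProviderChain because awsAccessKeyId/awsSecretKey keep their None defaults, and running this requires the streaming-kinesis-asl assembly jar on the classpath, as the _print_missing_jar branch in the code suggests.

from pyspark import SparkContext
from pyspark.streaming import StreamingContext
from pyspark.streaming.kinesis import KinesisUtils, InitialPositionInStream

sc = SparkContext(appName="KinesisSketch")
ssc = StreamingContext(sc, 2)  # 2-second batches

# Start at the tip of the stream; checkpoint Kinesis progress every 2 seconds.
lines = KinesisUtils.createStream(
    ssc, "myKinesisApp", "myStream",
    "https://kinesis.us-east-1.amazonaws.com", "us-east-1",
    InitialPositionInStream.LATEST, 2)
lines.pprint()
ssc.start()
ssc.awaitTermination()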
train | choose_jira_assignee | Prompt the user to choose who to assign the issue to in jira, given a list of candidates,
including the original reporter and all commentors | dev/merge_spark_pr.py | def choose_jira_assignee(issue, asf_jira):
"""
Prompt the user to choose who to assign the issue to in jira, given a list of candidates,
including the original reporter and all commentors
"""
while True:
try:
reporter = issue.fields.reporter
commentors = map(lambda x: x.author, issue.fields.comment.comments)
candidates = set(commentors)
candidates.add(reporter)
candidates = list(candidates)
print("JIRA is unassigned, choose assignee")
for idx, author in enumerate(candidates):
if author.key == "apachespark":
continue
annotations = ["Reporter"] if author == reporter else []
if author in commentors:
annotations.append("Commentor")
print("[%d] %s (%s)" % (idx, author.displayName, ",".join(annotations)))
raw_assignee = input(
"Enter number of user, or userid, to assign to (blank to leave unassigned):")
if raw_assignee == "":
return None
else:
try:
id = int(raw_assignee)
assignee = candidates[id]
except:
# assume it's a user id, and try to assign (might fail, we just prompt again)
assignee = asf_jira.user(raw_assignee)
asf_jira.assign_issue(issue.key, assignee.key)
return assignee
except KeyboardInterrupt:
raise
except:
traceback.print_exc()
print("Error assigning JIRA, try again (or leave blank and fix manually)") | def choose_jira_assignee(issue, asf_jira):
"""
Prompt the user to choose who to assign the issue to in jira, given a list of candidates,
including the original reporter and all commentors
"""
while True:
try:
reporter = issue.fields.reporter
commentors = map(lambda x: x.author, issue.fields.comment.comments)
candidates = set(commentors)
candidates.add(reporter)
candidates = list(candidates)
print("JIRA is unassigned, choose assignee")
for idx, author in enumerate(candidates):
if author.key == "apachespark":
continue
annotations = ["Reporter"] if author == reporter else []
if author in commentors:
annotations.append("Commentor")
print("[%d] %s (%s)" % (idx, author.displayName, ",".join(annotations)))
raw_assignee = input(
"Enter number of user, or userid, to assign to (blank to leave unassigned):")
if raw_assignee == "":
return None
else:
try:
id = int(raw_assignee)
assignee = candidates[id]
except:
# assume it's a user id, and try to assign (might fail, we just prompt again)
assignee = asf_jira.user(raw_assignee)
asf_jira.assign_issue(issue.key, assignee.key)
return assignee
except KeyboardInterrupt:
raise
except:
traceback.print_exc()
print("Error assigning JIRA, try again (or leave blank and fix manually)") | [
"Prompt",
"the",
"user",
"to",
"choose",
"who",
"to",
"assign",
"the",
"issue",
"to",
"in",
"jira",
"given",
"a",
"list",
"of",
"candidates",
"including",
"the",
"original",
"reporter",
"and",
"all",
"commentors"
] | apache/spark | python | https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/dev/merge_spark_pr.py#L325-L362 | [
"def",
"choose_jira_assignee",
"(",
"issue",
",",
"asf_jira",
")",
":",
"while",
"True",
":",
"try",
":",
"reporter",
"=",
"issue",
".",
"fields",
".",
"reporter",
"commentors",
"=",
"map",
"(",
"lambda",
"x",
":",
"x",
".",
"author",
",",
"issue",
".",
"fields",
".",
"comment",
".",
"comments",
")",
"candidates",
"=",
"set",
"(",
"commentors",
")",
"candidates",
".",
"add",
"(",
"reporter",
")",
"candidates",
"=",
"list",
"(",
"candidates",
")",
"print",
"(",
"\"JIRA is unassigned, choose assignee\"",
")",
"for",
"idx",
",",
"author",
"in",
"enumerate",
"(",
"candidates",
")",
":",
"if",
"author",
".",
"key",
"==",
"\"apachespark\"",
":",
"continue",
"annotations",
"=",
"[",
"\"Reporter\"",
"]",
"if",
"author",
"==",
"reporter",
"else",
"[",
"]",
"if",
"author",
"in",
"commentors",
":",
"annotations",
".",
"append",
"(",
"\"Commentor\"",
")",
"print",
"(",
"\"[%d] %s (%s)\"",
"%",
"(",
"idx",
",",
"author",
".",
"displayName",
",",
"\",\"",
".",
"join",
"(",
"annotations",
")",
")",
")",
"raw_assignee",
"=",
"input",
"(",
"\"Enter number of user, or userid, to assign to (blank to leave unassigned):\"",
")",
"if",
"raw_assignee",
"==",
"\"\"",
":",
"return",
"None",
"else",
":",
"try",
":",
"id",
"=",
"int",
"(",
"raw_assignee",
")",
"assignee",
"=",
"candidates",
"[",
"id",
"]",
"except",
":",
"# assume it's a user id, and try to assign (might fail, we just prompt again)",
"assignee",
"=",
"asf_jira",
".",
"user",
"(",
"raw_assignee",
")",
"asf_jira",
".",
"assign_issue",
"(",
"issue",
".",
"key",
",",
"assignee",
".",
"key",
")",
"return",
"assignee",
"except",
"KeyboardInterrupt",
":",
"raise",
"except",
":",
"traceback",
".",
"print_exc",
"(",
")",
"print",
"(",
"\"Error assigning JIRA, try again (or leave blank and fix manually)\"",
")"
] | 618d6bff71073c8c93501ab7392c3cc579730f0b |
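A non-interactive sketch of the candidate-building step in choose_jira_assignee, using stand-in objects rather than live JIRA issues (every name here is hypothetical). One caveat worth illustrating: the record binds commentors to map(...), which on Python 3 is a one-shot iterator, so after set(commentors) consumes it the later `author in commentors` test is always False; materializing a list first, as below, keeps the Commentor annotation working.

from collections import namedtuple

Author = namedtuple("Author", ["key", "displayName"])

reporter = Author("alice", "Alice")
commentors = [Author("bob", "Bob"), Author("alice", "Alice")]  # a real list, not a map iterator

candidates = list(set(commentors) | {reporter})  # dedupes Alice, who both reported and commented
for idx, author in enumerate(candidates):
    annotations = ["Reporter"] if author == reporter else []
    if author in commentors:
        annotations.append("Commentor")
    print("[%d] %s (%s)" % (idx, author.displayName, ",".join(annotations)))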
train | standardize_jira_ref | Standardize the [SPARK-XXXXX] [MODULE] prefix
Converts "[SPARK-XXX][mllib] Issue", "[MLLib] SPARK-XXX. Issue" or "SPARK XXX [MLLIB]: Issue" to
"[SPARK-XXX][MLLIB] Issue"
>>> standardize_jira_ref(
... "[SPARK-5821] [SQL] ParquetRelation2 CTAS should check if delete is successful")
'[SPARK-5821][SQL] ParquetRelation2 CTAS should check if delete is successful'
>>> standardize_jira_ref(
... "[SPARK-4123][Project Infra][WIP]: Show new dependencies added in pull requests")
'[SPARK-4123][PROJECT INFRA][WIP] Show new dependencies added in pull requests'
>>> standardize_jira_ref("[MLlib] Spark 5954: Top by key")
'[SPARK-5954][MLLIB] Top by key'
>>> standardize_jira_ref("[SPARK-979] a LRU scheduler for load balancing in TaskSchedulerImpl")
'[SPARK-979] a LRU scheduler for load balancing in TaskSchedulerImpl'
>>> standardize_jira_ref(
... "SPARK-1094 Support MiMa for reporting binary compatibility across versions.")
'[SPARK-1094] Support MiMa for reporting binary compatibility across versions.'
>>> standardize_jira_ref("[WIP] [SPARK-1146] Vagrant support for Spark")
'[SPARK-1146][WIP] Vagrant support for Spark'
>>> standardize_jira_ref(
... "SPARK-1032. If Yarn app fails before registering, app master stays aroun...")
'[SPARK-1032] If Yarn app fails before registering, app master stays aroun...'
>>> standardize_jira_ref(
... "[SPARK-6250][SPARK-6146][SPARK-5911][SQL] Types are now reserved words in DDL parser.")
'[SPARK-6250][SPARK-6146][SPARK-5911][SQL] Types are now reserved words in DDL parser.'
>>> standardize_jira_ref("Additional information for users building from source code")
'Additional information for users building from source code' | dev/merge_spark_pr.py | def standardize_jira_ref(text):
"""
Standardize the [SPARK-XXXXX] [MODULE] prefix
Converts "[SPARK-XXX][mllib] Issue", "[MLLib] SPARK-XXX. Issue" or "SPARK XXX [MLLIB]: Issue" to
"[SPARK-XXX][MLLIB] Issue"
>>> standardize_jira_ref(
... "[SPARK-5821] [SQL] ParquetRelation2 CTAS should check if delete is successful")
'[SPARK-5821][SQL] ParquetRelation2 CTAS should check if delete is successful'
>>> standardize_jira_ref(
... "[SPARK-4123][Project Infra][WIP]: Show new dependencies added in pull requests")
'[SPARK-4123][PROJECT INFRA][WIP] Show new dependencies added in pull requests'
>>> standardize_jira_ref("[MLlib] Spark 5954: Top by key")
'[SPARK-5954][MLLIB] Top by key'
>>> standardize_jira_ref("[SPARK-979] a LRU scheduler for load balancing in TaskSchedulerImpl")
'[SPARK-979] a LRU scheduler for load balancing in TaskSchedulerImpl'
>>> standardize_jira_ref(
... "SPARK-1094 Support MiMa for reporting binary compatibility across versions.")
'[SPARK-1094] Support MiMa for reporting binary compatibility across versions.'
>>> standardize_jira_ref("[WIP] [SPARK-1146] Vagrant support for Spark")
'[SPARK-1146][WIP] Vagrant support for Spark'
>>> standardize_jira_ref(
... "SPARK-1032. If Yarn app fails before registering, app master stays aroun...")
'[SPARK-1032] If Yarn app fails before registering, app master stays aroun...'
>>> standardize_jira_ref(
... "[SPARK-6250][SPARK-6146][SPARK-5911][SQL] Types are now reserved words in DDL parser.")
'[SPARK-6250][SPARK-6146][SPARK-5911][SQL] Types are now reserved words in DDL parser.'
>>> standardize_jira_ref("Additional information for users building from source code")
'Additional information for users building from source code'
"""
jira_refs = []
components = []
# If the string is compliant, no need to process any further
if (re.search(r'^\[SPARK-[0-9]{3,6}\](\[[A-Z0-9_\s,]+\] )+\S+', text)):
return text
# Extract JIRA ref(s):
pattern = re.compile(r'(SPARK[-\s]*[0-9]{3,6})+', re.IGNORECASE)
for ref in pattern.findall(text):
# Add brackets, replace spaces with a dash, & convert to uppercase
jira_refs.append('[' + re.sub(r'\s+', '-', ref.upper()) + ']')
text = text.replace(ref, '')
# Extract spark component(s):
# Look for alphanumeric chars, spaces, dashes, periods, and/or commas
pattern = re.compile(r'(\[[\w\s,.-]+\])', re.IGNORECASE)
for component in pattern.findall(text):
components.append(component.upper())
text = text.replace(component, '')
# Cleanup any remaining symbols:
pattern = re.compile(r'^\W+(.*)', re.IGNORECASE)
if (pattern.search(text) is not None):
text = pattern.search(text).groups()[0]
# Assemble full text (JIRA ref(s), module(s), remaining text)
clean_text = ''.join(jira_refs).strip() + ''.join(components).strip() + " " + text.strip()
# Replace multiple spaces with a single space, e.g. if no jira refs and/or components were
# included
clean_text = re.sub(r'\s+', ' ', clean_text.strip())
return clean_text | def standardize_jira_ref(text):
"""
Standardize the [SPARK-XXXXX] [MODULE] prefix
Converts "[SPARK-XXX][mllib] Issue", "[MLLib] SPARK-XXX. Issue" or "SPARK XXX [MLLIB]: Issue" to
"[SPARK-XXX][MLLIB] Issue"
>>> standardize_jira_ref(
... "[SPARK-5821] [SQL] ParquetRelation2 CTAS should check if delete is successful")
'[SPARK-5821][SQL] ParquetRelation2 CTAS should check if delete is successful'
>>> standardize_jira_ref(
... "[SPARK-4123][Project Infra][WIP]: Show new dependencies added in pull requests")
'[SPARK-4123][PROJECT INFRA][WIP] Show new dependencies added in pull requests'
>>> standardize_jira_ref("[MLlib] Spark 5954: Top by key")
'[SPARK-5954][MLLIB] Top by key'
>>> standardize_jira_ref("[SPARK-979] a LRU scheduler for load balancing in TaskSchedulerImpl")
'[SPARK-979] a LRU scheduler for load balancing in TaskSchedulerImpl'
>>> standardize_jira_ref(
... "SPARK-1094 Support MiMa for reporting binary compatibility across versions.")
'[SPARK-1094] Support MiMa for reporting binary compatibility across versions.'
>>> standardize_jira_ref("[WIP] [SPARK-1146] Vagrant support for Spark")
'[SPARK-1146][WIP] Vagrant support for Spark'
>>> standardize_jira_ref(
... "SPARK-1032. If Yarn app fails before registering, app master stays aroun...")
'[SPARK-1032] If Yarn app fails before registering, app master stays aroun...'
>>> standardize_jira_ref(
... "[SPARK-6250][SPARK-6146][SPARK-5911][SQL] Types are now reserved words in DDL parser.")
'[SPARK-6250][SPARK-6146][SPARK-5911][SQL] Types are now reserved words in DDL parser.'
>>> standardize_jira_ref("Additional information for users building from source code")
'Additional information for users building from source code'
"""
jira_refs = []
components = []
# If the string is compliant, no need to process any further
if (re.search(r'^\[SPARK-[0-9]{3,6}\](\[[A-Z0-9_\s,]+\] )+\S+', text)):
return text
# Extract JIRA ref(s):
pattern = re.compile(r'(SPARK[-\s]*[0-9]{3,6})+', re.IGNORECASE)
for ref in pattern.findall(text):
# Add brackets, replace spaces with a dash, & convert to uppercase
jira_refs.append('[' + re.sub(r'\s+', '-', ref.upper()) + ']')
text = text.replace(ref, '')
# Extract spark component(s):
# Look for alphanumeric chars, spaces, dashes, periods, and/or commas
pattern = re.compile(r'(\[[\w\s,.-]+\])', re.IGNORECASE)
for component in pattern.findall(text):
components.append(component.upper())
text = text.replace(component, '')
# Cleanup any remaining symbols:
pattern = re.compile(r'^\W+(.*)', re.IGNORECASE)
if (pattern.search(text) is not None):
text = pattern.search(text).groups()[0]
# Assemble full text (JIRA ref(s), module(s), remaining text)
clean_text = ''.join(jira_refs).strip() + ''.join(components).strip() + " " + text.strip()
# Replace multiple spaces with a single space, e.g. if no jira refs and/or components were
# included
clean_text = re.sub(r'\s+', ' ', clean_text.strip())
return clean_text | [
"Standardize",
"the",
"[",
"SPARK",
"-",
"XXXXX",
"]",
"[",
"MODULE",
"]",
"prefix",
"Converts",
"[",
"SPARK",
"-",
"XXX",
"]",
"[",
"mllib",
"]",
"Issue",
"[",
"MLLib",
"]",
"SPARK",
"-",
"XXX",
".",
"Issue",
"or",
"SPARK",
"XXX",
"[",
"MLLIB",
"]",
":",
"Issue",
"to",
"[",
"SPARK",
"-",
"XXX",
"]",
"[",
"MLLIB",
"]",
"Issue"
] | apache/spark | python | https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/dev/merge_spark_pr.py#L374-L437 | [
"def",
"standardize_jira_ref",
"(",
"text",
")",
":",
"jira_refs",
"=",
"[",
"]",
"components",
"=",
"[",
"]",
"# If the string is compliant, no need to process any further",
"if",
"(",
"re",
".",
"search",
"(",
"r'^\\[SPARK-[0-9]{3,6}\\](\\[[A-Z0-9_\\s,]+\\] )+\\S+'",
",",
"text",
")",
")",
":",
"return",
"text",
"# Extract JIRA ref(s):",
"pattern",
"=",
"re",
".",
"compile",
"(",
"r'(SPARK[-\\s]*[0-9]{3,6})+'",
",",
"re",
".",
"IGNORECASE",
")",
"for",
"ref",
"in",
"pattern",
".",
"findall",
"(",
"text",
")",
":",
"# Add brackets, replace spaces with a dash, & convert to uppercase",
"jira_refs",
".",
"append",
"(",
"'['",
"+",
"re",
".",
"sub",
"(",
"r'\\s+'",
",",
"'-'",
",",
"ref",
".",
"upper",
"(",
")",
")",
"+",
"']'",
")",
"text",
"=",
"text",
".",
"replace",
"(",
"ref",
",",
"''",
")",
"# Extract spark component(s):",
"# Look for alphanumeric chars, spaces, dashes, periods, and/or commas",
"pattern",
"=",
"re",
".",
"compile",
"(",
"r'(\\[[\\w\\s,.-]+\\])'",
",",
"re",
".",
"IGNORECASE",
")",
"for",
"component",
"in",
"pattern",
".",
"findall",
"(",
"text",
")",
":",
"components",
".",
"append",
"(",
"component",
".",
"upper",
"(",
")",
")",
"text",
"=",
"text",
".",
"replace",
"(",
"component",
",",
"''",
")",
"# Cleanup any remaining symbols:",
"pattern",
"=",
"re",
".",
"compile",
"(",
"r'^\\W+(.*)'",
",",
"re",
".",
"IGNORECASE",
")",
"if",
"(",
"pattern",
".",
"search",
"(",
"text",
")",
"is",
"not",
"None",
")",
":",
"text",
"=",
"pattern",
".",
"search",
"(",
"text",
")",
".",
"groups",
"(",
")",
"[",
"0",
"]",
"# Assemble full text (JIRA ref(s), module(s), remaining text)",
"clean_text",
"=",
"''",
".",
"join",
"(",
"jira_refs",
")",
".",
"strip",
"(",
")",
"+",
"''",
".",
"join",
"(",
"components",
")",
".",
"strip",
"(",
")",
"+",
"\" \"",
"+",
"text",
".",
"strip",
"(",
")",
"# Replace multiple spaces with a single space, e.g. if no jira refs and/or components were",
"# included",
"clean_text",
"=",
"re",
".",
"sub",
"(",
"r'\\s+'",
",",
"' '",
",",
"clean_text",
".",
"strip",
"(",
")",
")",
"return",
"clean_text"
] | 618d6bff71073c8c93501ab7392c3cc579730f0b |
train | MLUtils._parse_libsvm_line | Parses a line in LIBSVM format into (label, indices, values). | python/pyspark/mllib/util.py | def _parse_libsvm_line(line):
"""
Parses a line in LIBSVM format into (label, indices, values).
"""
items = line.split(None)
label = float(items[0])
nnz = len(items) - 1
indices = np.zeros(nnz, dtype=np.int32)
values = np.zeros(nnz)
for i in xrange(nnz):
index, value = items[1 + i].split(":")
indices[i] = int(index) - 1
values[i] = float(value)
return label, indices, values | def _parse_libsvm_line(line):
"""
Parses a line in LIBSVM format into (label, indices, values).
"""
items = line.split(None)
label = float(items[0])
nnz = len(items) - 1
indices = np.zeros(nnz, dtype=np.int32)
values = np.zeros(nnz)
for i in xrange(nnz):
index, value = items[1 + i].split(":")
indices[i] = int(index) - 1
values[i] = float(value)
return label, indices, values | [
"Parses",
"a",
"line",
"in",
"LIBSVM",
"format",
"into",
"(",
"label",
"indices",
"values",
")",
"."
] | apache/spark | python | https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/mllib/util.py#L40-L53 | [
"def",
"_parse_libsvm_line",
"(",
"line",
")",
":",
"items",
"=",
"line",
".",
"split",
"(",
"None",
")",
"label",
"=",
"float",
"(",
"items",
"[",
"0",
"]",
")",
"nnz",
"=",
"len",
"(",
"items",
")",
"-",
"1",
"indices",
"=",
"np",
".",
"zeros",
"(",
"nnz",
",",
"dtype",
"=",
"np",
".",
"int32",
")",
"values",
"=",
"np",
".",
"zeros",
"(",
"nnz",
")",
"for",
"i",
"in",
"xrange",
"(",
"nnz",
")",
":",
"index",
",",
"value",
"=",
"items",
"[",
"1",
"+",
"i",
"]",
".",
"split",
"(",
"\":\"",
")",
"indices",
"[",
"i",
"]",
"=",
"int",
"(",
"index",
")",
"-",
"1",
"values",
"[",
"i",
"]",
"=",
"float",
"(",
"value",
")",
"return",
"label",
",",
"indices",
",",
"values"
] | 618d6bff71073c8c93501ab7392c3cc579730f0b |
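The same parsing logic as the record above in plain Python 3, since the record's loop relies on Python 2's xrange; the sample line is made up.

import numpy as np

def parse_libsvm_line(line):
    items = line.split()
    label = float(items[0])
    pairs = [item.split(":") for item in items[1:]]
    # LIBSVM indices are one-based; shift to zero-based on the way in.
    indices = np.array([int(i) - 1 for i, _ in pairs], dtype=np.int32)
    values = np.array([float(v) for _, v in pairs])
    return label, indices, values

print(parse_libsvm_line("+1 1:1.0 3:2.0 5:3.0"))
# (1.0, array([0, 2, 4], dtype=int32), array([1., 2., 3.]))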
train | MLUtils._convert_labeled_point_to_libsvm | Converts a LabeledPoint to a string in LIBSVM format. | python/pyspark/mllib/util.py | def _convert_labeled_point_to_libsvm(p):
"""Converts a LabeledPoint to a string in LIBSVM format."""
from pyspark.mllib.regression import LabeledPoint
assert isinstance(p, LabeledPoint)
items = [str(p.label)]
v = _convert_to_vector(p.features)
if isinstance(v, SparseVector):
nnz = len(v.indices)
for i in xrange(nnz):
items.append(str(v.indices[i] + 1) + ":" + str(v.values[i]))
else:
for i in xrange(len(v)):
items.append(str(i + 1) + ":" + str(v[i]))
return " ".join(items) | def _convert_labeled_point_to_libsvm(p):
"""Converts a LabeledPoint to a string in LIBSVM format."""
from pyspark.mllib.regression import LabeledPoint
assert isinstance(p, LabeledPoint)
items = [str(p.label)]
v = _convert_to_vector(p.features)
if isinstance(v, SparseVector):
nnz = len(v.indices)
for i in xrange(nnz):
items.append(str(v.indices[i] + 1) + ":" + str(v.values[i]))
else:
for i in xrange(len(v)):
items.append(str(i + 1) + ":" + str(v[i]))
return " ".join(items) | [
"Converts",
"a",
"LabeledPoint",
"to",
"a",
"string",
"in",
"LIBSVM",
"format",
"."
] | apache/spark | python | https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/mllib/util.py#L56-L69 | [
"def",
"_convert_labeled_point_to_libsvm",
"(",
"p",
")",
":",
"from",
"pyspark",
".",
"mllib",
".",
"regression",
"import",
"LabeledPoint",
"assert",
"isinstance",
"(",
"p",
",",
"LabeledPoint",
")",
"items",
"=",
"[",
"str",
"(",
"p",
".",
"label",
")",
"]",
"v",
"=",
"_convert_to_vector",
"(",
"p",
".",
"features",
")",
"if",
"isinstance",
"(",
"v",
",",
"SparseVector",
")",
":",
"nnz",
"=",
"len",
"(",
"v",
".",
"indices",
")",
"for",
"i",
"in",
"xrange",
"(",
"nnz",
")",
":",
"items",
".",
"append",
"(",
"str",
"(",
"v",
".",
"indices",
"[",
"i",
"]",
"+",
"1",
")",
"+",
"\":\"",
"+",
"str",
"(",
"v",
".",
"values",
"[",
"i",
"]",
")",
")",
"else",
":",
"for",
"i",
"in",
"xrange",
"(",
"len",
"(",
"v",
")",
")",
":",
"items",
".",
"append",
"(",
"str",
"(",
"i",
"+",
"1",
")",
"+",
"\":\"",
"+",
"str",
"(",
"v",
"[",
"i",
"]",
")",
")",
"return",
"\" \"",
".",
"join",
"(",
"items",
")"
] | 618d6bff71073c8c93501ab7392c3cc579730f0b |
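A matching serializer sketch for the record above, again in plain Python 3; it takes a label and a dict mapping zero-based index to value instead of a LabeledPoint, which is a simplification for illustration.

def to_libsvm_line(label, features):
    # Shift back to LIBSVM's one-based indices on the way out.
    items = [str(label)] + ["%d:%s" % (i + 1, v) for i, v in sorted(features.items())]
    return " ".join(items)

print(to_libsvm_line(1.0, {0: 1.0, 2: 2.0, 4: 3.0}))
# 1.0 1:1.0 3:2.0 5:3.0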
train | MLUtils.loadLibSVMFile | Loads labeled data in the LIBSVM format into an RDD of
LabeledPoint. The LIBSVM format is a text-based format used by
LIBSVM and LIBLINEAR. Each line represents a labeled sparse
feature vector using the following format:
label index1:value1 index2:value2 ...
where the indices are one-based and in ascending order. This
method parses each line into a LabeledPoint, where the feature
indices are converted to zero-based.
:param sc: Spark context
:param path: file or directory path in any Hadoop-supported file
system URI
:param numFeatures: number of features, which will be determined
from the input data if a nonpositive value
is given. This is useful when the dataset is
already split into multiple files and you
want to load them separately, because some
features may not be present in certain files,
which leads to inconsistent feature
dimensions.
:param minPartitions: min number of partitions
@return: labeled data stored as an RDD of LabeledPoint
>>> from tempfile import NamedTemporaryFile
>>> from pyspark.mllib.util import MLUtils
>>> from pyspark.mllib.regression import LabeledPoint
>>> tempFile = NamedTemporaryFile(delete=True)
>>> _ = tempFile.write(b"+1 1:1.0 3:2.0 5:3.0\\n-1\\n-1 2:4.0 4:5.0 6:6.0")
>>> tempFile.flush()
>>> examples = MLUtils.loadLibSVMFile(sc, tempFile.name).collect()
>>> tempFile.close()
>>> examples[0]
LabeledPoint(1.0, (6,[0,2,4],[1.0,2.0,3.0]))
>>> examples[1]
LabeledPoint(-1.0, (6,[],[]))
>>> examples[2]
LabeledPoint(-1.0, (6,[1,3,5],[4.0,5.0,6.0])) | python/pyspark/mllib/util.py | def loadLibSVMFile(sc, path, numFeatures=-1, minPartitions=None):
"""
Loads labeled data in the LIBSVM format into an RDD of
LabeledPoint. The LIBSVM format is a text-based format used by
LIBSVM and LIBLINEAR. Each line represents a labeled sparse
feature vector using the following format:
label index1:value1 index2:value2 ...
where the indices are one-based and in ascending order. This
method parses each line into a LabeledPoint, where the feature
indices are converted to zero-based.
:param sc: Spark context
:param path: file or directory path in any Hadoop-supported file
system URI
:param numFeatures: number of features, which will be determined
from the input data if a nonpositive value
is given. This is useful when the dataset is
already split into multiple files and you
want to load them separately, because some
features may not be present in certain files,
which leads to inconsistent feature
dimensions.
:param minPartitions: min number of partitions
@return: labeled data stored as an RDD of LabeledPoint
>>> from tempfile import NamedTemporaryFile
>>> from pyspark.mllib.util import MLUtils
>>> from pyspark.mllib.regression import LabeledPoint
>>> tempFile = NamedTemporaryFile(delete=True)
>>> _ = tempFile.write(b"+1 1:1.0 3:2.0 5:3.0\\n-1\\n-1 2:4.0 4:5.0 6:6.0")
>>> tempFile.flush()
>>> examples = MLUtils.loadLibSVMFile(sc, tempFile.name).collect()
>>> tempFile.close()
>>> examples[0]
LabeledPoint(1.0, (6,[0,2,4],[1.0,2.0,3.0]))
>>> examples[1]
LabeledPoint(-1.0, (6,[],[]))
>>> examples[2]
LabeledPoint(-1.0, (6,[1,3,5],[4.0,5.0,6.0]))
"""
from pyspark.mllib.regression import LabeledPoint
lines = sc.textFile(path, minPartitions)
parsed = lines.map(lambda l: MLUtils._parse_libsvm_line(l))
if numFeatures <= 0:
parsed.cache()
numFeatures = parsed.map(lambda x: -1 if x[1].size == 0 else x[1][-1]).reduce(max) + 1
return parsed.map(lambda x: LabeledPoint(x[0], Vectors.sparse(numFeatures, x[1], x[2]))) | def loadLibSVMFile(sc, path, numFeatures=-1, minPartitions=None):
"""
Loads labeled data in the LIBSVM format into an RDD of
LabeledPoint. The LIBSVM format is a text-based format used by
LIBSVM and LIBLINEAR. Each line represents a labeled sparse
feature vector using the following format:
label index1:value1 index2:value2 ...
where the indices are one-based and in ascending order. This
method parses each line into a LabeledPoint, where the feature
indices are converted to zero-based.
:param sc: Spark context
:param path: file or directory path in any Hadoop-supported file
system URI
:param numFeatures: number of features, which will be determined
from the input data if a nonpositive value
is given. This is useful when the dataset is
already split into multiple files and you
want to load them separately, because some
features may not be present in certain files,
which leads to inconsistent feature
dimensions.
:param minPartitions: min number of partitions
@return: labeled data stored as an RDD of LabeledPoint
>>> from tempfile import NamedTemporaryFile
>>> from pyspark.mllib.util import MLUtils
>>> from pyspark.mllib.regression import LabeledPoint
>>> tempFile = NamedTemporaryFile(delete=True)
>>> _ = tempFile.write(b"+1 1:1.0 3:2.0 5:3.0\\n-1\\n-1 2:4.0 4:5.0 6:6.0")
>>> tempFile.flush()
>>> examples = MLUtils.loadLibSVMFile(sc, tempFile.name).collect()
>>> tempFile.close()
>>> examples[0]
LabeledPoint(1.0, (6,[0,2,4],[1.0,2.0,3.0]))
>>> examples[1]
LabeledPoint(-1.0, (6,[],[]))
>>> examples[2]
LabeledPoint(-1.0, (6,[1,3,5],[4.0,5.0,6.0]))
"""
from pyspark.mllib.regression import LabeledPoint
lines = sc.textFile(path, minPartitions)
parsed = lines.map(lambda l: MLUtils._parse_libsvm_line(l))
if numFeatures <= 0:
parsed.cache()
numFeatures = parsed.map(lambda x: -1 if x[1].size == 0 else x[1][-1]).reduce(max) + 1
return parsed.map(lambda x: LabeledPoint(x[0], Vectors.sparse(numFeatures, x[1], x[2]))) | [
"Loads",
"labeled",
"data",
"in",
"the",
"LIBSVM",
"format",
"into",
"an",
"RDD",
"of",
"LabeledPoint",
".",
"The",
"LIBSVM",
"format",
"is",
"a",
"text",
"-",
"based",
"format",
"used",
"by",
"LIBSVM",
"and",
"LIBLINEAR",
".",
"Each",
"line",
"represents",
"a",
"labeled",
"sparse",
"feature",
"vector",
"using",
"the",
"following",
"format",
":"
] | apache/spark | python | https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/mllib/util.py#L73-L122 | [
"def",
"loadLibSVMFile",
"(",
"sc",
",",
"path",
",",
"numFeatures",
"=",
"-",
"1",
",",
"minPartitions",
"=",
"None",
")",
":",
"from",
"pyspark",
".",
"mllib",
".",
"regression",
"import",
"LabeledPoint",
"lines",
"=",
"sc",
".",
"textFile",
"(",
"path",
",",
"minPartitions",
")",
"parsed",
"=",
"lines",
".",
"map",
"(",
"lambda",
"l",
":",
"MLUtils",
".",
"_parse_libsvm_line",
"(",
"l",
")",
")",
"if",
"numFeatures",
"<=",
"0",
":",
"parsed",
".",
"cache",
"(",
")",
"numFeatures",
"=",
"parsed",
".",
"map",
"(",
"lambda",
"x",
":",
"-",
"1",
"if",
"x",
"[",
"1",
"]",
".",
"size",
"==",
"0",
"else",
"x",
"[",
"1",
"]",
"[",
"-",
"1",
"]",
")",
".",
"reduce",
"(",
"max",
")",
"+",
"1",
"return",
"parsed",
".",
"map",
"(",
"lambda",
"x",
":",
"LabeledPoint",
"(",
"x",
"[",
"0",
"]",
",",
"Vectors",
".",
"sparse",
"(",
"numFeatures",
",",
"x",
"[",
"1",
"]",
",",
"x",
"[",
"2",
"]",
")",
")",
")"
] | 618d6bff71073c8c93501ab7392c3cc579730f0b |
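When numFeatures is nonpositive, loadLibSVMFile infers the dimension as one plus the largest zero-based index found anywhere in the data, with empty rows contributing -1. A pure-Python sketch of that reduction over already-parsed (label, indices, values) triples, mirroring the map/reduce in the record:

import numpy as np

parsed = [
    (1.0, np.array([0, 2, 4], dtype=np.int32), np.array([1.0, 2.0, 3.0])),
    (-1.0, np.array([], dtype=np.int32), np.array([])),
    (-1.0, np.array([1, 3, 5], dtype=np.int32), np.array([4.0, 5.0, 6.0])),
]
num_features = max(-1 if idx.size == 0 else idx[-1] for _, idx, _ in parsed) + 1
print(num_features)  # 6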
train | MLUtils.saveAsLibSVMFile | Save labeled data in LIBSVM format.
:param data: an RDD of LabeledPoint to be saved
:param dir: directory to save the data
>>> from tempfile import NamedTemporaryFile
>>> from fileinput import input
>>> from pyspark.mllib.regression import LabeledPoint
>>> from glob import glob
>>> from pyspark.mllib.util import MLUtils
>>> examples = [LabeledPoint(1.1, Vectors.sparse(3, [(0, 1.23), (2, 4.56)])),
... LabeledPoint(0.0, Vectors.dense([1.01, 2.02, 3.03]))]
>>> tempFile = NamedTemporaryFile(delete=True)
>>> tempFile.close()
>>> MLUtils.saveAsLibSVMFile(sc.parallelize(examples), tempFile.name)
>>> ''.join(sorted(input(glob(tempFile.name + "/part-0000*"))))
'0.0 1:1.01 2:2.02 3:3.03\\n1.1 1:1.23 3:4.56\\n' | python/pyspark/mllib/util.py | def saveAsLibSVMFile(data, dir):
"""
Save labeled data in LIBSVM format.
:param data: an RDD of LabeledPoint to be saved
:param dir: directory to save the data
>>> from tempfile import NamedTemporaryFile
>>> from fileinput import input
>>> from pyspark.mllib.regression import LabeledPoint
>>> from glob import glob
>>> from pyspark.mllib.util import MLUtils
>>> examples = [LabeledPoint(1.1, Vectors.sparse(3, [(0, 1.23), (2, 4.56)])),
... LabeledPoint(0.0, Vectors.dense([1.01, 2.02, 3.03]))]
>>> tempFile = NamedTemporaryFile(delete=True)
>>> tempFile.close()
>>> MLUtils.saveAsLibSVMFile(sc.parallelize(examples), tempFile.name)
>>> ''.join(sorted(input(glob(tempFile.name + "/part-0000*"))))
'0.0 1:1.01 2:2.02 3:3.03\\n1.1 1:1.23 3:4.56\\n'
"""
lines = data.map(lambda p: MLUtils._convert_labeled_point_to_libsvm(p))
lines.saveAsTextFile(dir) | def saveAsLibSVMFile(data, dir):
"""
Save labeled data in LIBSVM format.
:param data: an RDD of LabeledPoint to be saved
:param dir: directory to save the data
>>> from tempfile import NamedTemporaryFile
>>> from fileinput import input
>>> from pyspark.mllib.regression import LabeledPoint
>>> from glob import glob
>>> from pyspark.mllib.util import MLUtils
>>> examples = [LabeledPoint(1.1, Vectors.sparse(3, [(0, 1.23), (2, 4.56)])),
... LabeledPoint(0.0, Vectors.dense([1.01, 2.02, 3.03]))]
>>> tempFile = NamedTemporaryFile(delete=True)
>>> tempFile.close()
>>> MLUtils.saveAsLibSVMFile(sc.parallelize(examples), tempFile.name)
>>> ''.join(sorted(input(glob(tempFile.name + "/part-0000*"))))
'0.0 1:1.01 2:2.02 3:3.03\\n1.1 1:1.23 3:4.56\\n'
"""
lines = data.map(lambda p: MLUtils._convert_labeled_point_to_libsvm(p))
lines.saveAsTextFile(dir) | [
"Save",
"labeled",
"data",
"in",
"LIBSVM",
"format",
"."
] | apache/spark | python | https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/mllib/util.py#L126-L147 | [
"def",
"saveAsLibSVMFile",
"(",
"data",
",",
"dir",
")",
":",
"lines",
"=",
"data",
".",
"map",
"(",
"lambda",
"p",
":",
"MLUtils",
".",
"_convert_labeled_point_to_libsvm",
"(",
"p",
")",
")",
"lines",
".",
"saveAsTextFile",
"(",
"dir",
")"
] | 618d6bff71073c8c93501ab7392c3cc579730f0b |
train | MLUtils.loadLabeledPoints | Load labeled points saved using RDD.saveAsTextFile.
:param sc: Spark context
:param path: file or directory path in any Hadoop-supported file
system URI
:param minPartitions: min number of partitions
@return: labeled data stored as an RDD of LabeledPoint
>>> from tempfile import NamedTemporaryFile
>>> from pyspark.mllib.util import MLUtils
>>> from pyspark.mllib.regression import LabeledPoint
>>> examples = [LabeledPoint(1.1, Vectors.sparse(3, [(0, -1.23), (2, 4.56e-7)])),
... LabeledPoint(0.0, Vectors.dense([1.01, 2.02, 3.03]))]
>>> tempFile = NamedTemporaryFile(delete=True)
>>> tempFile.close()
>>> sc.parallelize(examples, 1).saveAsTextFile(tempFile.name)
>>> MLUtils.loadLabeledPoints(sc, tempFile.name).collect()
[LabeledPoint(1.1, (3,[0,2],[-1.23,4.56e-07])), LabeledPoint(0.0, [1.01,2.02,3.03])] | python/pyspark/mllib/util.py | def loadLabeledPoints(sc, path, minPartitions=None):
"""
Load labeled points saved using RDD.saveAsTextFile.
:param sc: Spark context
:param path: file or directory path in any Hadoop-supported file
system URI
:param minPartitions: min number of partitions
@return: labeled data stored as an RDD of LabeledPoint
>>> from tempfile import NamedTemporaryFile
>>> from pyspark.mllib.util import MLUtils
>>> from pyspark.mllib.regression import LabeledPoint
>>> examples = [LabeledPoint(1.1, Vectors.sparse(3, [(0, -1.23), (2, 4.56e-7)])),
... LabeledPoint(0.0, Vectors.dense([1.01, 2.02, 3.03]))]
>>> tempFile = NamedTemporaryFile(delete=True)
>>> tempFile.close()
>>> sc.parallelize(examples, 1).saveAsTextFile(tempFile.name)
>>> MLUtils.loadLabeledPoints(sc, tempFile.name).collect()
[LabeledPoint(1.1, (3,[0,2],[-1.23,4.56e-07])), LabeledPoint(0.0, [1.01,2.02,3.03])]
"""
minPartitions = minPartitions or min(sc.defaultParallelism, 2)
return callMLlibFunc("loadLabeledPoints", sc, path, minPartitions) | def loadLabeledPoints(sc, path, minPartitions=None):
"""
Load labeled points saved using RDD.saveAsTextFile.
:param sc: Spark context
:param path: file or directory path in any Hadoop-supported file
system URI
:param minPartitions: min number of partitions
@return: labeled data stored as an RDD of LabeledPoint
>>> from tempfile import NamedTemporaryFile
>>> from pyspark.mllib.util import MLUtils
>>> from pyspark.mllib.regression import LabeledPoint
>>> examples = [LabeledPoint(1.1, Vectors.sparse(3, [(0, -1.23), (2, 4.56e-7)])),
... LabeledPoint(0.0, Vectors.dense([1.01, 2.02, 3.03]))]
>>> tempFile = NamedTemporaryFile(delete=True)
>>> tempFile.close()
>>> sc.parallelize(examples, 1).saveAsTextFile(tempFile.name)
>>> MLUtils.loadLabeledPoints(sc, tempFile.name).collect()
[LabeledPoint(1.1, (3,[0,2],[-1.23,4.56e-07])), LabeledPoint(0.0, [1.01,2.02,3.03])]
"""
minPartitions = minPartitions or min(sc.defaultParallelism, 2)
return callMLlibFunc("loadLabeledPoints", sc, path, minPartitions) | [
"Load",
"labeled",
"points",
"saved",
"using",
"RDD",
".",
"saveAsTextFile",
"."
] | apache/spark | python | https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/mllib/util.py#L151-L173 | [
"def",
"loadLabeledPoints",
"(",
"sc",
",",
"path",
",",
"minPartitions",
"=",
"None",
")",
":",
"minPartitions",
"=",
"minPartitions",
"or",
"min",
"(",
"sc",
".",
"defaultParallelism",
",",
"2",
")",
"return",
"callMLlibFunc",
"(",
"\"loadLabeledPoints\"",
",",
"sc",
",",
"path",
",",
"minPartitions",
")"
] | 618d6bff71073c8c93501ab7392c3cc579730f0b |
train | MLUtils.appendBias | Returns a new vector with `1.0` (bias) appended to
the end of the input vector. | python/pyspark/mllib/util.py | def appendBias(data):
"""
Returns a new vector with `1.0` (bias) appended to
the end of the input vector.
"""
vec = _convert_to_vector(data)
if isinstance(vec, SparseVector):
newIndices = np.append(vec.indices, len(vec))
newValues = np.append(vec.values, 1.0)
return SparseVector(len(vec) + 1, newIndices, newValues)
else:
return _convert_to_vector(np.append(vec.toArray(), 1.0)) | def appendBias(data):
"""
Returns a new vector with `1.0` (bias) appended to
the end of the input vector.
"""
vec = _convert_to_vector(data)
if isinstance(vec, SparseVector):
newIndices = np.append(vec.indices, len(vec))
newValues = np.append(vec.values, 1.0)
return SparseVector(len(vec) + 1, newIndices, newValues)
else:
return _convert_to_vector(np.append(vec.toArray(), 1.0)) | [
"Returns",
"a",
"new",
"vector",
"with",
"1",
".",
"0",
"(",
"bias",
")",
"appended",
"to",
"the",
"end",
"of",
"the",
"input",
"vector",
"."
] | apache/spark | python | https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/mllib/util.py#L177-L188 | [
"def",
"appendBias",
"(",
"data",
")",
":",
"vec",
"=",
"_convert_to_vector",
"(",
"data",
")",
"if",
"isinstance",
"(",
"vec",
",",
"SparseVector",
")",
":",
"newIndices",
"=",
"np",
".",
"append",
"(",
"vec",
".",
"indices",
",",
"len",
"(",
"vec",
")",
")",
"newValues",
"=",
"np",
".",
"append",
"(",
"vec",
".",
"values",
",",
"1.0",
")",
"return",
"SparseVector",
"(",
"len",
"(",
"vec",
")",
"+",
"1",
",",
"newIndices",
",",
"newValues",
")",
"else",
":",
"return",
"_convert_to_vector",
"(",
"np",
".",
"append",
"(",
"vec",
".",
"toArray",
"(",
")",
",",
"1.0",
")",
")"
] | 618d6bff71073c8c93501ab7392c3cc579730f0b |
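A quick check of the bias-append behavior from the record above, with the dense case run through numpy directly and the sparse case shown as plain (size, indices, values) pieces rather than an MLlib SparseVector, as an illustration.

import numpy as np

dense = np.array([1.0, 2.0])
print(np.append(dense, 1.0))               # [1. 2. 1.] -- the bias term goes last

size, indices, values = 4, np.array([1, 3]), np.array([5.0, 7.0])
new_indices = np.append(indices, size)     # bias lands at index == old length
new_values = np.append(values, 1.0)        # with value 1.0
print(size + 1, new_indices, new_values)   # 5 [1 3 4] [5. 7. 1.]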
train | MLUtils.convertVectorColumnsToML | Converts vector columns in an input DataFrame from the
:py:class:`pyspark.mllib.linalg.Vector` type to the new
:py:class:`pyspark.ml.linalg.Vector` type under the `spark.ml`
package.
:param dataset:
input dataset
:param cols:
a list of vector columns to be converted.
New vector columns will be ignored. If unspecified, all old
vector columns will be converted except nested ones.
:return:
the input dataset with old vector columns converted to the
new vector type
>>> import pyspark
>>> from pyspark.mllib.linalg import Vectors
>>> from pyspark.mllib.util import MLUtils
>>> df = spark.createDataFrame(
... [(0, Vectors.sparse(2, [1], [1.0]), Vectors.dense(2.0, 3.0))],
... ["id", "x", "y"])
>>> r1 = MLUtils.convertVectorColumnsToML(df).first()
>>> isinstance(r1.x, pyspark.ml.linalg.SparseVector)
True
>>> isinstance(r1.y, pyspark.ml.linalg.DenseVector)
True
>>> r2 = MLUtils.convertVectorColumnsToML(df, "x").first()
>>> isinstance(r2.x, pyspark.ml.linalg.SparseVector)
True
>>> isinstance(r2.y, pyspark.mllib.linalg.DenseVector)
True | python/pyspark/mllib/util.py | def convertVectorColumnsToML(dataset, *cols):
"""
Converts vector columns in an input DataFrame from the
:py:class:`pyspark.mllib.linalg.Vector` type to the new
:py:class:`pyspark.ml.linalg.Vector` type under the `spark.ml`
package.
:param dataset:
input dataset
:param cols:
a list of vector columns to be converted.
New vector columns will be ignored. If unspecified, all old
vector columns will be converted except nested ones.
:return:
the input dataset with old vector columns converted to the
new vector type
>>> import pyspark
>>> from pyspark.mllib.linalg import Vectors
>>> from pyspark.mllib.util import MLUtils
>>> df = spark.createDataFrame(
... [(0, Vectors.sparse(2, [1], [1.0]), Vectors.dense(2.0, 3.0))],
... ["id", "x", "y"])
>>> r1 = MLUtils.convertVectorColumnsToML(df).first()
>>> isinstance(r1.x, pyspark.ml.linalg.SparseVector)
True
>>> isinstance(r1.y, pyspark.ml.linalg.DenseVector)
True
>>> r2 = MLUtils.convertVectorColumnsToML(df, "x").first()
>>> isinstance(r2.x, pyspark.ml.linalg.SparseVector)
True
>>> isinstance(r2.y, pyspark.mllib.linalg.DenseVector)
True
"""
if not isinstance(dataset, DataFrame):
raise TypeError("Input dataset must be a DataFrame but got {}.".format(type(dataset)))
return callMLlibFunc("convertVectorColumnsToML", dataset, list(cols)) | def convertVectorColumnsToML(dataset, *cols):
"""
Converts vector columns in an input DataFrame from the
:py:class:`pyspark.mllib.linalg.Vector` type to the new
:py:class:`pyspark.ml.linalg.Vector` type under the `spark.ml`
package.
:param dataset:
input dataset
:param cols:
a list of vector columns to be converted.
New vector columns will be ignored. If unspecified, all old
vector columns will be converted except nested ones.
:return:
the input dataset with old vector columns converted to the
new vector type
>>> import pyspark
>>> from pyspark.mllib.linalg import Vectors
>>> from pyspark.mllib.util import MLUtils
>>> df = spark.createDataFrame(
... [(0, Vectors.sparse(2, [1], [1.0]), Vectors.dense(2.0, 3.0))],
... ["id", "x", "y"])
>>> r1 = MLUtils.convertVectorColumnsToML(df).first()
>>> isinstance(r1.x, pyspark.ml.linalg.SparseVector)
True
>>> isinstance(r1.y, pyspark.ml.linalg.DenseVector)
True
>>> r2 = MLUtils.convertVectorColumnsToML(df, "x").first()
>>> isinstance(r2.x, pyspark.ml.linalg.SparseVector)
True
>>> isinstance(r2.y, pyspark.mllib.linalg.DenseVector)
True
"""
if not isinstance(dataset, DataFrame):
raise TypeError("Input dataset must be a DataFrame but got {}.".format(type(dataset)))
return callMLlibFunc("convertVectorColumnsToML", dataset, list(cols)) | [
"Converts",
"vector",
"columns",
"in",
"an",
"input",
"DataFrame",
"from",
"the",
":",
"py",
":",
"class",
":",
"pyspark",
".",
"mllib",
".",
"linalg",
".",
"Vector",
"type",
"to",
"the",
"new",
":",
"py",
":",
"class",
":",
"pyspark",
".",
"ml",
".",
"linalg",
".",
"Vector",
"type",
"under",
"the",
"spark",
".",
"ml",
"package",
"."
] | apache/spark | python | https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/mllib/util.py#L201-L237 | [
"def",
"convertVectorColumnsToML",
"(",
"dataset",
",",
"*",
"cols",
")",
":",
"if",
"not",
"isinstance",
"(",
"dataset",
",",
"DataFrame",
")",
":",
"raise",
"TypeError",
"(",
"\"Input dataset must be a DataFrame but got {}.\"",
".",
"format",
"(",
"type",
"(",
"dataset",
")",
")",
")",
"return",
"callMLlibFunc",
"(",
"\"convertVectorColumnsToML\"",
",",
"dataset",
",",
"list",
"(",
"cols",
")",
")"
] | 618d6bff71073c8c93501ab7392c3cc579730f0b |
train | LinearDataGenerator.generateLinearInput | :param: intercept bias factor, the term c in X'w + c
:param: weights feature vector, the term w in X'w + c
:param: xMean Point around which the data X is centered.
:param: xVariance Variance of the given data
:param: nPoints Number of points to be generated
:param: seed Random Seed
:param: eps Used to scale the noise. If eps is set high,
the amount of gaussian noise added is more.
Returns a list of LabeledPoints of length nPoints | python/pyspark/mllib/util.py | def generateLinearInput(intercept, weights, xMean, xVariance,
nPoints, seed, eps):
"""
:param: intercept bias factor, the term c in X'w + c
:param: weights feature vector, the term w in X'w + c
:param: xMean Point around which the data X is centered.
:param: xVariance Variance of the given data
:param: nPoints Number of points to be generated
:param: seed Random Seed
:param: eps Used to scale the noise. If eps is set high,
the amount of gaussian noise added is more.
Returns a list of LabeledPoints of length nPoints
"""
weights = [float(weight) for weight in weights]
xMean = [float(mean) for mean in xMean]
xVariance = [float(var) for var in xVariance]
return list(callMLlibFunc(
"generateLinearInputWrapper", float(intercept), weights, xMean,
xVariance, int(nPoints), int(seed), float(eps))) | def generateLinearInput(intercept, weights, xMean, xVariance,
nPoints, seed, eps):
"""
:param: intercept bias factor, the term c in X'w + c
:param: weights feature vector, the term w in X'w + c
:param: xMean Point around which the data X is centered.
:param: xVariance Variance of the given data
:param: nPoints Number of points to be generated
:param: seed Random Seed
:param: eps Used to scale the noise. If eps is set high,
the amount of gaussian noise added is more.
Returns a list of LabeledPoints of length nPoints
"""
weights = [float(weight) for weight in weights]
xMean = [float(mean) for mean in xMean]
xVariance = [float(var) for var in xVariance]
return list(callMLlibFunc(
"generateLinearInputWrapper", float(intercept), weights, xMean,
xVariance, int(nPoints), int(seed), float(eps))) | [
":",
"param",
":",
"intercept",
"bias",
"factor",
"the",
"term",
"c",
"in",
"X",
"w",
"+",
"c",
":",
"param",
":",
"weights",
"feature",
"vector",
"the",
"term",
"w",
"in",
"X",
"w",
"+",
"c",
":",
"param",
":",
"xMean",
"Point",
"around",
"which",
"the",
"data",
"X",
"is",
"centered",
".",
":",
"param",
":",
"xVariance",
"Variance",
"of",
"the",
"given",
"data",
":",
"param",
":",
"nPoints",
"Number",
"of",
"points",
"to",
"be",
"generated",
":",
"param",
":",
"seed",
"Random",
"Seed",
":",
"param",
":",
"eps",
"Used",
"to",
"scale",
"the",
"noise",
".",
"If",
"eps",
"is",
"set",
"high",
"the",
"amount",
"of",
"gaussian",
"noise",
"added",
"is",
"more",
"."
] | apache/spark | python | https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/mllib/util.py#L471-L490 | [
"def",
"generateLinearInput",
"(",
"intercept",
",",
"weights",
",",
"xMean",
",",
"xVariance",
",",
"nPoints",
",",
"seed",
",",
"eps",
")",
":",
"weights",
"=",
"[",
"float",
"(",
"weight",
")",
"for",
"weight",
"in",
"weights",
"]",
"xMean",
"=",
"[",
"float",
"(",
"mean",
")",
"for",
"mean",
"in",
"xMean",
"]",
"xVariance",
"=",
"[",
"float",
"(",
"var",
")",
"for",
"var",
"in",
"xVariance",
"]",
"return",
"list",
"(",
"callMLlibFunc",
"(",
"\"generateLinearInputWrapper\"",
",",
"float",
"(",
"intercept",
")",
",",
"weights",
",",
"xMean",
",",
"xVariance",
",",
"int",
"(",
"nPoints",
")",
",",
"int",
"(",
"seed",
")",
",",
"float",
"(",
"eps",
")",
")",
")"
] | 618d6bff71073c8c93501ab7392c3cc579730f0b |
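A numpy sketch of the generative model the docstring describes, y = x . w + c plus noise scaled by eps. Sampling features as Gaussians around xMean with variance xVariance is a simplifying assumption here; the wrapper's exact feature distribution is not spelled out in the record.

import numpy as np

rng = np.random.default_rng(42)
weights, intercept, eps = np.array([2.0, -1.0]), 0.5, 0.1
x_mean, x_var, n_points = np.array([0.0, 1.0]), np.array([1.0, 0.5]), 5

X = rng.normal(x_mean, np.sqrt(x_var), size=(n_points, 2))
y = X @ weights + intercept + eps * rng.standard_normal(n_points)
points = list(zip(y, X))  # analogous to a list of LabeledPoints
print(points[0])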
train | LinearDataGenerator.generateLinearRDD | Generate an RDD of LabeledPoints. | python/pyspark/mllib/util.py | def generateLinearRDD(sc, nexamples, nfeatures, eps,
nParts=2, intercept=0.0):
"""
Generate an RDD of LabeledPoints.
"""
return callMLlibFunc(
"generateLinearRDDWrapper", sc, int(nexamples), int(nfeatures),
float(eps), int(nParts), float(intercept)) | def generateLinearRDD(sc, nexamples, nfeatures, eps,
nParts=2, intercept=0.0):
"""
Generate an RDD of LabeledPoints.
"""
return callMLlibFunc(
"generateLinearRDDWrapper", sc, int(nexamples), int(nfeatures),
float(eps), int(nParts), float(intercept)) | [
"Generate",
"an",
"RDD",
"of",
"LabeledPoints",
"."
] | apache/spark | python | https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/mllib/util.py#L494-L501 | [
"def",
"generateLinearRDD",
"(",
"sc",
",",
"nexamples",
",",
"nfeatures",
",",
"eps",
",",
"nParts",
"=",
"2",
",",
"intercept",
"=",
"0.0",
")",
":",
"return",
"callMLlibFunc",
"(",
"\"generateLinearRDDWrapper\"",
",",
"sc",
",",
"int",
"(",
"nexamples",
")",
",",
"int",
"(",
"nfeatures",
")",
",",
"float",
"(",
"eps",
")",
",",
"int",
"(",
"nParts",
")",
",",
"float",
"(",
"intercept",
")",
")"
] | 618d6bff71073c8c93501ab7392c3cc579730f0b |
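Usage is a one-liner given a live SparkContext `sc` (a sketch; the argument values are arbitrary):

from pyspark.mllib.util import LinearDataGenerator

rdd = LinearDataGenerator.generateLinearRDD(sc, nexamples=100, nfeatures=3, eps=0.1)
print(rdd.take(2))  # two LabeledPoints, each with 3 features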
train | LinearRegressionWithSGD.train | Train a linear regression model using Stochastic Gradient
Descent (SGD). This solves the least squares regression
formulation
f(weights) = 1/(2n) ||A weights - y||^2
which is the mean squared error. Here the data matrix has n rows,
and the input RDD holds the set of rows of A, each with its
corresponding right hand side label y.
See also the documentation for the precise formulation.
:param data:
The training data, an RDD of LabeledPoint.
:param iterations:
The number of iterations.
(default: 100)
:param step:
The step parameter used in SGD.
(default: 1.0)
:param miniBatchFraction:
Fraction of data to be used for each SGD iteration.
(default: 1.0)
:param initialWeights:
The initial weights.
(default: None)
:param regParam:
The regularizer parameter.
(default: 0.0)
:param regType:
The type of regularizer used for training our model.
Supported values:
- "l1" for using L1 regularization
- "l2" for using L2 regularization
- None for no regularization (default)
:param intercept:
Boolean parameter which indicates the use or not of the
augmented representation for training data (i.e., whether bias
features are activated or not).
(default: False)
:param validateData:
Boolean parameter which indicates if the algorithm should
validate data before training.
(default: True)
:param convergenceTol:
A condition which decides iteration termination.
(default: 0.001) | python/pyspark/mllib/regression.py | def train(cls, data, iterations=100, step=1.0, miniBatchFraction=1.0,
initialWeights=None, regParam=0.0, regType=None, intercept=False,
validateData=True, convergenceTol=0.001):
"""
Train a linear regression model using Stochastic Gradient
Descent (SGD). This solves the least squares regression
formulation
f(weights) = 1/(2n) ||A weights - y||^2
which is the mean squared error. Here the data matrix has n rows,
and the input RDD holds the set of rows of A, each with its
corresponding right hand side label y.
See also the documentation for the precise formulation.
:param data:
The training data, an RDD of LabeledPoint.
:param iterations:
The number of iterations.
(default: 100)
:param step:
The step parameter used in SGD.
(default: 1.0)
:param miniBatchFraction:
Fraction of data to be used for each SGD iteration.
(default: 1.0)
:param initialWeights:
The initial weights.
(default: None)
:param regParam:
The regularizer parameter.
(default: 0.0)
:param regType:
The type of regularizer used for training our model.
Supported values:
- "l1" for using L1 regularization
- "l2" for using L2 regularization
- None for no regularization (default)
:param intercept:
Boolean parameter which indicates the use or not of the
augmented representation for training data (i.e., whether bias
features are activated or not).
(default: False)
:param validateData:
Boolean parameter which indicates if the algorithm should
validate data before training.
(default: True)
:param convergenceTol:
A condition which decides iteration termination.
(default: 0.001)
"""
warnings.warn(
"Deprecated in 2.0.0. Use ml.regression.LinearRegression.", DeprecationWarning)
def train(rdd, i):
return callMLlibFunc("trainLinearRegressionModelWithSGD", rdd, int(iterations),
float(step), float(miniBatchFraction), i, float(regParam),
regType, bool(intercept), bool(validateData),
float(convergenceTol))
return _regression_train_wrapper(train, LinearRegressionModel, data, initialWeights) | def train(cls, data, iterations=100, step=1.0, miniBatchFraction=1.0,
initialWeights=None, regParam=0.0, regType=None, intercept=False,
validateData=True, convergenceTol=0.001):
"""
Train a linear regression model using Stochastic Gradient
Descent (SGD). This solves the least squares regression
formulation
f(weights) = 1/(2n) ||A weights - y||^2
which is the mean squared error. Here the data matrix has n rows,
and the input RDD holds the set of rows of A, each with its
corresponding right hand side label y.
See also the documentation for the precise formulation.
:param data:
The training data, an RDD of LabeledPoint.
:param iterations:
The number of iterations.
(default: 100)
:param step:
The step parameter used in SGD.
(default: 1.0)
:param miniBatchFraction:
Fraction of data to be used for each SGD iteration.
(default: 1.0)
:param initialWeights:
The initial weights.
(default: None)
:param regParam:
The regularizer parameter.
(default: 0.0)
:param regType:
The type of regularizer used for training our model.
Supported values:
- "l1" for using L1 regularization
- "l2" for using L2 regularization
- None for no regularization (default)
:param intercept:
Boolean parameter which indicates the use or not of the
augmented representation for training data (i.e., whether bias
features are activated or not).
(default: False)
:param validateData:
Boolean parameter which indicates if the algorithm should
validate data before training.
(default: True)
:param convergenceTol:
A condition which decides iteration termination.
(default: 0.001)
"""
warnings.warn(
"Deprecated in 2.0.0. Use ml.regression.LinearRegression.", DeprecationWarning)
def train(rdd, i):
return callMLlibFunc("trainLinearRegressionModelWithSGD", rdd, int(iterations),
float(step), float(miniBatchFraction), i, float(regParam),
regType, bool(intercept), bool(validateData),
float(convergenceTol))
return _regression_train_wrapper(train, LinearRegressionModel, data, initialWeights) | [
"Train",
"a",
"linear",
"regression",
"model",
"using",
"Stochastic",
"Gradient",
"Descent",
"(",
"SGD",
")",
".",
"This",
"solves",
"the",
"least",
"squares",
"regression",
"formulation"
] | apache/spark | python | https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/mllib/regression.py#L230-L291 | [
"def",
"train",
"(",
"cls",
",",
"data",
",",
"iterations",
"=",
"100",
",",
"step",
"=",
"1.0",
",",
"miniBatchFraction",
"=",
"1.0",
",",
"initialWeights",
"=",
"None",
",",
"regParam",
"=",
"0.0",
",",
"regType",
"=",
"None",
",",
"intercept",
"=",
"False",
",",
"validateData",
"=",
"True",
",",
"convergenceTol",
"=",
"0.001",
")",
":",
"warnings",
".",
"warn",
"(",
"\"Deprecated in 2.0.0. Use ml.regression.LinearRegression.\"",
",",
"DeprecationWarning",
")",
"def",
"train",
"(",
"rdd",
",",
"i",
")",
":",
"return",
"callMLlibFunc",
"(",
"\"trainLinearRegressionModelWithSGD\"",
",",
"rdd",
",",
"int",
"(",
"iterations",
")",
",",
"float",
"(",
"step",
")",
",",
"float",
"(",
"miniBatchFraction",
")",
",",
"i",
",",
"float",
"(",
"regParam",
")",
",",
"regType",
",",
"bool",
"(",
"intercept",
")",
",",
"bool",
"(",
"validateData",
")",
",",
"float",
"(",
"convergenceTol",
")",
")",
"return",
"_regression_train_wrapper",
"(",
"train",
",",
"LinearRegressionModel",
",",
"data",
",",
"initialWeights",
")"
] | 618d6bff71073c8c93501ab7392c3cc579730f0b |
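A numpy sketch of the objective the docstring states, f(w) = 1/(2n) ||A w - y||^2, together with its gradient (1/n) A^T (A w - y) and a plain full-batch gradient loop. This illustrates the formulation only, not MLlib's actual SGD internals, and the data is synthetic.

import numpy as np

rng = np.random.default_rng(0)
A = rng.standard_normal((100, 3))
true_w = np.array([1.0, -2.0, 0.5])
y = A @ true_w

w, step = np.zeros(3), 0.1
for _ in range(200):
    residual = A @ w - y
    loss = (residual @ residual) / (2 * len(y))   # f(w), half the mean squared error
    w -= step * (A.T @ residual) / len(y)         # gradient step
print(np.round(w, 3), round(loss, 8))             # w approaches [ 1. -2.  0.5]; loss -> 0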
train | IsotonicRegressionModel.predict | Predict labels for provided features.
Using a piecewise linear function.
1) If x exactly matches a boundary then associated prediction
is returned. In case there are multiple predictions with the
same boundary then one of them is returned; which one is
undefined (same as java.util.Arrays.binarySearch).
2) If x is lower or higher than all boundaries then first or
last prediction is returned respectively. In case there are
multiple predictions with the same boundary then the lowest
or highest is returned respectively.
3) If x falls between two values in boundary array then
prediction is treated as piecewise linear function and
interpolated value is returned. In case there are multiple
values with the same boundary then the same rules as in 2)
are used.
:param x:
Feature or RDD of Features to be labeled. | python/pyspark/mllib/regression.py | def predict(self, x):
"""
Predict labels for provided features.
Using a piecewise linear function.
1) If x exactly matches a boundary then associated prediction
is returned. In case there are multiple predictions with the
same boundary then one of them is returned; which one is
undefined (same as java.util.Arrays.binarySearch).
2) If x is lower or higher than all boundaries then first or
last prediction is returned respectively. In case there are
multiple predictions with the same boundary then the lowest
or highest is returned respectively.
3) If x falls between two values in boundary array then
prediction is treated as piecewise linear function and
interpolated value is returned. In case there are multiple
values with the same boundary then the same rules as in 2)
are used.
:param x:
Feature or RDD of Features to be labeled.
"""
if isinstance(x, RDD):
return x.map(lambda v: self.predict(v))
return np.interp(x, self.boundaries, self.predictions) | def predict(self, x):
"""
Predict labels for provided features.
Using a piecewise linear function.
1) If x exactly matches a boundary then associated prediction
is returned. In case there are multiple predictions with the
same boundary then one of them is returned; which one is
undefined (same as java.util.Arrays.binarySearch).
2) If x is lower or higher than all boundaries then first or
last prediction is returned respectively. In case there are
multiple predictions with the same boundary then the lowest
or highest is returned respectively.
3) If x falls between two values in boundary array then
prediction is treated as piecewise linear function and
interpolated value is returned. In case there are multiple
values with the same boundary then the same rules as in 2)
are used.
:param x:
Feature or RDD of Features to be labeled.
"""
if isinstance(x, RDD):
return x.map(lambda v: self.predict(v))
return np.interp(x, self.boundaries, self.predictions) | [
"Predict",
"labels",
"for",
"provided",
"features",
".",
"Using",
"a",
"piecewise",
"linear",
"function",
".",
"1",
")",
"If",
"x",
"exactly",
"matches",
"a",
"boundary",
"then",
"associated",
"prediction",
"is",
"returned",
".",
"In",
"case",
"there",
"are",
"multiple",
"predictions",
"with",
"the",
"same",
"boundary",
"then",
"one",
"of",
"them",
"is",
"returned",
".",
"Which",
"one",
"is",
"undefined",
"(",
"same",
"as",
"java",
".",
"util",
".",
"Arrays",
".",
"binarySearch",
")",
".",
"2",
")",
"If",
"x",
"is",
"lower",
"or",
"higher",
"than",
"all",
"boundaries",
"then",
"first",
"or",
"last",
"prediction",
"is",
"returned",
"respectively",
".",
"In",
"case",
"there",
"are",
"multiple",
"predictions",
"with",
"the",
"same",
"boundary",
"then",
"the",
"lowest",
"or",
"highest",
"is",
"returned",
"respectively",
".",
"3",
")",
"If",
"x",
"falls",
"between",
"two",
"values",
"in",
"boundary",
"array",
"then",
"prediction",
"is",
"treated",
"as",
"piecewise",
"linear",
"function",
"and",
"interpolated",
"value",
"is",
"returned",
".",
"In",
"case",
"there",
"are",
"multiple",
"values",
"with",
"the",
"same",
"boundary",
"then",
"the",
"same",
"rules",
"as",
"in",
"2",
")",
"are",
"used",
"."
] | apache/spark | python | https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/mllib/regression.py#L628-L651 | [
"def",
"predict",
"(",
"self",
",",
"x",
")",
":",
"if",
"isinstance",
"(",
"x",
",",
"RDD",
")",
":",
"return",
"x",
".",
"map",
"(",
"lambda",
"v",
":",
"self",
".",
"predict",
"(",
"v",
")",
")",
"return",
"np",
".",
"interp",
"(",
"x",
",",
"self",
".",
"boundaries",
",",
"self",
".",
"predictions",
")"
] | 618d6bff71073c8c93501ab7392c3cc579730f0b |
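The scalar branch of predict above delegates to np.interp over (boundaries, predictions), so the three rules in the docstring can be illustrated with NumPy alone. The boundary and prediction arrays below are invented for the demonstration.

import numpy as np

boundaries = np.array([0.0, 1.0, 3.0])
predictions = np.array([0.0, 2.0, 4.0])

# Rule 1: an exact boundary match returns its associated prediction.
print(np.interp(1.0, boundaries, predictions))   # 2.0
# Rule 2: inputs below/above all boundaries clamp to the first/last prediction.
print(np.interp(-5.0, boundaries, predictions))  # 0.0
print(np.interp(9.0, boundaries, predictions))   # 4.0
# Rule 3: anything strictly in between is linearly interpolated.
print(np.interp(2.0, boundaries, predictions))   # 3.0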
train | IsotonicRegressionModel.save | Save an IsotonicRegressionModel. | python/pyspark/mllib/regression.py | def save(self, sc, path):
"""Save an IsotonicRegressionModel."""
java_boundaries = _py2java(sc, self.boundaries.tolist())
java_predictions = _py2java(sc, self.predictions.tolist())
java_model = sc._jvm.org.apache.spark.mllib.regression.IsotonicRegressionModel(
java_boundaries, java_predictions, self.isotonic)
java_model.save(sc._jsc.sc(), path) | def save(self, sc, path):
"""Save an IsotonicRegressionModel."""
java_boundaries = _py2java(sc, self.boundaries.tolist())
java_predictions = _py2java(sc, self.predictions.tolist())
java_model = sc._jvm.org.apache.spark.mllib.regression.IsotonicRegressionModel(
java_boundaries, java_predictions, self.isotonic)
java_model.save(sc._jsc.sc(), path) | [
"Save",
"an",
"IsotonicRegressionModel",
"."
] | apache/spark | python | https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/mllib/regression.py#L654-L660 | [
"def",
"save",
"(",
"self",
",",
"sc",
",",
"path",
")",
":",
"java_boundaries",
"=",
"_py2java",
"(",
"sc",
",",
"self",
".",
"boundaries",
".",
"tolist",
"(",
")",
")",
"java_predictions",
"=",
"_py2java",
"(",
"sc",
",",
"self",
".",
"predictions",
".",
"tolist",
"(",
")",
")",
"java_model",
"=",
"sc",
".",
"_jvm",
".",
"org",
".",
"apache",
".",
"spark",
".",
"mllib",
".",
"regression",
".",
"IsotonicRegressionModel",
"(",
"java_boundaries",
",",
"java_predictions",
",",
"self",
".",
"isotonic",
")",
"java_model",
".",
"save",
"(",
"sc",
".",
"_jsc",
".",
"sc",
"(",
")",
",",
"path",
")"
] | 618d6bff71073c8c93501ab7392c3cc579730f0b |
train | IsotonicRegressionModel.load | Load an IsotonicRegressionModel. | python/pyspark/mllib/regression.py | def load(cls, sc, path):
"""Load an IsotonicRegressionModel."""
java_model = sc._jvm.org.apache.spark.mllib.regression.IsotonicRegressionModel.load(
sc._jsc.sc(), path)
py_boundaries = _java2py(sc, java_model.boundaryVector()).toArray()
py_predictions = _java2py(sc, java_model.predictionVector()).toArray()
return IsotonicRegressionModel(py_boundaries, py_predictions, java_model.isotonic) | def load(cls, sc, path):
"""Load an IsotonicRegressionModel."""
java_model = sc._jvm.org.apache.spark.mllib.regression.IsotonicRegressionModel.load(
sc._jsc.sc(), path)
py_boundaries = _java2py(sc, java_model.boundaryVector()).toArray()
py_predictions = _java2py(sc, java_model.predictionVector()).toArray()
return IsotonicRegressionModel(py_boundaries, py_predictions, java_model.isotonic) | [
"Load",
"an",
"IsotonicRegressionModel",
"."
] | apache/spark | python | https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/mllib/regression.py#L664-L670 | [
"def",
"load",
"(",
"cls",
",",
"sc",
",",
"path",
")",
":",
"java_model",
"=",
"sc",
".",
"_jvm",
".",
"org",
".",
"apache",
".",
"spark",
".",
"mllib",
".",
"regression",
".",
"IsotonicRegressionModel",
".",
"load",
"(",
"sc",
".",
"_jsc",
".",
"sc",
"(",
")",
",",
"path",
")",
"py_boundaries",
"=",
"_java2py",
"(",
"sc",
",",
"java_model",
".",
"boundaryVector",
"(",
")",
")",
".",
"toArray",
"(",
")",
"py_predictions",
"=",
"_java2py",
"(",
"sc",
",",
"java_model",
".",
"predictionVector",
"(",
")",
")",
".",
"toArray",
"(",
")",
"return",
"IsotonicRegressionModel",
"(",
"py_boundaries",
",",
"py_predictions",
",",
"java_model",
".",
"isotonic",
")"
] | 618d6bff71073c8c93501ab7392c3cc579730f0b |
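Taken together, the save and load records above form a round trip. A minimal sketch, assuming an active SparkContext sc and a model produced by a prior IsotonicRegression.train call; the temporary path handling is an illustrative assumption.

import shutil, tempfile
from pyspark.mllib.regression import IsotonicRegressionModel

path = tempfile.mkdtemp()
shutil.rmtree(path)  # save() writes a new directory, so the path must not exist
model.save(sc, path)
loaded = IsotonicRegressionModel.load(sc, path)
assert list(loaded.boundaries) == list(model.boundaries)
assert list(loaded.predictions) == list(model.predictions)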
train | IsotonicRegression.train | Train an isotonic regression model on the given data.
:param data:
RDD of (label, feature, weight) tuples.
:param isotonic:
Whether this is isotonic (which is default) or antitonic.
(default: True) | python/pyspark/mllib/regression.py | def train(cls, data, isotonic=True):
"""
Train an isotonic regression model on the given data.
:param data:
RDD of (label, feature, weight) tuples.
:param isotonic:
Whether this is isotonic (which is default) or antitonic.
(default: True)
"""
boundaries, predictions = callMLlibFunc("trainIsotonicRegressionModel",
data.map(_convert_to_vector), bool(isotonic))
return IsotonicRegressionModel(boundaries.toArray(), predictions.toArray(), isotonic) | def train(cls, data, isotonic=True):
"""
Train an isotonic regression model on the given data.
:param data:
RDD of (label, feature, weight) tuples.
:param isotonic:
Whether this is isotonic (which is default) or antitonic.
(default: True)
"""
boundaries, predictions = callMLlibFunc("trainIsotonicRegressionModel",
data.map(_convert_to_vector), bool(isotonic))
return IsotonicRegressionModel(boundaries.toArray(), predictions.toArray(), isotonic) | [
"Train",
"an",
"isotonic",
"regression",
"model",
"on",
"the",
"given",
"data",
"."
] | apache/spark | python | https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/mllib/regression.py#L699-L711 | [
"def",
"train",
"(",
"cls",
",",
"data",
",",
"isotonic",
"=",
"True",
")",
":",
"boundaries",
",",
"predictions",
"=",
"callMLlibFunc",
"(",
"\"trainIsotonicRegressionModel\"",
",",
"data",
".",
"map",
"(",
"_convert_to_vector",
")",
",",
"bool",
"(",
"isotonic",
")",
")",
"return",
"IsotonicRegressionModel",
"(",
"boundaries",
".",
"toArray",
"(",
")",
",",
"predictions",
".",
"toArray",
"(",
")",
",",
"isotonic",
")"
] | 618d6bff71073c8c93501ab7392c3cc579730f0b |
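A short end-to-end sketch for the trainer above; the (label, feature, weight) triples are made up, and sc is an assumed SparkContext.

from pyspark.mllib.regression import IsotonicRegression

data = sc.parallelize([(1.0, 1.0, 1.0), (2.0, 2.0, 1.0),
                       (1.5, 3.0, 1.0), (3.0, 4.0, 1.0)])
model = IsotonicRegression.train(data, isotonic=True)
# The fitted function is non-decreasing even though the labels dip at x=3.
print(model.predict(2.5))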
train | RowMatrix.columnSimilarities | Compute similarities between columns of this matrix.
The threshold parameter is a trade-off knob between estimate
quality and computational cost.
The default threshold setting of 0 guarantees deterministically
correct results, but uses the brute-force approach of computing
normalized dot products.
Setting the threshold to positive values uses a sampling
approach and incurs strictly less computational cost than the
brute-force approach. However the similarities computed will
be estimates.
The sampling guarantees relative-error correctness for those
pairs of columns that have similarity greater than the given
similarity threshold.
To describe the guarantee, we set some notation:
* Let A be the smallest in magnitude non-zero element of
this matrix.
* Let B be the largest in magnitude non-zero element of
this matrix.
* Let L be the maximum number of non-zeros per row.
For example, for {0,1} matrices: A=B=1.
Another example, for the Netflix matrix: A=1, B=5
For those column pairs that are above the threshold, the
computed similarity is correct to within 20% relative error
with probability at least 1 - (0.981)^(10/B).
The shuffle size is bounded by the *smaller* of the following
two expressions:
* O(n log(n) L / (threshold * A))
* O(m L^2)
The latter is the cost of the brute-force approach, so for
non-zero thresholds, the cost is always cheaper than the
brute-force approach.
:param threshold: Set to 0 for deterministic guaranteed
correctness. Similarities above this
threshold are estimated with the cost vs
estimate quality trade-off described above.
:return: An n x n sparse upper-triangular CoordinateMatrix of
cosine similarities between columns of this matrix.
>>> rows = sc.parallelize([[1, 2], [1, 5]])
>>> mat = RowMatrix(rows)
>>> sims = mat.columnSimilarities()
>>> sims.entries.first().value
0.91914503... | python/pyspark/mllib/linalg/distributed.py | def columnSimilarities(self, threshold=0.0):
"""
Compute similarities between columns of this matrix.
The threshold parameter is a trade-off knob between estimate
quality and computational cost.
The default threshold setting of 0 guarantees deterministically
correct results, but uses the brute-force approach of computing
normalized dot products.
Setting the threshold to positive values uses a sampling
approach and incurs strictly less computational cost than the
brute-force approach. However the similarities computed will
be estimates.
The sampling guarantees relative-error correctness for those
pairs of columns that have similarity greater than the given
similarity threshold.
To describe the guarantee, we set some notation:
* Let A be the smallest in magnitude non-zero element of
this matrix.
* Let B be the largest in magnitude non-zero element of
this matrix.
* Let L be the maximum number of non-zeros per row.
For example, for {0,1} matrices: A=B=1.
Another example, for the Netflix matrix: A=1, B=5
For those column pairs that are above the threshold, the
computed similarity is correct to within 20% relative error
with probability at least 1 - (0.981)^(10/B).
The shuffle size is bounded by the *smaller* of the following
two expressions:
* O(n log(n) L / (threshold * A))
* O(m L^2)
The latter is the cost of the brute-force approach, so for
non-zero thresholds, the cost is always cheaper than the
brute-force approach.
:param threshold: Set to 0 for deterministic guaranteed
correctness. Similarities above this
threshold are estimated with the cost vs
estimate quality trade-off described above.
:return: An n x n sparse upper-triangular CoordinateMatrix of
cosine similarities between columns of this matrix.
>>> rows = sc.parallelize([[1, 2], [1, 5]])
>>> mat = RowMatrix(rows)
>>> sims = mat.columnSimilarities()
>>> sims.entries.first().value
0.91914503...
"""
java_sims_mat = self._java_matrix_wrapper.call("columnSimilarities", float(threshold))
return CoordinateMatrix(java_sims_mat) | def columnSimilarities(self, threshold=0.0):
"""
Compute similarities between columns of this matrix.
The threshold parameter is a trade-off knob between estimate
quality and computational cost.
The default threshold setting of 0 guarantees deterministically
correct results, but uses the brute-force approach of computing
normalized dot products.
Setting the threshold to positive values uses a sampling
approach and incurs strictly less computational cost than the
brute-force approach. However the similarities computed will
be estimates.
The sampling guarantees relative-error correctness for those
pairs of columns that have similarity greater than the given
similarity threshold.
To describe the guarantee, we set some notation:
* Let A be the smallest in magnitude non-zero element of
this matrix.
* Let B be the largest in magnitude non-zero element of
this matrix.
* Let L be the maximum number of non-zeros per row.
For example, for {0,1} matrices: A=B=1.
Another example, for the Netflix matrix: A=1, B=5
For those column pairs that are above the threshold, the
computed similarity is correct to within 20% relative error
with probability at least 1 - (0.981)^(10/B).
The shuffle size is bounded by the *smaller* of the following
two expressions:
* O(n log(n) L / (threshold * A))
* O(m L^2)
The latter is the cost of the brute-force approach, so for
non-zero thresholds, the cost is always cheaper than the
brute-force approach.
:param threshold: Set to 0 for deterministic guaranteed
correctness. Similarities above this
threshold are estimated with the cost vs
estimate quality trade-off described above.
:return: An n x n sparse upper-triangular CoordinateMatrix of
cosine similarities between columns of this matrix.
>>> rows = sc.parallelize([[1, 2], [1, 5]])
>>> mat = RowMatrix(rows)
>>> sims = mat.columnSimilarities()
>>> sims.entries.first().value
0.91914503...
"""
java_sims_mat = self._java_matrix_wrapper.call("columnSimilarities", float(threshold))
return CoordinateMatrix(java_sims_mat) | [
"Compute",
"similarities",
"between",
"columns",
"of",
"this",
"matrix",
"."
] | apache/spark | python | https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/mllib/linalg/distributed.py#L201-L260 | [
"def",
"columnSimilarities",
"(",
"self",
",",
"threshold",
"=",
"0.0",
")",
":",
"java_sims_mat",
"=",
"self",
".",
"_java_matrix_wrapper",
".",
"call",
"(",
"\"columnSimilarities\"",
",",
"float",
"(",
"threshold",
")",
")",
"return",
"CoordinateMatrix",
"(",
"java_sims_mat",
")"
] | 618d6bff71073c8c93501ab7392c3cc579730f0b |
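To make the threshold trade-off above concrete, this sketch compares the exact result (threshold=0) with a sampled estimate. The toy matrix is an assumption, and agreement is only probabilistic per the 1 - (0.981)^(10/B) guarantee quoted in the record.

from pyspark.mllib.linalg.distributed import RowMatrix

mat = RowMatrix(sc.parallelize([[1.0, 2.0], [1.0, 5.0], [2.0, 4.0]]))
exact = {(e.i, e.j): e.value for e in mat.columnSimilarities(0.0).entries.collect()}
approx = {(e.i, e.j): e.value for e in mat.columnSimilarities(0.5).entries.collect()}
for key in exact:
    # Estimated entries should be close to the exact cosine similarities.
    print(key, exact[key], approx.get(key))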
train | RowMatrix.tallSkinnyQR | Compute the QR decomposition of this RowMatrix.
The implementation is designed to optimize the QR decomposition
(factorization) for the RowMatrix of a tall and skinny shape.
Reference:
Paul G. Constantine, David F. Gleich. "Tall and skinny QR
factorizations in MapReduce architectures"
(https://doi.org/10.1145/1996092.1996103)
:param computeQ: whether to compute Q
:return: QRDecomposition(Q: RowMatrix, R: Matrix), where
Q = None if computeQ is False.
>>> rows = sc.parallelize([[3, -6], [4, -8], [0, 1]])
>>> mat = RowMatrix(rows)
>>> decomp = mat.tallSkinnyQR(True)
>>> Q = decomp.Q
>>> R = decomp.R
>>> # Test with absolute values
>>> absQRows = Q.rows.map(lambda row: abs(row.toArray()).tolist())
>>> absQRows.collect()
[[0.6..., 0.0], [0.8..., 0.0], [0.0, 1.0]]
>>> # Test with absolute values
>>> abs(R.toArray()).tolist()
[[5.0, 10.0], [0.0, 1.0]] | python/pyspark/mllib/linalg/distributed.py | def tallSkinnyQR(self, computeQ=False):
"""
Compute the QR decomposition of this RowMatrix.
The implementation is designed to optimize the QR decomposition
(factorization) for the RowMatrix of a tall and skinny shape.
Reference:
Paul G. Constantine, David F. Gleich. "Tall and skinny QR
factorizations in MapReduce architectures"
(https://doi.org/10.1145/1996092.1996103)
:param computeQ: whether to compute Q
:return: QRDecomposition(Q: RowMatrix, R: Matrix), where
Q = None if computeQ is False.
>>> rows = sc.parallelize([[3, -6], [4, -8], [0, 1]])
>>> mat = RowMatrix(rows)
>>> decomp = mat.tallSkinnyQR(True)
>>> Q = decomp.Q
>>> R = decomp.R
>>> # Test with absolute values
>>> absQRows = Q.rows.map(lambda row: abs(row.toArray()).tolist())
>>> absQRows.collect()
[[0.6..., 0.0], [0.8..., 0.0], [0.0, 1.0]]
>>> # Test with absolute values
>>> abs(R.toArray()).tolist()
[[5.0, 10.0], [0.0, 1.0]]
"""
decomp = JavaModelWrapper(self._java_matrix_wrapper.call("tallSkinnyQR", computeQ))
if computeQ:
java_Q = decomp.call("Q")
Q = RowMatrix(java_Q)
else:
Q = None
R = decomp.call("R")
return QRDecomposition(Q, R) | def tallSkinnyQR(self, computeQ=False):
"""
Compute the QR decomposition of this RowMatrix.
The implementation is designed to optimize the QR decomposition
(factorization) for the RowMatrix of a tall and skinny shape.
Reference:
Paul G. Constantine, David F. Gleich. "Tall and skinny QR
factorizations in MapReduce architectures"
(https://doi.org/10.1145/1996092.1996103)
:param computeQ: whether to compute Q
:return: QRDecomposition(Q: RowMatrix, R: Matrix), where
Q = None if computeQ is False.
>>> rows = sc.parallelize([[3, -6], [4, -8], [0, 1]])
>>> mat = RowMatrix(rows)
>>> decomp = mat.tallSkinnyQR(True)
>>> Q = decomp.Q
>>> R = decomp.R
>>> # Test with absolute values
>>> absQRows = Q.rows.map(lambda row: abs(row.toArray()).tolist())
>>> absQRows.collect()
[[0.6..., 0.0], [0.8..., 0.0], [0.0, 1.0]]
>>> # Test with absolute values
>>> abs(R.toArray()).tolist()
[[5.0, 10.0], [0.0, 1.0]]
"""
decomp = JavaModelWrapper(self._java_matrix_wrapper.call("tallSkinnyQR", computeQ))
if computeQ:
java_Q = decomp.call("Q")
Q = RowMatrix(java_Q)
else:
Q = None
R = decomp.call("R")
return QRDecomposition(Q, R) | [
"Compute",
"the",
"QR",
"decomposition",
"of",
"this",
"RowMatrix",
"."
] | apache/spark | python | https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/mllib/linalg/distributed.py#L263-L301 | [
"def",
"tallSkinnyQR",
"(",
"self",
",",
"computeQ",
"=",
"False",
")",
":",
"decomp",
"=",
"JavaModelWrapper",
"(",
"self",
".",
"_java_matrix_wrapper",
".",
"call",
"(",
"\"tallSkinnyQR\"",
",",
"computeQ",
")",
")",
"if",
"computeQ",
":",
"java_Q",
"=",
"decomp",
".",
"call",
"(",
"\"Q\"",
")",
"Q",
"=",
"RowMatrix",
"(",
"java_Q",
")",
"else",
":",
"Q",
"=",
"None",
"R",
"=",
"decomp",
".",
"call",
"(",
"\"R\"",
")",
"return",
"QRDecomposition",
"(",
"Q",
",",
"R",
")"
] | 618d6bff71073c8c93501ab7392c3cc579730f0b |
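A hedged NumPy verification of the factorization above: Q should have orthonormal columns and Q * R should reconstruct the input. Collecting Q to the driver is only sensible for a toy matrix, which this sketch assumes, along with an active SparkContext sc.

import numpy as np
from pyspark.mllib.linalg.distributed import RowMatrix

A = [[3.0, -6.0], [4.0, -8.0], [0.0, 1.0]]
decomp = RowMatrix(sc.parallelize(A)).tallSkinnyQR(computeQ=True)
Q = np.array([row.toArray() for row in decomp.Q.rows.collect()])
R = decomp.R.toArray()
assert np.allclose(Q.T.dot(Q), np.eye(2))  # columns of Q are orthonormal
assert np.allclose(Q.dot(R), A)            # Q * R reconstructs A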
train | RowMatrix.computeSVD | Computes the singular value decomposition of the RowMatrix.
The given row matrix A of dimension (m X n) is decomposed into
U * s * V' where
* U: (m X k) (left singular vectors) is a RowMatrix whose
columns are the eigenvectors of (A X A')
* s: DenseVector consisting of square root of the eigenvalues
(singular values) in descending order.
* V: (n X k) (right singular vectors) is a Matrix whose columns
are the eigenvectors of (A' X A)
For more specific details on implementation, please refer
the Scala documentation.
:param k: Number of leading singular values to keep (`0 < k <= n`).
It might return fewer than k if there are numerically zero singular values
or if not enough Ritz values have converged before the maximum number of
Arnoldi update iterations is reached (which can happen when matrix A is ill-conditioned).
:param computeU: Whether or not to compute U. If set to be
True, then U is computed by A * V * s^-1
:param rCond: Reciprocal condition number. All singular values
smaller than rCond * s[0] are treated as zero
where s[0] is the largest singular value.
:returns: :py:class:`SingularValueDecomposition`
>>> rows = sc.parallelize([[3, 1, 1], [-1, 3, 1]])
>>> rm = RowMatrix(rows)
>>> svd_model = rm.computeSVD(2, True)
>>> svd_model.U.rows.collect()
[DenseVector([-0.7071, 0.7071]), DenseVector([-0.7071, -0.7071])]
>>> svd_model.s
DenseVector([3.4641, 3.1623])
>>> svd_model.V
DenseMatrix(3, 2, [-0.4082, -0.8165, -0.4082, 0.8944, -0.4472, 0.0], 0) | python/pyspark/mllib/linalg/distributed.py | def computeSVD(self, k, computeU=False, rCond=1e-9):
"""
Computes the singular value decomposition of the RowMatrix.
The given row matrix A of dimension (m X n) is decomposed into
U * s * V' where
* U: (m X k) (left singular vectors) is a RowMatrix whose
columns are the eigenvectors of (A X A')
* s: DenseVector consisting of square root of the eigenvalues
(singular values) in descending order.
* V: (n X k) (right singular vectors) is a Matrix whose columns
are the eigenvectors of (A' X A)
For more specific details on implementation, please refer
the Scala documentation.
:param k: Number of leading singular values to keep (`0 < k <= n`).
It might return fewer than k if there are numerically zero singular values
or if not enough Ritz values have converged before the maximum number of
Arnoldi update iterations is reached (which can happen when matrix A is ill-conditioned).
:param computeU: Whether or not to compute U. If set to be
True, then U is computed by A * V * s^-1
:param rCond: Reciprocal condition number. All singular values
smaller than rCond * s[0] are treated as zero
where s[0] is the largest singular value.
:returns: :py:class:`SingularValueDecomposition`
>>> rows = sc.parallelize([[3, 1, 1], [-1, 3, 1]])
>>> rm = RowMatrix(rows)
>>> svd_model = rm.computeSVD(2, True)
>>> svd_model.U.rows.collect()
[DenseVector([-0.7071, 0.7071]), DenseVector([-0.7071, -0.7071])]
>>> svd_model.s
DenseVector([3.4641, 3.1623])
>>> svd_model.V
DenseMatrix(3, 2, [-0.4082, -0.8165, -0.4082, 0.8944, -0.4472, 0.0], 0)
"""
j_model = self._java_matrix_wrapper.call(
"computeSVD", int(k), bool(computeU), float(rCond))
return SingularValueDecomposition(j_model) | def computeSVD(self, k, computeU=False, rCond=1e-9):
"""
Computes the singular value decomposition of the RowMatrix.
The given row matrix A of dimension (m X n) is decomposed into
U * s * V' where
* U: (m X k) (left singular vectors) is a RowMatrix whose
columns are the eigenvectors of (A X A')
* s: DenseVector consisting of square root of the eigenvalues
(singular values) in descending order.
* V: (n X k) (right singular vectors) is a Matrix whose columns
are the eigenvectors of (A' X A)
For more specific details on implementation, please refer
the Scala documentation.
:param k: Number of leading singular values to keep (`0 < k <= n`).
It might return fewer than k if there are numerically zero singular values
or if not enough Ritz values have converged before the maximum number of
Arnoldi update iterations is reached (which can happen when matrix A is ill-conditioned).
:param computeU: Whether or not to compute U. If set to be
True, then U is computed by A * V * s^-1
:param rCond: Reciprocal condition number. All singular values
smaller than rCond * s[0] are treated as zero
where s[0] is the largest singular value.
:returns: :py:class:`SingularValueDecomposition`
>>> rows = sc.parallelize([[3, 1, 1], [-1, 3, 1]])
>>> rm = RowMatrix(rows)
>>> svd_model = rm.computeSVD(2, True)
>>> svd_model.U.rows.collect()
[DenseVector([-0.7071, 0.7071]), DenseVector([-0.7071, -0.7071])]
>>> svd_model.s
DenseVector([3.4641, 3.1623])
>>> svd_model.V
DenseMatrix(3, 2, [-0.4082, -0.8165, -0.4082, 0.8944, -0.4472, 0.0], 0)
"""
j_model = self._java_matrix_wrapper.call(
"computeSVD", int(k), bool(computeU), float(rCond))
return SingularValueDecomposition(j_model) | [
"Computes",
"the",
"singular",
"value",
"decomposition",
"of",
"the",
"RowMatrix",
"."
] | apache/spark | python | https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/mllib/linalg/distributed.py#L304-L345 | [
"def",
"computeSVD",
"(",
"self",
",",
"k",
",",
"computeU",
"=",
"False",
",",
"rCond",
"=",
"1e-9",
")",
":",
"j_model",
"=",
"self",
".",
"_java_matrix_wrapper",
".",
"call",
"(",
"\"computeSVD\"",
",",
"int",
"(",
"k",
")",
",",
"bool",
"(",
"computeU",
")",
",",
"float",
"(",
"rCond",
")",
")",
"return",
"SingularValueDecomposition",
"(",
"j_model",
")"
] | 618d6bff71073c8c93501ab7392c3cc579730f0b |
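A hedged check that the factors returned above reconstruct the input, A = U * diag(s) * V'. The matrix is tiny by assumption so the factors can be collected to the driver; sc is an assumed SparkContext.

import numpy as np
from pyspark.mllib.linalg.distributed import RowMatrix

A = [[3.0, 1.0, 1.0], [-1.0, 3.0, 1.0]]
svd = RowMatrix(sc.parallelize(A)).computeSVD(2, computeU=True)
U = np.array([row.toArray() for row in svd.U.rows.collect()])
S = np.diag(svd.s.toArray())
V = svd.V.toArray()
assert np.allclose(U.dot(S).dot(V.T), A)  # A = U * diag(s) * V'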
train | RowMatrix.multiply | Multiply this matrix by a local dense matrix on the right.
:param matrix: a local dense matrix whose number of rows must match the number of columns
of this matrix
:returns: :py:class:`RowMatrix`
>>> rm = RowMatrix(sc.parallelize([[0, 1], [2, 3]]))
>>> rm.multiply(DenseMatrix(2, 2, [0, 2, 1, 3])).rows.collect()
[DenseVector([2.0, 3.0]), DenseVector([6.0, 11.0])] | python/pyspark/mllib/linalg/distributed.py | def multiply(self, matrix):
"""
Multiply this matrix by a local dense matrix on the right.
:param matrix: a local dense matrix whose number of rows must match the number of columns
of this matrix
:returns: :py:class:`RowMatrix`
>>> rm = RowMatrix(sc.parallelize([[0, 1], [2, 3]]))
>>> rm.multiply(DenseMatrix(2, 2, [0, 2, 1, 3])).rows.collect()
[DenseVector([2.0, 3.0]), DenseVector([6.0, 11.0])]
"""
if not isinstance(matrix, DenseMatrix):
raise ValueError("Only multiplication with DenseMatrix "
"is supported.")
j_model = self._java_matrix_wrapper.call("multiply", matrix)
return RowMatrix(j_model) | def multiply(self, matrix):
"""
Multiply this matrix by a local dense matrix on the right.
:param matrix: a local dense matrix whose number of rows must match the number of columns
of this matrix
:returns: :py:class:`RowMatrix`
>>> rm = RowMatrix(sc.parallelize([[0, 1], [2, 3]]))
>>> rm.multiply(DenseMatrix(2, 2, [0, 2, 1, 3])).rows.collect()
[DenseVector([2.0, 3.0]), DenseVector([6.0, 11.0])]
"""
if not isinstance(matrix, DenseMatrix):
raise ValueError("Only multiplication with DenseMatrix "
"is supported.")
j_model = self._java_matrix_wrapper.call("multiply", matrix)
return RowMatrix(j_model) | [
"Multiply",
"this",
"matrix",
"by",
"a",
"local",
"dense",
"matrix",
"on",
"the",
"right",
"."
] | apache/spark | python | https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/mllib/linalg/distributed.py#L373-L389 | [
"def",
"multiply",
"(",
"self",
",",
"matrix",
")",
":",
"if",
"not",
"isinstance",
"(",
"matrix",
",",
"DenseMatrix",
")",
":",
"raise",
"ValueError",
"(",
"\"Only multiplication with DenseMatrix \"",
"\"is supported.\"",
")",
"j_model",
"=",
"self",
".",
"_java_matrix_wrapper",
".",
"call",
"(",
"\"multiply\"",
",",
"matrix",
")",
"return",
"RowMatrix",
"(",
"j_model",
")"
] | 618d6bff71073c8c93501ab7392c3cc579730f0b |
train | SingularValueDecomposition.U | Returns a distributed matrix whose columns are the left
singular vectors of the SingularValueDecomposition if computeU was set to be True. | python/pyspark/mllib/linalg/distributed.py | def U(self):
"""
Returns a distributed matrix whose columns are the left
singular vectors of the SingularValueDecomposition if computeU was set to be True.
"""
u = self.call("U")
if u is not None:
mat_name = u.getClass().getSimpleName()
if mat_name == "RowMatrix":
return RowMatrix(u)
elif mat_name == "IndexedRowMatrix":
return IndexedRowMatrix(u)
else:
raise TypeError("Expected RowMatrix/IndexedRowMatrix got %s" % mat_name) | def U(self):
"""
Returns a distributed matrix whose columns are the left
singular vectors of the SingularValueDecomposition if computeU was set to be True.
"""
u = self.call("U")
if u is not None:
mat_name = u.getClass().getSimpleName()
if mat_name == "RowMatrix":
return RowMatrix(u)
elif mat_name == "IndexedRowMatrix":
return IndexedRowMatrix(u)
else:
raise TypeError("Expected RowMatrix/IndexedRowMatrix got %s" % mat_name) | [
"Returns",
"a",
"distributed",
"matrix",
"whose",
"columns",
"are",
"the",
"left",
"singular",
"vectors",
"of",
"the",
"SingularValueDecomposition",
"if",
"computeU",
"was",
"set",
"to",
"be",
"True",
"."
] | apache/spark | python | https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/mllib/linalg/distributed.py#L401-L414 | [
"def",
"U",
"(",
"self",
")",
":",
"u",
"=",
"self",
".",
"call",
"(",
"\"U\"",
")",
"if",
"u",
"is",
"not",
"None",
":",
"mat_name",
"=",
"u",
".",
"getClass",
"(",
")",
".",
"getSimpleName",
"(",
")",
"if",
"mat_name",
"==",
"\"RowMatrix\"",
":",
"return",
"RowMatrix",
"(",
"u",
")",
"elif",
"mat_name",
"==",
"\"IndexedRowMatrix\"",
":",
"return",
"IndexedRowMatrix",
"(",
"u",
")",
"else",
":",
"raise",
"TypeError",
"(",
"\"Expected RowMatrix/IndexedRowMatrix got %s\"",
"%",
"mat_name",
")"
] | 618d6bff71073c8c93501ab7392c3cc579730f0b |
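Because U above is populated only when the decomposition was computed with computeU=True, callers typically guard on None. A sketch reusing the toy RowMatrix pattern from the records above:

from pyspark.mllib.linalg.distributed import RowMatrix

rows = sc.parallelize([[3.0, 1.0], [1.0, 3.0]])
svd = RowMatrix(rows).computeSVD(1)  # computeU defaults to False
assert svd.U is None
svd = RowMatrix(rows).computeSVD(1, computeU=True)
print(type(svd.U).__name__)  # RowMatrix here; IndexedRowMatrix when the
                             # decomposition came from an IndexedRowMatrix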
train | IndexedRowMatrix.rows | Rows of the IndexedRowMatrix stored as an RDD of IndexedRows.
>>> mat = IndexedRowMatrix(sc.parallelize([IndexedRow(0, [1, 2, 3]),
... IndexedRow(1, [4, 5, 6])]))
>>> rows = mat.rows
>>> rows.first()
IndexedRow(0, [1.0,2.0,3.0]) | python/pyspark/mllib/linalg/distributed.py | def rows(self):
"""
Rows of the IndexedRowMatrix stored as an RDD of IndexedRows.
>>> mat = IndexedRowMatrix(sc.parallelize([IndexedRow(0, [1, 2, 3]),
... IndexedRow(1, [4, 5, 6])]))
>>> rows = mat.rows
>>> rows.first()
IndexedRow(0, [1.0,2.0,3.0])
"""
# We use DataFrames for serialization of IndexedRows from
# Java, so we first convert the RDD of rows to a DataFrame
# on the Scala/Java side. Then we map each Row in the
# DataFrame back to an IndexedRow on this side.
rows_df = callMLlibFunc("getIndexedRows", self._java_matrix_wrapper._java_model)
rows = rows_df.rdd.map(lambda row: IndexedRow(row[0], row[1]))
return rows | def rows(self):
"""
Rows of the IndexedRowMatrix stored as an RDD of IndexedRows.
>>> mat = IndexedRowMatrix(sc.parallelize([IndexedRow(0, [1, 2, 3]),
... IndexedRow(1, [4, 5, 6])]))
>>> rows = mat.rows
>>> rows.first()
IndexedRow(0, [1.0,2.0,3.0])
"""
# We use DataFrames for serialization of IndexedRows from
# Java, so we first convert the RDD of rows to a DataFrame
# on the Scala/Java side. Then we map each Row in the
# DataFrame back to an IndexedRow on this side.
rows_df = callMLlibFunc("getIndexedRows", self._java_matrix_wrapper._java_model)
rows = rows_df.rdd.map(lambda row: IndexedRow(row[0], row[1]))
return rows | [
"Rows",
"of",
"the",
"IndexedRowMatrix",
"stored",
"as",
"an",
"RDD",
"of",
"IndexedRows",
"."
] | apache/spark | python | https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/mllib/linalg/distributed.py#L519-L535 | [
"def",
"rows",
"(",
"self",
")",
":",
"# We use DataFrames for serialization of IndexedRows from",
"# Java, so we first convert the RDD of rows to a DataFrame",
"# on the Scala/Java side. Then we map each Row in the",
"# DataFrame back to an IndexedRow on this side.",
"rows_df",
"=",
"callMLlibFunc",
"(",
"\"getIndexedRows\"",
",",
"self",
".",
"_java_matrix_wrapper",
".",
"_java_model",
")",
"rows",
"=",
"rows_df",
".",
"rdd",
".",
"map",
"(",
"lambda",
"row",
":",
"IndexedRow",
"(",
"row",
"[",
"0",
"]",
",",
"row",
"[",
"1",
"]",
")",
")",
"return",
"rows"
] | 618d6bff71073c8c93501ab7392c3cc579730f0b |
train | IndexedRowMatrix.toBlockMatrix | Convert this matrix to a BlockMatrix.
:param rowsPerBlock: Number of rows that make up each block.
The blocks forming the final rows are not
required to have the given number of rows.
:param colsPerBlock: Number of columns that make up each block.
The blocks forming the final columns are not
required to have the given number of columns.
>>> rows = sc.parallelize([IndexedRow(0, [1, 2, 3]),
... IndexedRow(6, [4, 5, 6])])
>>> mat = IndexedRowMatrix(rows).toBlockMatrix()
>>> # This IndexedRowMatrix will have 7 effective rows, due to
>>> # the highest row index being 6, and the ensuing
>>> # BlockMatrix will have 7 rows as well.
>>> print(mat.numRows())
7
>>> print(mat.numCols())
3 | python/pyspark/mllib/linalg/distributed.py | def toBlockMatrix(self, rowsPerBlock=1024, colsPerBlock=1024):
"""
Convert this matrix to a BlockMatrix.
:param rowsPerBlock: Number of rows that make up each block.
The blocks forming the final rows are not
required to have the given number of rows.
:param colsPerBlock: Number of columns that make up each block.
The blocks forming the final columns are not
required to have the given number of columns.
>>> rows = sc.parallelize([IndexedRow(0, [1, 2, 3]),
... IndexedRow(6, [4, 5, 6])])
>>> mat = IndexedRowMatrix(rows).toBlockMatrix()
>>> # This IndexedRowMatrix will have 7 effective rows, due to
>>> # the highest row index being 6, and the ensuing
>>> # BlockMatrix will have 7 rows as well.
>>> print(mat.numRows())
7
>>> print(mat.numCols())
3
"""
java_block_matrix = self._java_matrix_wrapper.call("toBlockMatrix",
rowsPerBlock,
colsPerBlock)
return BlockMatrix(java_block_matrix, rowsPerBlock, colsPerBlock) | def toBlockMatrix(self, rowsPerBlock=1024, colsPerBlock=1024):
"""
Convert this matrix to a BlockMatrix.
:param rowsPerBlock: Number of rows that make up each block.
The blocks forming the final rows are not
required to have the given number of rows.
:param colsPerBlock: Number of columns that make up each block.
The blocks forming the final columns are not
required to have the given number of columns.
>>> rows = sc.parallelize([IndexedRow(0, [1, 2, 3]),
... IndexedRow(6, [4, 5, 6])])
>>> mat = IndexedRowMatrix(rows).toBlockMatrix()
>>> # This IndexedRowMatrix will have 7 effective rows, due to
>>> # the highest row index being 6, and the ensuing
>>> # BlockMatrix will have 7 rows as well.
>>> print(mat.numRows())
7
>>> print(mat.numCols())
3
"""
java_block_matrix = self._java_matrix_wrapper.call("toBlockMatrix",
rowsPerBlock,
colsPerBlock)
return BlockMatrix(java_block_matrix, rowsPerBlock, colsPerBlock) | [
"Convert",
"this",
"matrix",
"to",
"a",
"BlockMatrix",
"."
] | apache/spark | python | https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/mllib/linalg/distributed.py#L631-L658 | [
"def",
"toBlockMatrix",
"(",
"self",
",",
"rowsPerBlock",
"=",
"1024",
",",
"colsPerBlock",
"=",
"1024",
")",
":",
"java_block_matrix",
"=",
"self",
".",
"_java_matrix_wrapper",
".",
"call",
"(",
"\"toBlockMatrix\"",
",",
"rowsPerBlock",
",",
"colsPerBlock",
")",
"return",
"BlockMatrix",
"(",
"java_block_matrix",
",",
"rowsPerBlock",
",",
"colsPerBlock",
")"
] | 618d6bff71073c8c93501ab7392c3cc579730f0b |
train | IndexedRowMatrix.multiply | Multiply this matrix by a local dense matrix on the right.
:param matrix: a local dense matrix whose number of rows must match the number of columns
of this matrix
:returns: :py:class:`IndexedRowMatrix`
>>> mat = IndexedRowMatrix(sc.parallelize([(0, (0, 1)), (1, (2, 3))]))
>>> mat.multiply(DenseMatrix(2, 2, [0, 2, 1, 3])).rows.collect()
[IndexedRow(0, [2.0,3.0]), IndexedRow(1, [6.0,11.0])] | python/pyspark/mllib/linalg/distributed.py | def multiply(self, matrix):
"""
Multiply this matrix by a local dense matrix on the right.
:param matrix: a local dense matrix whose number of rows must match the number of columns
of this matrix
:returns: :py:class:`IndexedRowMatrix`
>>> mat = IndexedRowMatrix(sc.parallelize([(0, (0, 1)), (1, (2, 3))]))
>>> mat.multiply(DenseMatrix(2, 2, [0, 2, 1, 3])).rows.collect()
[IndexedRow(0, [2.0,3.0]), IndexedRow(1, [6.0,11.0])]
"""
if not isinstance(matrix, DenseMatrix):
raise ValueError("Only multiplication with DenseMatrix "
"is supported.")
return IndexedRowMatrix(self._java_matrix_wrapper.call("multiply", matrix)) | def multiply(self, matrix):
"""
Multiply this matrix by a local dense matrix on the right.
:param matrix: a local dense matrix whose number of rows must match the number of columns
of this matrix
:returns: :py:class:`IndexedRowMatrix`
>>> mat = IndexedRowMatrix(sc.parallelize([(0, (0, 1)), (1, (2, 3))]))
>>> mat.multiply(DenseMatrix(2, 2, [0, 2, 1, 3])).rows.collect()
[IndexedRow(0, [2.0,3.0]), IndexedRow(1, [6.0,11.0])]
"""
if not isinstance(matrix, DenseMatrix):
raise ValueError("Only multiplication with DenseMatrix "
"is supported.")
return IndexedRowMatrix(self._java_matrix_wrapper.call("multiply", matrix)) | [
"Multiply",
"this",
"matrix",
"by",
"a",
"local",
"dense",
"matrix",
"on",
"the",
"right",
"."
] | apache/spark | python | https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/mllib/linalg/distributed.py#L705-L720 | [
"def",
"multiply",
"(",
"self",
",",
"matrix",
")",
":",
"if",
"not",
"isinstance",
"(",
"matrix",
",",
"DenseMatrix",
")",
":",
"raise",
"ValueError",
"(",
"\"Only multiplication with DenseMatrix \"",
"\"is supported.\"",
")",
"return",
"IndexedRowMatrix",
"(",
"self",
".",
"_java_matrix_wrapper",
".",
"call",
"(",
"\"multiply\"",
",",
"matrix",
")",
")"
] | 618d6bff71073c8c93501ab7392c3cc579730f0b |
train | CoordinateMatrix.entries | Entries of the CoordinateMatrix stored as an RDD of
MatrixEntries.
>>> mat = CoordinateMatrix(sc.parallelize([MatrixEntry(0, 0, 1.2),
... MatrixEntry(6, 4, 2.1)]))
>>> entries = mat.entries
>>> entries.first()
MatrixEntry(0, 0, 1.2) | python/pyspark/mllib/linalg/distributed.py | def entries(self):
"""
Entries of the CoordinateMatrix stored as an RDD of
MatrixEntries.
>>> mat = CoordinateMatrix(sc.parallelize([MatrixEntry(0, 0, 1.2),
... MatrixEntry(6, 4, 2.1)]))
>>> entries = mat.entries
>>> entries.first()
MatrixEntry(0, 0, 1.2)
"""
# We use DataFrames for serialization of MatrixEntry entries
# from Java, so we first convert the RDD of entries to a
# DataFrame on the Scala/Java side. Then we map each Row in
# the DataFrame back to a MatrixEntry on this side.
entries_df = callMLlibFunc("getMatrixEntries", self._java_matrix_wrapper._java_model)
entries = entries_df.rdd.map(lambda row: MatrixEntry(row[0], row[1], row[2]))
return entries | def entries(self):
"""
Entries of the CoordinateMatrix stored as an RDD of
MatrixEntries.
>>> mat = CoordinateMatrix(sc.parallelize([MatrixEntry(0, 0, 1.2),
... MatrixEntry(6, 4, 2.1)]))
>>> entries = mat.entries
>>> entries.first()
MatrixEntry(0, 0, 1.2)
"""
# We use DataFrames for serialization of MatrixEntry entries
# from Java, so we first convert the RDD of entries to a
# DataFrame on the Scala/Java side. Then we map each Row in
# the DataFrame back to a MatrixEntry on this side.
entries_df = callMLlibFunc("getMatrixEntries", self._java_matrix_wrapper._java_model)
entries = entries_df.rdd.map(lambda row: MatrixEntry(row[0], row[1], row[2]))
return entries | [
"Entries",
"of",
"the",
"CoordinateMatrix",
"stored",
"as",
"an",
"RDD",
"of",
"MatrixEntries",
"."
] | apache/spark | python | https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/mllib/linalg/distributed.py#L811-L828 | [
"def",
"entries",
"(",
"self",
")",
":",
"# We use DataFrames for serialization of MatrixEntry entries",
"# from Java, so we first convert the RDD of entries to a",
"# DataFrame on the Scala/Java side. Then we map each Row in",
"# the DataFrame back to a MatrixEntry on this side.",
"entries_df",
"=",
"callMLlibFunc",
"(",
"\"getMatrixEntries\"",
",",
"self",
".",
"_java_matrix_wrapper",
".",
"_java_model",
")",
"entries",
"=",
"entries_df",
".",
"rdd",
".",
"map",
"(",
"lambda",
"row",
":",
"MatrixEntry",
"(",
"row",
"[",
"0",
"]",
",",
"row",
"[",
"1",
"]",
",",
"row",
"[",
"2",
"]",
")",
")",
"return",
"entries"
] | 618d6bff71073c8c93501ab7392c3cc579730f0b |
train | BlockMatrix.blocks | The RDD of sub-matrix blocks
((blockRowIndex, blockColIndex), sub-matrix) that form this
distributed matrix.
>>> mat = BlockMatrix(
... sc.parallelize([((0, 0), Matrices.dense(3, 2, [1, 2, 3, 4, 5, 6])),
... ((1, 0), Matrices.dense(3, 2, [7, 8, 9, 10, 11, 12]))]), 3, 2)
>>> blocks = mat.blocks
>>> blocks.first()
((0, 0), DenseMatrix(3, 2, [1.0, 2.0, 3.0, 4.0, 5.0, 6.0], 0)) | python/pyspark/mllib/linalg/distributed.py | def blocks(self):
"""
The RDD of sub-matrix blocks
((blockRowIndex, blockColIndex), sub-matrix) that form this
distributed matrix.
>>> mat = BlockMatrix(
... sc.parallelize([((0, 0), Matrices.dense(3, 2, [1, 2, 3, 4, 5, 6])),
... ((1, 0), Matrices.dense(3, 2, [7, 8, 9, 10, 11, 12]))]), 3, 2)
>>> blocks = mat.blocks
>>> blocks.first()
((0, 0), DenseMatrix(3, 2, [1.0, 2.0, 3.0, 4.0, 5.0, 6.0], 0))
"""
# We use DataFrames for serialization of sub-matrix blocks
# from Java, so we first convert the RDD of blocks to a
# DataFrame on the Scala/Java side. Then we map each Row in
# the DataFrame back to a sub-matrix block on this side.
blocks_df = callMLlibFunc("getMatrixBlocks", self._java_matrix_wrapper._java_model)
blocks = blocks_df.rdd.map(lambda row: ((row[0][0], row[0][1]), row[1]))
return blocks | def blocks(self):
"""
The RDD of sub-matrix blocks
((blockRowIndex, blockColIndex), sub-matrix) that form this
distributed matrix.
>>> mat = BlockMatrix(
... sc.parallelize([((0, 0), Matrices.dense(3, 2, [1, 2, 3, 4, 5, 6])),
... ((1, 0), Matrices.dense(3, 2, [7, 8, 9, 10, 11, 12]))]), 3, 2)
>>> blocks = mat.blocks
>>> blocks.first()
((0, 0), DenseMatrix(3, 2, [1.0, 2.0, 3.0, 4.0, 5.0, 6.0], 0))
"""
# We use DataFrames for serialization of sub-matrix blocks
# from Java, so we first convert the RDD of blocks to a
# DataFrame on the Scala/Java side. Then we map each Row in
# the DataFrame back to a sub-matrix block on this side.
blocks_df = callMLlibFunc("getMatrixBlocks", self._java_matrix_wrapper._java_model)
blocks = blocks_df.rdd.map(lambda row: ((row[0][0], row[0][1]), row[1]))
return blocks | [
"The",
"RDD",
"of",
"sub",
"-",
"matrix",
"blocks",
"((",
"blockRowIndex",
"blockColIndex",
")",
"sub",
"-",
"matrix",
")",
"that",
"form",
"this",
"distributed",
"matrix",
"."
] | apache/spark | python | https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/mllib/linalg/distributed.py#L1051-L1071 | [
"def",
"blocks",
"(",
"self",
")",
":",
"# We use DataFrames for serialization of sub-matrix blocks",
"# from Java, so we first convert the RDD of blocks to a",
"# DataFrame on the Scala/Java side. Then we map each Row in",
"# the DataFrame back to a sub-matrix block on this side.",
"blocks_df",
"=",
"callMLlibFunc",
"(",
"\"getMatrixBlocks\"",
",",
"self",
".",
"_java_matrix_wrapper",
".",
"_java_model",
")",
"blocks",
"=",
"blocks_df",
".",
"rdd",
".",
"map",
"(",
"lambda",
"row",
":",
"(",
"(",
"row",
"[",
"0",
"]",
"[",
"0",
"]",
",",
"row",
"[",
"0",
"]",
"[",
"1",
"]",
")",
",",
"row",
"[",
"1",
"]",
")",
")",
"return",
"blocks"
] | 618d6bff71073c8c93501ab7392c3cc579730f0b |
train | BlockMatrix.persist | Persists the underlying RDD with the specified storage level. | python/pyspark/mllib/linalg/distributed.py | def persist(self, storageLevel):
"""
Persists the underlying RDD with the specified storage level.
"""
if not isinstance(storageLevel, StorageLevel):
raise TypeError("`storageLevel` should be a StorageLevel, got %s" % type(storageLevel))
javaStorageLevel = self._java_matrix_wrapper._sc._getJavaStorageLevel(storageLevel)
self._java_matrix_wrapper.call("persist", javaStorageLevel)
return self | def persist(self, storageLevel):
"""
Persists the underlying RDD with the specified storage level.
"""
if not isinstance(storageLevel, StorageLevel):
raise TypeError("`storageLevel` should be a StorageLevel, got %s" % type(storageLevel))
javaStorageLevel = self._java_matrix_wrapper._sc._getJavaStorageLevel(storageLevel)
self._java_matrix_wrapper.call("persist", javaStorageLevel)
return self | [
"Persists",
"the",
"underlying",
"RDD",
"with",
"the",
"specified",
"storage",
"level",
"."
] | apache/spark | python | https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/mllib/linalg/distributed.py#L1168-L1176 | [
"def",
"persist",
"(",
"self",
",",
"storageLevel",
")",
":",
"if",
"not",
"isinstance",
"(",
"storageLevel",
",",
"StorageLevel",
")",
":",
"raise",
"TypeError",
"(",
"\"`storageLevel` should be a StorageLevel, got %s\"",
"%",
"type",
"(",
"storageLevel",
")",
")",
"javaStorageLevel",
"=",
"self",
".",
"_java_matrix_wrapper",
".",
"_sc",
".",
"_getJavaStorageLevel",
"(",
"storageLevel",
")",
"self",
".",
"_java_matrix_wrapper",
".",
"call",
"(",
"\"persist\"",
",",
"javaStorageLevel",
")",
"return",
"self"
] | 618d6bff71073c8c93501ab7392c3cc579730f0b |
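persist above validates its argument and returns self, so it can be chained onto construction. A usage sketch; the block contents are invented.

from pyspark import StorageLevel
from pyspark.mllib.linalg import Matrices
from pyspark.mllib.linalg.distributed import BlockMatrix

blocks = sc.parallelize([((0, 0), Matrices.dense(2, 2, [1, 2, 3, 4]))])
mat = BlockMatrix(blocks, 2, 2).persist(StorageLevel.MEMORY_AND_DISK)
try:
    mat.persist("MEMORY_ONLY")  # strings are rejected by the isinstance check
except TypeError as e:
    print(e)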
train | BlockMatrix.add | Adds two block matrices together. The matrices must have the
same size and matching `rowsPerBlock` and `colsPerBlock` values.
If one of the sub matrix blocks that are being added is a
SparseMatrix, the resulting sub matrix block will also be a
SparseMatrix, even if it is being added to a DenseMatrix. If
two dense sub matrix blocks are added, the output block will
also be a DenseMatrix.
>>> dm1 = Matrices.dense(3, 2, [1, 2, 3, 4, 5, 6])
>>> dm2 = Matrices.dense(3, 2, [7, 8, 9, 10, 11, 12])
>>> sm = Matrices.sparse(3, 2, [0, 1, 3], [0, 1, 2], [7, 11, 12])
>>> blocks1 = sc.parallelize([((0, 0), dm1), ((1, 0), dm2)])
>>> blocks2 = sc.parallelize([((0, 0), dm1), ((1, 0), dm2)])
>>> blocks3 = sc.parallelize([((0, 0), sm), ((1, 0), dm2)])
>>> mat1 = BlockMatrix(blocks1, 3, 2)
>>> mat2 = BlockMatrix(blocks2, 3, 2)
>>> mat3 = BlockMatrix(blocks3, 3, 2)
>>> mat1.add(mat2).toLocalMatrix()
DenseMatrix(6, 2, [2.0, 4.0, 6.0, 14.0, 16.0, 18.0, 8.0, 10.0, 12.0, 20.0, 22.0, 24.0], 0)
>>> mat1.add(mat3).toLocalMatrix()
DenseMatrix(6, 2, [8.0, 2.0, 3.0, 14.0, 16.0, 18.0, 4.0, 16.0, 18.0, 20.0, 22.0, 24.0], 0) | python/pyspark/mllib/linalg/distributed.py | def add(self, other):
"""
Adds two block matrices together. The matrices must have the
same size and matching `rowsPerBlock` and `colsPerBlock` values.
If one of the sub matrix blocks that are being added is a
SparseMatrix, the resulting sub matrix block will also be a
SparseMatrix, even if it is being added to a DenseMatrix. If
two dense sub matrix blocks are added, the output block will
also be a DenseMatrix.
>>> dm1 = Matrices.dense(3, 2, [1, 2, 3, 4, 5, 6])
>>> dm2 = Matrices.dense(3, 2, [7, 8, 9, 10, 11, 12])
>>> sm = Matrices.sparse(3, 2, [0, 1, 3], [0, 1, 2], [7, 11, 12])
>>> blocks1 = sc.parallelize([((0, 0), dm1), ((1, 0), dm2)])
>>> blocks2 = sc.parallelize([((0, 0), dm1), ((1, 0), dm2)])
>>> blocks3 = sc.parallelize([((0, 0), sm), ((1, 0), dm2)])
>>> mat1 = BlockMatrix(blocks1, 3, 2)
>>> mat2 = BlockMatrix(blocks2, 3, 2)
>>> mat3 = BlockMatrix(blocks3, 3, 2)
>>> mat1.add(mat2).toLocalMatrix()
DenseMatrix(6, 2, [2.0, 4.0, 6.0, 14.0, 16.0, 18.0, 8.0, 10.0, 12.0, 20.0, 22.0, 24.0], 0)
>>> mat1.add(mat3).toLocalMatrix()
DenseMatrix(6, 2, [8.0, 2.0, 3.0, 14.0, 16.0, 18.0, 4.0, 16.0, 18.0, 20.0, 22.0, 24.0], 0)
"""
if not isinstance(other, BlockMatrix):
raise TypeError("Other should be a BlockMatrix, got %s" % type(other))
other_java_block_matrix = other._java_matrix_wrapper._java_model
java_block_matrix = self._java_matrix_wrapper.call("add", other_java_block_matrix)
return BlockMatrix(java_block_matrix, self.rowsPerBlock, self.colsPerBlock) | def add(self, other):
"""
Adds two block matrices together. The matrices must have the
same size and matching `rowsPerBlock` and `colsPerBlock` values.
If one of the sub matrix blocks that are being added is a
SparseMatrix, the resulting sub matrix block will also be a
SparseMatrix, even if it is being added to a DenseMatrix. If
two dense sub matrix blocks are added, the output block will
also be a DenseMatrix.
>>> dm1 = Matrices.dense(3, 2, [1, 2, 3, 4, 5, 6])
>>> dm2 = Matrices.dense(3, 2, [7, 8, 9, 10, 11, 12])
>>> sm = Matrices.sparse(3, 2, [0, 1, 3], [0, 1, 2], [7, 11, 12])
>>> blocks1 = sc.parallelize([((0, 0), dm1), ((1, 0), dm2)])
>>> blocks2 = sc.parallelize([((0, 0), dm1), ((1, 0), dm2)])
>>> blocks3 = sc.parallelize([((0, 0), sm), ((1, 0), dm2)])
>>> mat1 = BlockMatrix(blocks1, 3, 2)
>>> mat2 = BlockMatrix(blocks2, 3, 2)
>>> mat3 = BlockMatrix(blocks3, 3, 2)
>>> mat1.add(mat2).toLocalMatrix()
DenseMatrix(6, 2, [2.0, 4.0, 6.0, 14.0, 16.0, 18.0, 8.0, 10.0, 12.0, 20.0, 22.0, 24.0], 0)
>>> mat1.add(mat3).toLocalMatrix()
DenseMatrix(6, 2, [8.0, 2.0, 3.0, 14.0, 16.0, 18.0, 4.0, 16.0, 18.0, 20.0, 22.0, 24.0], 0)
"""
if not isinstance(other, BlockMatrix):
raise TypeError("Other should be a BlockMatrix, got %s" % type(other))
other_java_block_matrix = other._java_matrix_wrapper._java_model
java_block_matrix = self._java_matrix_wrapper.call("add", other_java_block_matrix)
return BlockMatrix(java_block_matrix, self.rowsPerBlock, self.colsPerBlock) | [
"Adds",
"two",
"block",
"matrices",
"together",
".",
"The",
"matrices",
"must",
"have",
"the",
"same",
"size",
"and",
"matching",
"rowsPerBlock",
"and",
"colsPerBlock",
"values",
".",
"If",
"one",
"of",
"the",
"sub",
"matrix",
"blocks",
"that",
"are",
"being",
"added",
"is",
"a",
"SparseMatrix",
"the",
"resulting",
"sub",
"matrix",
"block",
"will",
"also",
"be",
"a",
"SparseMatrix",
"even",
"if",
"it",
"is",
"being",
"added",
"to",
"a",
"DenseMatrix",
".",
"If",
"two",
"dense",
"sub",
"matrix",
"blocks",
"are",
"added",
"the",
"output",
"block",
"will",
"also",
"be",
"a",
"DenseMatrix",
"."
] | apache/spark | python | https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/mllib/linalg/distributed.py#L1186-L1217 | [
"def",
"add",
"(",
"self",
",",
"other",
")",
":",
"if",
"not",
"isinstance",
"(",
"other",
",",
"BlockMatrix",
")",
":",
"raise",
"TypeError",
"(",
"\"Other should be a BlockMatrix, got %s\"",
"%",
"type",
"(",
"other",
")",
")",
"other_java_block_matrix",
"=",
"other",
".",
"_java_matrix_wrapper",
".",
"_java_model",
"java_block_matrix",
"=",
"self",
".",
"_java_matrix_wrapper",
".",
"call",
"(",
"\"add\"",
",",
"other_java_block_matrix",
")",
"return",
"BlockMatrix",
"(",
"java_block_matrix",
",",
"self",
".",
"rowsPerBlock",
",",
"self",
".",
"colsPerBlock",
")"
] | 618d6bff71073c8c93501ab7392c3cc579730f0b |
train | BlockMatrix.transpose | Transpose this BlockMatrix. Returns a new BlockMatrix
instance sharing the same underlying data. This is a lazy operation.
>>> blocks = sc.parallelize([((0, 0), Matrices.dense(3, 2, [1, 2, 3, 4, 5, 6])),
... ((1, 0), Matrices.dense(3, 2, [7, 8, 9, 10, 11, 12]))])
>>> mat = BlockMatrix(blocks, 3, 2)
>>> mat_transposed = mat.transpose()
>>> mat_transposed.toLocalMatrix()
DenseMatrix(2, 6, [1.0, 4.0, 2.0, 5.0, 3.0, 6.0, 7.0, 10.0, 8.0, 11.0, 9.0, 12.0], 0) | python/pyspark/mllib/linalg/distributed.py | def transpose(self):
"""
Transpose this BlockMatrix. Returns a new BlockMatrix
instance sharing the same underlying data. This is a lazy operation.
>>> blocks = sc.parallelize([((0, 0), Matrices.dense(3, 2, [1, 2, 3, 4, 5, 6])),
... ((1, 0), Matrices.dense(3, 2, [7, 8, 9, 10, 11, 12]))])
>>> mat = BlockMatrix(blocks, 3, 2)
>>> mat_transposed = mat.transpose()
>>> mat_transposed.toLocalMatrix()
DenseMatrix(2, 6, [1.0, 4.0, 2.0, 5.0, 3.0, 6.0, 7.0, 10.0, 8.0, 11.0, 9.0, 12.0], 0)
"""
java_transposed_matrix = self._java_matrix_wrapper.call("transpose")
return BlockMatrix(java_transposed_matrix, self.colsPerBlock, self.rowsPerBlock) | def transpose(self):
"""
Transpose this BlockMatrix. Returns a new BlockMatrix
instance sharing the same underlying data. This is a lazy operation.
>>> blocks = sc.parallelize([((0, 0), Matrices.dense(3, 2, [1, 2, 3, 4, 5, 6])),
... ((1, 0), Matrices.dense(3, 2, [7, 8, 9, 10, 11, 12]))])
>>> mat = BlockMatrix(blocks, 3, 2)
>>> mat_transposed = mat.transpose()
>>> mat_transposed.toLocalMatrix()
DenseMatrix(2, 6, [1.0, 4.0, 2.0, 5.0, 3.0, 6.0, 7.0, 10.0, 8.0, 11.0, 9.0, 12.0], 0)
"""
java_transposed_matrix = self._java_matrix_wrapper.call("transpose")
return BlockMatrix(java_transposed_matrix, self.colsPerBlock, self.rowsPerBlock) | [
"Transpose",
"this",
"BlockMatrix",
".",
"Returns",
"a",
"new",
"BlockMatrix",
"instance",
"sharing",
"the",
"same",
"underlying",
"data",
".",
"Is",
"a",
"lazy",
"operation",
"."
] | apache/spark | python | https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/mllib/linalg/distributed.py#L1290-L1304 | [
"def",
"transpose",
"(",
"self",
")",
":",
"java_transposed_matrix",
"=",
"self",
".",
"_java_matrix_wrapper",
".",
"call",
"(",
"\"transpose\"",
")",
"return",
"BlockMatrix",
"(",
"java_transposed_matrix",
",",
"self",
".",
"colsPerBlock",
",",
"self",
".",
"rowsPerBlock",
")"
] | 618d6bff71073c8c93501ab7392c3cc579730f0b |
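Because transpose above is lazy, a common pattern is to combine it with BlockMatrix.multiply (present on the same class in this version of Spark, though not among these records) to form a Gram matrix A' * A. A sketch under those assumptions:

from pyspark.mllib.linalg import Matrices
from pyspark.mllib.linalg.distributed import BlockMatrix

blocks = sc.parallelize([((0, 0), Matrices.dense(3, 2, [1, 2, 3, 4, 5, 6]))])
A = BlockMatrix(blocks, 3, 2)
gram = A.transpose().multiply(A)  # 2 x 2 Gram matrix A' * A
print(gram.toLocalMatrix())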
train | _vector_size | Returns the size of the vector.
>>> _vector_size([1., 2., 3.])
3
>>> _vector_size((1., 2., 3.))
3
>>> _vector_size(array.array('d', [1., 2., 3.]))
3
>>> _vector_size(np.zeros(3))
3
>>> _vector_size(np.zeros((3, 1)))
3
>>> _vector_size(np.zeros((1, 3)))
Traceback (most recent call last):
...
ValueError: Cannot treat an ndarray of shape (1, 3) as a vector | python/pyspark/mllib/linalg/__init__.py | def _vector_size(v):
"""
Returns the size of the vector.
>>> _vector_size([1., 2., 3.])
3
>>> _vector_size((1., 2., 3.))
3
>>> _vector_size(array.array('d', [1., 2., 3.]))
3
>>> _vector_size(np.zeros(3))
3
>>> _vector_size(np.zeros((3, 1)))
3
>>> _vector_size(np.zeros((1, 3)))
Traceback (most recent call last):
...
ValueError: Cannot treat an ndarray of shape (1, 3) as a vector
"""
if isinstance(v, Vector):
return len(v)
elif type(v) in (array.array, list, tuple, xrange):
return len(v)
elif type(v) == np.ndarray:
if v.ndim == 1 or (v.ndim == 2 and v.shape[1] == 1):
return len(v)
else:
raise ValueError("Cannot treat an ndarray of shape %s as a vector" % str(v.shape))
elif _have_scipy and scipy.sparse.issparse(v):
assert v.shape[1] == 1, "Expected column vector"
return v.shape[0]
else:
raise TypeError("Cannot treat type %s as a vector" % type(v)) | def _vector_size(v):
"""
Returns the size of the vector.
>>> _vector_size([1., 2., 3.])
3
>>> _vector_size((1., 2., 3.))
3
>>> _vector_size(array.array('d', [1., 2., 3.]))
3
>>> _vector_size(np.zeros(3))
3
>>> _vector_size(np.zeros((3, 1)))
3
>>> _vector_size(np.zeros((1, 3)))
Traceback (most recent call last):
...
ValueError: Cannot treat an ndarray of shape (1, 3) as a vector
"""
if isinstance(v, Vector):
return len(v)
elif type(v) in (array.array, list, tuple, xrange):
return len(v)
elif type(v) == np.ndarray:
if v.ndim == 1 or (v.ndim == 2 and v.shape[1] == 1):
return len(v)
else:
raise ValueError("Cannot treat an ndarray of shape %s as a vector" % str(v.shape))
elif _have_scipy and scipy.sparse.issparse(v):
assert v.shape[1] == 1, "Expected column vector"
return v.shape[0]
else:
raise TypeError("Cannot treat type %s as a vector" % type(v)) | [
"Returns",
"the",
"size",
"of",
"the",
"vector",
"."
] | apache/spark | python | https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/mllib/linalg/__init__.py#L86-L118 | [
"def",
"_vector_size",
"(",
"v",
")",
":",
"if",
"isinstance",
"(",
"v",
",",
"Vector",
")",
":",
"return",
"len",
"(",
"v",
")",
"elif",
"type",
"(",
"v",
")",
"in",
"(",
"array",
".",
"array",
",",
"list",
",",
"tuple",
",",
"xrange",
")",
":",
"return",
"len",
"(",
"v",
")",
"elif",
"type",
"(",
"v",
")",
"==",
"np",
".",
"ndarray",
":",
"if",
"v",
".",
"ndim",
"==",
"1",
"or",
"(",
"v",
".",
"ndim",
"==",
"2",
"and",
"v",
".",
"shape",
"[",
"1",
"]",
"==",
"1",
")",
":",
"return",
"len",
"(",
"v",
")",
"else",
":",
"raise",
"ValueError",
"(",
"\"Cannot treat an ndarray of shape %s as a vector\"",
"%",
"str",
"(",
"v",
".",
"shape",
")",
")",
"elif",
"_have_scipy",
"and",
"scipy",
".",
"sparse",
".",
"issparse",
"(",
"v",
")",
":",
"assert",
"v",
".",
"shape",
"[",
"1",
"]",
"==",
"1",
",",
"\"Expected column vector\"",
"return",
"v",
".",
"shape",
"[",
"0",
"]",
"else",
":",
"raise",
"TypeError",
"(",
"\"Cannot treat type %s as a vector\"",
"%",
"type",
"(",
"v",
")",
")"
] | 618d6bff71073c8c93501ab7392c3cc579730f0b |
train | DenseVector.parse | Parse string representation back into the DenseVector.
>>> DenseVector.parse(' [ 0.0,1.0,2.0, 3.0]')
DenseVector([0.0, 1.0, 2.0, 3.0]) | python/pyspark/mllib/linalg/__init__.py | def parse(s):
"""
Parse string representation back into the DenseVector.
>>> DenseVector.parse(' [ 0.0,1.0,2.0, 3.0]')
DenseVector([0.0, 1.0, 2.0, 3.0])
"""
start = s.find('[')
if start == -1:
raise ValueError("Array should start with '['.")
end = s.find(']')
if end == -1:
raise ValueError("Array should end with ']'.")
s = s[start + 1: end]
try:
values = [float(val) for val in s.split(',') if val]
except ValueError:
raise ValueError("Unable to parse values from %s" % s)
return DenseVector(values) | def parse(s):
"""
Parse string representation back into the DenseVector.
>>> DenseVector.parse(' [ 0.0,1.0,2.0, 3.0]')
DenseVector([0.0, 1.0, 2.0, 3.0])
"""
start = s.find('[')
if start == -1:
raise ValueError("Array should start with '['.")
end = s.find(']')
if end == -1:
raise ValueError("Array should end with ']'.")
s = s[start + 1: end]
try:
values = [float(val) for val in s.split(',') if val]
except ValueError:
raise ValueError("Unable to parse values from %s" % s)
return DenseVector(values) | [
"Parse",
"string",
"representation",
"back",
"into",
"the",
"DenseVector",
"."
] | apache/spark | python | https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/mllib/linalg/__init__.py#L297-L316 | [
"def",
"parse",
"(",
"s",
")",
":",
"start",
"=",
"s",
".",
"find",
"(",
"'['",
")",
"if",
"start",
"==",
"-",
"1",
":",
"raise",
"ValueError",
"(",
"\"Array should start with '['.\"",
")",
"end",
"=",
"s",
".",
"find",
"(",
"']'",
")",
"if",
"end",
"==",
"-",
"1",
":",
"raise",
"ValueError",
"(",
"\"Array should end with ']'.\"",
")",
"s",
"=",
"s",
"[",
"start",
"+",
"1",
":",
"end",
"]",
"try",
":",
"values",
"=",
"[",
"float",
"(",
"val",
")",
"for",
"val",
"in",
"s",
".",
"split",
"(",
"','",
")",
"if",
"val",
"]",
"except",
"ValueError",
":",
"raise",
"ValueError",
"(",
"\"Unable to parse values from %s\"",
"%",
"s",
")",
"return",
"DenseVector",
"(",
"values",
")"
] | 618d6bff71073c8c93501ab7392c3cc579730f0b |
train | DenseVector.dot | Compute the dot product of two Vectors. We support
(Numpy array, list, SparseVector, or SciPy sparse)
and a target NumPy array that is either 1- or 2-dimensional.
Equivalent to calling numpy.dot of the two vectors.
>>> dense = DenseVector(array.array('d', [1., 2.]))
>>> dense.dot(dense)
5.0
>>> dense.dot(SparseVector(2, [0, 1], [2., 1.]))
4.0
>>> dense.dot(range(1, 3))
5.0
>>> dense.dot(np.array(range(1, 3)))
5.0
>>> dense.dot([1.,])
Traceback (most recent call last):
...
AssertionError: dimension mismatch
>>> dense.dot(np.reshape([1., 2., 3., 4.], (2, 2), order='F'))
array([ 5., 11.])
>>> dense.dot(np.reshape([1., 2., 3.], (3, 1), order='F'))
Traceback (most recent call last):
...
AssertionError: dimension mismatch | python/pyspark/mllib/linalg/__init__.py | def dot(self, other):
"""
Compute the dot product of two Vectors. We support
(Numpy array, list, SparseVector, or SciPy sparse)
and a target NumPy array that is either 1- or 2-dimensional.
Equivalent to calling numpy.dot of the two vectors.
>>> dense = DenseVector(array.array('d', [1., 2.]))
>>> dense.dot(dense)
5.0
>>> dense.dot(SparseVector(2, [0, 1], [2., 1.]))
4.0
>>> dense.dot(range(1, 3))
5.0
>>> dense.dot(np.array(range(1, 3)))
5.0
>>> dense.dot([1.,])
Traceback (most recent call last):
...
AssertionError: dimension mismatch
>>> dense.dot(np.reshape([1., 2., 3., 4.], (2, 2), order='F'))
array([ 5., 11.])
>>> dense.dot(np.reshape([1., 2., 3.], (3, 1), order='F'))
Traceback (most recent call last):
...
AssertionError: dimension mismatch
"""
if type(other) == np.ndarray:
if other.ndim > 1:
assert len(self) == other.shape[0], "dimension mismatch"
return np.dot(self.array, other)
elif _have_scipy and scipy.sparse.issparse(other):
assert len(self) == other.shape[0], "dimension mismatch"
return other.transpose().dot(self.toArray())
else:
assert len(self) == _vector_size(other), "dimension mismatch"
if isinstance(other, SparseVector):
return other.dot(self)
elif isinstance(other, Vector):
return np.dot(self.toArray(), other.toArray())
else:
return np.dot(self.toArray(), other) | def dot(self, other):
"""
Compute the dot product of two Vectors. We support
(Numpy array, list, SparseVector, or SciPy sparse)
and a target NumPy array that is either 1- or 2-dimensional.
Equivalent to calling numpy.dot of the two vectors.
>>> dense = DenseVector(array.array('d', [1., 2.]))
>>> dense.dot(dense)
5.0
>>> dense.dot(SparseVector(2, [0, 1], [2., 1.]))
4.0
>>> dense.dot(range(1, 3))
5.0
>>> dense.dot(np.array(range(1, 3)))
5.0
>>> dense.dot([1.,])
Traceback (most recent call last):
...
AssertionError: dimension mismatch
>>> dense.dot(np.reshape([1., 2., 3., 4.], (2, 2), order='F'))
array([ 5., 11.])
>>> dense.dot(np.reshape([1., 2., 3.], (3, 1), order='F'))
Traceback (most recent call last):
...
AssertionError: dimension mismatch
"""
if type(other) == np.ndarray:
if other.ndim > 1:
assert len(self) == other.shape[0], "dimension mismatch"
return np.dot(self.array, other)
elif _have_scipy and scipy.sparse.issparse(other):
assert len(self) == other.shape[0], "dimension mismatch"
return other.transpose().dot(self.toArray())
else:
assert len(self) == _vector_size(other), "dimension mismatch"
if isinstance(other, SparseVector):
return other.dot(self)
elif isinstance(other, Vector):
return np.dot(self.toArray(), other.toArray())
else:
return np.dot(self.toArray(), other) | [
"Compute",
"the",
"dot",
"product",
"of",
"two",
"Vectors",
".",
"We",
"support",
"(",
"Numpy",
"array",
"list",
"SparseVector",
"or",
"SciPy",
"sparse",
")",
"and",
"a",
"target",
"NumPy",
"array",
"that",
"is",
"either",
"1",
"-",
"or",
"2",
"-",
"dimensional",
".",
"Equivalent",
"to",
"calling",
"numpy",
".",
"dot",
"of",
"the",
"two",
"vectors",
"."
] | apache/spark | python | https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/mllib/linalg/__init__.py#L339-L380 | [
"def",
"dot",
"(",
"self",
",",
"other",
")",
":",
"if",
"type",
"(",
"other",
")",
"==",
"np",
".",
"ndarray",
":",
"if",
"other",
".",
"ndim",
">",
"1",
":",
"assert",
"len",
"(",
"self",
")",
"==",
"other",
".",
"shape",
"[",
"0",
"]",
",",
"\"dimension mismatch\"",
"return",
"np",
".",
"dot",
"(",
"self",
".",
"array",
",",
"other",
")",
"elif",
"_have_scipy",
"and",
"scipy",
".",
"sparse",
".",
"issparse",
"(",
"other",
")",
":",
"assert",
"len",
"(",
"self",
")",
"==",
"other",
".",
"shape",
"[",
"0",
"]",
",",
"\"dimension mismatch\"",
"return",
"other",
".",
"transpose",
"(",
")",
".",
"dot",
"(",
"self",
".",
"toArray",
"(",
")",
")",
"else",
":",
"assert",
"len",
"(",
"self",
")",
"==",
"_vector_size",
"(",
"other",
")",
",",
"\"dimension mismatch\"",
"if",
"isinstance",
"(",
"other",
",",
"SparseVector",
")",
":",
"return",
"other",
".",
"dot",
"(",
"self",
")",
"elif",
"isinstance",
"(",
"other",
",",
"Vector",
")",
":",
"return",
"np",
".",
"dot",
"(",
"self",
".",
"toArray",
"(",
")",
",",
"other",
".",
"toArray",
"(",
")",
")",
"else",
":",
"return",
"np",
".",
"dot",
"(",
"self",
".",
"toArray",
"(",
")",
",",
"other",
")"
] | 618d6bff71073c8c93501ab7392c3cc579730f0b |
train | DenseVector.squared_distance | Squared distance of two Vectors.
>>> dense1 = DenseVector(array.array('d', [1., 2.]))
>>> dense1.squared_distance(dense1)
0.0
>>> dense2 = np.array([2., 1.])
>>> dense1.squared_distance(dense2)
2.0
>>> dense3 = [2., 1.]
>>> dense1.squared_distance(dense3)
2.0
>>> sparse1 = SparseVector(2, [0, 1], [2., 1.])
>>> dense1.squared_distance(sparse1)
2.0
>>> dense1.squared_distance([1.,])
Traceback (most recent call last):
...
AssertionError: dimension mismatch
>>> dense1.squared_distance(SparseVector(1, [0,], [1.,]))
Traceback (most recent call last):
...
AssertionError: dimension mismatch | python/pyspark/mllib/linalg/__init__.py | def squared_distance(self, other):
"""
Squared distance of two Vectors.
>>> dense1 = DenseVector(array.array('d', [1., 2.]))
>>> dense1.squared_distance(dense1)
0.0
>>> dense2 = np.array([2., 1.])
>>> dense1.squared_distance(dense2)
2.0
>>> dense3 = [2., 1.]
>>> dense1.squared_distance(dense3)
2.0
>>> sparse1 = SparseVector(2, [0, 1], [2., 1.])
>>> dense1.squared_distance(sparse1)
2.0
>>> dense1.squared_distance([1.,])
Traceback (most recent call last):
...
AssertionError: dimension mismatch
>>> dense1.squared_distance(SparseVector(1, [0,], [1.,]))
Traceback (most recent call last):
...
AssertionError: dimension mismatch
"""
assert len(self) == _vector_size(other), "dimension mismatch"
if isinstance(other, SparseVector):
return other.squared_distance(self)
elif _have_scipy and scipy.sparse.issparse(other):
return _convert_to_vector(other).squared_distance(self)
if isinstance(other, Vector):
other = other.toArray()
elif not isinstance(other, np.ndarray):
other = np.array(other)
diff = self.toArray() - other
return np.dot(diff, diff) | def squared_distance(self, other):
"""
Squared distance of two Vectors.
>>> dense1 = DenseVector(array.array('d', [1., 2.]))
>>> dense1.squared_distance(dense1)
0.0
>>> dense2 = np.array([2., 1.])
>>> dense1.squared_distance(dense2)
2.0
>>> dense3 = [2., 1.]
>>> dense1.squared_distance(dense3)
2.0
>>> sparse1 = SparseVector(2, [0, 1], [2., 1.])
>>> dense1.squared_distance(sparse1)
2.0
>>> dense1.squared_distance([1.,])
Traceback (most recent call last):
...
AssertionError: dimension mismatch
>>> dense1.squared_distance(SparseVector(1, [0,], [1.,]))
Traceback (most recent call last):
...
AssertionError: dimension mismatch
"""
assert len(self) == _vector_size(other), "dimension mismatch"
if isinstance(other, SparseVector):
return other.squared_distance(self)
elif _have_scipy and scipy.sparse.issparse(other):
return _convert_to_vector(other).squared_distance(self)
if isinstance(other, Vector):
other = other.toArray()
elif not isinstance(other, np.ndarray):
other = np.array(other)
diff = self.toArray() - other
return np.dot(diff, diff) | [
"Squared",
"distance",
"of",
"two",
"Vectors",
"."
] | apache/spark | python | https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/mllib/linalg/__init__.py#L382-L418 | [
"def",
"squared_distance",
"(",
"self",
",",
"other",
")",
":",
"assert",
"len",
"(",
"self",
")",
"==",
"_vector_size",
"(",
"other",
")",
",",
"\"dimension mismatch\"",
"if",
"isinstance",
"(",
"other",
",",
"SparseVector",
")",
":",
"return",
"other",
".",
"squared_distance",
"(",
"self",
")",
"elif",
"_have_scipy",
"and",
"scipy",
".",
"sparse",
".",
"issparse",
"(",
"other",
")",
":",
"return",
"_convert_to_vector",
"(",
"other",
")",
".",
"squared_distance",
"(",
"self",
")",
"if",
"isinstance",
"(",
"other",
",",
"Vector",
")",
":",
"other",
"=",
"other",
".",
"toArray",
"(",
")",
"elif",
"not",
"isinstance",
"(",
"other",
",",
"np",
".",
"ndarray",
")",
":",
"other",
"=",
"np",
".",
"array",
"(",
"other",
")",
"diff",
"=",
"self",
".",
"toArray",
"(",
")",
"-",
"other",
"return",
"np",
".",
"dot",
"(",
"diff",
",",
"diff",
")"
] | 618d6bff71073c8c93501ab7392c3cc579730f0b |
train | SparseVector.parse | Parse string representation back into the SparseVector.
>>> SparseVector.parse(' (4, [0,1 ],[ 4.0,5.0] )')
SparseVector(4, {0: 4.0, 1: 5.0}) | python/pyspark/mllib/linalg/__init__.py | def parse(s):
"""
Parse string representation back into the SparseVector.
>>> SparseVector.parse(' (4, [0,1 ],[ 4.0,5.0] )')
SparseVector(4, {0: 4.0, 1: 5.0})
"""
start = s.find('(')
if start == -1:
raise ValueError("Tuple should start with '('")
end = s.find(')')
if end == -1:
raise ValueError("Tuple should end with ')'")
s = s[start + 1: end].strip()
size = s[: s.find(',')]
try:
size = int(size)
except ValueError:
raise ValueError("Cannot parse size %s." % size)
ind_start = s.find('[')
if ind_start == -1:
raise ValueError("Indices array should start with '['.")
ind_end = s.find(']')
if ind_end == -1:
raise ValueError("Indices array should end with ']'")
new_s = s[ind_start + 1: ind_end]
ind_list = new_s.split(',')
try:
indices = [int(ind) for ind in ind_list if ind]
except ValueError:
raise ValueError("Unable to parse indices from %s." % new_s)
s = s[ind_end + 1:].strip()
val_start = s.find('[')
if val_start == -1:
raise ValueError("Values array should start with '['.")
val_end = s.find(']')
if val_end == -1:
raise ValueError("Values array should end with ']'.")
val_list = s[val_start + 1: val_end].split(',')
try:
values = [float(val) for val in val_list if val]
except ValueError:
raise ValueError("Unable to parse values from %s." % s)
return SparseVector(size, indices, values) | def parse(s):
"""
Parse string representation back into the SparseVector.
>>> SparseVector.parse(' (4, [0,1 ],[ 4.0,5.0] )')
SparseVector(4, {0: 4.0, 1: 5.0})
"""
start = s.find('(')
if start == -1:
raise ValueError("Tuple should start with '('")
end = s.find(')')
if end == -1:
raise ValueError("Tuple should end with ')'")
s = s[start + 1: end].strip()
size = s[: s.find(',')]
try:
size = int(size)
except ValueError:
raise ValueError("Cannot parse size %s." % size)
ind_start = s.find('[')
if ind_start == -1:
raise ValueError("Indices array should start with '['.")
ind_end = s.find(']')
if ind_end == -1:
raise ValueError("Indices array should end with ']'")
new_s = s[ind_start + 1: ind_end]
ind_list = new_s.split(',')
try:
indices = [int(ind) for ind in ind_list if ind]
except ValueError:
raise ValueError("Unable to parse indices from %s." % new_s)
s = s[ind_end + 1:].strip()
val_start = s.find('[')
if val_start == -1:
raise ValueError("Values array should start with '['.")
val_end = s.find(']')
if val_end == -1:
raise ValueError("Values array should end with ']'.")
val_list = s[val_start + 1: val_end].split(',')
try:
values = [float(val) for val in val_list if val]
except ValueError:
raise ValueError("Unable to parse values from %s." % s)
return SparseVector(size, indices, values) | [
"Parse",
"string",
"representation",
"back",
"into",
"the",
"SparseVector",
"."
] | apache/spark | python | https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/mllib/linalg/__init__.py#L589-L635 | [
"def",
"parse",
"(",
"s",
")",
":",
"start",
"=",
"s",
".",
"find",
"(",
"'('",
")",
"if",
"start",
"==",
"-",
"1",
":",
"raise",
"ValueError",
"(",
"\"Tuple should start with '('\"",
")",
"end",
"=",
"s",
".",
"find",
"(",
"')'",
")",
"if",
"end",
"==",
"-",
"1",
":",
"raise",
"ValueError",
"(",
"\"Tuple should end with ')'\"",
")",
"s",
"=",
"s",
"[",
"start",
"+",
"1",
":",
"end",
"]",
".",
"strip",
"(",
")",
"size",
"=",
"s",
"[",
":",
"s",
".",
"find",
"(",
"','",
")",
"]",
"try",
":",
"size",
"=",
"int",
"(",
"size",
")",
"except",
"ValueError",
":",
"raise",
"ValueError",
"(",
"\"Cannot parse size %s.\"",
"%",
"size",
")",
"ind_start",
"=",
"s",
".",
"find",
"(",
"'['",
")",
"if",
"ind_start",
"==",
"-",
"1",
":",
"raise",
"ValueError",
"(",
"\"Indices array should start with '['.\"",
")",
"ind_end",
"=",
"s",
".",
"find",
"(",
"']'",
")",
"if",
"ind_end",
"==",
"-",
"1",
":",
"raise",
"ValueError",
"(",
"\"Indices array should end with ']'\"",
")",
"new_s",
"=",
"s",
"[",
"ind_start",
"+",
"1",
":",
"ind_end",
"]",
"ind_list",
"=",
"new_s",
".",
"split",
"(",
"','",
")",
"try",
":",
"indices",
"=",
"[",
"int",
"(",
"ind",
")",
"for",
"ind",
"in",
"ind_list",
"if",
"ind",
"]",
"except",
"ValueError",
":",
"raise",
"ValueError",
"(",
"\"Unable to parse indices from %s.\"",
"%",
"new_s",
")",
"s",
"=",
"s",
"[",
"ind_end",
"+",
"1",
":",
"]",
".",
"strip",
"(",
")",
"val_start",
"=",
"s",
".",
"find",
"(",
"'['",
")",
"if",
"val_start",
"==",
"-",
"1",
":",
"raise",
"ValueError",
"(",
"\"Values array should start with '['.\"",
")",
"val_end",
"=",
"s",
".",
"find",
"(",
"']'",
")",
"if",
"val_end",
"==",
"-",
"1",
":",
"raise",
"ValueError",
"(",
"\"Values array should end with ']'.\"",
")",
"val_list",
"=",
"s",
"[",
"val_start",
"+",
"1",
":",
"val_end",
"]",
".",
"split",
"(",
"','",
")",
"try",
":",
"values",
"=",
"[",
"float",
"(",
"val",
")",
"for",
"val",
"in",
"val_list",
"if",
"val",
"]",
"except",
"ValueError",
":",
"raise",
"ValueError",
"(",
"\"Unable to parse values from %s.\"",
"%",
"s",
")",
"return",
"SparseVector",
"(",
"size",
",",
"indices",
",",
"values",
")"
] | 618d6bff71073c8c93501ab7392c3cc579730f0b |
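A minimal round-trip sketch, assuming pyspark is importable (these linalg classes are plain local Python objects and need no SparkContext): str() emits the same '(size,[indices],[values])' shape that parse consumes.

    from pyspark.mllib.linalg import SparseVector

    v = SparseVector(4, [1, 3], [3.0, 4.0])
    s = str(v)                          # '(4,[1,3],[3.0,4.0])'
    assert SparseVector.parse(s) == v   # parses back to an equal vector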
train | SparseVector.dot | Dot product with a SparseVector or 1- or 2-dimensional Numpy array.
>>> a = SparseVector(4, [1, 3], [3.0, 4.0])
>>> a.dot(a)
25.0
>>> a.dot(array.array('d', [1., 2., 3., 4.]))
22.0
>>> b = SparseVector(4, [2], [1.0])
>>> a.dot(b)
0.0
>>> a.dot(np.array([[1, 1], [2, 2], [3, 3], [4, 4]]))
array([ 22., 22.])
>>> a.dot([1., 2., 3.])
Traceback (most recent call last):
...
AssertionError: dimension mismatch
>>> a.dot(np.array([1., 2.]))
Traceback (most recent call last):
...
AssertionError: dimension mismatch
>>> a.dot(DenseVector([1., 2.]))
Traceback (most recent call last):
...
AssertionError: dimension mismatch
>>> a.dot(np.zeros((3, 2)))
Traceback (most recent call last):
...
AssertionError: dimension mismatch | python/pyspark/mllib/linalg/__init__.py | def dot(self, other):
"""
Dot product with a SparseVector or 1- or 2-dimensional Numpy array.
>>> a = SparseVector(4, [1, 3], [3.0, 4.0])
>>> a.dot(a)
25.0
>>> a.dot(array.array('d', [1., 2., 3., 4.]))
22.0
>>> b = SparseVector(4, [2], [1.0])
>>> a.dot(b)
0.0
>>> a.dot(np.array([[1, 1], [2, 2], [3, 3], [4, 4]]))
array([ 22., 22.])
>>> a.dot([1., 2., 3.])
Traceback (most recent call last):
...
AssertionError: dimension mismatch
>>> a.dot(np.array([1., 2.]))
Traceback (most recent call last):
...
AssertionError: dimension mismatch
>>> a.dot(DenseVector([1., 2.]))
Traceback (most recent call last):
...
AssertionError: dimension mismatch
>>> a.dot(np.zeros((3, 2)))
Traceback (most recent call last):
...
AssertionError: dimension mismatch
"""
if isinstance(other, np.ndarray):
if other.ndim not in [2, 1]:
raise ValueError("Cannot call dot with %d-dimensional array" % other.ndim)
assert len(self) == other.shape[0], "dimension mismatch"
return np.dot(self.values, other[self.indices])
assert len(self) == _vector_size(other), "dimension mismatch"
if isinstance(other, DenseVector):
return np.dot(other.array[self.indices], self.values)
elif isinstance(other, SparseVector):
# Find out common indices.
self_cmind = np.in1d(self.indices, other.indices, assume_unique=True)
self_values = self.values[self_cmind]
if self_values.size == 0:
return 0.0
else:
other_cmind = np.in1d(other.indices, self.indices, assume_unique=True)
return np.dot(self_values, other.values[other_cmind])
else:
return self.dot(_convert_to_vector(other)) | def dot(self, other):
"""
Dot product with a SparseVector or 1- or 2-dimensional Numpy array.
>>> a = SparseVector(4, [1, 3], [3.0, 4.0])
>>> a.dot(a)
25.0
>>> a.dot(array.array('d', [1., 2., 3., 4.]))
22.0
>>> b = SparseVector(4, [2], [1.0])
>>> a.dot(b)
0.0
>>> a.dot(np.array([[1, 1], [2, 2], [3, 3], [4, 4]]))
array([ 22., 22.])
>>> a.dot([1., 2., 3.])
Traceback (most recent call last):
...
AssertionError: dimension mismatch
>>> a.dot(np.array([1., 2.]))
Traceback (most recent call last):
...
AssertionError: dimension mismatch
>>> a.dot(DenseVector([1., 2.]))
Traceback (most recent call last):
...
AssertionError: dimension mismatch
>>> a.dot(np.zeros((3, 2)))
Traceback (most recent call last):
...
AssertionError: dimension mismatch
"""
if isinstance(other, np.ndarray):
if other.ndim not in [2, 1]:
raise ValueError("Cannot call dot with %d-dimensional array" % other.ndim)
assert len(self) == other.shape[0], "dimension mismatch"
return np.dot(self.values, other[self.indices])
assert len(self) == _vector_size(other), "dimension mismatch"
if isinstance(other, DenseVector):
return np.dot(other.array[self.indices], self.values)
elif isinstance(other, SparseVector):
# Find out common indices.
self_cmind = np.in1d(self.indices, other.indices, assume_unique=True)
self_values = self.values[self_cmind]
if self_values.size == 0:
return 0.0
else:
other_cmind = np.in1d(other.indices, self.indices, assume_unique=True)
return np.dot(self_values, other.values[other_cmind])
else:
return self.dot(_convert_to_vector(other)) | [
"Dot",
"product",
"with",
"a",
"SparseVector",
"or",
"1",
"-",
"or",
"2",
"-",
"dimensional",
"Numpy",
"array",
"."
] | apache/spark | python | https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/mllib/linalg/__init__.py#L637-L691 | [
"def",
"dot",
"(",
"self",
",",
"other",
")",
":",
"if",
"isinstance",
"(",
"other",
",",
"np",
".",
"ndarray",
")",
":",
"if",
"other",
".",
"ndim",
"not",
"in",
"[",
"2",
",",
"1",
"]",
":",
"raise",
"ValueError",
"(",
"\"Cannot call dot with %d-dimensional array\"",
"%",
"other",
".",
"ndim",
")",
"assert",
"len",
"(",
"self",
")",
"==",
"other",
".",
"shape",
"[",
"0",
"]",
",",
"\"dimension mismatch\"",
"return",
"np",
".",
"dot",
"(",
"self",
".",
"values",
",",
"other",
"[",
"self",
".",
"indices",
"]",
")",
"assert",
"len",
"(",
"self",
")",
"==",
"_vector_size",
"(",
"other",
")",
",",
"\"dimension mismatch\"",
"if",
"isinstance",
"(",
"other",
",",
"DenseVector",
")",
":",
"return",
"np",
".",
"dot",
"(",
"other",
".",
"array",
"[",
"self",
".",
"indices",
"]",
",",
"self",
".",
"values",
")",
"elif",
"isinstance",
"(",
"other",
",",
"SparseVector",
")",
":",
"# Find out common indices.",
"self_cmind",
"=",
"np",
".",
"in1d",
"(",
"self",
".",
"indices",
",",
"other",
".",
"indices",
",",
"assume_unique",
"=",
"True",
")",
"self_values",
"=",
"self",
".",
"values",
"[",
"self_cmind",
"]",
"if",
"self_values",
".",
"size",
"==",
"0",
":",
"return",
"0.0",
"else",
":",
"other_cmind",
"=",
"np",
".",
"in1d",
"(",
"other",
".",
"indices",
",",
"self",
".",
"indices",
",",
"assume_unique",
"=",
"True",
")",
"return",
"np",
".",
"dot",
"(",
"self_values",
",",
"other",
".",
"values",
"[",
"other_cmind",
"]",
")",
"else",
":",
"return",
"self",
".",
"dot",
"(",
"_convert_to_vector",
"(",
"other",
")",
")"
] | 618d6bff71073c8c93501ab7392c3cc579730f0b |
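The sparse-sparse branch of dot aligns the two sorted, unique index arrays with np.in1d before multiplying; a standalone NumPy sketch of that alignment, with hypothetical values:

    import numpy as np

    ind_a, val_a = np.array([1, 3]), np.array([3.0, 4.0])
    ind_b, val_b = np.array([0, 3]), np.array([2.0, 5.0])
    # Mask each value array down to the indices present in both vectors.
    common_a = np.in1d(ind_a, ind_b, assume_unique=True)
    common_b = np.in1d(ind_b, ind_a, assume_unique=True)
    print(np.dot(val_a[common_a], val_b[common_b]))  # 20.0: only index 3 overlaps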
train | SparseVector.squared_distance | Squared distance from a SparseVector or 1-dimensional NumPy array.
>>> a = SparseVector(4, [1, 3], [3.0, 4.0])
>>> a.squared_distance(a)
0.0
>>> a.squared_distance(array.array('d', [1., 2., 3., 4.]))
11.0
>>> a.squared_distance(np.array([1., 2., 3., 4.]))
11.0
>>> b = SparseVector(4, [2], [1.0])
>>> a.squared_distance(b)
26.0
>>> b.squared_distance(a)
26.0
>>> b.squared_distance([1., 2.])
Traceback (most recent call last):
...
AssertionError: dimension mismatch
>>> b.squared_distance(SparseVector(3, [1,], [1.0,]))
Traceback (most recent call last):
...
AssertionError: dimension mismatch | python/pyspark/mllib/linalg/__init__.py | def squared_distance(self, other):
"""
Squared distance from a SparseVector or 1-dimensional NumPy array.
>>> a = SparseVector(4, [1, 3], [3.0, 4.0])
>>> a.squared_distance(a)
0.0
>>> a.squared_distance(array.array('d', [1., 2., 3., 4.]))
11.0
>>> a.squared_distance(np.array([1., 2., 3., 4.]))
11.0
>>> b = SparseVector(4, [2], [1.0])
>>> a.squared_distance(b)
26.0
>>> b.squared_distance(a)
26.0
>>> b.squared_distance([1., 2.])
Traceback (most recent call last):
...
AssertionError: dimension mismatch
>>> b.squared_distance(SparseVector(3, [1,], [1.0,]))
Traceback (most recent call last):
...
AssertionError: dimension mismatch
"""
assert len(self) == _vector_size(other), "dimension mismatch"
if isinstance(other, np.ndarray) or isinstance(other, DenseVector):
if isinstance(other, np.ndarray) and other.ndim != 1:
raise Exception("Cannot call squared_distance with %d-dimensional array" %
other.ndim)
if isinstance(other, DenseVector):
other = other.array
sparse_ind = np.zeros(other.size, dtype=bool)
sparse_ind[self.indices] = True
dist = other[sparse_ind] - self.values
result = np.dot(dist, dist)
other_ind = other[~sparse_ind]
result += np.dot(other_ind, other_ind)
return result
elif isinstance(other, SparseVector):
result = 0.0
i, j = 0, 0
while i < len(self.indices) and j < len(other.indices):
if self.indices[i] == other.indices[j]:
diff = self.values[i] - other.values[j]
result += diff * diff
i += 1
j += 1
elif self.indices[i] < other.indices[j]:
result += self.values[i] * self.values[i]
i += 1
else:
result += other.values[j] * other.values[j]
j += 1
while i < len(self.indices):
result += self.values[i] * self.values[i]
i += 1
while j < len(other.indices):
result += other.values[j] * other.values[j]
j += 1
return result
else:
return self.squared_distance(_convert_to_vector(other)) | def squared_distance(self, other):
"""
Squared distance from a SparseVector or 1-dimensional NumPy array.
>>> a = SparseVector(4, [1, 3], [3.0, 4.0])
>>> a.squared_distance(a)
0.0
>>> a.squared_distance(array.array('d', [1., 2., 3., 4.]))
11.0
>>> a.squared_distance(np.array([1., 2., 3., 4.]))
11.0
>>> b = SparseVector(4, [2], [1.0])
>>> a.squared_distance(b)
26.0
>>> b.squared_distance(a)
26.0
>>> b.squared_distance([1., 2.])
Traceback (most recent call last):
...
AssertionError: dimension mismatch
>>> b.squared_distance(SparseVector(3, [1,], [1.0,]))
Traceback (most recent call last):
...
AssertionError: dimension mismatch
"""
assert len(self) == _vector_size(other), "dimension mismatch"
if isinstance(other, np.ndarray) or isinstance(other, DenseVector):
if isinstance(other, np.ndarray) and other.ndim != 1:
raise Exception("Cannot call squared_distance with %d-dimensional array" %
other.ndim)
if isinstance(other, DenseVector):
other = other.array
sparse_ind = np.zeros(other.size, dtype=bool)
sparse_ind[self.indices] = True
dist = other[sparse_ind] - self.values
result = np.dot(dist, dist)
other_ind = other[~sparse_ind]
result += np.dot(other_ind, other_ind)
return result
elif isinstance(other, SparseVector):
result = 0.0
i, j = 0, 0
while i < len(self.indices) and j < len(other.indices):
if self.indices[i] == other.indices[j]:
diff = self.values[i] - other.values[j]
result += diff * diff
i += 1
j += 1
elif self.indices[i] < other.indices[j]:
result += self.values[i] * self.values[i]
i += 1
else:
result += other.values[j] * other.values[j]
j += 1
while i < len(self.indices):
result += self.values[i] * self.values[i]
i += 1
while j < len(other.indices):
result += other.values[j] * other.values[j]
j += 1
return result
else:
return self.squared_distance(_convert_to_vector(other)) | [
"Squared",
"distance",
"from",
"a",
"SparseVector",
"or",
"1",
"-",
"dimensional",
"NumPy",
"array",
"."
] | apache/spark | python | https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/mllib/linalg/__init__.py#L693-L758 | [
"def",
"squared_distance",
"(",
"self",
",",
"other",
")",
":",
"assert",
"len",
"(",
"self",
")",
"==",
"_vector_size",
"(",
"other",
")",
",",
"\"dimension mismatch\"",
"if",
"isinstance",
"(",
"other",
",",
"np",
".",
"ndarray",
")",
"or",
"isinstance",
"(",
"other",
",",
"DenseVector",
")",
":",
"if",
"isinstance",
"(",
"other",
",",
"np",
".",
"ndarray",
")",
"and",
"other",
".",
"ndim",
"!=",
"1",
":",
"raise",
"Exception",
"(",
"\"Cannot call squared_distance with %d-dimensional array\"",
"%",
"other",
".",
"ndim",
")",
"if",
"isinstance",
"(",
"other",
",",
"DenseVector",
")",
":",
"other",
"=",
"other",
".",
"array",
"sparse_ind",
"=",
"np",
".",
"zeros",
"(",
"other",
".",
"size",
",",
"dtype",
"=",
"bool",
")",
"sparse_ind",
"[",
"self",
".",
"indices",
"]",
"=",
"True",
"dist",
"=",
"other",
"[",
"sparse_ind",
"]",
"-",
"self",
".",
"values",
"result",
"=",
"np",
".",
"dot",
"(",
"dist",
",",
"dist",
")",
"other_ind",
"=",
"other",
"[",
"~",
"sparse_ind",
"]",
"result",
"+=",
"np",
".",
"dot",
"(",
"other_ind",
",",
"other_ind",
")",
"return",
"result",
"elif",
"isinstance",
"(",
"other",
",",
"SparseVector",
")",
":",
"result",
"=",
"0.0",
"i",
",",
"j",
"=",
"0",
",",
"0",
"while",
"i",
"<",
"len",
"(",
"self",
".",
"indices",
")",
"and",
"j",
"<",
"len",
"(",
"other",
".",
"indices",
")",
":",
"if",
"self",
".",
"indices",
"[",
"i",
"]",
"==",
"other",
".",
"indices",
"[",
"j",
"]",
":",
"diff",
"=",
"self",
".",
"values",
"[",
"i",
"]",
"-",
"other",
".",
"values",
"[",
"j",
"]",
"result",
"+=",
"diff",
"*",
"diff",
"i",
"+=",
"1",
"j",
"+=",
"1",
"elif",
"self",
".",
"indices",
"[",
"i",
"]",
"<",
"other",
".",
"indices",
"[",
"j",
"]",
":",
"result",
"+=",
"self",
".",
"values",
"[",
"i",
"]",
"*",
"self",
".",
"values",
"[",
"i",
"]",
"i",
"+=",
"1",
"else",
":",
"result",
"+=",
"other",
".",
"values",
"[",
"j",
"]",
"*",
"other",
".",
"values",
"[",
"j",
"]",
"j",
"+=",
"1",
"while",
"i",
"<",
"len",
"(",
"self",
".",
"indices",
")",
":",
"result",
"+=",
"self",
".",
"values",
"[",
"i",
"]",
"*",
"self",
".",
"values",
"[",
"i",
"]",
"i",
"+=",
"1",
"while",
"j",
"<",
"len",
"(",
"other",
".",
"indices",
")",
":",
"result",
"+=",
"other",
".",
"values",
"[",
"j",
"]",
"*",
"other",
".",
"values",
"[",
"j",
"]",
"j",
"+=",
"1",
"return",
"result",
"else",
":",
"return",
"self",
".",
"squared_distance",
"(",
"_convert_to_vector",
"(",
"other",
")",
")"
] | 618d6bff71073c8c93501ab7392c3cc579730f0b |
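The sparse-sparse branch is a two-pointer merge over the sorted index arrays: shared indices contribute a squared difference, unmatched indices contribute their squared value. A quick cross-check against the equivalent dense computation, assuming pyspark is importable:

    import numpy as np
    from pyspark.mllib.linalg import SparseVector

    a = SparseVector(4, [1, 3], [3.0, 4.0])
    b = SparseVector(4, [2], [1.0])
    diff = a.toArray() - b.toArray()
    assert a.squared_distance(b) == np.dot(diff, diff)  # 26.0 either way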
train | SparseVector.toArray | Returns a copy of this SparseVector as a 1-dimensional NumPy array. | python/pyspark/mllib/linalg/__init__.py | def toArray(self):
"""
Returns a copy of this SparseVector as a 1-dimensional NumPy array.
"""
arr = np.zeros((self.size,), dtype=np.float64)
arr[self.indices] = self.values
return arr | def toArray(self):
"""
Returns a copy of this SparseVector as a 1-dimensional NumPy array.
"""
arr = np.zeros((self.size,), dtype=np.float64)
arr[self.indices] = self.values
return arr | [
"Returns",
"a",
"copy",
"of",
"this",
"SparseVector",
"as",
"a",
"1",
"-",
"dimensional",
"NumPy",
"array",
"."
] | apache/spark | python | https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/mllib/linalg/__init__.py#L760-L766 | [
"def",
"toArray",
"(",
"self",
")",
":",
"arr",
"=",
"np",
".",
"zeros",
"(",
"(",
"self",
".",
"size",
",",
")",
",",
"dtype",
"=",
"np",
".",
"float64",
")",
"arr",
"[",
"self",
".",
"indices",
"]",
"=",
"self",
".",
"values",
"return",
"arr"
] | 618d6bff71073c8c93501ab7392c3cc579730f0b |
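Densification here is a single fancy-indexed assignment into a zero vector; a short usage sketch:

    from pyspark.mllib.linalg import SparseVector

    sv = SparseVector(5, [0, 3], [1.0, 7.0])
    print(sv.toArray())  # [1. 0. 0. 7. 0.], a fresh float64 copy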
train | SparseVector.asML | Convert this vector to the new mllib-local representation.
This does NOT copy the data; it copies references.
:return: :py:class:`pyspark.ml.linalg.SparseVector`
.. versionadded:: 2.0.0 | python/pyspark/mllib/linalg/__init__.py | def asML(self):
"""
Convert this vector to the new mllib-local representation.
This does NOT copy the data; it copies references.
:return: :py:class:`pyspark.ml.linalg.SparseVector`
.. versionadded:: 2.0.0
"""
return newlinalg.SparseVector(self.size, self.indices, self.values) | def asML(self):
"""
Convert this vector to the new mllib-local representation.
This does NOT copy the data; it copies references.
:return: :py:class:`pyspark.ml.linalg.SparseVector`
.. versionadded:: 2.0.0
"""
return newlinalg.SparseVector(self.size, self.indices, self.values) | [
"Convert",
"this",
"vector",
"to",
"the",
"new",
"mllib",
"-",
"local",
"representation",
".",
"This",
"does",
"NOT",
"copy",
"the",
"data",
";",
"it",
"copies",
"references",
"."
] | apache/spark | python | https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/mllib/linalg/__init__.py#L768-L777 | [
"def",
"asML",
"(",
"self",
")",
":",
"return",
"newlinalg",
".",
"SparseVector",
"(",
"self",
".",
"size",
",",
"self",
".",
"indices",
",",
"self",
".",
"values",
")"
] | 618d6bff71073c8c93501ab7392c3cc579730f0b |
train | Vectors.dense | Create a dense vector of 64-bit floats from a Python list or numbers.
>>> Vectors.dense([1, 2, 3])
DenseVector([1.0, 2.0, 3.0])
>>> Vectors.dense(1.0, 2.0)
DenseVector([1.0, 2.0]) | python/pyspark/mllib/linalg/__init__.py | def dense(*elements):
"""
Create a dense vector of 64-bit floats from a Python list or numbers.
>>> Vectors.dense([1, 2, 3])
DenseVector([1.0, 2.0, 3.0])
>>> Vectors.dense(1.0, 2.0)
DenseVector([1.0, 2.0])
"""
if len(elements) == 1 and not isinstance(elements[0], (float, int, long)):
# it's a list, numpy.array or other iterable object.
elements = elements[0]
return DenseVector(elements) | def dense(*elements):
"""
Create a dense vector of 64-bit floats from a Python list or numbers.
>>> Vectors.dense([1, 2, 3])
DenseVector([1.0, 2.0, 3.0])
>>> Vectors.dense(1.0, 2.0)
DenseVector([1.0, 2.0])
"""
if len(elements) == 1 and not isinstance(elements[0], (float, int, long)):
# it's a list, numpy.array or other iterable object.
elements = elements[0]
return DenseVector(elements) | [
"Create",
"a",
"dense",
"vector",
"of",
"64",
"-",
"bit",
"floats",
"from",
"a",
"Python",
"list",
"or",
"numbers",
"."
] | apache/spark | python | https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/mllib/linalg/__init__.py#L874-L886 | [
"def",
"dense",
"(",
"*",
"elements",
")",
":",
"if",
"len",
"(",
"elements",
")",
"==",
"1",
"and",
"not",
"isinstance",
"(",
"elements",
"[",
"0",
"]",
",",
"(",
"float",
",",
"int",
",",
"long",
")",
")",
":",
"# it's list, numpy.array or other iterable object.",
"elements",
"=",
"elements",
"[",
"0",
"]",
"return",
"DenseVector",
"(",
"elements",
")"
] | 618d6bff71073c8c93501ab7392c3cc579730f0b |
train | Vectors.fromML | Convert a vector from the new mllib-local representation.
This does NOT copy the data; it copies references.
:param vec: a :py:class:`pyspark.ml.linalg.Vector`
:return: a :py:class:`pyspark.mllib.linalg.Vector`
.. versionadded:: 2.0.0 | python/pyspark/mllib/linalg/__init__.py | def fromML(vec):
"""
Convert a vector from the new mllib-local representation.
This does NOT copy the data; it copies references.
:param vec: a :py:class:`pyspark.ml.linalg.Vector`
:return: a :py:class:`pyspark.mllib.linalg.Vector`
.. versionadded:: 2.0.0
"""
if isinstance(vec, newlinalg.DenseVector):
return DenseVector(vec.array)
elif isinstance(vec, newlinalg.SparseVector):
return SparseVector(vec.size, vec.indices, vec.values)
else:
raise TypeError("Unsupported vector type %s" % type(vec)) | def fromML(vec):
"""
Convert a vector from the new mllib-local representation.
This does NOT copy the data; it copies references.
:param vec: a :py:class:`pyspark.ml.linalg.Vector`
:return: a :py:class:`pyspark.mllib.linalg.Vector`
.. versionadded:: 2.0.0
"""
if isinstance(vec, newlinalg.DenseVector):
return DenseVector(vec.array)
elif isinstance(vec, newlinalg.SparseVector):
return SparseVector(vec.size, vec.indices, vec.values)
else:
raise TypeError("Unsupported vector type %s" % type(vec)) | [
"Convert",
"a",
"vector",
"from",
"the",
"new",
"mllib",
"-",
"local",
"representation",
".",
"This",
"does",
"NOT",
"copy",
"the",
"data",
";",
"it",
"copies",
"references",
"."
] | apache/spark | python | https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/mllib/linalg/__init__.py#L889-L904 | [
"def",
"fromML",
"(",
"vec",
")",
":",
"if",
"isinstance",
"(",
"vec",
",",
"newlinalg",
".",
"DenseVector",
")",
":",
"return",
"DenseVector",
"(",
"vec",
".",
"array",
")",
"elif",
"isinstance",
"(",
"vec",
",",
"newlinalg",
".",
"SparseVector",
")",
":",
"return",
"SparseVector",
"(",
"vec",
".",
"size",
",",
"vec",
".",
"indices",
",",
"vec",
".",
"values",
")",
"else",
":",
"raise",
"TypeError",
"(",
"\"Unsupported vector type %s\"",
"%",
"type",
"(",
"vec",
")",
")"
] | 618d6bff71073c8c93501ab7392c3cc579730f0b |
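fromML and asML are reference-sharing conversions between the pyspark.ml.linalg and pyspark.mllib.linalg types; a round-trip sketch, assuming Spark 2.0+:

    from pyspark.ml.linalg import Vectors as MLVectors
    from pyspark.mllib.linalg import Vectors

    ml_vec = MLVectors.sparse(3, [1], [2.0])
    old_vec = Vectors.fromML(ml_vec)  # mllib SparseVector; arrays are shared
    assert old_vec.asML() == ml_vec   # converts back without copying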
train | Vectors.squared_distance | Squared distance between two vectors.
v1 and v2 can be of type SparseVector, DenseVector, np.ndarray
or array.array.
>>> a = Vectors.sparse(4, [(0, 1), (3, 4)])
>>> b = Vectors.dense([2, 5, 4, 1])
>>> a.squared_distance(b)
51.0 | python/pyspark/mllib/linalg/__init__.py | def squared_distance(v1, v2):
"""
Squared distance between two vectors.
v1 and v2 can be of type SparseVector, DenseVector, np.ndarray
or array.array.
>>> a = Vectors.sparse(4, [(0, 1), (3, 4)])
>>> b = Vectors.dense([2, 5, 4, 1])
>>> a.squared_distance(b)
51.0
"""
v1, v2 = _convert_to_vector(v1), _convert_to_vector(v2)
return v1.squared_distance(v2) | def squared_distance(v1, v2):
"""
Squared distance between two vectors.
v1 and v2 can be of type SparseVector, DenseVector, np.ndarray
or array.array.
>>> a = Vectors.sparse(4, [(0, 1), (3, 4)])
>>> b = Vectors.dense([2, 5, 4, 1])
>>> a.squared_distance(b)
51.0
"""
v1, v2 = _convert_to_vector(v1), _convert_to_vector(v2)
return v1.squared_distance(v2) | [
"Squared",
"distance",
"between",
"two",
"vectors",
".",
"a",
"and",
"b",
"can",
"be",
"of",
"type",
"SparseVector",
"DenseVector",
"np",
".",
"ndarray",
"or",
"array",
".",
"array",
"."
] | apache/spark | python | https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/mllib/linalg/__init__.py#L920-L932 | [
"def",
"squared_distance",
"(",
"v1",
",",
"v2",
")",
":",
"v1",
",",
"v2",
"=",
"_convert_to_vector",
"(",
"v1",
")",
",",
"_convert_to_vector",
"(",
"v2",
")",
"return",
"v1",
".",
"squared_distance",
"(",
"v2",
")"
] | 618d6bff71073c8c93501ab7392c3cc579730f0b |
train | Vectors.parse | Parse a string representation back into the Vector.
>>> Vectors.parse('[2,1,2 ]')
DenseVector([2.0, 1.0, 2.0])
>>> Vectors.parse(' ( 100, [0], [2])')
SparseVector(100, {0: 2.0}) | python/pyspark/mllib/linalg/__init__.py | def parse(s):
"""Parse a string representation back into the Vector.
>>> Vectors.parse('[2,1,2 ]')
DenseVector([2.0, 1.0, 2.0])
>>> Vectors.parse(' ( 100, [0], [2])')
SparseVector(100, {0: 2.0})
"""
if s.find('(') == -1 and s.find('[') != -1:
return DenseVector.parse(s)
elif s.find('(') != -1:
return SparseVector.parse(s)
else:
raise ValueError(
"Cannot find tokens '[' or '(' from the input string.") | def parse(s):
"""Parse a string representation back into the Vector.
>>> Vectors.parse('[2,1,2 ]')
DenseVector([2.0, 1.0, 2.0])
>>> Vectors.parse(' ( 100, [0], [2])')
SparseVector(100, {0: 2.0})
"""
if s.find('(') == -1 and s.find('[') != -1:
return DenseVector.parse(s)
elif s.find('(') != -1:
return SparseVector.parse(s)
else:
raise ValueError(
"Cannot find tokens '[' or '(' from the input string.") | [
"Parse",
"a",
"string",
"representation",
"back",
"into",
"the",
"Vector",
"."
] | apache/spark | python | https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/mllib/linalg/__init__.py#L942-L956 | [
"def",
"parse",
"(",
"s",
")",
":",
"if",
"s",
".",
"find",
"(",
"'('",
")",
"==",
"-",
"1",
"and",
"s",
".",
"find",
"(",
"'['",
")",
"!=",
"-",
"1",
":",
"return",
"DenseVector",
".",
"parse",
"(",
"s",
")",
"elif",
"s",
".",
"find",
"(",
"'('",
")",
"!=",
"-",
"1",
":",
"return",
"SparseVector",
".",
"parse",
"(",
"s",
")",
"else",
":",
"raise",
"ValueError",
"(",
"\"Cannot find tokens '[' or '(' from the input string.\"",
")"
] | 618d6bff71073c8c93501ab7392c3cc579730f0b |
train | Vectors._equals | Check equality between sparse/dense vectors,
v1_indices and v2_indices are assumed to be strictly increasing. | python/pyspark/mllib/linalg/__init__.py | def _equals(v1_indices, v1_values, v2_indices, v2_values):
"""
Check equality between sparse/dense vectors,
v1_indices and v2_indices are assumed to be strictly increasing.
"""
v1_size = len(v1_values)
v2_size = len(v2_values)
k1 = 0
k2 = 0
all_equal = True
while all_equal:
while k1 < v1_size and v1_values[k1] == 0:
k1 += 1
while k2 < v2_size and v2_values[k2] == 0:
k2 += 1
if k1 >= v1_size or k2 >= v2_size:
return k1 >= v1_size and k2 >= v2_size
all_equal = v1_indices[k1] == v2_indices[k2] and v1_values[k1] == v2_values[k2]
k1 += 1
k2 += 1
return all_equal | def _equals(v1_indices, v1_values, v2_indices, v2_values):
"""
Check equality between sparse/dense vectors,
v1_indices and v2_indices are assumed to be strictly increasing.
"""
v1_size = len(v1_values)
v2_size = len(v2_values)
k1 = 0
k2 = 0
all_equal = True
while all_equal:
while k1 < v1_size and v1_values[k1] == 0:
k1 += 1
while k2 < v2_size and v2_values[k2] == 0:
k2 += 1
if k1 >= v1_size or k2 >= v2_size:
return k1 >= v1_size and k2 >= v2_size
all_equal = v1_indices[k1] == v2_indices[k2] and v1_values[k1] == v2_values[k2]
k1 += 1
k2 += 1
return all_equal | [
"Check",
"equality",
"between",
"sparse",
"/",
"dense",
"vectors",
"v1_indices",
"and",
"v2_indices",
"assume",
"to",
"be",
"strictly",
"increasing",
"."
] | apache/spark | python | https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/mllib/linalg/__init__.py#L963-L985 | [
"def",
"_equals",
"(",
"v1_indices",
",",
"v1_values",
",",
"v2_indices",
",",
"v2_values",
")",
":",
"v1_size",
"=",
"len",
"(",
"v1_values",
")",
"v2_size",
"=",
"len",
"(",
"v2_values",
")",
"k1",
"=",
"0",
"k2",
"=",
"0",
"all_equal",
"=",
"True",
"while",
"all_equal",
":",
"while",
"k1",
"<",
"v1_size",
"and",
"v1_values",
"[",
"k1",
"]",
"==",
"0",
":",
"k1",
"+=",
"1",
"while",
"k2",
"<",
"v2_size",
"and",
"v2_values",
"[",
"k2",
"]",
"==",
"0",
":",
"k2",
"+=",
"1",
"if",
"k1",
">=",
"v1_size",
"or",
"k2",
">=",
"v2_size",
":",
"return",
"k1",
">=",
"v1_size",
"and",
"k2",
">=",
"v2_size",
"all_equal",
"=",
"v1_indices",
"[",
"k1",
"]",
"==",
"v2_indices",
"[",
"k2",
"]",
"and",
"v1_values",
"[",
"k1",
"]",
"==",
"v2_values",
"[",
"k2",
"]",
"k1",
"+=",
"1",
"k2",
"+=",
"1",
"return",
"all_equal"
] | 618d6bff71073c8c93501ab7392c3cc579730f0b |
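Because _equals skips explicitly stored zeros on both sides, a sparse vector that stores a zero still compares equal to its dense counterpart; for example:

    from pyspark.mllib.linalg import Vectors

    sparse = Vectors.sparse(3, [0, 1], [1.0, 0.0])  # index 1 stores an explicit zero
    dense = Vectors.dense([1.0, 0.0, 0.0])
    assert sparse == dense  # __eq__ routes through _equals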
train | Matrix._convert_to_array | Convert Matrix attributes which are array-like or buffer to array. | python/pyspark/mllib/linalg/__init__.py | def _convert_to_array(array_like, dtype):
"""
Convert Matrix attributes which are array-like or buffer to array.
"""
if isinstance(array_like, bytes):
return np.frombuffer(array_like, dtype=dtype)
return np.asarray(array_like, dtype=dtype) | def _convert_to_array(array_like, dtype):
"""
Convert Matrix attributes which are array-like or buffer to array.
"""
if isinstance(array_like, bytes):
return np.frombuffer(array_like, dtype=dtype)
return np.asarray(array_like, dtype=dtype) | [
"Convert",
"Matrix",
"attributes",
"which",
"are",
"array",
"-",
"like",
"or",
"buffer",
"to",
"array",
"."
] | apache/spark | python | https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/mllib/linalg/__init__.py#L1014-L1020 | [
"def",
"_convert_to_array",
"(",
"array_like",
",",
"dtype",
")",
":",
"if",
"isinstance",
"(",
"array_like",
",",
"bytes",
")",
":",
"return",
"np",
".",
"frombuffer",
"(",
"array_like",
",",
"dtype",
"=",
"dtype",
")",
"return",
"np",
".",
"asarray",
"(",
"array_like",
",",
"dtype",
"=",
"dtype",
")"
] | 618d6bff71073c8c93501ab7392c3cc579730f0b |
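The bytes branch covers deserialized matrices whose attributes arrive as raw value buffers; np.frombuffer reinterprets such a buffer without copying. A standalone NumPy sketch:

    import numpy as np

    values = np.array([1.0, 2.0, 3.0])
    buf = values.tobytes()                      # the buffer form the branch handles
    arr = np.frombuffer(buf, dtype=np.float64)
    assert (arr == values).all()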
train | DenseMatrix.toArray | Return a numpy.ndarray
>>> m = DenseMatrix(2, 2, range(4))
>>> m.toArray()
array([[ 0., 2.],
[ 1., 3.]]) | python/pyspark/mllib/linalg/__init__.py | def toArray(self):
"""
Return a numpy.ndarray
>>> m = DenseMatrix(2, 2, range(4))
>>> m.toArray()
array([[ 0., 2.],
[ 1., 3.]])
"""
if self.isTransposed:
return np.asfortranarray(
self.values.reshape((self.numRows, self.numCols)))
else:
return self.values.reshape((self.numRows, self.numCols), order='F') | def toArray(self):
"""
Return a numpy.ndarray
>>> m = DenseMatrix(2, 2, range(4))
>>> m.toArray()
array([[ 0., 2.],
[ 1., 3.]])
"""
if self.isTransposed:
return np.asfortranarray(
self.values.reshape((self.numRows, self.numCols)))
else:
return self.values.reshape((self.numRows, self.numCols), order='F') | [
"Return",
"an",
"numpy",
".",
"ndarray"
] | apache/spark | python | https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/mllib/linalg/__init__.py#L1082-L1095 | [
"def",
"toArray",
"(",
"self",
")",
":",
"if",
"self",
".",
"isTransposed",
":",
"return",
"np",
".",
"asfortranarray",
"(",
"self",
".",
"values",
".",
"reshape",
"(",
"(",
"self",
".",
"numRows",
",",
"self",
".",
"numCols",
")",
")",
")",
"else",
":",
"return",
"self",
".",
"values",
".",
"reshape",
"(",
"(",
"self",
".",
"numRows",
",",
"self",
".",
"numCols",
")",
",",
"order",
"=",
"'F'",
")"
] | 618d6bff71073c8c93501ab7392c3cc579730f0b |
train | DenseMatrix.toSparse | Convert to SparseMatrix | python/pyspark/mllib/linalg/__init__.py | def toSparse(self):
"""Convert to SparseMatrix"""
if self.isTransposed:
values = np.ravel(self.toArray(), order='F')
else:
values = self.values
indices = np.nonzero(values)[0]
colCounts = np.bincount(indices // self.numRows)
colPtrs = np.cumsum(np.hstack(
(0, colCounts, np.zeros(self.numCols - colCounts.size))))
values = values[indices]
rowIndices = indices % self.numRows
return SparseMatrix(self.numRows, self.numCols, colPtrs, rowIndices, values) | def toSparse(self):
"""Convert to SparseMatrix"""
if self.isTransposed:
values = np.ravel(self.toArray(), order='F')
else:
values = self.values
indices = np.nonzero(values)[0]
colCounts = np.bincount(indices // self.numRows)
colPtrs = np.cumsum(np.hstack(
(0, colCounts, np.zeros(self.numCols - colCounts.size))))
values = values[indices]
rowIndices = indices % self.numRows
return SparseMatrix(self.numRows, self.numCols, colPtrs, rowIndices, values) | [
"Convert",
"to",
"SparseMatrix"
] | apache/spark | python | https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/mllib/linalg/__init__.py#L1097-L1110 | [
"def",
"toSparse",
"(",
"self",
")",
":",
"if",
"self",
".",
"isTransposed",
":",
"values",
"=",
"np",
".",
"ravel",
"(",
"self",
".",
"toArray",
"(",
")",
",",
"order",
"=",
"'F'",
")",
"else",
":",
"values",
"=",
"self",
".",
"values",
"indices",
"=",
"np",
".",
"nonzero",
"(",
"values",
")",
"[",
"0",
"]",
"colCounts",
"=",
"np",
".",
"bincount",
"(",
"indices",
"//",
"self",
".",
"numRows",
")",
"colPtrs",
"=",
"np",
".",
"cumsum",
"(",
"np",
".",
"hstack",
"(",
"(",
"0",
",",
"colCounts",
",",
"np",
".",
"zeros",
"(",
"self",
".",
"numCols",
"-",
"colCounts",
".",
"size",
")",
")",
")",
")",
"values",
"=",
"values",
"[",
"indices",
"]",
"rowIndices",
"=",
"indices",
"%",
"self",
".",
"numRows",
"return",
"SparseMatrix",
"(",
"self",
".",
"numRows",
",",
"self",
".",
"numCols",
",",
"colPtrs",
",",
"rowIndices",
",",
"values",
")"
] | 618d6bff71073c8c93501ab7392c3cc579730f0b |
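toSparse derives the CSC pieces straight from the column-major value array: the flat nonzero positions give values, integer division by numRows buckets them into per-column counts (cumsummed into colPtrs), and the remainder gives rowIndices. A worked sketch on a 3x2 matrix with one nonzero per column:

    from pyspark.mllib.linalg import DenseMatrix

    dm = DenseMatrix(3, 2, [0.0, 5.0, 0.0, 0.0, 0.0, 7.0])  # column-major values
    sm = dm.toSparse()
    print(sm.colPtrs)     # [0 1 2]: one stored entry per column
    print(sm.rowIndices)  # [1 2]
    print(sm.values)      # [5. 7.]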
train | DenseMatrix.asML | Convert this matrix to the new mllib-local representation.
This does NOT copy the data; it copies references.
:return: :py:class:`pyspark.ml.linalg.DenseMatrix`
.. versionadded:: 2.0.0 | python/pyspark/mllib/linalg/__init__.py | def asML(self):
"""
Convert this matrix to the new mllib-local representation.
This does NOT copy the data; it copies references.
:return: :py:class:`pyspark.ml.linalg.DenseMatrix`
.. versionadded:: 2.0.0
"""
return newlinalg.DenseMatrix(self.numRows, self.numCols, self.values, self.isTransposed) | def asML(self):
"""
Convert this matrix to the new mllib-local representation.
This does NOT copy the data; it copies references.
:return: :py:class:`pyspark.ml.linalg.DenseMatrix`
.. versionadded:: 2.0.0
"""
return newlinalg.DenseMatrix(self.numRows, self.numCols, self.values, self.isTransposed) | [
"Convert",
"this",
"matrix",
"to",
"the",
"new",
"mllib",
"-",
"local",
"representation",
".",
"This",
"does",
"NOT",
"copy",
"the",
"data",
";",
"it",
"copies",
"references",
"."
] | apache/spark | python | https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/mllib/linalg/__init__.py#L1112-L1121 | [
"def",
"asML",
"(",
"self",
")",
":",
"return",
"newlinalg",
".",
"DenseMatrix",
"(",
"self",
".",
"numRows",
",",
"self",
".",
"numCols",
",",
"self",
".",
"values",
",",
"self",
".",
"isTransposed",
")"
] | 618d6bff71073c8c93501ab7392c3cc579730f0b |
train | SparseMatrix.toArray | Return a numpy.ndarray | python/pyspark/mllib/linalg/__init__.py | def toArray(self):
"""
Return a numpy.ndarray
"""
A = np.zeros((self.numRows, self.numCols), dtype=np.float64, order='F')
for k in xrange(self.colPtrs.size - 1):
startptr = self.colPtrs[k]
endptr = self.colPtrs[k + 1]
if self.isTransposed:
A[k, self.rowIndices[startptr:endptr]] = self.values[startptr:endptr]
else:
A[self.rowIndices[startptr:endptr], k] = self.values[startptr:endptr]
return A | def toArray(self):
"""
Return a numpy.ndarray
"""
A = np.zeros((self.numRows, self.numCols), dtype=np.float64, order='F')
for k in xrange(self.colPtrs.size - 1):
startptr = self.colPtrs[k]
endptr = self.colPtrs[k + 1]
if self.isTransposed:
A[k, self.rowIndices[startptr:endptr]] = self.values[startptr:endptr]
else:
A[self.rowIndices[startptr:endptr], k] = self.values[startptr:endptr]
return A | [
"Return",
"an",
"numpy",
".",
"ndarray"
] | apache/spark | python | https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/mllib/linalg/__init__.py#L1277-L1289 | [
"def",
"toArray",
"(",
"self",
")",
":",
"A",
"=",
"np",
".",
"zeros",
"(",
"(",
"self",
".",
"numRows",
",",
"self",
".",
"numCols",
")",
",",
"dtype",
"=",
"np",
".",
"float64",
",",
"order",
"=",
"'F'",
")",
"for",
"k",
"in",
"xrange",
"(",
"self",
".",
"colPtrs",
".",
"size",
"-",
"1",
")",
":",
"startptr",
"=",
"self",
".",
"colPtrs",
"[",
"k",
"]",
"endptr",
"=",
"self",
".",
"colPtrs",
"[",
"k",
"+",
"1",
"]",
"if",
"self",
".",
"isTransposed",
":",
"A",
"[",
"k",
",",
"self",
".",
"rowIndices",
"[",
"startptr",
":",
"endptr",
"]",
"]",
"=",
"self",
".",
"values",
"[",
"startptr",
":",
"endptr",
"]",
"else",
":",
"A",
"[",
"self",
".",
"rowIndices",
"[",
"startptr",
":",
"endptr",
"]",
",",
"k",
"]",
"=",
"self",
".",
"values",
"[",
"startptr",
":",
"endptr",
"]",
"return",
"A"
] | 618d6bff71073c8c93501ab7392c3cc579730f0b |
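The loop walks colPtrs column by column and scatters each column's slice of values into the dense array at its rowIndices; a small CSC example with hypothetical entries:

    from pyspark.mllib.linalg import SparseMatrix

    # colPtrs [0, 2, 3]: column 0 owns entries 0..1, column 1 owns entry 2.
    sm = SparseMatrix(2, 2, [0, 2, 3], [0, 1, 1], [9.0, 6.0, 8.0])
    print(sm.toArray())
    # [[9. 0.]
    #  [6. 8.]]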
train | SparseMatrix.asML | Convert this matrix to the new mllib-local representation.
This does NOT copy the data; it copies references.
:return: :py:class:`pyspark.ml.linalg.SparseMatrix`
.. versionadded:: 2.0.0 | python/pyspark/mllib/linalg/__init__.py | def asML(self):
"""
Convert this matrix to the new mllib-local representation.
This does NOT copy the data; it copies references.
:return: :py:class:`pyspark.ml.linalg.SparseMatrix`
.. versionadded:: 2.0.0
"""
return newlinalg.SparseMatrix(self.numRows, self.numCols, self.colPtrs, self.rowIndices,
self.values, self.isTransposed) | def asML(self):
"""
Convert this matrix to the new mllib-local representation.
This does NOT copy the data; it copies references.
:return: :py:class:`pyspark.ml.linalg.SparseMatrix`
.. versionadded:: 2.0.0
"""
return newlinalg.SparseMatrix(self.numRows, self.numCols, self.colPtrs, self.rowIndices,
self.values, self.isTransposed) | [
"Convert",
"this",
"matrix",
"to",
"the",
"new",
"mllib",
"-",
"local",
"representation",
".",
"This",
"does",
"NOT",
"copy",
"the",
"data",
";",
"it",
"copies",
"references",
"."
] | apache/spark | python | https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/mllib/linalg/__init__.py#L1295-L1305 | [
"def",
"asML",
"(",
"self",
")",
":",
"return",
"newlinalg",
".",
"SparseMatrix",
"(",
"self",
".",
"numRows",
",",
"self",
".",
"numCols",
",",
"self",
".",
"colPtrs",
",",
"self",
".",
"rowIndices",
",",
"self",
".",
"values",
",",
"self",
".",
"isTransposed",
")"
] | 618d6bff71073c8c93501ab7392c3cc579730f0b |
train | Matrices.sparse | Create a SparseMatrix | python/pyspark/mllib/linalg/__init__.py | def sparse(numRows, numCols, colPtrs, rowIndices, values):
"""
Create a SparseMatrix
"""
return SparseMatrix(numRows, numCols, colPtrs, rowIndices, values) | def sparse(numRows, numCols, colPtrs, rowIndices, values):
"""
Create a SparseMatrix
"""
return SparseMatrix(numRows, numCols, colPtrs, rowIndices, values) | [
"Create",
"a",
"SparseMatrix"
] | apache/spark | python | https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/mllib/linalg/__init__.py#L1321-L1325 | [
"def",
"sparse",
"(",
"numRows",
",",
"numCols",
",",
"colPtrs",
",",
"rowIndices",
",",
"values",
")",
":",
"return",
"SparseMatrix",
"(",
"numRows",
",",
"numCols",
",",
"colPtrs",
",",
"rowIndices",
",",
"values",
")"
] | 618d6bff71073c8c93501ab7392c3cc579730f0b |
train | Matrices.fromML | Convert a matrix from the new mllib-local representation.
This does NOT copy the data; it copies references.
:param mat: a :py:class:`pyspark.ml.linalg.Matrix`
:return: a :py:class:`pyspark.mllib.linalg.Matrix`
.. versionadded:: 2.0.0 | python/pyspark/mllib/linalg/__init__.py | def fromML(mat):
"""
Convert a matrix from the new mllib-local representation.
This does NOT copy the data; it copies references.
:param mat: a :py:class:`pyspark.ml.linalg.Matrix`
:return: a :py:class:`pyspark.mllib.linalg.Matrix`
.. versionadded:: 2.0.0
"""
if isinstance(mat, newlinalg.DenseMatrix):
return DenseMatrix(mat.numRows, mat.numCols, mat.values, mat.isTransposed)
elif isinstance(mat, newlinalg.SparseMatrix):
return SparseMatrix(mat.numRows, mat.numCols, mat.colPtrs, mat.rowIndices,
mat.values, mat.isTransposed)
else:
raise TypeError("Unsupported matrix type %s" % type(mat)) | def fromML(mat):
"""
Convert a matrix from the new mllib-local representation.
This does NOT copy the data; it copies references.
:param mat: a :py:class:`pyspark.ml.linalg.Matrix`
:return: a :py:class:`pyspark.mllib.linalg.Matrix`
.. versionadded:: 2.0.0
"""
if isinstance(mat, newlinalg.DenseMatrix):
return DenseMatrix(mat.numRows, mat.numCols, mat.values, mat.isTransposed)
elif isinstance(mat, newlinalg.SparseMatrix):
return SparseMatrix(mat.numRows, mat.numCols, mat.colPtrs, mat.rowIndices,
mat.values, mat.isTransposed)
else:
raise TypeError("Unsupported matrix type %s" % type(mat)) | [
"Convert",
"a",
"matrix",
"from",
"the",
"new",
"mllib",
"-",
"local",
"representation",
".",
"This",
"does",
"NOT",
"copy",
"the",
"data",
";",
"it",
"copies",
"references",
"."
] | apache/spark | python | https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/mllib/linalg/__init__.py#L1328-L1344 | [
"def",
"fromML",
"(",
"mat",
")",
":",
"if",
"isinstance",
"(",
"mat",
",",
"newlinalg",
".",
"DenseMatrix",
")",
":",
"return",
"DenseMatrix",
"(",
"mat",
".",
"numRows",
",",
"mat",
".",
"numCols",
",",
"mat",
".",
"values",
",",
"mat",
".",
"isTransposed",
")",
"elif",
"isinstance",
"(",
"mat",
",",
"newlinalg",
".",
"SparseMatrix",
")",
":",
"return",
"SparseMatrix",
"(",
"mat",
".",
"numRows",
",",
"mat",
".",
"numCols",
",",
"mat",
".",
"colPtrs",
",",
"mat",
".",
"rowIndices",
",",
"mat",
".",
"values",
",",
"mat",
".",
"isTransposed",
")",
"else",
":",
"raise",
"TypeError",
"(",
"\"Unsupported matrix type %s\"",
"%",
"type",
"(",
"mat",
")",
")"
] | 618d6bff71073c8c93501ab7392c3cc579730f0b |
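As with vectors, the matrix conversions share the backing arrays rather than copying; a round-trip sketch between the two namespaces:

    from pyspark.ml.linalg import Matrices as MLMatrices
    from pyspark.mllib.linalg import Matrices

    ml_mat = MLMatrices.dense(2, 2, [1.0, 2.0, 3.0, 4.0])
    old_mat = Matrices.fromML(ml_mat)
    assert (old_mat.toArray() == ml_mat.toArray()).all()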
train | LSHModel.approxNearestNeighbors | Given a large dataset and an item, approximately find at most k items which have the
closest distance to the item. If the :py:attr:`outputCol` is missing, the method will
transform the data; if the :py:attr:`outputCol` exists, it will use that. This allows
caching of the transformed data when necessary.
.. note:: This method is experimental and will likely change behavior in the next release.
:param dataset: The dataset to search for nearest neighbors of the key.
:param key: Feature vector representing the item to search for.
:param numNearestNeighbors: The maximum number of nearest neighbors.
:param distCol: Output column for storing the distance between each result row and the key.
Use "distCol" as default value if it's not specified.
:return: A dataset containing at most k items closest to the key. A column "distCol" is
added to show the distance between each row and the key. | python/pyspark/ml/feature.py | def approxNearestNeighbors(self, dataset, key, numNearestNeighbors, distCol="distCol"):
"""
Given a large dataset and an item, approximately find at most k items which have the
closest distance to the item. If the :py:attr:`outputCol` is missing, the method will
transform the data; if the :py:attr:`outputCol` exists, it will use that. This allows
caching of the transformed data when necessary.
.. note:: This method is experimental and will likely change behavior in the next release.
:param dataset: The dataset to search for nearest neighbors of the key.
:param key: Feature vector representing the item to search for.
:param numNearestNeighbors: The maximum number of nearest neighbors.
:param distCol: Output column for storing the distance between each result row and the key.
Use "distCol" as default value if it's not specified.
:return: A dataset containing at most k items closest to the key. A column "distCol" is
added to show the distance between each row and the key.
"""
return self._call_java("approxNearestNeighbors", dataset, key, numNearestNeighbors,
distCol) | def approxNearestNeighbors(self, dataset, key, numNearestNeighbors, distCol="distCol"):
"""
Given a large dataset and an item, approximately find at most k items which have the
closest distance to the item. If the :py:attr:`outputCol` is missing, the method will
transform the data; if the :py:attr:`outputCol` exists, it will use that. This allows
caching of the transformed data when necessary.
.. note:: This method is experimental and will likely change behavior in the next release.
:param dataset: The dataset to search for nearest neighbors of the key.
:param key: Feature vector representing the item to search for.
:param numNearestNeighbors: The maximum number of nearest neighbors.
:param distCol: Output column for storing the distance between each result row and the key.
Use "distCol" as default value if it's not specified.
:return: A dataset containing at most k items closest to the key. A column "distCol" is
added to show the distance between each row and the key.
"""
return self._call_java("approxNearestNeighbors", dataset, key, numNearestNeighbors,
distCol) | [
"Given",
"a",
"large",
"dataset",
"and",
"an",
"item",
"approximately",
"find",
"at",
"most",
"k",
"items",
"which",
"have",
"the",
"closest",
"distance",
"to",
"the",
"item",
".",
"If",
"the",
":",
"py",
":",
"attr",
":",
"outputCol",
"is",
"missing",
"the",
"method",
"will",
"transform",
"the",
"data",
";",
"if",
"the",
":",
"py",
":",
"attr",
":",
"outputCol",
"exists",
"it",
"will",
"use",
"that",
".",
"This",
"allows",
"caching",
"of",
"the",
"transformed",
"data",
"when",
"necessary",
"."
] | apache/spark | python | https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/ml/feature.py#L162-L180 | [
"def",
"approxNearestNeighbors",
"(",
"self",
",",
"dataset",
",",
"key",
",",
"numNearestNeighbors",
",",
"distCol",
"=",
"\"distCol\"",
")",
":",
"return",
"self",
".",
"_call_java",
"(",
"\"approxNearestNeighbors\"",
",",
"dataset",
",",
"key",
",",
"numNearestNeighbors",
",",
"distCol",
")"
] | 618d6bff71073c8c93501ab7392c3cc579730f0b |
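A hedged usage sketch for approxNearestNeighbors. The concrete LSH class (BucketedRandomProjectionLSH), the column names, and the data are illustrative assumptions; `spark` is assumed to be an active SparkSession.

from pyspark.ml.feature import BucketedRandomProjectionLSH
from pyspark.ml.linalg import Vectors

df = spark.createDataFrame(
    [(0, Vectors.dense([1.0, 1.0])), (1, Vectors.dense([-1.0, -1.0]))],
    ["id", "features"])
lsh = BucketedRandomProjectionLSH(inputCol="features", outputCol="hashes",
                                  bucketLength=1.0)
model = lsh.fit(df)
# The two rows nearest to the key; the result gains a "distCol" column.
key = Vectors.dense([1.0, 0.5])
model.approxNearestNeighbors(df, key, 2).show()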
train | LSHModel.approxSimilarityJoin | Join two datasets to approximately find all pairs of rows whose distance is smaller than
the threshold. If the :py:attr:`outputCol` is missing, the method will transform the data;
if the :py:attr:`outputCol` exists, it will use that. This allows caching of the
transformed data when necessary.
:param datasetA: One of the datasets to join.
:param datasetB: Another dataset to join.
:param threshold: The threshold for the distance of row pairs.
:param distCol: Output column for storing the distance between each pair of rows. Use
"distCol" as default value if it's not specified.
:return: A joined dataset containing pairs of rows. The original rows are in columns
"datasetA" and "datasetB", and a column "distCol" is added to show the distance
between each pair. | python/pyspark/ml/feature.py | def approxSimilarityJoin(self, datasetA, datasetB, threshold, distCol="distCol"):
"""
Join two datasets to approximately find all pairs of rows whose distance is smaller than
the threshold. If the :py:attr:`outputCol` is missing, the method will transform the data;
if the :py:attr:`outputCol` exists, it will use that. This allows caching of the
transformed data when necessary.
:param datasetA: One of the datasets to join.
:param datasetB: Another dataset to join.
:param threshold: The threshold for the distance of row pairs.
:param distCol: Output column for storing the distance between each pair of rows. Use
"distCol" as default value if it's not specified.
:return: A joined dataset containing pairs of rows. The original rows are in columns
"datasetA" and "datasetB", and a column "distCol" is added to show the distance
between each pair.
"""
threshold = TypeConverters.toFloat(threshold)
return self._call_java("approxSimilarityJoin", datasetA, datasetB, threshold, distCol) | def approxSimilarityJoin(self, datasetA, datasetB, threshold, distCol="distCol"):
"""
Join two datasets to approximately find all pairs of rows whose distance is smaller than
the threshold. If the :py:attr:`outputCol` is missing, the method will transform the data;
if the :py:attr:`outputCol` exists, it will use that. This allows caching of the
transformed data when necessary.
:param datasetA: One of the datasets to join.
:param datasetB: Another dataset to join.
:param threshold: The threshold for the distance of row pairs.
:param distCol: Output column for storing the distance between each pair of rows. Use
"distCol" as default value if it's not specified.
:return: A joined dataset containing pairs of rows. The original rows are in columns
"datasetA" and "datasetB", and a column "distCol" is added to show the distance
between each pair.
"""
threshold = TypeConverters.toFloat(threshold)
return self._call_java("approxSimilarityJoin", datasetA, datasetB, threshold, distCol) | [
"Join",
"two",
"datasets",
"to",
"approximately",
"find",
"all",
"pairs",
"of",
"rows",
"whose",
"distance",
"are",
"smaller",
"than",
"the",
"threshold",
".",
"If",
"the",
":",
"py",
":",
"attr",
":",
"outputCol",
"is",
"missing",
"the",
"method",
"will",
"transform",
"the",
"data",
";",
"if",
"the",
":",
"py",
":",
"attr",
":",
"outputCol",
"exists",
"it",
"will",
"use",
"that",
".",
"This",
"allows",
"caching",
"of",
"the",
"transformed",
"data",
"when",
"necessary",
"."
] | apache/spark | python | https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/ml/feature.py#L182-L199 | [
"def",
"approxSimilarityJoin",
"(",
"self",
",",
"datasetA",
",",
"datasetB",
",",
"threshold",
",",
"distCol",
"=",
"\"distCol\"",
")",
":",
"threshold",
"=",
"TypeConverters",
".",
"toFloat",
"(",
"threshold",
")",
"return",
"self",
".",
"_call_java",
"(",
"\"approxSimilarityJoin\"",
",",
"datasetA",
",",
"datasetB",
",",
"threshold",
",",
"distCol",
")"
] | 618d6bff71073c8c93501ab7392c3cc579730f0b |
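Continuing the hedged LSH sketch above (same fitted model and df), a similarity self-join keeps only the pairs within the given Euclidean distance:

pairs = model.approxSimilarityJoin(df, df, 1.5, distCol="EuclideanDistance")
# The original rows come back as the struct columns "datasetA" and "datasetB".
pairs.select("datasetA.id", "datasetB.id", "EuclideanDistance").show()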
train | StringIndexerModel.from_labels | Construct the model directly from an array of label strings;
requires an active SparkContext. | python/pyspark/ml/feature.py | def from_labels(cls, labels, inputCol, outputCol=None, handleInvalid=None):
"""
Construct the model directly from an array of label strings;
requires an active SparkContext.
"""
sc = SparkContext._active_spark_context
java_class = sc._gateway.jvm.java.lang.String
jlabels = StringIndexerModel._new_java_array(labels, java_class)
model = StringIndexerModel._create_from_java_class(
"org.apache.spark.ml.feature.StringIndexerModel", jlabels)
model.setInputCol(inputCol)
if outputCol is not None:
model.setOutputCol(outputCol)
if handleInvalid is not None:
model.setHandleInvalid(handleInvalid)
return model | def from_labels(cls, labels, inputCol, outputCol=None, handleInvalid=None):
"""
Construct the model directly from an array of label strings;
requires an active SparkContext.
"""
sc = SparkContext._active_spark_context
java_class = sc._gateway.jvm.java.lang.String
jlabels = StringIndexerModel._new_java_array(labels, java_class)
model = StringIndexerModel._create_from_java_class(
"org.apache.spark.ml.feature.StringIndexerModel", jlabels)
model.setInputCol(inputCol)
if outputCol is not None:
model.setOutputCol(outputCol)
if handleInvalid is not None:
model.setHandleInvalid(handleInvalid)
return model | [
"Construct",
"the",
"model",
"directly",
"from",
"an",
"array",
"of",
"label",
"strings",
"requires",
"an",
"active",
"SparkContext",
"."
] | apache/spark | python | https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/ml/feature.py#L2503-L2518 | [
"def",
"from_labels",
"(",
"cls",
",",
"labels",
",",
"inputCol",
",",
"outputCol",
"=",
"None",
",",
"handleInvalid",
"=",
"None",
")",
":",
"sc",
"=",
"SparkContext",
".",
"_active_spark_context",
"java_class",
"=",
"sc",
".",
"_gateway",
".",
"jvm",
".",
"java",
".",
"lang",
".",
"String",
"jlabels",
"=",
"StringIndexerModel",
".",
"_new_java_array",
"(",
"labels",
",",
"java_class",
")",
"model",
"=",
"StringIndexerModel",
".",
"_create_from_java_class",
"(",
"\"org.apache.spark.ml.feature.StringIndexerModel\"",
",",
"jlabels",
")",
"model",
".",
"setInputCol",
"(",
"inputCol",
")",
"if",
"outputCol",
"is",
"not",
"None",
":",
"model",
".",
"setOutputCol",
"(",
"outputCol",
")",
"if",
"handleInvalid",
"is",
"not",
"None",
":",
"model",
".",
"setHandleInvalid",
"(",
"handleInvalid",
")",
"return",
"model"
] | 618d6bff71073c8c93501ab7392c3cc579730f0b |
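A hedged sketch of from_labels; the column names are illustrative, and an active SparkContext is assumed, as the docstring requires:

from pyspark.ml.feature import StringIndexerModel

# The label order fixes the mapping: "a" -> 0.0, "b" -> 1.0, "c" -> 2.0.
model = StringIndexerModel.from_labels(
    ["a", "b", "c"], inputCol="category",
    outputCol="categoryIndex", handleInvalid="error")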
train | StringIndexerModel.from_arrays_of_labels | Construct the model directly from an array of arrays of label strings;
requires an active SparkContext. | python/pyspark/ml/feature.py | def from_arrays_of_labels(cls, arrayOfLabels, inputCols, outputCols=None,
handleInvalid=None):
"""
Construct the model directly from an array of arrays of label strings;
requires an active SparkContext.
"""
sc = SparkContext._active_spark_context
java_class = sc._gateway.jvm.java.lang.String
jlabels = StringIndexerModel._new_java_array(arrayOfLabels, java_class)
model = StringIndexerModel._create_from_java_class(
"org.apache.spark.ml.feature.StringIndexerModel", jlabels)
model.setInputCols(inputCols)
if outputCols is not None:
model.setOutputCols(outputCols)
if handleInvalid is not None:
model.setHandleInvalid(handleInvalid)
return model | def from_arrays_of_labels(cls, arrayOfLabels, inputCols, outputCols=None,
handleInvalid=None):
"""
Construct the model directly from an array of arrays of label strings;
requires an active SparkContext.
"""
sc = SparkContext._active_spark_context
java_class = sc._gateway.jvm.java.lang.String
jlabels = StringIndexerModel._new_java_array(arrayOfLabels, java_class)
model = StringIndexerModel._create_from_java_class(
"org.apache.spark.ml.feature.StringIndexerModel", jlabels)
model.setInputCols(inputCols)
if outputCols is not None:
model.setOutputCols(outputCols)
if handleInvalid is not None:
model.setHandleInvalid(handleInvalid)
return model | [
"Construct",
"the",
"model",
"directly",
"from",
"an",
"array",
"of",
"array",
"of",
"label",
"strings",
"requires",
"an",
"active",
"SparkContext",
"."
] | apache/spark | python | https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/ml/feature.py#L2522-L2538 | [
"def",
"from_arrays_of_labels",
"(",
"cls",
",",
"arrayOfLabels",
",",
"inputCols",
",",
"outputCols",
"=",
"None",
",",
"handleInvalid",
"=",
"None",
")",
":",
"sc",
"=",
"SparkContext",
".",
"_active_spark_context",
"java_class",
"=",
"sc",
".",
"_gateway",
".",
"jvm",
".",
"java",
".",
"lang",
".",
"String",
"jlabels",
"=",
"StringIndexerModel",
".",
"_new_java_array",
"(",
"arrayOfLabels",
",",
"java_class",
")",
"model",
"=",
"StringIndexerModel",
".",
"_create_from_java_class",
"(",
"\"org.apache.spark.ml.feature.StringIndexerModel\"",
",",
"jlabels",
")",
"model",
".",
"setInputCols",
"(",
"inputCols",
")",
"if",
"outputCols",
"is",
"not",
"None",
":",
"model",
".",
"setOutputCols",
"(",
"outputCols",
")",
"if",
"handleInvalid",
"is",
"not",
"None",
":",
"model",
".",
"setHandleInvalid",
"(",
"handleInvalid",
")",
"return",
"model"
] | 618d6bff71073c8c93501ab7392c3cc579730f0b |
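The multi-column variant follows the same pattern (hedged sketch, illustrative names): one label array per input column.

model = StringIndexerModel.from_arrays_of_labels(
    [["a", "b"], ["x", "y", "z"]],  # labels for cat1 and cat2 respectively
    inputCols=["cat1", "cat2"],
    outputCols=["cat1Index", "cat2Index"])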
train | StopWordsRemover.setParams | setParams(self, inputCol=None, outputCol=None, stopWords=None, caseSensitive=False, \
locale=None)
Sets params for this StopWordsRemover. | python/pyspark/ml/feature.py | def setParams(self, inputCol=None, outputCol=None, stopWords=None, caseSensitive=False,
locale=None):
"""
setParams(self, inputCol=None, outputCol=None, stopWords=None, caseSensitive=False, \
locale=None)
Sets params for this StopWordsRemover.
"""
kwargs = self._input_kwargs
return self._set(**kwargs) | def setParams(self, inputCol=None, outputCol=None, stopWords=None, caseSensitive=False,
locale=None):
"""
setParams(self, inputCol=None, outputCol=None, stopWords=None, caseSensitive=False, \
locale=None)
Sets params for this StopWordsRemover.
"""
kwargs = self._input_kwargs
return self._set(**kwargs) | [
"setParams",
"(",
"self",
"inputCol",
"=",
"None",
"outputCol",
"=",
"None",
"stopWords",
"=",
"None",
"caseSensitive",
"=",
"false",
"\\",
"locale",
"=",
"None",
")",
"Sets",
"params",
"for",
"this",
"StopWordRemover",
"."
] | apache/spark | python | https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/ml/feature.py#L2654-L2662 | [
"def",
"setParams",
"(",
"self",
",",
"inputCol",
"=",
"None",
",",
"outputCol",
"=",
"None",
",",
"stopWords",
"=",
"None",
",",
"caseSensitive",
"=",
"False",
",",
"locale",
"=",
"None",
")",
":",
"kwargs",
"=",
"self",
".",
"_input_kwargs",
"return",
"self",
".",
"_set",
"(",
"*",
"*",
"kwargs",
")"
] | 618d6bff71073c8c93501ab7392c3cc579730f0b |
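Since setParams just forwards its captured kwargs to _set, it is typically used to reconfigure an existing instance. A hedged sketch, with illustrative column names and an active SparkContext assumed:

from pyspark.ml.feature import StopWordsRemover

remover = StopWordsRemover()
remover.setParams(inputCol="raw", outputCol="filtered", caseSensitive=False)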
train | StopWordsRemover.loadDefaultStopWords | Loads the default stop words for the given language.
Supported languages: danish, dutch, english, finnish, french, german, hungarian,
italian, norwegian, portuguese, russian, spanish, swedish, turkish | python/pyspark/ml/feature.py | def loadDefaultStopWords(language):
"""
Loads the default stop words for the given language.
Supported languages: danish, dutch, english, finnish, french, german, hungarian,
italian, norwegian, portuguese, russian, spanish, swedish, turkish
"""
stopWordsObj = _jvm().org.apache.spark.ml.feature.StopWordsRemover
return list(stopWordsObj.loadDefaultStopWords(language)) | def loadDefaultStopWords(language):
"""
Loads the default stop words for the given language.
Supported languages: danish, dutch, english, finnish, french, german, hungarian,
italian, norwegian, portuguese, russian, spanish, swedish, turkish
"""
stopWordsObj = _jvm().org.apache.spark.ml.feature.StopWordsRemover
return list(stopWordsObj.loadDefaultStopWords(language)) | [
"Loads",
"the",
"default",
"stop",
"words",
"for",
"the",
"given",
"language",
".",
"Supported",
"languages",
":",
"danish",
"dutch",
"english",
"finnish",
"french",
"german",
"hungarian",
"italian",
"norwegian",
"portuguese",
"russian",
"spanish",
"swedish",
"turkish"
] | apache/spark | python | https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/ml/feature.py#L2708-L2715 | [
"def",
"loadDefaultStopWords",
"(",
"language",
")",
":",
"stopWordsObj",
"=",
"_jvm",
"(",
")",
".",
"org",
".",
"apache",
".",
"spark",
".",
"ml",
".",
"feature",
".",
"StopWordsRemover",
"return",
"list",
"(",
"stopWordsObj",
".",
"loadDefaultStopWords",
"(",
"language",
")",
")"
] | 618d6bff71073c8c93501ab7392c3cc579730f0b |
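A hedged usage sketch: loadDefaultStopWords reaches into the JVM, so an active SparkContext is assumed; the extra stop word is illustrative.

from pyspark.ml.feature import StopWordsRemover

english_stops = StopWordsRemover.loadDefaultStopWords("english")
# Extend the defaults with domain-specific terms before filtering.
remover = StopWordsRemover(inputCol="tokens", outputCol="clean",
                           stopWords=english_stops + ["spark"])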
train | Word2VecModel.findSynonyms | Find "num" words closest in similarity to "word".
word can be a string or vector representation.
Returns a dataframe with two fields word and similarity (which
gives the cosine similarity). | python/pyspark/ml/feature.py | def findSynonyms(self, word, num):
"""
Find "num" number of words closest in similarity to "word".
word can be a string or vector representation.
Returns a dataframe with two fields word and similarity (which
gives the cosine similarity).
"""
if not isinstance(word, basestring):
word = _convert_to_vector(word)
return self._call_java("findSynonyms", word, num) | def findSynonyms(self, word, num):
"""
Find "num" number of words closest in similarity to "word".
word can be a string or vector representation.
Returns a dataframe with two fields word and similarity (which
gives the cosine similarity).
"""
if not isinstance(word, basestring):
word = _convert_to_vector(word)
return self._call_java("findSynonyms", word, num) | [
"Find",
"num",
"number",
"of",
"words",
"closest",
"in",
"similarity",
"to",
"word",
".",
"word",
"can",
"be",
"a",
"string",
"or",
"vector",
"representation",
".",
"Returns",
"a",
"dataframe",
"with",
"two",
"fields",
"word",
"and",
"similarity",
"(",
"which",
"gives",
"the",
"cosine",
"similarity",
")",
"."
] | apache/spark | python | https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/ml/feature.py#L3293-L3302 | [
"def",
"findSynonyms",
"(",
"self",
",",
"word",
",",
"num",
")",
":",
"if",
"not",
"isinstance",
"(",
"word",
",",
"basestring",
")",
":",
"word",
"=",
"_convert_to_vector",
"(",
"word",
")",
"return",
"self",
".",
"_call_java",
"(",
"\"findSynonyms\"",
",",
"word",
",",
"num",
")"
] | 618d6bff71073c8c93501ab7392c3cc579730f0b |
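A hedged end-to-end sketch for findSynonyms, on a tiny illustrative corpus; `spark` is assumed to be an active SparkSession, and minCount=1 keeps the rare words:

from pyspark.ml.feature import Word2Vec

doc = spark.createDataFrame(
    [("a b c".split(" "),), ("a c d".split(" "),)], ["sentence"])
w2v = Word2Vec(vectorSize=5, seed=42, minCount=1,
               inputCol="sentence", outputCol="model")
model = w2v.fit(doc)
# A DataFrame of (word, similarity) for the 2 words nearest to "a".
model.findSynonyms("a", 2).show()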
train | Word2VecModel.findSynonymsArray | Find "num" words closest in similarity to "word".
word can be a string or vector representation.
Returns an array with two fields word and similarity (which
gives the cosine similarity). | python/pyspark/ml/feature.py | def findSynonymsArray(self, word, num):
"""
Find "num" number of words closest in similarity to "word".
word can be a string or vector representation.
Returns an array with two fields word and similarity (which
gives the cosine similarity).
"""
if not isinstance(word, basestring):
word = _convert_to_vector(word)
tuples = self._java_obj.findSynonymsArray(word, num)
return list(map(lambda st: (st._1(), st._2()), list(tuples))) | def findSynonymsArray(self, word, num):
"""
Find "num" number of words closest in similarity to "word".
word can be a string or vector representation.
Returns an array with two fields word and similarity (which
gives the cosine similarity).
"""
if not isinstance(word, basestring):
word = _convert_to_vector(word)
tuples = self._java_obj.findSynonymsArray(word, num)
return list(map(lambda st: (st._1(), st._2()), list(tuples))) | [
"Find",
"num",
"number",
"of",
"words",
"closest",
"in",
"similarity",
"to",
"word",
".",
"word",
"can",
"be",
"a",
"string",
"or",
"vector",
"representation",
".",
"Returns",
"an",
"array",
"with",
"two",
"fields",
"word",
"and",
"similarity",
"(",
"which",
"gives",
"the",
"cosine",
"similarity",
")",
"."
] | apache/spark | python | https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/ml/feature.py#L3305-L3315 | [
"def",
"findSynonymsArray",
"(",
"self",
",",
"word",
",",
"num",
")",
":",
"if",
"not",
"isinstance",
"(",
"word",
",",
"basestring",
")",
":",
"word",
"=",
"_convert_to_vector",
"(",
"word",
")",
"tuples",
"=",
"self",
".",
"_java_obj",
".",
"findSynonymsArray",
"(",
"word",
",",
"num",
")",
"return",
"list",
"(",
"map",
"(",
"lambda",
"st",
":",
"(",
"st",
".",
"_1",
"(",
")",
",",
"st",
".",
"_2",
"(",
")",
")",
",",
"list",
"(",
"tuples",
")",
")",
")"
] | 618d6bff71073c8c93501ab7392c3cc579730f0b |
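Continuing the hedged Word2Vec sketch above, the array variant materializes the same result on the driver instead of returning a DataFrame:

pairs = model.findSynonymsArray("a", 2)  # [(word, cosine similarity), ...]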
train | install_exception_handler | Hook an exception handler into Py4j, which could capture some SQL exceptions in Java.
When calling the Java API, it will call `get_return_value` to parse the returned object.
If any exception happens in the JVM, the result will be a Java exception object, and it raises
py4j.protocol.Py4JJavaError. We replace the original `get_return_value` with one that
can capture the Java exception and throw a Python one (with the same error message).
It's idempotent and can be called multiple times. | python/pyspark/sql/utils.py | def install_exception_handler():
"""
Hook an exception handler into Py4j, which could capture some SQL exceptions in Java.
When calling the Java API, it will call `get_return_value` to parse the returned object.
If any exception happens in the JVM, the result will be a Java exception object, and it raises
py4j.protocol.Py4JJavaError. We replace the original `get_return_value` with one that
can capture the Java exception and throw a Python one (with the same error message).
It's idempotent and can be called multiple times.
"""
original = py4j.protocol.get_return_value
# The original `get_return_value` is not patched, it's idempotent.
patched = capture_sql_exception(original)
# only patch the one used in py4j.java_gateway (call Java API)
py4j.java_gateway.get_return_value = patched | def install_exception_handler():
"""
Hook an exception handler into Py4j, which could capture some SQL exceptions in Java.
When calling the Java API, it will call `get_return_value` to parse the returned object.
If any exception happens in the JVM, the result will be a Java exception object, and it raises
py4j.protocol.Py4JJavaError. We replace the original `get_return_value` with one that
can capture the Java exception and throw a Python one (with the same error message).
It's idempotent and can be called multiple times.
"""
original = py4j.protocol.get_return_value
# The original `get_return_value` is not patched, it's idempotent.
patched = capture_sql_exception(original)
# only patch the one used in py4j.java_gateway (call Java API)
py4j.java_gateway.get_return_value = patched | [
"Hook",
"an",
"exception",
"handler",
"into",
"Py4j",
"which",
"could",
"capture",
"some",
"SQL",
"exceptions",
"in",
"Java",
"."
] | apache/spark | python | https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/sql/utils.py#L99-L114 | [
"def",
"install_exception_handler",
"(",
")",
":",
"original",
"=",
"py4j",
".",
"protocol",
".",
"get_return_value",
"# The original `get_return_value` is not patched, it's idempotent.",
"patched",
"=",
"capture_sql_exception",
"(",
"original",
")",
"# only patch the one used in py4j.java_gateway (call Java API)",
"py4j",
".",
"java_gateway",
".",
"get_return_value",
"=",
"patched"
] | 618d6bff71073c8c93501ab7392c3cc579730f0b |
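The patching pattern in miniature. This is a hedged stand-in, not the real capture_sql_exception: it wraps the untouched py4j.protocol.get_return_value and reassigns only the py4j.java_gateway alias, which is exactly what keeps repeated installs idempotent.

import py4j.protocol
import py4j.java_gateway

def capture(f):
    def deco(*args, **kwargs):
        try:
            return f(*args, **kwargs)
        except py4j.protocol.Py4JJavaError as e:
            # Stand-in for the SQL-specific Java -> Python exception mapping.
            raise RuntimeError(str(e))
    return deco

# py4j.protocol.get_return_value itself is never reassigned, so wrapping
# it again produces the same behavior; only the java_gateway alias changes.
py4j.java_gateway.get_return_value = capture(py4j.protocol.get_return_value)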
train | toJArray | Convert Python list to Java type array
:param gateway: Py4j Gateway
:param jtype: Java type of element in array
:param arr: Python list
"""
Convert Python list to Java type array
:param gateway: Py4j Gateway
:param jtype: Java type of element in array
:param arr: Python list
"""
jarr = gateway.new_array(jtype, len(arr))
for i in range(0, len(arr)):
jarr[i] = arr[i]
return jarr | def toJArray(gateway, jtype, arr):
"""
Convert Python list to Java type array
:param gateway: Py4j Gateway
:param jtype: Java type of element in array
:param arr: Python list
"""
jarr = gateway.new_array(jtype, len(arr))
for i in range(0, len(arr)):
jarr[i] = arr[i]
return jarr | [
"Convert",
"python",
"list",
"to",
"java",
"type",
"array",
":",
"param",
"gateway",
":",
"Py4j",
"Gateway",
":",
"param",
"jtype",
":",
"java",
"type",
"of",
"element",
"in",
"array",
":",
"param",
"arr",
":",
"python",
"type",
"list"
] | apache/spark | python | https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/sql/utils.py#L117-L127 | [
"def",
"toJArray",
"(",
"gateway",
",",
"jtype",
",",
"arr",
")",
":",
"jarr",
"=",
"gateway",
".",
"new_array",
"(",
"jtype",
",",
"len",
"(",
"arr",
")",
")",
"for",
"i",
"in",
"range",
"(",
"0",
",",
"len",
"(",
"arr",
")",
")",
":",
"jarr",
"[",
"i",
"]",
"=",
"arr",
"[",
"i",
"]",
"return",
"jarr"
] | 618d6bff71073c8c93501ab7392c3cc579730f0b |
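A hedged call-site sketch, assuming an active SparkContext named sc whose gateway exposes the JVM:

gateway = sc._gateway
# Builds a java.lang.String[] of length 3 and copies the elements in.
jarr = toJArray(gateway, gateway.jvm.java.lang.String, ["a", "b", "c"])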
train | require_minimum_pandas_version | Raise ImportError if minimum version of Pandas is not installed | python/pyspark/sql/utils.py | def require_minimum_pandas_version():
""" Raise ImportError if minimum version of Pandas is not installed
"""
# TODO(HyukjinKwon): Relocate and deduplicate the version specification.
minimum_pandas_version = "0.19.2"
from distutils.version import LooseVersion
try:
import pandas
have_pandas = True
except ImportError:
have_pandas = False
if not have_pandas:
raise ImportError("Pandas >= %s must be installed; however, "
"it was not found." % minimum_pandas_version)
if LooseVersion(pandas.__version__) < LooseVersion(minimum_pandas_version):
raise ImportError("Pandas >= %s must be installed; however, "
"your version was %s." % (minimum_pandas_version, pandas.__version__)) | def require_minimum_pandas_version():
""" Raise ImportError if minimum version of Pandas is not installed
"""
# TODO(HyukjinKwon): Relocate and deduplicate the version specification.
minimum_pandas_version = "0.19.2"
from distutils.version import LooseVersion
try:
import pandas
have_pandas = True
except ImportError:
have_pandas = False
if not have_pandas:
raise ImportError("Pandas >= %s must be installed; however, "
"it was not found." % minimum_pandas_version)
if LooseVersion(pandas.__version__) < LooseVersion(minimum_pandas_version):
raise ImportError("Pandas >= %s must be installed; however, "
"your version was %s." % (minimum_pandas_version, pandas.__version__)) | [
"Raise",
"ImportError",
"if",
"minimum",
"version",
"of",
"Pandas",
"is",
"not",
"installed"
] | apache/spark | python | https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/sql/utils.py#L130-L147 | [
"def",
"require_minimum_pandas_version",
"(",
")",
":",
"# TODO(HyukjinKwon): Relocate and deduplicate the version specification.",
"minimum_pandas_version",
"=",
"\"0.19.2\"",
"from",
"distutils",
".",
"version",
"import",
"LooseVersion",
"try",
":",
"import",
"pandas",
"have_pandas",
"=",
"True",
"except",
"ImportError",
":",
"have_pandas",
"=",
"False",
"if",
"not",
"have_pandas",
":",
"raise",
"ImportError",
"(",
"\"Pandas >= %s must be installed; however, \"",
"\"it was not found.\"",
"%",
"minimum_pandas_version",
")",
"if",
"LooseVersion",
"(",
"pandas",
".",
"__version__",
")",
"<",
"LooseVersion",
"(",
"minimum_pandas_version",
")",
":",
"raise",
"ImportError",
"(",
"\"Pandas >= %s must be installed; however, \"",
"\"your version was %s.\"",
"%",
"(",
"minimum_pandas_version",
",",
"pandas",
".",
"__version__",
")",
")"
] | 618d6bff71073c8c93501ab7392c3cc579730f0b |
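A hedged usage sketch: call the guard before entering a pandas-dependent code path so the failure is an explicit ImportError up front; the DataFrame df is an illustrative assumption.

from pyspark.sql.utils import require_minimum_pandas_version

require_minimum_pandas_version()  # fail fast with a clear message
pdf = df.toPandas()               # assumes an existing DataFrame df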
train | require_minimum_pyarrow_version | Raise ImportError if minimum version of pyarrow is not installed | python/pyspark/sql/utils.py | def require_minimum_pyarrow_version():
""" Raise ImportError if minimum version of pyarrow is not installed
"""
# TODO(HyukjinKwon): Relocate and deduplicate the version specification.
minimum_pyarrow_version = "0.12.1"
from distutils.version import LooseVersion
try:
import pyarrow
have_arrow = True
except ImportError:
have_arrow = False
if not have_arrow:
raise ImportError("PyArrow >= %s must be installed; however, "
"it was not found." % minimum_pyarrow_version)
if LooseVersion(pyarrow.__version__) < LooseVersion(minimum_pyarrow_version):
raise ImportError("PyArrow >= %s must be installed; however, "
"your version was %s." % (minimum_pyarrow_version, pyarrow.__version__)) | def require_minimum_pyarrow_version():
""" Raise ImportError if minimum version of pyarrow is not installed
"""
# TODO(HyukjinKwon): Relocate and deduplicate the version specification.
minimum_pyarrow_version = "0.12.1"
from distutils.version import LooseVersion
try:
import pyarrow
have_arrow = True
except ImportError:
have_arrow = False
if not have_arrow:
raise ImportError("PyArrow >= %s must be installed; however, "
"it was not found." % minimum_pyarrow_version)
if LooseVersion(pyarrow.__version__) < LooseVersion(minimum_pyarrow_version):
raise ImportError("PyArrow >= %s must be installed; however, "
"your version was %s." % (minimum_pyarrow_version, pyarrow.__version__)) | [
"Raise",
"ImportError",
"if",
"minimum",
"version",
"of",
"pyarrow",
"is",
"not",
"installed"
] | apache/spark | python | https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/sql/utils.py#L150-L167 | [
"def",
"require_minimum_pyarrow_version",
"(",
")",
":",
"# TODO(HyukjinKwon): Relocate and deduplicate the version specification.",
"minimum_pyarrow_version",
"=",
"\"0.12.1\"",
"from",
"distutils",
".",
"version",
"import",
"LooseVersion",
"try",
":",
"import",
"pyarrow",
"have_arrow",
"=",
"True",
"except",
"ImportError",
":",
"have_arrow",
"=",
"False",
"if",
"not",
"have_arrow",
":",
"raise",
"ImportError",
"(",
"\"PyArrow >= %s must be installed; however, \"",
"\"it was not found.\"",
"%",
"minimum_pyarrow_version",
")",
"if",
"LooseVersion",
"(",
"pyarrow",
".",
"__version__",
")",
"<",
"LooseVersion",
"(",
"minimum_pyarrow_version",
")",
":",
"raise",
"ImportError",
"(",
"\"PyArrow >= %s must be installed; however, \"",
"\"your version was %s.\"",
"%",
"(",
"minimum_pyarrow_version",
",",
"pyarrow",
".",
"__version__",
")",
")"
] | 618d6bff71073c8c93501ab7392c3cc579730f0b |
train | launch_gateway | Launch the JVM gateway.
:param conf: spark configuration passed to spark-submit
:param popen_kwargs: Dictionary of kwargs to pass to Popen when spawning
the py4j JVM. This is a developer feature intended for use in
customizing how pyspark interacts with the py4j JVM (e.g., capturing
stdout/stderr).
:return: a JavaGateway connected to the JVM | python/pyspark/java_gateway.py | def launch_gateway(conf=None, popen_kwargs=None):
"""
Launch the JVM gateway.
:param conf: spark configuration passed to spark-submit
:param popen_kwargs: Dictionary of kwargs to pass to Popen when spawning
the py4j JVM. This is a developer feature intended for use in
customizing how pyspark interacts with the py4j JVM (e.g., capturing
stdout/stderr).
:return: a JavaGateway connected to the JVM
"""
if "PYSPARK_GATEWAY_PORT" in os.environ:
gateway_port = int(os.environ["PYSPARK_GATEWAY_PORT"])
gateway_secret = os.environ["PYSPARK_GATEWAY_SECRET"]
# Process already exists
proc = None
else:
SPARK_HOME = _find_spark_home()
# Launch the Py4j gateway using Spark's run command so that we pick up the
# proper classpath and settings from spark-env.sh
on_windows = platform.system() == "Windows"
script = "./bin/spark-submit.cmd" if on_windows else "./bin/spark-submit"
command = [os.path.join(SPARK_HOME, script)]
if conf:
for k, v in conf.getAll():
command += ['--conf', '%s=%s' % (k, v)]
submit_args = os.environ.get("PYSPARK_SUBMIT_ARGS", "pyspark-shell")
if os.environ.get("SPARK_TESTING"):
submit_args = ' '.join([
"--conf spark.ui.enabled=false",
submit_args
])
command = command + shlex.split(submit_args)
# Create a temporary directory where the gateway server should write the connection
# information.
conn_info_dir = tempfile.mkdtemp()
try:
fd, conn_info_file = tempfile.mkstemp(dir=conn_info_dir)
os.close(fd)
os.unlink(conn_info_file)
env = dict(os.environ)
env["_PYSPARK_DRIVER_CONN_INFO_PATH"] = conn_info_file
# Launch the Java gateway.
popen_kwargs = {} if popen_kwargs is None else popen_kwargs
# We open a pipe to stdin so that the Java gateway can die when the pipe is broken
popen_kwargs['stdin'] = PIPE
# We always set the necessary environment variables.
popen_kwargs['env'] = env
if not on_windows:
# Don't send ctrl-c / SIGINT to the Java gateway:
def preexec_func():
signal.signal(signal.SIGINT, signal.SIG_IGN)
popen_kwargs['preexec_fn'] = preexec_func
proc = Popen(command, **popen_kwargs)
else:
# preexec_fn not supported on Windows
proc = Popen(command, **popen_kwargs)
# Wait for the file to appear, or for the process to exit, whichever happens first.
while not proc.poll() and not os.path.isfile(conn_info_file):
time.sleep(0.1)
if not os.path.isfile(conn_info_file):
raise Exception("Java gateway process exited before sending its port number")
with open(conn_info_file, "rb") as info:
gateway_port = read_int(info)
gateway_secret = UTF8Deserializer().loads(info)
finally:
shutil.rmtree(conn_info_dir)
# In Windows, ensure the Java child processes do not linger after Python has exited.
# In UNIX-based systems, the child process can kill itself on broken pipe (i.e. when
# the parent process' stdin sends an EOF). In Windows, however, this is not possible
# because java.lang.Process reads directly from the parent process' stdin, contending
# with any opportunity to read an EOF from the parent. Note that this is only best
# effort and will not take effect if the python process is violently terminated.
if on_windows:
# In Windows, the child process here is "spark-submit.cmd", not the JVM itself
# (because the UNIX "exec" command is not available). This means we cannot simply
# call proc.kill(), which kills only the "spark-submit.cmd" process but not the
# JVMs. Instead, we use "taskkill" with the tree-kill option "/t" to terminate all
# child processes in the tree (http://technet.microsoft.com/en-us/library/bb491009.aspx)
def killChild():
Popen(["cmd", "/c", "taskkill", "/f", "/t", "/pid", str(proc.pid)])
atexit.register(killChild)
# Connect to the gateway
gateway = JavaGateway(
gateway_parameters=GatewayParameters(port=gateway_port, auth_token=gateway_secret,
auto_convert=True))
# Store a reference to the Popen object for use by the caller (e.g., in reading stdout/stderr)
gateway.proc = proc
# Import the classes used by PySpark
java_import(gateway.jvm, "org.apache.spark.SparkConf")
java_import(gateway.jvm, "org.apache.spark.api.java.*")
java_import(gateway.jvm, "org.apache.spark.api.python.*")
java_import(gateway.jvm, "org.apache.spark.ml.python.*")
java_import(gateway.jvm, "org.apache.spark.mllib.api.python.*")
# TODO(davies): move into sql
java_import(gateway.jvm, "org.apache.spark.sql.*")
java_import(gateway.jvm, "org.apache.spark.sql.api.python.*")
java_import(gateway.jvm, "org.apache.spark.sql.hive.*")
java_import(gateway.jvm, "scala.Tuple2")
return gateway | def launch_gateway(conf=None, popen_kwargs=None):
"""
Launch the JVM gateway.
:param conf: spark configuration passed to spark-submit
:param popen_kwargs: Dictionary of kwargs to pass to Popen when spawning
the py4j JVM. This is a developer feature intended for use in
customizing how pyspark interacts with the py4j JVM (e.g., capturing
stdout/stderr).
:return: a JavaGateway connected to the JVM
"""
if "PYSPARK_GATEWAY_PORT" in os.environ:
gateway_port = int(os.environ["PYSPARK_GATEWAY_PORT"])
gateway_secret = os.environ["PYSPARK_GATEWAY_SECRET"]
# Process already exists
proc = None
else:
SPARK_HOME = _find_spark_home()
# Launch the Py4j gateway using Spark's run command so that we pick up the
# proper classpath and settings from spark-env.sh
on_windows = platform.system() == "Windows"
script = "./bin/spark-submit.cmd" if on_windows else "./bin/spark-submit"
command = [os.path.join(SPARK_HOME, script)]
if conf:
for k, v in conf.getAll():
command += ['--conf', '%s=%s' % (k, v)]
submit_args = os.environ.get("PYSPARK_SUBMIT_ARGS", "pyspark-shell")
if os.environ.get("SPARK_TESTING"):
submit_args = ' '.join([
"--conf spark.ui.enabled=false",
submit_args
])
command = command + shlex.split(submit_args)
# Create a temporary directory where the gateway server should write the connection
# information.
conn_info_dir = tempfile.mkdtemp()
try:
fd, conn_info_file = tempfile.mkstemp(dir=conn_info_dir)
os.close(fd)
os.unlink(conn_info_file)
env = dict(os.environ)
env["_PYSPARK_DRIVER_CONN_INFO_PATH"] = conn_info_file
# Launch the Java gateway.
popen_kwargs = {} if popen_kwargs is None else popen_kwargs
# We open a pipe to stdin so that the Java gateway can die when the pipe is broken
popen_kwargs['stdin'] = PIPE
# We always set the necessary environment variables.
popen_kwargs['env'] = env
if not on_windows:
# Don't send ctrl-c / SIGINT to the Java gateway:
def preexec_func():
signal.signal(signal.SIGINT, signal.SIG_IGN)
popen_kwargs['preexec_fn'] = preexec_func
proc = Popen(command, **popen_kwargs)
else:
# preexec_fn not supported on Windows
proc = Popen(command, **popen_kwargs)
# Wait for the file to appear, or for the process to exit, whichever happens first.
while not proc.poll() and not os.path.isfile(conn_info_file):
time.sleep(0.1)
if not os.path.isfile(conn_info_file):
raise Exception("Java gateway process exited before sending its port number")
with open(conn_info_file, "rb") as info:
gateway_port = read_int(info)
gateway_secret = UTF8Deserializer().loads(info)
finally:
shutil.rmtree(conn_info_dir)
# In Windows, ensure the Java child processes do not linger after Python has exited.
# In UNIX-based systems, the child process can kill itself on broken pipe (i.e. when
# the parent process' stdin sends an EOF). In Windows, however, this is not possible
# because java.lang.Process reads directly from the parent process' stdin, contending
# with any opportunity to read an EOF from the parent. Note that this is only best
# effort and will not take effect if the python process is violently terminated.
if on_windows:
# In Windows, the child process here is "spark-submit.cmd", not the JVM itself
# (because the UNIX "exec" command is not available). This means we cannot simply
# call proc.kill(), which kills only the "spark-submit.cmd" process but not the
# JVMs. Instead, we use "taskkill" with the tree-kill option "/t" to terminate all
# child processes in the tree (http://technet.microsoft.com/en-us/library/bb491009.aspx)
def killChild():
Popen(["cmd", "/c", "taskkill", "/f", "/t", "/pid", str(proc.pid)])
atexit.register(killChild)
# Connect to the gateway
gateway = JavaGateway(
gateway_parameters=GatewayParameters(port=gateway_port, auth_token=gateway_secret,
auto_convert=True))
# Store a reference to the Popen object for use by the caller (e.g., in reading stdout/stderr)
gateway.proc = proc
# Import the classes used by PySpark
java_import(gateway.jvm, "org.apache.spark.SparkConf")
java_import(gateway.jvm, "org.apache.spark.api.java.*")
java_import(gateway.jvm, "org.apache.spark.api.python.*")
java_import(gateway.jvm, "org.apache.spark.ml.python.*")
java_import(gateway.jvm, "org.apache.spark.mllib.api.python.*")
# TODO(davies): move into sql
java_import(gateway.jvm, "org.apache.spark.sql.*")
java_import(gateway.jvm, "org.apache.spark.sql.api.python.*")
java_import(gateway.jvm, "org.apache.spark.sql.hive.*")
java_import(gateway.jvm, "scala.Tuple2")
return gateway | [
"launch",
"jvm",
"gateway",
":",
"param",
"conf",
":",
"spark",
"configuration",
"passed",
"to",
"spark",
"-",
"submit",
":",
"param",
"popen_kwargs",
":",
"Dictionary",
"of",
"kwargs",
"to",
"pass",
"to",
"Popen",
"when",
"spawning",
"the",
"py4j",
"JVM",
".",
"This",
"is",
"a",
"developer",
"feature",
"intended",
"for",
"use",
"in",
"customizing",
"how",
"pyspark",
"interacts",
"with",
"the",
"py4j",
"JVM",
"(",
"e",
".",
"g",
".",
"capturing",
"stdout",
"/",
"stderr",
")",
".",
":",
"return",
":"
] | apache/spark | python | https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/java_gateway.py#L39-L147 | [
"def",
"launch_gateway",
"(",
"conf",
"=",
"None",
",",
"popen_kwargs",
"=",
"None",
")",
":",
"if",
"\"PYSPARK_GATEWAY_PORT\"",
"in",
"os",
".",
"environ",
":",
"gateway_port",
"=",
"int",
"(",
"os",
".",
"environ",
"[",
"\"PYSPARK_GATEWAY_PORT\"",
"]",
")",
"gateway_secret",
"=",
"os",
".",
"environ",
"[",
"\"PYSPARK_GATEWAY_SECRET\"",
"]",
"# Process already exists",
"proc",
"=",
"None",
"else",
":",
"SPARK_HOME",
"=",
"_find_spark_home",
"(",
")",
"# Launch the Py4j gateway using Spark's run command so that we pick up the",
"# proper classpath and settings from spark-env.sh",
"on_windows",
"=",
"platform",
".",
"system",
"(",
")",
"==",
"\"Windows\"",
"script",
"=",
"\"./bin/spark-submit.cmd\"",
"if",
"on_windows",
"else",
"\"./bin/spark-submit\"",
"command",
"=",
"[",
"os",
".",
"path",
".",
"join",
"(",
"SPARK_HOME",
",",
"script",
")",
"]",
"if",
"conf",
":",
"for",
"k",
",",
"v",
"in",
"conf",
".",
"getAll",
"(",
")",
":",
"command",
"+=",
"[",
"'--conf'",
",",
"'%s=%s'",
"%",
"(",
"k",
",",
"v",
")",
"]",
"submit_args",
"=",
"os",
".",
"environ",
".",
"get",
"(",
"\"PYSPARK_SUBMIT_ARGS\"",
",",
"\"pyspark-shell\"",
")",
"if",
"os",
".",
"environ",
".",
"get",
"(",
"\"SPARK_TESTING\"",
")",
":",
"submit_args",
"=",
"' '",
".",
"join",
"(",
"[",
"\"--conf spark.ui.enabled=false\"",
",",
"submit_args",
"]",
")",
"command",
"=",
"command",
"+",
"shlex",
".",
"split",
"(",
"submit_args",
")",
"# Create a temporary directory where the gateway server should write the connection",
"# information.",
"conn_info_dir",
"=",
"tempfile",
".",
"mkdtemp",
"(",
")",
"try",
":",
"fd",
",",
"conn_info_file",
"=",
"tempfile",
".",
"mkstemp",
"(",
"dir",
"=",
"conn_info_dir",
")",
"os",
".",
"close",
"(",
"fd",
")",
"os",
".",
"unlink",
"(",
"conn_info_file",
")",
"env",
"=",
"dict",
"(",
"os",
".",
"environ",
")",
"env",
"[",
"\"_PYSPARK_DRIVER_CONN_INFO_PATH\"",
"]",
"=",
"conn_info_file",
"# Launch the Java gateway.",
"popen_kwargs",
"=",
"{",
"}",
"if",
"popen_kwargs",
"is",
"None",
"else",
"popen_kwargs",
"# We open a pipe to stdin so that the Java gateway can die when the pipe is broken",
"popen_kwargs",
"[",
"'stdin'",
"]",
"=",
"PIPE",
"# We always set the necessary environment variables.",
"popen_kwargs",
"[",
"'env'",
"]",
"=",
"env",
"if",
"not",
"on_windows",
":",
"# Don't send ctrl-c / SIGINT to the Java gateway:",
"def",
"preexec_func",
"(",
")",
":",
"signal",
".",
"signal",
"(",
"signal",
".",
"SIGINT",
",",
"signal",
".",
"SIG_IGN",
")",
"popen_kwargs",
"[",
"'preexec_fn'",
"]",
"=",
"preexec_func",
"proc",
"=",
"Popen",
"(",
"command",
",",
"*",
"*",
"popen_kwargs",
")",
"else",
":",
"# preexec_fn not supported on Windows",
"proc",
"=",
"Popen",
"(",
"command",
",",
"*",
"*",
"popen_kwargs",
")",
"# Wait for the file to appear, or for the process to exit, whichever happens first.",
"while",
"not",
"proc",
".",
"poll",
"(",
")",
"and",
"not",
"os",
".",
"path",
".",
"isfile",
"(",
"conn_info_file",
")",
":",
"time",
".",
"sleep",
"(",
"0.1",
")",
"if",
"not",
"os",
".",
"path",
".",
"isfile",
"(",
"conn_info_file",
")",
":",
"raise",
"Exception",
"(",
"\"Java gateway process exited before sending its port number\"",
")",
"with",
"open",
"(",
"conn_info_file",
",",
"\"rb\"",
")",
"as",
"info",
":",
"gateway_port",
"=",
"read_int",
"(",
"info",
")",
"gateway_secret",
"=",
"UTF8Deserializer",
"(",
")",
".",
"loads",
"(",
"info",
")",
"finally",
":",
"shutil",
".",
"rmtree",
"(",
"conn_info_dir",
")",
"# In Windows, ensure the Java child processes do not linger after Python has exited.",
"# In UNIX-based systems, the child process can kill itself on broken pipe (i.e. when",
"# the parent process' stdin sends an EOF). In Windows, however, this is not possible",
"# because java.lang.Process reads directly from the parent process' stdin, contending",
"# with any opportunity to read an EOF from the parent. Note that this is only best",
"# effort and will not take effect if the python process is violently terminated.",
"if",
"on_windows",
":",
"# In Windows, the child process here is \"spark-submit.cmd\", not the JVM itself",
"# (because the UNIX \"exec\" command is not available). This means we cannot simply",
"# call proc.kill(), which kills only the \"spark-submit.cmd\" process but not the",
"# JVMs. Instead, we use \"taskkill\" with the tree-kill option \"/t\" to terminate all",
"# child processes in the tree (http://technet.microsoft.com/en-us/library/bb491009.aspx)",
"def",
"killChild",
"(",
")",
":",
"Popen",
"(",
"[",
"\"cmd\"",
",",
"\"/c\"",
",",
"\"taskkill\"",
",",
"\"/f\"",
",",
"\"/t\"",
",",
"\"/pid\"",
",",
"str",
"(",
"proc",
".",
"pid",
")",
"]",
")",
"atexit",
".",
"register",
"(",
"killChild",
")",
"# Connect to the gateway",
"gateway",
"=",
"JavaGateway",
"(",
"gateway_parameters",
"=",
"GatewayParameters",
"(",
"port",
"=",
"gateway_port",
",",
"auth_token",
"=",
"gateway_secret",
",",
"auto_convert",
"=",
"True",
")",
")",
"# Store a reference to the Popen object for use by the caller (e.g., in reading stdout/stderr)",
"gateway",
".",
"proc",
"=",
"proc",
"# Import the classes used by PySpark",
"java_import",
"(",
"gateway",
".",
"jvm",
",",
"\"org.apache.spark.SparkConf\"",
")",
"java_import",
"(",
"gateway",
".",
"jvm",
",",
"\"org.apache.spark.api.java.*\"",
")",
"java_import",
"(",
"gateway",
".",
"jvm",
",",
"\"org.apache.spark.api.python.*\"",
")",
"java_import",
"(",
"gateway",
".",
"jvm",
",",
"\"org.apache.spark.ml.python.*\"",
")",
"java_import",
"(",
"gateway",
".",
"jvm",
",",
"\"org.apache.spark.mllib.api.python.*\"",
")",
"# TODO(davies): move into sql",
"java_import",
"(",
"gateway",
".",
"jvm",
",",
"\"org.apache.spark.sql.*\"",
")",
"java_import",
"(",
"gateway",
".",
"jvm",
",",
"\"org.apache.spark.sql.api.python.*\"",
")",
"java_import",
"(",
"gateway",
".",
"jvm",
",",
"\"org.apache.spark.sql.hive.*\"",
")",
"java_import",
"(",
"gateway",
".",
"jvm",
",",
"\"scala.Tuple2\"",
")",
"return",
"gateway"
] | 618d6bff71073c8c93501ab7392c3cc579730f0b |
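A hedged driver-side sketch: SparkConf works before any JVM exists (the values are held on the Python side until then), and the classes java_import'ed above become reachable through the returned gateway.

from pyspark.conf import SparkConf
from pyspark.java_gateway import launch_gateway

gateway = launch_gateway(conf=SparkConf().set("spark.driver.memory", "1g"))
# org.apache.spark.SparkConf was java_import'ed, so it resolves directly.
jconf = gateway.jvm.SparkConf(False)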
train | _do_server_auth | Performs the authentication protocol defined by the SocketAuthHelper class on the given
file-like object 'conn'. | python/pyspark/java_gateway.py | def _do_server_auth(conn, auth_secret):
"""
Performs the authentication protocol defined by the SocketAuthHelper class on the given
file-like object 'conn'.
"""
write_with_length(auth_secret.encode("utf-8"), conn)
conn.flush()
reply = UTF8Deserializer().loads(conn)
if reply != "ok":
conn.close()
raise Exception("Unexpected reply from iterator server.") | def _do_server_auth(conn, auth_secret):
"""
Performs the authentication protocol defined by the SocketAuthHelper class on the given
file-like object 'conn'.
"""
write_with_length(auth_secret.encode("utf-8"), conn)
conn.flush()
reply = UTF8Deserializer().loads(conn)
if reply != "ok":
conn.close()
raise Exception("Unexpected reply from iterator server.") | [
"Performs",
"the",
"authentication",
"protocol",
"defined",
"by",
"the",
"SocketAuthHelper",
"class",
"on",
"the",
"given",
"file",
"-",
"like",
"object",
"conn",
"."
] | apache/spark | python | https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/java_gateway.py#L150-L160 | [
"def",
"_do_server_auth",
"(",
"conn",
",",
"auth_secret",
")",
":",
"write_with_length",
"(",
"auth_secret",
".",
"encode",
"(",
"\"utf-8\"",
")",
",",
"conn",
")",
"conn",
".",
"flush",
"(",
")",
"reply",
"=",
"UTF8Deserializer",
"(",
")",
".",
"loads",
"(",
"conn",
")",
"if",
"reply",
"!=",
"\"ok\"",
":",
"conn",
".",
"close",
"(",
")",
"raise",
"Exception",
"(",
"\"Unexpected reply from iterator server.\"",
")"
] | 618d6bff71073c8c93501ab7392c3cc579730f0b |
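The peer side of this handshake lives in the JVM's SocketAuthHelper; the sketch below is a hedged Python rendering of the same framing (a 4-byte big-endian length, then the UTF-8 payload), not Spark's actual server code.

import struct

def serve_auth(connfile, expected_secret):
    (n,) = struct.unpack("!i", connfile.read(4))   # read the length prefix
    secret = connfile.read(n).decode("utf-8")      # then the UTF-8 payload
    reply = b"ok" if secret == expected_secret else b"err"
    connfile.write(struct.pack("!i", len(reply)) + reply)  # same framing back
    connfile.flush()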
train | local_connect_and_auth | Connect to local host, authenticate with it, and return a (sockfile,sock) for that connection.
Handles IPv4 & IPv6, does some error handling.
:param port
:param auth_secret
:return: a tuple with (sockfile, sock) | python/pyspark/java_gateway.py | def local_connect_and_auth(port, auth_secret):
"""
Connect to local host, authenticate with it, and return a (sockfile,sock) for that connection.
Handles IPv4 & IPv6, does some error handling.
:param port
:param auth_secret
:return: a tuple with (sockfile, sock)
"""
sock = None
errors = []
# Support for both IPv4 and IPv6.
# On most of IPv6-ready systems, IPv6 will take precedence.
for res in socket.getaddrinfo("127.0.0.1", port, socket.AF_UNSPEC, socket.SOCK_STREAM):
af, socktype, proto, _, sa = res
try:
sock = socket.socket(af, socktype, proto)
sock.settimeout(15)
sock.connect(sa)
sockfile = sock.makefile("rwb", 65536)
_do_server_auth(sockfile, auth_secret)
return (sockfile, sock)
except socket.error as e:
emsg = _exception_message(e)
errors.append("tried to connect to %s, but an error occured: %s" % (sa, emsg))
sock.close()
sock = None
raise Exception("could not open socket: %s" % errors) | def local_connect_and_auth(port, auth_secret):
"""
Connect to local host, authenticate with it, and return a (sockfile,sock) for that connection.
Handles IPv4 & IPv6, does some error handling.
:param port
:param auth_secret
:return: a tuple with (sockfile, sock)
"""
sock = None
errors = []
# Support for both IPv4 and IPv6.
# On most of IPv6-ready systems, IPv6 will take precedence.
for res in socket.getaddrinfo("127.0.0.1", port, socket.AF_UNSPEC, socket.SOCK_STREAM):
af, socktype, proto, _, sa = res
try:
sock = socket.socket(af, socktype, proto)
sock.settimeout(15)
sock.connect(sa)
sockfile = sock.makefile("rwb", 65536)
_do_server_auth(sockfile, auth_secret)
return (sockfile, sock)
except socket.error as e:
emsg = _exception_message(e)
errors.append("tried to connect to %s, but an error occured: %s" % (sa, emsg))
sock.close()
sock = None
raise Exception("could not open socket: %s" % errors) | [
"Connect",
"to",
"local",
"host",
"authenticate",
"with",
"it",
"and",
"return",
"a",
"(",
"sockfile",
"sock",
")",
"for",
"that",
"connection",
".",
"Handles",
"IPV4",
"&",
"IPV6",
"does",
"some",
"error",
"handling",
".",
":",
"param",
"port",
":",
"param",
"auth_secret",
":",
"return",
":",
"a",
"tuple",
"with",
"(",
"sockfile",
"sock",
")"
] | apache/spark | python | https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/java_gateway.py#L163-L189 | [
"def",
"local_connect_and_auth",
"(",
"port",
",",
"auth_secret",
")",
":",
"sock",
"=",
"None",
"errors",
"=",
"[",
"]",
"# Support for both IPv4 and IPv6.",
"# On most of IPv6-ready systems, IPv6 will take precedence.",
"for",
"res",
"in",
"socket",
".",
"getaddrinfo",
"(",
"\"127.0.0.1\"",
",",
"port",
",",
"socket",
".",
"AF_UNSPEC",
",",
"socket",
".",
"SOCK_STREAM",
")",
":",
"af",
",",
"socktype",
",",
"proto",
",",
"_",
",",
"sa",
"=",
"res",
"try",
":",
"sock",
"=",
"socket",
".",
"socket",
"(",
"af",
",",
"socktype",
",",
"proto",
")",
"sock",
".",
"settimeout",
"(",
"15",
")",
"sock",
".",
"connect",
"(",
"sa",
")",
"sockfile",
"=",
"sock",
".",
"makefile",
"(",
"\"rwb\"",
",",
"65536",
")",
"_do_server_auth",
"(",
"sockfile",
",",
"auth_secret",
")",
"return",
"(",
"sockfile",
",",
"sock",
")",
"except",
"socket",
".",
"error",
"as",
"e",
":",
"emsg",
"=",
"_exception_message",
"(",
"e",
")",
"errors",
".",
"append",
"(",
"\"tried to connect to %s, but an error occured: %s\"",
"%",
"(",
"sa",
",",
"emsg",
")",
")",
"sock",
".",
"close",
"(",
")",
"sock",
"=",
"None",
"raise",
"Exception",
"(",
"\"could not open socket: %s\"",
"%",
"errors",
")"
] | 618d6bff71073c8c93501ab7392c3cc579730f0b |
train | ensure_callback_server_started | Start callback server if not already started. The callback server is needed if the Java
driver process needs to call back into the Python driver process to execute Python code. | python/pyspark/java_gateway.py | def ensure_callback_server_started(gw):
"""
Start callback server if not already started. The callback server is needed if the Java
driver process needs to call back into the Python driver process to execute Python code.
"""
# getattr will fallback to JVM, so we cannot test by hasattr()
if "_callback_server" not in gw.__dict__ or gw._callback_server is None:
gw.callback_server_parameters.eager_load = True
gw.callback_server_parameters.daemonize = True
gw.callback_server_parameters.daemonize_connections = True
gw.callback_server_parameters.port = 0
gw.start_callback_server(gw.callback_server_parameters)
cbport = gw._callback_server.server_socket.getsockname()[1]
gw._callback_server.port = cbport
# gateway with real port
gw._python_proxy_port = gw._callback_server.port
# get the GatewayServer object in JVM by ID
jgws = JavaObject("GATEWAY_SERVER", gw._gateway_client)
# update the port of CallbackClient with real port
jgws.resetCallbackClient(jgws.getCallbackClient().getAddress(), gw._python_proxy_port) | def ensure_callback_server_started(gw):
"""
Start callback server if not already started. The callback server is needed if the Java
driver process needs to call back into the Python driver process to execute Python code.
"""
# getattr will fallback to JVM, so we cannot test by hasattr()
if "_callback_server" not in gw.__dict__ or gw._callback_server is None:
gw.callback_server_parameters.eager_load = True
gw.callback_server_parameters.daemonize = True
gw.callback_server_parameters.daemonize_connections = True
gw.callback_server_parameters.port = 0
gw.start_callback_server(gw.callback_server_parameters)
cbport = gw._callback_server.server_socket.getsockname()[1]
gw._callback_server.port = cbport
# gateway with real port
gw._python_proxy_port = gw._callback_server.port
# get the GatewayServer object in JVM by ID
jgws = JavaObject("GATEWAY_SERVER", gw._gateway_client)
# update the port of CallbackClient with real port
jgws.resetCallbackClient(jgws.getCallbackClient().getAddress(), gw._python_proxy_port) | [
"Start",
"callback",
"server",
"if",
"not",
"already",
"started",
".",
"The",
"callback",
"server",
"is",
"needed",
"if",
"the",
"Java",
"driver",
"process",
"needs",
"to",
"callback",
"into",
"the",
"Python",
"driver",
"process",
"to",
"execute",
"Python",
"code",
"."
] | apache/spark | python | https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/java_gateway.py#L192-L212 | [
"def",
"ensure_callback_server_started",
"(",
"gw",
")",
":",
"# getattr will fallback to JVM, so we cannot test by hasattr()",
"if",
"\"_callback_server\"",
"not",
"in",
"gw",
".",
"__dict__",
"or",
"gw",
".",
"_callback_server",
"is",
"None",
":",
"gw",
".",
"callback_server_parameters",
".",
"eager_load",
"=",
"True",
"gw",
".",
"callback_server_parameters",
".",
"daemonize",
"=",
"True",
"gw",
".",
"callback_server_parameters",
".",
"daemonize_connections",
"=",
"True",
"gw",
".",
"callback_server_parameters",
".",
"port",
"=",
"0",
"gw",
".",
"start_callback_server",
"(",
"gw",
".",
"callback_server_parameters",
")",
"cbport",
"=",
"gw",
".",
"_callback_server",
".",
"server_socket",
".",
"getsockname",
"(",
")",
"[",
"1",
"]",
"gw",
".",
"_callback_server",
".",
"port",
"=",
"cbport",
"# gateway with real port",
"gw",
".",
"_python_proxy_port",
"=",
"gw",
".",
"_callback_server",
".",
"port",
"# get the GatewayServer object in JVM by ID",
"jgws",
"=",
"JavaObject",
"(",
"\"GATEWAY_SERVER\"",
",",
"gw",
".",
"_gateway_client",
")",
"# update the port of CallbackClient with real port",
"jgws",
".",
"resetCallbackClient",
"(",
"jgws",
".",
"getCallbackClient",
"(",
")",
".",
"getAddress",
"(",
")",
",",
"gw",
".",
"_python_proxy_port",
")"
] | 618d6bff71073c8c93501ab7392c3cc579730f0b |
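A hedged call-site sketch of the shape used before registering Python callbacks (for example, streaming listeners); an active SparkSession named spark is assumed:

from pyspark.java_gateway import ensure_callback_server_started

ensure_callback_server_started(spark.sparkContext._gateway)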
train | _find_spark_home | Find the SPARK_HOME. | python/pyspark/find_spark_home.py | def _find_spark_home():
"""Find the SPARK_HOME."""
# If the environment has SPARK_HOME set, trust it.
if "SPARK_HOME" in os.environ:
return os.environ["SPARK_HOME"]
def is_spark_home(path):
"""Takes a path and returns true if the provided path could be a reasonable SPARK_HOME"""
return (os.path.isfile(os.path.join(path, "bin/spark-submit")) and
(os.path.isdir(os.path.join(path, "jars")) or
os.path.isdir(os.path.join(path, "assembly"))))
paths = ["../", os.path.dirname(os.path.realpath(__file__))]
# Add the path of the PySpark module if it exists
if sys.version < "3":
import imp
try:
module_home = imp.find_module("pyspark")[1]
paths.append(module_home)
# If we are installed in edit mode also look two dirs up
paths.append(os.path.join(module_home, "../../"))
except ImportError:
# Not pip installed no worries
pass
else:
from importlib.util import find_spec
try:
module_home = os.path.dirname(find_spec("pyspark").origin)
paths.append(module_home)
# If we are installed in edit mode also look two dirs up
paths.append(os.path.join(module_home, "../../"))
except ImportError:
# Not pip installed no worries
pass
# Normalize the paths
paths = [os.path.abspath(p) for p in paths]
try:
return next(path for path in paths if is_spark_home(path))
except StopIteration:
print("Could not find valid SPARK_HOME while searching {0}".format(paths), file=sys.stderr)
sys.exit(-1) | def _find_spark_home():
"""Find the SPARK_HOME."""
# If the environment has SPARK_HOME set, trust it.
if "SPARK_HOME" in os.environ:
return os.environ["SPARK_HOME"]
def is_spark_home(path):
"""Takes a path and returns true if the provided path could be a reasonable SPARK_HOME"""
return (os.path.isfile(os.path.join(path, "bin/spark-submit")) and
(os.path.isdir(os.path.join(path, "jars")) or
os.path.isdir(os.path.join(path, "assembly"))))
paths = ["../", os.path.dirname(os.path.realpath(__file__))]
# Add the path of the PySpark module if it exists
if sys.version < "3":
import imp
try:
module_home = imp.find_module("pyspark")[1]
paths.append(module_home)
# If we are installed in edit mode also look two dirs up
paths.append(os.path.join(module_home, "../../"))
except ImportError:
# Not pip installed no worries
pass
else:
from importlib.util import find_spec
try:
module_home = os.path.dirname(find_spec("pyspark").origin)
paths.append(module_home)
# If we are installed in edit mode also look two dirs up
paths.append(os.path.join(module_home, "../../"))
except ImportError:
# Not pip installed no worries
pass
# Normalize the paths
paths = [os.path.abspath(p) for p in paths]
try:
return next(path for path in paths if is_spark_home(path))
except StopIteration:
print("Could not find valid SPARK_HOME while searching {0}".format(paths), file=sys.stderr)
sys.exit(-1) | [
"Find",
"the",
"SPARK_HOME",
"."
] | apache/spark | python | https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/find_spark_home.py#L28-L71 | [
"def",
"_find_spark_home",
"(",
")",
":",
"# If the environment has SPARK_HOME set trust it.",
"if",
"\"SPARK_HOME\"",
"in",
"os",
".",
"environ",
":",
"return",
"os",
".",
"environ",
"[",
"\"SPARK_HOME\"",
"]",
"def",
"is_spark_home",
"(",
"path",
")",
":",
"\"\"\"Takes a path and returns true if the provided path could be a reasonable SPARK_HOME\"\"\"",
"return",
"(",
"os",
".",
"path",
".",
"isfile",
"(",
"os",
".",
"path",
".",
"join",
"(",
"path",
",",
"\"bin/spark-submit\"",
")",
")",
"and",
"(",
"os",
".",
"path",
".",
"isdir",
"(",
"os",
".",
"path",
".",
"join",
"(",
"path",
",",
"\"jars\"",
")",
")",
"or",
"os",
".",
"path",
".",
"isdir",
"(",
"os",
".",
"path",
".",
"join",
"(",
"path",
",",
"\"assembly\"",
")",
")",
")",
")",
"paths",
"=",
"[",
"\"../\"",
",",
"os",
".",
"path",
".",
"dirname",
"(",
"os",
".",
"path",
".",
"realpath",
"(",
"__file__",
")",
")",
"]",
"# Add the path of the PySpark module if it exists",
"if",
"sys",
".",
"version",
"<",
"\"3\"",
":",
"import",
"imp",
"try",
":",
"module_home",
"=",
"imp",
".",
"find_module",
"(",
"\"pyspark\"",
")",
"[",
"1",
"]",
"paths",
".",
"append",
"(",
"module_home",
")",
"# If we are installed in edit mode also look two dirs up",
"paths",
".",
"append",
"(",
"os",
".",
"path",
".",
"join",
"(",
"module_home",
",",
"\"../../\"",
")",
")",
"except",
"ImportError",
":",
"# Not pip installed no worries",
"pass",
"else",
":",
"from",
"importlib",
".",
"util",
"import",
"find_spec",
"try",
":",
"module_home",
"=",
"os",
".",
"path",
".",
"dirname",
"(",
"find_spec",
"(",
"\"pyspark\"",
")",
".",
"origin",
")",
"paths",
".",
"append",
"(",
"module_home",
")",
"# If we are installed in edit mode also look two dirs up",
"paths",
".",
"append",
"(",
"os",
".",
"path",
".",
"join",
"(",
"module_home",
",",
"\"../../\"",
")",
")",
"except",
"ImportError",
":",
"# Not pip installed no worries",
"pass",
"# Normalize the paths",
"paths",
"=",
"[",
"os",
".",
"path",
".",
"abspath",
"(",
"p",
")",
"for",
"p",
"in",
"paths",
"]",
"try",
":",
"return",
"next",
"(",
"path",
"for",
"path",
"in",
"paths",
"if",
"is_spark_home",
"(",
"path",
")",
")",
"except",
"StopIteration",
":",
"print",
"(",
"\"Could not find valid SPARK_HOME while searching {0}\"",
".",
"format",
"(",
"paths",
")",
",",
"file",
"=",
"sys",
".",
"stderr",
")",
"sys",
".",
"exit",
"(",
"-",
"1",
")"
] | 618d6bff71073c8c93501ab7392c3cc579730f0b |
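A minimal sketch of driving the resolver in the record above directly, assuming a pip-installed PySpark (so the pyspark.find_spark_home module from the path column is importable); not part of the dataset record:

# Hypothetical demo: resolve SPARK_HOME the way bin/pyspark's launcher does.
import os
from pyspark.find_spark_home import _find_spark_home

os.environ.pop("SPARK_HOME", None)  # force the search-path logic instead of trusting the env var
print(_find_spark_home())           # resolved install dir, or exit(-1) with a message on stderr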
train | computeContribs | Calculates URL contributions to the rank of other URLs. | examples/src/main/python/pagerank.py | def computeContribs(urls, rank):
"""Calculates URL contributions to the rank of other URLs."""
num_urls = len(urls)
for url in urls:
yield (url, rank / num_urls) | def computeContribs(urls, rank):
"""Calculates URL contributions to the rank of other URLs."""
num_urls = len(urls)
for url in urls:
yield (url, rank / num_urls) | [
"Calculates",
"URL",
"contributions",
"to",
"the",
"rank",
"of",
"other",
"URLs",
"."
] | apache/spark | python | https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/examples/src/main/python/pagerank.py#L34-L38 | [
"def",
"computeContribs",
"(",
"urls",
",",
"rank",
")",
":",
"num_urls",
"=",
"len",
"(",
"urls",
")",
"for",
"url",
"in",
"urls",
":",
"yield",
"(",
"url",
",",
"rank",
"/",
"num_urls",
")"
] | 618d6bff71073c8c93501ab7392c3cc579730f0b |
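Since computeContribs is a plain generator, it can be checked without Spark at all; a minimal self-contained sketch (the function is restated verbatim from the record above):

def computeContribs(urls, rank):
    num_urls = len(urls)
    for url in urls:
        yield (url, rank / num_urls)

# A page with rank 1.0 that links to two neighbors splits its rank evenly.
print(list(computeContribs(["b", "c"], 1.0)))  # [('b', 0.5), ('c', 0.5)]

In the surrounding pagerank.py example these (url, contribution) pairs are then summed per URL and folded back into ranks with the usual 0.15/0.85 damping update.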
train | GaussianMixtureModel.summary | Gets summary (e.g. cluster assignments, cluster sizes) of the model trained on the
training set. An exception is thrown if no summary exists. | python/pyspark/ml/clustering.py | def summary(self):
"""
Gets summary (e.g. cluster assignments, cluster sizes) of the model trained on the
training set. An exception is thrown if no summary exists.
"""
if self.hasSummary:
return GaussianMixtureSummary(super(GaussianMixtureModel, self).summary)
else:
raise RuntimeError("No training summary available for this %s" %
self.__class__.__name__) | def summary(self):
"""
Gets summary (e.g. cluster assignments, cluster sizes) of the model trained on the
training set. An exception is thrown if no summary exists.
"""
if self.hasSummary:
return GaussianMixtureSummary(super(GaussianMixtureModel, self).summary)
else:
raise RuntimeError("No training summary available for this %s" %
self.__class__.__name__) | [
"Gets",
"summary",
"(",
"e",
".",
"g",
".",
"cluster",
"assignments",
"cluster",
"sizes",
")",
"of",
"the",
"model",
"trained",
"on",
"the",
"training",
"set",
".",
"An",
"exception",
"is",
"thrown",
"if",
"no",
"summary",
"exists",
"."
] | apache/spark | python | https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/ml/clustering.py#L129-L138 | [
"def",
"summary",
"(",
"self",
")",
":",
"if",
"self",
".",
"hasSummary",
":",
"return",
"GaussianMixtureSummary",
"(",
"super",
"(",
"GaussianMixtureModel",
",",
"self",
")",
".",
"summary",
")",
"else",
":",
"raise",
"RuntimeError",
"(",
"\"No training summary available for this %s\"",
"%",
"self",
".",
"__class__",
".",
"__name__",
")"
] | 618d6bff71073c8c93501ab7392c3cc579730f0b |
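The identical summary/hasSummary pattern recurs in the KMeansModel and BisectingKMeansModel records below. A minimal usage sketch, assuming a local SparkSession and a made-up four-point dataset; the toy data and k are illustrative, not from the record:

from pyspark.sql import SparkSession
from pyspark.ml.clustering import GaussianMixture
from pyspark.ml.linalg import Vectors

spark = SparkSession.builder.master("local[1]").getOrCreate()
data = [(Vectors.dense([-1.0, -1.0]),), (Vectors.dense([-0.8, -1.2]),),
        (Vectors.dense([1.0, 1.0]),), (Vectors.dense([1.2, 0.8]),)]
df = spark.createDataFrame(data, ["features"])
model = GaussianMixture(k=2, seed=1).fit(df)
if model.hasSummary:                   # guards the RuntimeError raised above
    print(model.summary.clusterSizes)  # per-cluster row counts, e.g. [2, 2]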
train | KMeansModel.summary | Gets summary (e.g. cluster assignments, cluster sizes) of the model trained on the
training set. An exception is thrown if no summary exists. | python/pyspark/ml/clustering.py | def summary(self):
"""
Gets summary (e.g. cluster assignments, cluster sizes) of the model trained on the
training set. An exception is thrown if no summary exists.
"""
if self.hasSummary:
return KMeansSummary(super(KMeansModel, self).summary)
else:
raise RuntimeError("No training summary available for this %s" %
self.__class__.__name__) | def summary(self):
"""
Gets summary (e.g. cluster assignments, cluster sizes) of the model trained on the
training set. An exception is thrown if no summary exists.
"""
if self.hasSummary:
return KMeansSummary(super(KMeansModel, self).summary)
else:
raise RuntimeError("No training summary available for this %s" %
self.__class__.__name__) | [
"Gets",
"summary",
"(",
"e",
".",
"g",
".",
"cluster",
"assignments",
"cluster",
"sizes",
")",
"of",
"the",
"model",
"trained",
"on",
"the",
"training",
"set",
".",
"An",
"exception",
"is",
"thrown",
"if",
"no",
"summary",
"exists",
"."
] | apache/spark | python | https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/ml/clustering.py#L331-L340 | [
"def",
"summary",
"(",
"self",
")",
":",
"if",
"self",
".",
"hasSummary",
":",
"return",
"KMeansSummary",
"(",
"super",
"(",
"KMeansModel",
",",
"self",
")",
".",
"summary",
")",
"else",
":",
"raise",
"RuntimeError",
"(",
"\"No training summary available for this %s\"",
"%",
"self",
".",
"__class__",
".",
"__name__",
")"
] | 618d6bff71073c8c93501ab7392c3cc579730f0b |
train | BisectingKMeansModel.summary | Gets summary (e.g. cluster assignments, cluster sizes) of the model trained on the
training set. An exception is thrown if no summary exists. | python/pyspark/ml/clustering.py | def summary(self):
"""
Gets summary (e.g. cluster assignments, cluster sizes) of the model trained on the
training set. An exception is thrown if no summary exists.
"""
if self.hasSummary:
return BisectingKMeansSummary(super(BisectingKMeansModel, self).summary)
else:
raise RuntimeError("No training summary available for this %s" %
self.__class__.__name__) | def summary(self):
"""
Gets summary (e.g. cluster assignments, cluster sizes) of the model trained on the
training set. An exception is thrown if no summary exists.
"""
if self.hasSummary:
return BisectingKMeansSummary(super(BisectingKMeansModel, self).summary)
else:
raise RuntimeError("No training summary available for this %s" %
self.__class__.__name__) | [
"Gets",
"summary",
"(",
"e",
".",
"g",
".",
"cluster",
"assignments",
"cluster",
"sizes",
")",
"of",
"the",
"model",
"trained",
"on",
"the",
"training",
"set",
".",
"An",
"exception",
"is",
"thrown",
"if",
"no",
"summary",
"exists",
"."
] | apache/spark | python | https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/ml/clustering.py#L522-L531 | [
"def",
"summary",
"(",
"self",
")",
":",
"if",
"self",
".",
"hasSummary",
":",
"return",
"BisectingKMeansSummary",
"(",
"super",
"(",
"BisectingKMeansModel",
",",
"self",
")",
".",
"summary",
")",
"else",
":",
"raise",
"RuntimeError",
"(",
"\"No training summary available for this %s\"",
"%",
"self",
".",
"__class__",
".",
"__name__",
")"
] | 618d6bff71073c8c93501ab7392c3cc579730f0b |
train | _ImageSchema.imageSchema | Returns the image schema.
:return: a :class:`StructType` with a single column of images
named "image" (nullable) and having the same type returned by :meth:`columnSchema`.
.. versionadded:: 2.3.0 | python/pyspark/ml/image.py | def imageSchema(self):
"""
Returns the image schema.
:return: a :class:`StructType` with a single column of images
named "image" (nullable) and having the same type returned by :meth:`columnSchema`.
.. versionadded:: 2.3.0
"""
if self._imageSchema is None:
ctx = SparkContext._active_spark_context
jschema = ctx._jvm.org.apache.spark.ml.image.ImageSchema.imageSchema()
self._imageSchema = _parse_datatype_json_string(jschema.json())
return self._imageSchema | def imageSchema(self):
"""
Returns the image schema.
:return: a :class:`StructType` with a single column of images
named "image" (nullable) and having the same type returned by :meth:`columnSchema`.
.. versionadded:: 2.3.0
"""
if self._imageSchema is None:
ctx = SparkContext._active_spark_context
jschema = ctx._jvm.org.apache.spark.ml.image.ImageSchema.imageSchema()
self._imageSchema = _parse_datatype_json_string(jschema.json())
return self._imageSchema | [
"Returns",
"the",
"image",
"schema",
"."
] | apache/spark | python | https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/ml/image.py#L55-L69 | [
"def",
"imageSchema",
"(",
"self",
")",
":",
"if",
"self",
".",
"_imageSchema",
"is",
"None",
":",
"ctx",
"=",
"SparkContext",
".",
"_active_spark_context",
"jschema",
"=",
"ctx",
".",
"_jvm",
".",
"org",
".",
"apache",
".",
"spark",
".",
"ml",
".",
"image",
".",
"ImageSchema",
".",
"imageSchema",
"(",
")",
"self",
".",
"_imageSchema",
"=",
"_parse_datatype_json_string",
"(",
"jschema",
".",
"json",
"(",
")",
")",
"return",
"self",
".",
"_imageSchema"
] | 618d6bff71073c8c93501ab7392c3cc579730f0b |
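This record and the four property records that follow (ocvTypes, columnSchema, imageFields, undefinedImageType) are all lazy, JVM-backed reads off the shared ImageSchema singleton. A minimal sketch, assuming an active SparkSession; the expected values in the comments are assumptions based on the Spark 2.4 sources, not taken from this dump:

from pyspark.sql import SparkSession
from pyspark.ml.image import ImageSchema

spark = SparkSession.builder.master("local[1]").getOrCreate()
print(ImageSchema.imageFields)         # ['origin', 'height', 'width', 'nChannels', 'mode', 'data']
print(ImageSchema.undefinedImageType)  # 'Undefined'
print(ImageSchema.ocvTypes["CV_8UC3"])  # 16, the OpenCV code for 8-bit 3-channel images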
train | _ImageSchema.ocvTypes | Returns the OpenCV type mapping supported.
:return: a dictionary containing the OpenCV type mapping supported.
.. versionadded:: 2.3.0 | python/pyspark/ml/image.py | def ocvTypes(self):
"""
Returns the OpenCV type mapping supported.
:return: a dictionary containing the OpenCV type mapping supported.
.. versionadded:: 2.3.0
"""
if self._ocvTypes is None:
ctx = SparkContext._active_spark_context
self._ocvTypes = dict(ctx._jvm.org.apache.spark.ml.image.ImageSchema.javaOcvTypes())
return self._ocvTypes | def ocvTypes(self):
"""
Returns the OpenCV type mapping supported.
:return: a dictionary containing the OpenCV type mapping supported.
.. versionadded:: 2.3.0
"""
if self._ocvTypes is None:
ctx = SparkContext._active_spark_context
self._ocvTypes = dict(ctx._jvm.org.apache.spark.ml.image.ImageSchema.javaOcvTypes())
return self._ocvTypes | [
"Returns",
"the",
"OpenCV",
"type",
"mapping",
"supported",
"."
] | apache/spark | python | https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/ml/image.py#L72-L84 | [
"def",
"ocvTypes",
"(",
"self",
")",
":",
"if",
"self",
".",
"_ocvTypes",
"is",
"None",
":",
"ctx",
"=",
"SparkContext",
".",
"_active_spark_context",
"self",
".",
"_ocvTypes",
"=",
"dict",
"(",
"ctx",
".",
"_jvm",
".",
"org",
".",
"apache",
".",
"spark",
".",
"ml",
".",
"image",
".",
"ImageSchema",
".",
"javaOcvTypes",
"(",
")",
")",
"return",
"self",
".",
"_ocvTypes"
] | 618d6bff71073c8c93501ab7392c3cc579730f0b |
train | _ImageSchema.columnSchema | Returns the schema for the image column.
:return: a :class:`StructType` for image column,
``struct<origin:string, height:int, width:int, nChannels:int, mode:int, data:binary>``.
.. versionadded:: 2.4.0 | python/pyspark/ml/image.py | def columnSchema(self):
"""
Returns the schema for the image column.
:return: a :class:`StructType` for image column,
``struct<origin:string, height:int, width:int, nChannels:int, mode:int, data:binary>``.
.. versionadded:: 2.4.0
"""
if self._columnSchema is None:
ctx = SparkContext._active_spark_context
jschema = ctx._jvm.org.apache.spark.ml.image.ImageSchema.columnSchema()
self._columnSchema = _parse_datatype_json_string(jschema.json())
return self._columnSchema | def columnSchema(self):
"""
Returns the schema for the image column.
:return: a :class:`StructType` for image column,
``struct<origin:string, height:int, width:int, nChannels:int, mode:int, data:binary>``.
.. versionadded:: 2.4.0
"""
if self._columnSchema is None:
ctx = SparkContext._active_spark_context
jschema = ctx._jvm.org.apache.spark.ml.image.ImageSchema.columnSchema()
self._columnSchema = _parse_datatype_json_string(jschema.json())
return self._columnSchema | [
"Returns",
"the",
"schema",
"for",
"the",
"image",
"column",
"."
] | apache/spark | python | https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/ml/image.py#L87-L101 | [
"def",
"columnSchema",
"(",
"self",
")",
":",
"if",
"self",
".",
"_columnSchema",
"is",
"None",
":",
"ctx",
"=",
"SparkContext",
".",
"_active_spark_context",
"jschema",
"=",
"ctx",
".",
"_jvm",
".",
"org",
".",
"apache",
".",
"spark",
".",
"ml",
".",
"image",
".",
"ImageSchema",
".",
"columnSchema",
"(",
")",
"self",
".",
"_columnSchema",
"=",
"_parse_datatype_json_string",
"(",
"jschema",
".",
"json",
"(",
")",
")",
"return",
"self",
".",
"_columnSchema"
] | 618d6bff71073c8c93501ab7392c3cc579730f0b |
train | _ImageSchema.imageFields | Returns field names of image columns.
:return: a list of field names.
.. versionadded:: 2.3.0 | python/pyspark/ml/image.py | def imageFields(self):
"""
Returns field names of image columns.
:return: a list of field names.
.. versionadded:: 2.3.0
"""
if self._imageFields is None:
ctx = SparkContext._active_spark_context
self._imageFields = list(ctx._jvm.org.apache.spark.ml.image.ImageSchema.imageFields())
return self._imageFields | def imageFields(self):
"""
Returns field names of image columns.
:return: a list of field names.
.. versionadded:: 2.3.0
"""
if self._imageFields is None:
ctx = SparkContext._active_spark_context
self._imageFields = list(ctx._jvm.org.apache.spark.ml.image.ImageSchema.imageFields())
return self._imageFields | [
"Returns",
"field",
"names",
"of",
"image",
"columns",
"."
] | apache/spark | python | https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/ml/image.py#L104-L116 | [
"def",
"imageFields",
"(",
"self",
")",
":",
"if",
"self",
".",
"_imageFields",
"is",
"None",
":",
"ctx",
"=",
"SparkContext",
".",
"_active_spark_context",
"self",
".",
"_imageFields",
"=",
"list",
"(",
"ctx",
".",
"_jvm",
".",
"org",
".",
"apache",
".",
"spark",
".",
"ml",
".",
"image",
".",
"ImageSchema",
".",
"imageFields",
"(",
")",
")",
"return",
"self",
".",
"_imageFields"
] | 618d6bff71073c8c93501ab7392c3cc579730f0b |
train | _ImageSchema.undefinedImageType | Returns the name of undefined image type for the invalid image.
.. versionadded:: 2.3.0 | python/pyspark/ml/image.py | def undefinedImageType(self):
"""
Returns the name of undefined image type for the invalid image.
.. versionadded:: 2.3.0
"""
if self._undefinedImageType is None:
ctx = SparkContext._active_spark_context
self._undefinedImageType = \
ctx._jvm.org.apache.spark.ml.image.ImageSchema.undefinedImageType()
return self._undefinedImageType | def undefinedImageType(self):
"""
Returns the name of undefined image type for the invalid image.
.. versionadded:: 2.3.0
"""
if self._undefinedImageType is None:
ctx = SparkContext._active_spark_context
self._undefinedImageType = \
ctx._jvm.org.apache.spark.ml.image.ImageSchema.undefinedImageType()
return self._undefinedImageType | [
"Returns",
"the",
"name",
"of",
"undefined",
"image",
"type",
"for",
"the",
"invalid",
"image",
"."
] | apache/spark | python | https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/ml/image.py#L119-L130 | [
"def",
"undefinedImageType",
"(",
"self",
")",
":",
"if",
"self",
".",
"_undefinedImageType",
"is",
"None",
":",
"ctx",
"=",
"SparkContext",
".",
"_active_spark_context",
"self",
".",
"_undefinedImageType",
"=",
"ctx",
".",
"_jvm",
".",
"org",
".",
"apache",
".",
"spark",
".",
"ml",
".",
"image",
".",
"ImageSchema",
".",
"undefinedImageType",
"(",
")",
"return",
"self",
".",
"_undefinedImageType"
] | 618d6bff71073c8c93501ab7392c3cc579730f0b |
train | _ImageSchema.toNDArray | Converts an image to an array with metadata.
:param `Row` image: A row that contains the image to be converted. It should
have the attributes specified in `ImageSchema.imageSchema`.
:return: a `numpy.ndarray` that is an image.
.. versionadded:: 2.3.0 | python/pyspark/ml/image.py | def toNDArray(self, image):
"""
Converts an image to an array with metadata.
:param `Row` image: A row that contains the image to be converted. It should
have the attributes specified in `ImageSchema.imageSchema`.
:return: a `numpy.ndarray` that is an image.
.. versionadded:: 2.3.0
"""
if not isinstance(image, Row):
raise TypeError(
"image argument should be pyspark.sql.types.Row; however, "
"it got [%s]." % type(image))
if any(not hasattr(image, f) for f in self.imageFields):
raise ValueError(
"image argument should have attributes specified in "
"ImageSchema.imageSchema [%s]." % ", ".join(self.imageFields))
height = image.height
width = image.width
nChannels = image.nChannels
return np.ndarray(
shape=(height, width, nChannels),
dtype=np.uint8,
buffer=image.data,
strides=(width * nChannels, nChannels, 1)) | def toNDArray(self, image):
"""
Converts an image to an array with metadata.
:param `Row` image: A row that contains the image to be converted. It should
have the attributes specified in `ImageSchema.imageSchema`.
:return: a `numpy.ndarray` that is an image.
.. versionadded:: 2.3.0
"""
if not isinstance(image, Row):
raise TypeError(
"image argument should be pyspark.sql.types.Row; however, "
"it got [%s]." % type(image))
if any(not hasattr(image, f) for f in self.imageFields):
raise ValueError(
"image argument should have attributes specified in "
"ImageSchema.imageSchema [%s]." % ", ".join(self.imageFields))
height = image.height
width = image.width
nChannels = image.nChannels
return np.ndarray(
shape=(height, width, nChannels),
dtype=np.uint8,
buffer=image.data,
strides=(width * nChannels, nChannels, 1)) | [
"Converts",
"an",
"image",
"to",
"an",
"array",
"with",
"metadata",
"."
] | apache/spark | python | https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/ml/image.py#L132-L160 | [
"def",
"toNDArray",
"(",
"self",
",",
"image",
")",
":",
"if",
"not",
"isinstance",
"(",
"image",
",",
"Row",
")",
":",
"raise",
"TypeError",
"(",
"\"image argument should be pyspark.sql.types.Row; however, \"",
"\"it got [%s].\"",
"%",
"type",
"(",
"image",
")",
")",
"if",
"any",
"(",
"not",
"hasattr",
"(",
"image",
",",
"f",
")",
"for",
"f",
"in",
"self",
".",
"imageFields",
")",
":",
"raise",
"ValueError",
"(",
"\"image argument should have attributes specified in \"",
"\"ImageSchema.imageSchema [%s].\"",
"%",
"\", \"",
".",
"join",
"(",
"self",
".",
"imageFields",
")",
")",
"height",
"=",
"image",
".",
"height",
"width",
"=",
"image",
".",
"width",
"nChannels",
"=",
"image",
".",
"nChannels",
"return",
"np",
".",
"ndarray",
"(",
"shape",
"=",
"(",
"height",
",",
"width",
",",
"nChannels",
")",
",",
"dtype",
"=",
"np",
".",
"uint8",
",",
"buffer",
"=",
"image",
".",
"data",
",",
"strides",
"=",
"(",
"width",
"*",
"nChannels",
",",
"nChannels",
",",
"1",
")",
")"
] | 618d6bff71073c8c93501ab7392c3cc579730f0b |
train | _ImageSchema.toImage | Converts an array with metadata to a two-dimensional image.
:param `numpy.ndarray` array: The array to convert to image.
:param str origin: Path to the image, optional.
:return: a :class:`Row` that is a two dimensional image.
.. versionadded:: 2.3.0 | python/pyspark/ml/image.py | def toImage(self, array, origin=""):
"""
Converts an array with metadata to a two-dimensional image.
:param `numpy.ndarray` array: The array to convert to image.
:param str origin: Path to the image, optional.
:return: a :class:`Row` that is a two dimensional image.
.. versionadded:: 2.3.0
"""
if not isinstance(array, np.ndarray):
raise TypeError(
"array argument should be numpy.ndarray; however, it got [%s]." % type(array))
if array.ndim != 3:
raise ValueError("Invalid array shape")
height, width, nChannels = array.shape
ocvTypes = ImageSchema.ocvTypes
if nChannels == 1:
mode = ocvTypes["CV_8UC1"]
elif nChannels == 3:
mode = ocvTypes["CV_8UC3"]
elif nChannels == 4:
mode = ocvTypes["CV_8UC4"]
else:
raise ValueError("Invalid number of channels")
# Running `bytearray(numpy.array([1]))` fails in specific Python versions
# with a specific Numpy version, for example in Python 3.6.0 and NumPy 1.13.3.
# Here, it avoids it by converting it to bytes.
if LooseVersion(np.__version__) >= LooseVersion('1.9'):
data = bytearray(array.astype(dtype=np.uint8).ravel().tobytes())
else:
# Numpy prior to 1.9 don't have `tobytes` method.
data = bytearray(array.astype(dtype=np.uint8).ravel())
# Creating new Row with _create_row(), because Row(name = value, ... )
# orders fields by name, which conflicts with expected schema order
# when the new DataFrame is created by UDF
return _create_row(self.imageFields,
[origin, height, width, nChannels, mode, data]) | def toImage(self, array, origin=""):
"""
Converts an array with metadata to a two-dimensional image.
:param `numpy.ndarray` array: The array to convert to image.
:param str origin: Path to the image, optional.
:return: a :class:`Row` that is a two dimensional image.
.. versionadded:: 2.3.0
"""
if not isinstance(array, np.ndarray):
raise TypeError(
"array argument should be numpy.ndarray; however, it got [%s]." % type(array))
if array.ndim != 3:
raise ValueError("Invalid array shape")
height, width, nChannels = array.shape
ocvTypes = ImageSchema.ocvTypes
if nChannels == 1:
mode = ocvTypes["CV_8UC1"]
elif nChannels == 3:
mode = ocvTypes["CV_8UC3"]
elif nChannels == 4:
mode = ocvTypes["CV_8UC4"]
else:
raise ValueError("Invalid number of channels")
# Running `bytearray(numpy.array([1]))` fails in specific Python versions
# with a specific Numpy version, for example in Python 3.6.0 and NumPy 1.13.3.
# Here, it avoids it by converting it to bytes.
if LooseVersion(np.__version__) >= LooseVersion('1.9'):
data = bytearray(array.astype(dtype=np.uint8).ravel().tobytes())
else:
# Numpy prior to 1.9 don't have `tobytes` method.
data = bytearray(array.astype(dtype=np.uint8).ravel())
# Creating new Row with _create_row(), because Row(name = value, ... )
# orders fields by name, which conflicts with expected schema order
# when the new DataFrame is created by UDF
return _create_row(self.imageFields,
[origin, height, width, nChannels, mode, data]) | [
"Converts",
"an",
"array",
"with",
"metadata",
"to",
"a",
"two",
"-",
"dimensional",
"image",
"."
] | apache/spark | python | https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/ml/image.py#L162-L204 | [
"def",
"toImage",
"(",
"self",
",",
"array",
",",
"origin",
"=",
"\"\"",
")",
":",
"if",
"not",
"isinstance",
"(",
"array",
",",
"np",
".",
"ndarray",
")",
":",
"raise",
"TypeError",
"(",
"\"array argument should be numpy.ndarray; however, it got [%s].\"",
"%",
"type",
"(",
"array",
")",
")",
"if",
"array",
".",
"ndim",
"!=",
"3",
":",
"raise",
"ValueError",
"(",
"\"Invalid array shape\"",
")",
"height",
",",
"width",
",",
"nChannels",
"=",
"array",
".",
"shape",
"ocvTypes",
"=",
"ImageSchema",
".",
"ocvTypes",
"if",
"nChannels",
"==",
"1",
":",
"mode",
"=",
"ocvTypes",
"[",
"\"CV_8UC1\"",
"]",
"elif",
"nChannels",
"==",
"3",
":",
"mode",
"=",
"ocvTypes",
"[",
"\"CV_8UC3\"",
"]",
"elif",
"nChannels",
"==",
"4",
":",
"mode",
"=",
"ocvTypes",
"[",
"\"CV_8UC4\"",
"]",
"else",
":",
"raise",
"ValueError",
"(",
"\"Invalid number of channels\"",
")",
"# Running `bytearray(numpy.array([1]))` fails in specific Python versions",
"# with a specific Numpy version, for example in Python 3.6.0 and NumPy 1.13.3.",
"# Here, it avoids it by converting it to bytes.",
"if",
"LooseVersion",
"(",
"np",
".",
"__version__",
")",
">=",
"LooseVersion",
"(",
"'1.9'",
")",
":",
"data",
"=",
"bytearray",
"(",
"array",
".",
"astype",
"(",
"dtype",
"=",
"np",
".",
"uint8",
")",
".",
"ravel",
"(",
")",
".",
"tobytes",
"(",
")",
")",
"else",
":",
"# Numpy prior to 1.9 don't have `tobytes` method.",
"data",
"=",
"bytearray",
"(",
"array",
".",
"astype",
"(",
"dtype",
"=",
"np",
".",
"uint8",
")",
".",
"ravel",
"(",
")",
")",
"# Creating new Row with _create_row(), because Row(name = value, ... )",
"# orders fields by name, which conflicts with expected schema order",
"# when the new DataFrame is created by UDF",
"return",
"_create_row",
"(",
"self",
".",
"imageFields",
",",
"[",
"origin",
",",
"height",
",",
"width",
",",
"nChannels",
",",
"mode",
",",
"data",
"]",
")"
] | 618d6bff71073c8c93501ab7392c3cc579730f0b |
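toImage and toNDArray (the previous record) are inverses over uint8 height-by-width-by-channel arrays. A minimal round-trip sketch, assuming an active SparkSession (toImage reads ocvTypes from the JVM); the array contents and origin string are illustrative:

import numpy as np
from pyspark.sql import SparkSession
from pyspark.ml.image import ImageSchema

spark = SparkSession.builder.master("local[1]").getOrCreate()
arr = np.arange(24, dtype=np.uint8).reshape(2, 4, 3)  # height=2, width=4, nChannels=3
row = ImageSchema.toImage(arr, origin="in-memory")
back = ImageSchema.toNDArray(row)
assert (arr == back).all()              # strides in toNDArray undo toImage's ravel()
print(row.height, row.width, row.mode)  # 2 4 16, where 16 is ocvTypes["CV_8UC3"]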
train | _ImageSchema.readImages | Reads the directory of images from the local or remote source.
.. note:: If multiple jobs are run in parallel with different sampleRatio or recursive flag,
there may be a race condition where one job overwrites the hadoop configs of another.
.. note:: If sample ratio is less than 1, sampling uses a PathFilter that is efficient but
potentially non-deterministic.
.. note:: Deprecated in 2.4.0. Use `spark.read.format("image").load(path)` instead and
this `readImages` will be removed in 3.0.0.
:param str path: Path to the image directory.
:param bool recursive: Recursive search flag.
:param int numPartitions: Number of DataFrame partitions.
:param bool dropImageFailures: Drop the files that are not valid images.
:param float sampleRatio: Fraction of the images loaded.
:param int seed: Random number seed.
:return: a :class:`DataFrame` with a single column of "images",
see ImageSchema for details.
>>> df = ImageSchema.readImages('data/mllib/images/origin/kittens', recursive=True)
>>> df.count()
5
.. versionadded:: 2.3.0 | python/pyspark/ml/image.py | def readImages(self, path, recursive=False, numPartitions=-1,
dropImageFailures=False, sampleRatio=1.0, seed=0):
"""
Reads the directory of images from the local or remote source.
.. note:: If multiple jobs are run in parallel with different sampleRatio or recursive flag,
there may be a race condition where one job overwrites the hadoop configs of another.
.. note:: If sample ratio is less than 1, sampling uses a PathFilter that is efficient but
potentially non-deterministic.
.. note:: Deprecated in 2.4.0. Use `spark.read.format("image").load(path)` instead and
this `readImages` will be removed in 3.0.0.
:param str path: Path to the image directory.
:param bool recursive: Recursive search flag.
:param int numPartitions: Number of DataFrame partitions.
:param bool dropImageFailures: Drop the files that are not valid images.
:param float sampleRatio: Fraction of the images loaded.
:param int seed: Random number seed.
:return: a :class:`DataFrame` with a single column of "images",
see ImageSchema for details.
>>> df = ImageSchema.readImages('data/mllib/images/origin/kittens', recursive=True)
>>> df.count()
5
.. versionadded:: 2.3.0
"""
warnings.warn("`ImageSchema.readImage` is deprecated. " +
"Use `spark.read.format(\"image\").load(path)` instead.", DeprecationWarning)
spark = SparkSession.builder.getOrCreate()
image_schema = spark._jvm.org.apache.spark.ml.image.ImageSchema
jsession = spark._jsparkSession
jresult = image_schema.readImages(path, jsession, recursive, numPartitions,
dropImageFailures, float(sampleRatio), seed)
return DataFrame(jresult, spark._wrapped) | def readImages(self, path, recursive=False, numPartitions=-1,
dropImageFailures=False, sampleRatio=1.0, seed=0):
"""
Reads the directory of images from the local or remote source.
.. note:: If multiple jobs are run in parallel with different sampleRatio or recursive flag,
there may be a race condition where one job overwrites the hadoop configs of another.
.. note:: If sample ratio is less than 1, sampling uses a PathFilter that is efficient but
potentially non-deterministic.
.. note:: Deprecated in 2.4.0. Use `spark.read.format("image").load(path)` instead and
this `readImages` will be removed in 3.0.0.
:param str path: Path to the image directory.
:param bool recursive: Recursive search flag.
:param int numPartitions: Number of DataFrame partitions.
:param bool dropImageFailures: Drop the files that are not valid images.
:param float sampleRatio: Fraction of the images loaded.
:param int seed: Random number seed.
:return: a :class:`DataFrame` with a single column of "images",
see ImageSchema for details.
>>> df = ImageSchema.readImages('data/mllib/images/origin/kittens', recursive=True)
>>> df.count()
5
.. versionadded:: 2.3.0
"""
warnings.warn("`ImageSchema.readImage` is deprecated. " +
"Use `spark.read.format(\"image\").load(path)` instead.", DeprecationWarning)
spark = SparkSession.builder.getOrCreate()
image_schema = spark._jvm.org.apache.spark.ml.image.ImageSchema
jsession = spark._jsparkSession
jresult = image_schema.readImages(path, jsession, recursive, numPartitions,
dropImageFailures, float(sampleRatio), seed)
return DataFrame(jresult, spark._wrapped) | [
"Reads",
"the",
"directory",
"of",
"images",
"from",
"the",
"local",
"or",
"remote",
"source",
"."
] | apache/spark | python | https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/ml/image.py#L206-L242 | [
"def",
"readImages",
"(",
"self",
",",
"path",
",",
"recursive",
"=",
"False",
",",
"numPartitions",
"=",
"-",
"1",
",",
"dropImageFailures",
"=",
"False",
",",
"sampleRatio",
"=",
"1.0",
",",
"seed",
"=",
"0",
")",
":",
"warnings",
".",
"warn",
"(",
"\"`ImageSchema.readImage` is deprecated. \"",
"+",
"\"Use `spark.read.format(\\\"image\\\").load(path)` instead.\"",
",",
"DeprecationWarning",
")",
"spark",
"=",
"SparkSession",
".",
"builder",
".",
"getOrCreate",
"(",
")",
"image_schema",
"=",
"spark",
".",
"_jvm",
".",
"org",
".",
"apache",
".",
"spark",
".",
"ml",
".",
"image",
".",
"ImageSchema",
"jsession",
"=",
"spark",
".",
"_jsparkSession",
"jresult",
"=",
"image_schema",
".",
"readImages",
"(",
"path",
",",
"jsession",
",",
"recursive",
",",
"numPartitions",
",",
"dropImageFailures",
",",
"float",
"(",
"sampleRatio",
")",
",",
"seed",
")",
"return",
"DataFrame",
"(",
"jresult",
",",
"spark",
".",
"_wrapped",
")"
] | 618d6bff71073c8c93501ab7392c3cc579730f0b |
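Per the deprecation note in the docstring above, the non-deprecated equivalent is the "image" data source. A minimal sketch, assuming Spark 2.4+ and the sample kitten images shipped in the Spark source tree (the same path the doctest uses); the dropInvalid option is the data-source analogue of dropImageFailures:

from pyspark.sql import SparkSession

spark = SparkSession.builder.master("local[1]").getOrCreate()
df = (spark.read.format("image")
      .option("dropInvalid", True)  # plays the role of dropImageFailures above
      .load("data/mllib/images/origin/kittens"))
df.select("image.origin", "image.width", "image.height").show(truncate=False)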
train | JavaWrapper._create_from_java_class | Construct this object from given Java classname and arguments | python/pyspark/ml/wrapper.py | def _create_from_java_class(cls, java_class, *args):
"""
Construct this object from given Java classname and arguments
"""
java_obj = JavaWrapper._new_java_obj(java_class, *args)
return cls(java_obj) | def _create_from_java_class(cls, java_class, *args):
"""
Construct this object from given Java classname and arguments
"""
java_obj = JavaWrapper._new_java_obj(java_class, *args)
return cls(java_obj) | [
"Construct",
"this",
"object",
"from",
"given",
"Java",
"classname",
"and",
"arguments"
] | apache/spark | python | https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/ml/wrapper.py#L44-L49 | [
"def",
"_create_from_java_class",
"(",
"cls",
",",
"java_class",
",",
"*",
"args",
")",
":",
"java_obj",
"=",
"JavaWrapper",
".",
"_new_java_obj",
"(",
"java_class",
",",
"*",
"args",
")",
"return",
"cls",
"(",
"java_obj",
")"
] | 618d6bff71073c8c93501ab7392c3cc579730f0b |
train | JavaWrapper._new_java_array | Create a Java array of given java_class type. Useful for
calling a method with a Scala Array from Python with Py4J.
If the param pylist is a 2D array, then a 2D java array will be returned.
The returned 2D java array is a square, non-jagged 2D array that is big
enough for all elements. The empty slots in the inner Java arrays will
be filled with null to make the non-jagged 2D array.
:param pylist:
Python list to convert to a Java Array.
:param java_class:
Java class to specify the type of Array. Should be in the
form of sc._gateway.jvm.* (sc is a valid Spark Context).
:return:
Java Array of converted pylist.
Example primitive Java classes:
- basestring -> sc._gateway.jvm.java.lang.String
- int -> sc._gateway.jvm.java.lang.Integer
- float -> sc._gateway.jvm.java.lang.Double
- bool -> sc._gateway.jvm.java.lang.Boolean | python/pyspark/ml/wrapper.py | def _new_java_array(pylist, java_class):
"""
Create a Java array of given java_class type. Useful for
calling a method with a Scala Array from Python with Py4J.
If the param pylist is a 2D array, then a 2D java array will be returned.
The returned 2D java array is a square, non-jagged 2D array that is big
enough for all elements. The empty slots in the inner Java arrays will
be filled with null to make the non-jagged 2D array.
:param pylist:
Python list to convert to a Java Array.
:param java_class:
Java class to specify the type of Array. Should be in the
form of sc._gateway.jvm.* (sc is a valid Spark Context).
:return:
Java Array of converted pylist.
Example primitive Java classes:
- basestring -> sc._gateway.jvm.java.lang.String
- int -> sc._gateway.jvm.java.lang.Integer
- float -> sc._gateway.jvm.java.lang.Double
- bool -> sc._gateway.jvm.java.lang.Boolean
"""
sc = SparkContext._active_spark_context
java_array = None
if len(pylist) > 0 and isinstance(pylist[0], list):
# If pylist is a 2D array, then a 2D java array will be created.
# The 2D array is a square, non-jagged 2D array that is big enough for all elements.
inner_array_length = 0
for i in xrange(len(pylist)):
inner_array_length = max(inner_array_length, len(pylist[i]))
java_array = sc._gateway.new_array(java_class, len(pylist), inner_array_length)
for i in xrange(len(pylist)):
for j in xrange(len(pylist[i])):
java_array[i][j] = pylist[i][j]
else:
java_array = sc._gateway.new_array(java_class, len(pylist))
for i in xrange(len(pylist)):
java_array[i] = pylist[i]
return java_array | def _new_java_array(pylist, java_class):
"""
Create a Java array of given java_class type. Useful for
calling a method with a Scala Array from Python with Py4J.
If the param pylist is a 2D array, then a 2D java array will be returned.
The returned 2D java array is a square, non-jagged 2D array that is big
enough for all elements. The empty slots in the inner Java arrays will
be filled with null to make the non-jagged 2D array.
:param pylist:
Python list to convert to a Java Array.
:param java_class:
Java class to specify the type of Array. Should be in the
form of sc._gateway.jvm.* (sc is a valid Spark Context).
:return:
Java Array of converted pylist.
Example primitive Java classes:
- basestring -> sc._gateway.jvm.java.lang.String
- int -> sc._gateway.jvm.java.lang.Integer
- float -> sc._gateway.jvm.java.lang.Double
- bool -> sc._gateway.jvm.java.lang.Boolean
"""
sc = SparkContext._active_spark_context
java_array = None
if len(pylist) > 0 and isinstance(pylist[0], list):
# If pylist is a 2D array, then a 2D java array will be created.
# The 2D array is a square, non-jagged 2D array that is big enough for all elements.
inner_array_length = 0
for i in xrange(len(pylist)):
inner_array_length = max(inner_array_length, len(pylist[i]))
java_array = sc._gateway.new_array(java_class, len(pylist), inner_array_length)
for i in xrange(len(pylist)):
for j in xrange(len(pylist[i])):
java_array[i][j] = pylist[i][j]
else:
java_array = sc._gateway.new_array(java_class, len(pylist))
for i in xrange(len(pylist)):
java_array[i] = pylist[i]
return java_array | [
"Create",
"a",
"Java",
"array",
"of",
"given",
"java_class",
"type",
".",
"Useful",
"for",
"calling",
"a",
"method",
"with",
"a",
"Scala",
"Array",
"from",
"Python",
"with",
"Py4J",
".",
"If",
"the",
"param",
"pylist",
"is",
"a",
"2D",
"array",
"then",
"a",
"2D",
"java",
"array",
"will",
"be",
"returned",
".",
"The",
"returned",
"2D",
"java",
"array",
"is",
"a",
"square",
"non",
"-",
"jagged",
"2D",
"array",
"that",
"is",
"big",
"enough",
"for",
"all",
"elements",
".",
"The",
"empty",
"slots",
"in",
"the",
"inner",
"Java",
"arrays",
"will",
"be",
"filled",
"with",
"null",
"to",
"make",
"the",
"non",
"-",
"jagged",
"2D",
"array",
"."
] | apache/spark | python | https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/ml/wrapper.py#L70-L109 | [
"def",
"_new_java_array",
"(",
"pylist",
",",
"java_class",
")",
":",
"sc",
"=",
"SparkContext",
".",
"_active_spark_context",
"java_array",
"=",
"None",
"if",
"len",
"(",
"pylist",
")",
">",
"0",
"and",
"isinstance",
"(",
"pylist",
"[",
"0",
"]",
",",
"list",
")",
":",
"# If pylist is a 2D array, then a 2D java array will be created.",
"# The 2D array is a square, non-jagged 2D array that is big enough for all elements.",
"inner_array_length",
"=",
"0",
"for",
"i",
"in",
"xrange",
"(",
"len",
"(",
"pylist",
")",
")",
":",
"inner_array_length",
"=",
"max",
"(",
"inner_array_length",
",",
"len",
"(",
"pylist",
"[",
"i",
"]",
")",
")",
"java_array",
"=",
"sc",
".",
"_gateway",
".",
"new_array",
"(",
"java_class",
",",
"len",
"(",
"pylist",
")",
",",
"inner_array_length",
")",
"for",
"i",
"in",
"xrange",
"(",
"len",
"(",
"pylist",
")",
")",
":",
"for",
"j",
"in",
"xrange",
"(",
"len",
"(",
"pylist",
"[",
"i",
"]",
")",
")",
":",
"java_array",
"[",
"i",
"]",
"[",
"j",
"]",
"=",
"pylist",
"[",
"i",
"]",
"[",
"j",
"]",
"else",
":",
"java_array",
"=",
"sc",
".",
"_gateway",
".",
"new_array",
"(",
"java_class",
",",
"len",
"(",
"pylist",
")",
")",
"for",
"i",
"in",
"xrange",
"(",
"len",
"(",
"pylist",
")",
")",
":",
"java_array",
"[",
"i",
"]",
"=",
"pylist",
"[",
"i",
"]",
"return",
"java_array"
] | 618d6bff71073c8c93501ab7392c3cc579730f0b |
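A minimal sketch of the Py4J array helper above, assuming an active SparkContext so the gateway is available; the chosen element values are illustrative:

from pyspark import SparkContext
from pyspark.ml.wrapper import JavaWrapper

sc = SparkContext.getOrCreate()
jarr = JavaWrapper._new_java_array(["a", "b"], sc._gateway.jvm.java.lang.String)
print(jarr[1])  # 'b', read back through Py4J from a java.lang.String[]

# A jagged 2D Python list comes back as a square Java array with null-padded slots.
j2d = JavaWrapper._new_java_array([[1, 2], [3]], sc._gateway.jvm.java.lang.Integer)
print(j2d[1][1])  # None (the padded slot)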
train | _convert_epytext | >>> _convert_epytext("L{A}")
:class:`A` | python/docs/epytext.py | def _convert_epytext(line):
"""
>>> _convert_epytext("L{A}")
:class:`A`
"""
line = line.replace('@', ':')
for p, sub in RULES:
line = re.sub(p, sub, line)
return line | def _convert_epytext(line):
"""
>>> _convert_epytext("L{A}")
:class:`A`
"""
line = line.replace('@', ':')
for p, sub in RULES:
line = re.sub(p, sub, line)
return line | [
">>>",
"_convert_epytext",
"(",
"L",
"{",
"A",
"}",
")",
":",
"class",
":",
"A"
] | apache/spark | python | https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/docs/epytext.py#L13-L21 | [
"def",
"_convert_epytext",
"(",
"line",
")",
":",
"line",
"=",
"line",
".",
"replace",
"(",
"'@'",
",",
"':'",
")",
"for",
"p",
",",
"sub",
"in",
"RULES",
":",
"line",
"=",
"re",
".",
"sub",
"(",
"p",
",",
"sub",
",",
"line",
")",
"return",
"line"
] | 618d6bff71073c8c93501ab7392c3cc579730f0b |
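The RULES table this function iterates over is not part of the record. A self-contained sketch that reproduces the doctest's L{...} -> :class: behavior with an assumed single rule; the real table in python/docs/epytext.py carries more patterns:

import re

# Hypothetical stand-in for the RULES table referenced above.
RULES = [(r"L\{([\w.()]+)\}", r":class:`\1`")]

def _convert_epytext(line):
    line = line.replace('@', ':')
    for p, sub in RULES:
        line = re.sub(p, sub, line)
    return line

print(_convert_epytext("L{A}"))       # :class:`A`
print(_convert_epytext("@param x:"))  # :param x: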
train | rddToFileName | Return string prefix-time(.suffix)
>>> rddToFileName("spark", None, 12345678910)
'spark-12345678910'
>>> rddToFileName("spark", "tmp", 12345678910)
'spark-12345678910.tmp' | python/pyspark/streaming/util.py | def rddToFileName(prefix, suffix, timestamp):
"""
Return string prefix-time(.suffix)
>>> rddToFileName("spark", None, 12345678910)
'spark-12345678910'
>>> rddToFileName("spark", "tmp", 12345678910)
'spark-12345678910.tmp'
"""
if isinstance(timestamp, datetime):
seconds = time.mktime(timestamp.timetuple())
timestamp = int(seconds * 1000) + timestamp.microsecond // 1000
if suffix is None:
return prefix + "-" + str(timestamp)
else:
return prefix + "-" + str(timestamp) + "." + suffix | def rddToFileName(prefix, suffix, timestamp):
"""
Return string prefix-time(.suffix)
>>> rddToFileName("spark", None, 12345678910)
'spark-12345678910'
>>> rddToFileName("spark", "tmp", 12345678910)
'spark-12345678910.tmp'
"""
if isinstance(timestamp, datetime):
seconds = time.mktime(timestamp.timetuple())
timestamp = int(seconds * 1000) + timestamp.microsecond // 1000
if suffix is None:
return prefix + "-" + str(timestamp)
else:
return prefix + "-" + str(timestamp) + "." + suffix | [
"Return",
"string",
"prefix",
"-",
"time",
"(",
".",
"suffix",
")"
] | apache/spark | python | https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/streaming/util.py#L138-L153 | [
"def",
"rddToFileName",
"(",
"prefix",
",",
"suffix",
",",
"timestamp",
")",
":",
"if",
"isinstance",
"(",
"timestamp",
",",
"datetime",
")",
":",
"seconds",
"=",
"time",
".",
"mktime",
"(",
"timestamp",
".",
"timetuple",
"(",
")",
")",
"timestamp",
"=",
"int",
"(",
"seconds",
"*",
"1000",
")",
"+",
"timestamp",
".",
"microsecond",
"//",
"1000",
"if",
"suffix",
"is",
"None",
":",
"return",
"prefix",
"+",
"\"-\"",
"+",
"str",
"(",
"timestamp",
")",
"else",
":",
"return",
"prefix",
"+",
"\"-\"",
"+",
"str",
"(",
"timestamp",
")",
"+",
"\".\"",
"+",
"suffix"
] | 618d6bff71073c8c93501ab7392c3cc579730f0b |
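The datetime branch is not covered by the doctests above; a minimal self-contained sketch (the function is restated verbatim from the record):

import time
from datetime import datetime

def rddToFileName(prefix, suffix, timestamp):
    if isinstance(timestamp, datetime):
        seconds = time.mktime(timestamp.timetuple())
        timestamp = int(seconds * 1000) + timestamp.microsecond // 1000
    if suffix is None:
        return prefix + "-" + str(timestamp)
    return prefix + "-" + str(timestamp) + "." + suffix

# The exact digits depend on the local timezone, since time.mktime assumes local time.
print(rddToFileName("spark", "log", datetime(2019, 1, 1, microsecond=500000)))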
train | ProfilerCollector.add_profiler | Add a profiler for RDD `id` | python/pyspark/profiler.py | def add_profiler(self, id, profiler):
""" Add a profiler for RDD `id` """
if not self.profilers:
if self.profile_dump_path:
atexit.register(self.dump_profiles, self.profile_dump_path)
else:
atexit.register(self.show_profiles)
self.profilers.append([id, profiler, False]) | def add_profiler(self, id, profiler):
""" Add a profiler for RDD `id` """
if not self.profilers:
if self.profile_dump_path:
atexit.register(self.dump_profiles, self.profile_dump_path)
else:
atexit.register(self.show_profiles)
self.profilers.append([id, profiler, False]) | [
"Add",
"a",
"profiler",
"for",
"RDD",
"id"
] | apache/spark | python | https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/profiler.py#L43-L51 | [
"def",
"add_profiler",
"(",
"self",
",",
"id",
",",
"profiler",
")",
":",
"if",
"not",
"self",
".",
"profilers",
":",
"if",
"self",
".",
"profile_dump_path",
":",
"atexit",
".",
"register",
"(",
"self",
".",
"dump_profiles",
",",
"self",
".",
"profile_dump_path",
")",
"else",
":",
"atexit",
".",
"register",
"(",
"self",
".",
"show_profiles",
")",
"self",
".",
"profilers",
".",
"append",
"(",
"[",
"id",
",",
"profiler",
",",
"False",
"]",
")"
] | 618d6bff71073c8c93501ab7392c3cc579730f0b |
train | ProfilerCollector.dump_profiles | Dump the profile stats into directory `path` | python/pyspark/profiler.py | def dump_profiles(self, path):
""" Dump the profile stats into directory `path` """
for id, profiler, _ in self.profilers:
profiler.dump(id, path)
self.profilers = [] | def dump_profiles(self, path):
""" Dump the profile stats into directory `path` """
for id, profiler, _ in self.profilers:
profiler.dump(id, path)
self.profilers = [] | [
"Dump",
"the",
"profile",
"stats",
"into",
"directory",
"path"
] | apache/spark | python | https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/profiler.py#L53-L57 | [
"def",
"dump_profiles",
"(",
"self",
",",
"path",
")",
":",
"for",
"id",
",",
"profiler",
",",
"_",
"in",
"self",
".",
"profilers",
":",
"profiler",
".",
"dump",
"(",
"id",
",",
"path",
")",
"self",
".",
"profilers",
"=",
"[",
"]"
] | 618d6bff71073c8c93501ab7392c3cc579730f0b |
train | ProfilerCollector.show_profiles | Print the profile stats to stdout | python/pyspark/profiler.py | def show_profiles(self):
""" Print the profile stats to stdout """
for i, (id, profiler, showed) in enumerate(self.profilers):
if not showed and profiler:
profiler.show(id)
# mark it as showed
self.profilers[i][2] = True | def show_profiles(self):
""" Print the profile stats to stdout """
for i, (id, profiler, showed) in enumerate(self.profilers):
if not showed and profiler:
profiler.show(id)
# mark it as showed
self.profilers[i][2] = True | [
"Print",
"the",
"profile",
"stats",
"to",
"stdout"
] | apache/spark | python | https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/profiler.py#L59-L65 | [
"def",
"show_profiles",
"(",
"self",
")",
":",
"for",
"i",
",",
"(",
"id",
",",
"profiler",
",",
"showed",
")",
"in",
"enumerate",
"(",
"self",
".",
"profilers",
")",
":",
"if",
"not",
"showed",
"and",
"profiler",
":",
"profiler",
".",
"show",
"(",
"id",
")",
"# mark it as showed",
"self",
".",
"profilers",
"[",
"i",
"]",
"[",
"2",
"]",
"=",
"True"
] | 618d6bff71073c8c93501ab7392c3cc579730f0b |
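The three ProfilerCollector records above (add_profiler, dump_profiles, show_profiles) are normally driven indirectly through the spark.python.profile conf rather than called by hand. A minimal sketch, assuming a local PySpark:

from pyspark import SparkConf, SparkContext

conf = SparkConf().set("spark.python.profile", "true")
sc = SparkContext(master="local[1]", conf=conf)
sc.parallelize(range(100)).map(lambda x: x * x).count()
sc.show_profiles()  # delegates to ProfilerCollector.show_profiles above
# sc.dump_profiles("/tmp/pyspark-profiles")  # or write per-RDD stats to a directory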