| id (int32, 0-252k) | repo (string, 7-55 chars) | path (string, 4-127 chars) | func_name (string, 1-88 chars) | original_string (string, 75-19.8k chars) | language (1 class: "python") | code (string, 75-19.8k chars) | code_tokens (list) | docstring (string, 3-17.3k chars) | docstring_tokens (list) | sha (string, 40 chars) | url (string, 87-242 chars) |
|---|---|---|---|---|---|---|---|---|---|---|---|
19,200
|
apache/spark
|
python/pyspark/sql/functions.py
|
sequence
|
def sequence(start, stop, step=None):
"""
Generate a sequence of integers from `start` to `stop`, incrementing by `step`.
If `step` is not set, incrementing by 1 if `start` is less than or equal to `stop`,
otherwise -1.
>>> df1 = spark.createDataFrame([(-2, 2)], ('C1', 'C2'))
>>> df1.select(sequence('C1', 'C2').alias('r')).collect()
[Row(r=[-2, -1, 0, 1, 2])]
>>> df2 = spark.createDataFrame([(4, -4, -2)], ('C1', 'C2', 'C3'))
>>> df2.select(sequence('C1', 'C2', 'C3').alias('r')).collect()
[Row(r=[4, 2, 0, -2, -4])]
"""
sc = SparkContext._active_spark_context
if step is None:
return Column(sc._jvm.functions.sequence(_to_java_column(start), _to_java_column(stop)))
else:
return Column(sc._jvm.functions.sequence(
_to_java_column(start), _to_java_column(stop), _to_java_column(step)))
|
python
|
def sequence(start, stop, step=None):
"""
Generate a sequence of integers from `start` to `stop`, incrementing by `step`.
If `step` is not set, incrementing by 1 if `start` is less than or equal to `stop`,
otherwise -1.
>>> df1 = spark.createDataFrame([(-2, 2)], ('C1', 'C2'))
>>> df1.select(sequence('C1', 'C2').alias('r')).collect()
[Row(r=[-2, -1, 0, 1, 2])]
>>> df2 = spark.createDataFrame([(4, -4, -2)], ('C1', 'C2', 'C3'))
>>> df2.select(sequence('C1', 'C2', 'C3').alias('r')).collect()
[Row(r=[4, 2, 0, -2, -4])]
"""
sc = SparkContext._active_spark_context
if step is None:
return Column(sc._jvm.functions.sequence(_to_java_column(start), _to_java_column(stop)))
else:
return Column(sc._jvm.functions.sequence(
_to_java_column(start), _to_java_column(stop), _to_java_column(step)))
|
[
"def",
"sequence",
"(",
"start",
",",
"stop",
",",
"step",
"=",
"None",
")",
":",
"sc",
"=",
"SparkContext",
".",
"_active_spark_context",
"if",
"step",
"is",
"None",
":",
"return",
"Column",
"(",
"sc",
".",
"_jvm",
".",
"functions",
".",
"sequence",
"(",
"_to_java_column",
"(",
"start",
")",
",",
"_to_java_column",
"(",
"stop",
")",
")",
")",
"else",
":",
"return",
"Column",
"(",
"sc",
".",
"_jvm",
".",
"functions",
".",
"sequence",
"(",
"_to_java_column",
"(",
"start",
")",
",",
"_to_java_column",
"(",
"stop",
")",
",",
"_to_java_column",
"(",
"step",
")",
")",
")"
] |
Generate a sequence of integers from `start` to `stop`, incrementing by `step`.
If `step` is not set, incrementing by 1 if `start` is less than or equal to `stop`,
otherwise -1.
>>> df1 = spark.createDataFrame([(-2, 2)], ('C1', 'C2'))
>>> df1.select(sequence('C1', 'C2').alias('r')).collect()
[Row(r=[-2, -1, 0, 1, 2])]
>>> df2 = spark.createDataFrame([(4, -4, -2)], ('C1', 'C2', 'C3'))
>>> df2.select(sequence('C1', 'C2', 'C3').alias('r')).collect()
[Row(r=[4, 2, 0, -2, -4])]
|
[
"Generate",
"a",
"sequence",
"of",
"integers",
"from",
"start",
"to",
"stop",
"incrementing",
"by",
"step",
".",
"If",
"step",
"is",
"not",
"set",
"incrementing",
"by",
"1",
"if",
"start",
"is",
"less",
"than",
"or",
"equal",
"to",
"stop",
"otherwise",
"-",
"1",
"."
] |
618d6bff71073c8c93501ab7392c3cc579730f0b
|
https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/sql/functions.py#L2739-L2757
|
19,201
|
apache/spark
|
python/pyspark/sql/functions.py
|
from_csv
|
def from_csv(col, schema, options={}):
"""
Parses a column containing a CSV string to a row with the specified schema.
Returns `null`, in the case of an unparseable string.
:param col: string column in CSV format
:param schema: a string with schema in DDL format to use when parsing the CSV column.
:param options: options to control parsing. accepts the same options as the CSV datasource
>>> data = [("1,2,3",)]
>>> df = spark.createDataFrame(data, ("value",))
>>> df.select(from_csv(df.value, "a INT, b INT, c INT").alias("csv")).collect()
[Row(csv=Row(a=1, b=2, c=3))]
>>> value = data[0][0]
>>> df.select(from_csv(df.value, schema_of_csv(value)).alias("csv")).collect()
[Row(csv=Row(_c0=1, _c1=2, _c2=3))]
"""
sc = SparkContext._active_spark_context
if isinstance(schema, basestring):
schema = _create_column_from_literal(schema)
elif isinstance(schema, Column):
schema = _to_java_column(schema)
else:
raise TypeError("schema argument should be a column or string")
jc = sc._jvm.functions.from_csv(_to_java_column(col), schema, options)
return Column(jc)
|
python
|
def from_csv(col, schema, options={}):
"""
Parses a column containing a CSV string to a row with the specified schema.
Returns `null`, in the case of an unparseable string.
:param col: string column in CSV format
:param schema: a string with schema in DDL format to use when parsing the CSV column.
:param options: options to control parsing. accepts the same options as the CSV datasource
>>> data = [("1,2,3",)]
>>> df = spark.createDataFrame(data, ("value",))
>>> df.select(from_csv(df.value, "a INT, b INT, c INT").alias("csv")).collect()
[Row(csv=Row(a=1, b=2, c=3))]
>>> value = data[0][0]
>>> df.select(from_csv(df.value, schema_of_csv(value)).alias("csv")).collect()
[Row(csv=Row(_c0=1, _c1=2, _c2=3))]
"""
sc = SparkContext._active_spark_context
if isinstance(schema, basestring):
schema = _create_column_from_literal(schema)
elif isinstance(schema, Column):
schema = _to_java_column(schema)
else:
raise TypeError("schema argument should be a column or string")
jc = sc._jvm.functions.from_csv(_to_java_column(col), schema, options)
return Column(jc)
|
[
"def",
"from_csv",
"(",
"col",
",",
"schema",
",",
"options",
"=",
"{",
"}",
")",
":",
"sc",
"=",
"SparkContext",
".",
"_active_spark_context",
"if",
"isinstance",
"(",
"schema",
",",
"basestring",
")",
":",
"schema",
"=",
"_create_column_from_literal",
"(",
"schema",
")",
"elif",
"isinstance",
"(",
"schema",
",",
"Column",
")",
":",
"schema",
"=",
"_to_java_column",
"(",
"schema",
")",
"else",
":",
"raise",
"TypeError",
"(",
"\"schema argument should be a column or string\"",
")",
"jc",
"=",
"sc",
".",
"_jvm",
".",
"functions",
".",
"from_csv",
"(",
"_to_java_column",
"(",
"col",
")",
",",
"schema",
",",
"options",
")",
"return",
"Column",
"(",
"jc",
")"
] |
Parses a column containing a CSV string to a row with the specified schema.
Returns `null`, in the case of an unparseable string.
:param col: string column in CSV format
:param schema: a string with schema in DDL format to use when parsing the CSV column.
:param options: options to control parsing. accepts the same options as the CSV datasource
>>> data = [("1,2,3",)]
>>> df = spark.createDataFrame(data, ("value",))
>>> df.select(from_csv(df.value, "a INT, b INT, c INT").alias("csv")).collect()
[Row(csv=Row(a=1, b=2, c=3))]
>>> value = data[0][0]
>>> df.select(from_csv(df.value, schema_of_csv(value)).alias("csv")).collect()
[Row(csv=Row(_c0=1, _c1=2, _c2=3))]
|
[
"Parses",
"a",
"column",
"containing",
"a",
"CSV",
"string",
"to",
"a",
"row",
"with",
"the",
"specified",
"schema",
".",
"Returns",
"null",
"in",
"the",
"case",
"of",
"an",
"unparseable",
"string",
"."
] |
618d6bff71073c8c93501ab7392c3cc579730f0b
|
https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/sql/functions.py#L2762-L2789
|
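The `from_csv` row above documents an `options` argument, but its doctests never exercise it. A minimal sketch of passing a CSV datasource option, assuming an active `spark` session as in the doctests; the semicolon-separated input and the `"sep"` key are illustrative choices, not taken from the row:

from pyspark.sql.functions import from_csv

df = spark.createDataFrame([("1;2;3",)], ("value",))
# options are passed as a dict of CSV datasource settings; "sep" overrides the default comma
df.select(from_csv(df.value, "a INT, b INT, c INT", {"sep": ";"}).alias("csv")).collect()
# expected under these assumptions: [Row(csv=Row(a=1, b=2, c=3))]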
19,202
|
apache/spark
|
python/pyspark/sql/readwriter.py
|
DataFrameReader.format
|
def format(self, source):
"""Specifies the input data source format.
:param source: string, name of the data source, e.g. 'json', 'parquet'.
>>> df = spark.read.format('json').load('python/test_support/sql/people.json')
>>> df.dtypes
[('age', 'bigint'), ('name', 'string')]
"""
self._jreader = self._jreader.format(source)
return self
|
python
|
def format(self, source):
"""Specifies the input data source format.
:param source: string, name of the data source, e.g. 'json', 'parquet'.
>>> df = spark.read.format('json').load('python/test_support/sql/people.json')
>>> df.dtypes
[('age', 'bigint'), ('name', 'string')]
"""
self._jreader = self._jreader.format(source)
return self
|
[
"def",
"format",
"(",
"self",
",",
"source",
")",
":",
"self",
".",
"_jreader",
"=",
"self",
".",
"_jreader",
".",
"format",
"(",
"source",
")",
"return",
"self"
] |
Specifies the input data source format.
:param source: string, name of the data source, e.g. 'json', 'parquet'.
>>> df = spark.read.format('json').load('python/test_support/sql/people.json')
>>> df.dtypes
[('age', 'bigint'), ('name', 'string')]
|
[
"Specifies",
"the",
"input",
"data",
"source",
"format",
"."
] |
618d6bff71073c8c93501ab7392c3cc579730f0b
|
https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/sql/readwriter.py#L78-L89
|
19,203
|
apache/spark
|
python/pyspark/sql/readwriter.py
|
DataFrameReader.schema
|
def schema(self, schema):
"""Specifies the input schema.
Some data sources (e.g. JSON) can infer the input schema automatically from data.
By specifying the schema here, the underlying data source can skip the schema
inference step, and thus speed up data loading.
:param schema: a :class:`pyspark.sql.types.StructType` object or a DDL-formatted string
(For example ``col0 INT, col1 DOUBLE``).
>>> s = spark.read.schema("col0 INT, col1 DOUBLE")
"""
from pyspark.sql import SparkSession
spark = SparkSession.builder.getOrCreate()
if isinstance(schema, StructType):
jschema = spark._jsparkSession.parseDataType(schema.json())
self._jreader = self._jreader.schema(jschema)
elif isinstance(schema, basestring):
self._jreader = self._jreader.schema(schema)
else:
raise TypeError("schema should be StructType or string")
return self
|
python
|
def schema(self, schema):
"""Specifies the input schema.
Some data sources (e.g. JSON) can infer the input schema automatically from data.
By specifying the schema here, the underlying data source can skip the schema
inference step, and thus speed up data loading.
:param schema: a :class:`pyspark.sql.types.StructType` object or a DDL-formatted string
(For example ``col0 INT, col1 DOUBLE``).
>>> s = spark.read.schema("col0 INT, col1 DOUBLE")
"""
from pyspark.sql import SparkSession
spark = SparkSession.builder.getOrCreate()
if isinstance(schema, StructType):
jschema = spark._jsparkSession.parseDataType(schema.json())
self._jreader = self._jreader.schema(jschema)
elif isinstance(schema, basestring):
self._jreader = self._jreader.schema(schema)
else:
raise TypeError("schema should be StructType or string")
return self
|
[
"def",
"schema",
"(",
"self",
",",
"schema",
")",
":",
"from",
"pyspark",
".",
"sql",
"import",
"SparkSession",
"spark",
"=",
"SparkSession",
".",
"builder",
".",
"getOrCreate",
"(",
")",
"if",
"isinstance",
"(",
"schema",
",",
"StructType",
")",
":",
"jschema",
"=",
"spark",
".",
"_jsparkSession",
".",
"parseDataType",
"(",
"schema",
".",
"json",
"(",
")",
")",
"self",
".",
"_jreader",
"=",
"self",
".",
"_jreader",
".",
"schema",
"(",
"jschema",
")",
"elif",
"isinstance",
"(",
"schema",
",",
"basestring",
")",
":",
"self",
".",
"_jreader",
"=",
"self",
".",
"_jreader",
".",
"schema",
"(",
"schema",
")",
"else",
":",
"raise",
"TypeError",
"(",
"\"schema should be StructType or string\"",
")",
"return",
"self"
] |
Specifies the input schema.
Some data sources (e.g. JSON) can infer the input schema automatically from data.
By specifying the schema here, the underlying data source can skip the schema
inference step, and thus speed up data loading.
:param schema: a :class:`pyspark.sql.types.StructType` object or a DDL-formatted string
(For example ``col0 INT, col1 DOUBLE``).
>>> s = spark.read.schema("col0 INT, col1 DOUBLE")
|
[
"Specifies",
"the",
"input",
"schema",
"."
] |
618d6bff71073c8c93501ab7392c3cc579730f0b
|
https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/sql/readwriter.py#L92-L113
|
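The `schema` row above accepts either a DDL string or a `StructType`, but only the string form appears in its doctest. A sketch of the `StructType` branch; the column names `col0`/`col1` simply mirror the doctest:

from pyspark.sql.types import StructType, StructField, IntegerType, DoubleType

struct = StructType([
    StructField("col0", IntegerType()),
    StructField("col1", DoubleType()),
])
# equivalent to spark.read.schema("col0 INT, col1 DOUBLE"), but skips schema inference and DDL parsing
reader = spark.read.schema(struct)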
19,204
|
apache/spark
|
python/pyspark/sql/readwriter.py
|
DataFrameReader.option
|
def option(self, key, value):
"""Adds an input option for the underlying data source.
You can set the following option(s) for reading files:
* ``timeZone``: sets the string that indicates a timezone to be used to parse timestamps
in the JSON/CSV datasources or partition values.
If it isn't set, it uses the default value, session local timezone.
"""
self._jreader = self._jreader.option(key, to_str(value))
return self
|
python
|
def option(self, key, value):
"""Adds an input option for the underlying data source.
You can set the following option(s) for reading files:
* ``timeZone``: sets the string that indicates a timezone to be used to parse timestamps
in the JSON/CSV datasources or partition values.
If it isn't set, it uses the default value, session local timezone.
"""
self._jreader = self._jreader.option(key, to_str(value))
return self
|
[
"def",
"option",
"(",
"self",
",",
"key",
",",
"value",
")",
":",
"self",
".",
"_jreader",
"=",
"self",
".",
"_jreader",
".",
"option",
"(",
"key",
",",
"to_str",
"(",
"value",
")",
")",
"return",
"self"
] |
Adds an input option for the underlying data source.
You can set the following option(s) for reading files:
* ``timeZone``: sets the string that indicates a timezone to be used to parse timestamps
in the JSON/CSV datasources or partition values.
If it isn't set, it uses the default value, session local timezone.
|
[
"Adds",
"an",
"input",
"option",
"for",
"the",
"underlying",
"data",
"source",
"."
] |
618d6bff71073c8c93501ab7392c3cc579730f0b
|
https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/sql/readwriter.py#L116-L125
|
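The reader `option` row above names only `timeZone`, but datasource-specific keys go through the same call. A sketch assuming a hypothetical CSV file at `/tmp/people.csv`; non-string values are normalized by `to_str()` before reaching the JVM, as the row's code shows:

df = (spark.read
      .option("timeZone", "UTC")
      .option("header", True)       # normalized to a string by to_str()
      .csv("/tmp/people.csv"))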
19,205
|
apache/spark
|
python/pyspark/sql/readwriter.py
|
DataFrameReader.options
|
def options(self, **options):
"""Adds input options for the underlying data source.
You can set the following option(s) for reading files:
* ``timeZone``: sets the string that indicates a timezone to be used to parse timestamps
in the JSON/CSV datasources or partition values.
If it isn't set, it uses the default value, session local timezone.
"""
for k in options:
self._jreader = self._jreader.option(k, to_str(options[k]))
return self
|
python
|
def options(self, **options):
"""Adds input options for the underlying data source.
You can set the following option(s) for reading files:
* ``timeZone``: sets the string that indicates a timezone to be used to parse timestamps
in the JSON/CSV datasources or partition values.
If it isn't set, it uses the default value, session local timezone.
"""
for k in options:
self._jreader = self._jreader.option(k, to_str(options[k]))
return self
|
[
"def",
"options",
"(",
"self",
",",
"*",
"*",
"options",
")",
":",
"for",
"k",
"in",
"options",
":",
"self",
".",
"_jreader",
"=",
"self",
".",
"_jreader",
".",
"option",
"(",
"k",
",",
"to_str",
"(",
"options",
"[",
"k",
"]",
")",
")",
"return",
"self"
] |
Adds input options for the underlying data source.
You can set the following option(s) for reading files:
* ``timeZone``: sets the string that indicates a timezone to be used to parse timestamps
in the JSON/CSV datasources or partition values.
If it isn't set, it uses the default value, session local timezone.
|
[
"Adds",
"input",
"options",
"for",
"the",
"underlying",
"data",
"source",
"."
] |
618d6bff71073c8c93501ab7392c3cc579730f0b
|
https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/sql/readwriter.py#L128-L138
|
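`options` is the keyword-argument counterpart of `option`; the loop in the row above applies each pair through the same underlying call. An equivalent sketch of the chained example, again with a hypothetical path:

df = (spark.read
      .options(timeZone="UTC", header="true")   # same effect as two .option(...) calls
      .csv("/tmp/people.csv"))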
19,206
|
apache/spark
|
python/pyspark/sql/readwriter.py
|
DataFrameWriter.mode
|
def mode(self, saveMode):
"""Specifies the behavior when data or table already exists.
Options include:
* `append`: Append contents of this :class:`DataFrame` to existing data.
* `overwrite`: Overwrite existing data.
* `error` or `errorifexists`: Throw an exception if data already exists.
* `ignore`: Silently ignore this operation if data already exists.
>>> df.write.mode('append').parquet(os.path.join(tempfile.mkdtemp(), 'data'))
"""
# At the JVM side, the default value of mode is already set to "error".
# So, if the given saveMode is None, we will not call JVM-side's mode method.
if saveMode is not None:
self._jwrite = self._jwrite.mode(saveMode)
return self
|
python
|
def mode(self, saveMode):
"""Specifies the behavior when data or table already exists.
Options include:
* `append`: Append contents of this :class:`DataFrame` to existing data.
* `overwrite`: Overwrite existing data.
* `error` or `errorifexists`: Throw an exception if data already exists.
* `ignore`: Silently ignore this operation if data already exists.
>>> df.write.mode('append').parquet(os.path.join(tempfile.mkdtemp(), 'data'))
"""
# At the JVM side, the default value of mode is already set to "error".
# So, if the given saveMode is None, we will not call JVM-side's mode method.
if saveMode is not None:
self._jwrite = self._jwrite.mode(saveMode)
return self
|
[
"def",
"mode",
"(",
"self",
",",
"saveMode",
")",
":",
"# At the JVM side, the default value of mode is already set to \"error\".",
"# So, if the given saveMode is None, we will not call JVM-side's mode method.",
"if",
"saveMode",
"is",
"not",
"None",
":",
"self",
".",
"_jwrite",
"=",
"self",
".",
"_jwrite",
".",
"mode",
"(",
"saveMode",
")",
"return",
"self"
] |
Specifies the behavior when data or table already exists.
Options include:
* `append`: Append contents of this :class:`DataFrame` to existing data.
* `overwrite`: Overwrite existing data.
* `error` or `errorifexists`: Throw an exception if data already exists.
* `ignore`: Silently ignore this operation if data already exists.
>>> df.write.mode('append').parquet(os.path.join(tempfile.mkdtemp(), 'data'))
|
[
"Specifies",
"the",
"behavior",
"when",
"data",
"or",
"table",
"already",
"exists",
"."
] |
618d6bff71073c8c93501ab7392c3cc579730f0b
|
https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/sql/readwriter.py#L590-L606
|
19,207
|
apache/spark
|
python/pyspark/sql/readwriter.py
|
DataFrameWriter.format
|
def format(self, source):
"""Specifies the underlying output data source.
:param source: string, name of the data source, e.g. 'json', 'parquet'.
>>> df.write.format('json').save(os.path.join(tempfile.mkdtemp(), 'data'))
"""
self._jwrite = self._jwrite.format(source)
return self
|
python
|
def format(self, source):
"""Specifies the underlying output data source.
:param source: string, name of the data source, e.g. 'json', 'parquet'.
>>> df.write.format('json').save(os.path.join(tempfile.mkdtemp(), 'data'))
"""
self._jwrite = self._jwrite.format(source)
return self
|
[
"def",
"format",
"(",
"self",
",",
"source",
")",
":",
"self",
".",
"_jwrite",
"=",
"self",
".",
"_jwrite",
".",
"format",
"(",
"source",
")",
"return",
"self"
] |
Specifies the underlying output data source.
:param source: string, name of the data source, e.g. 'json', 'parquet'.
>>> df.write.format('json').save(os.path.join(tempfile.mkdtemp(), 'data'))
|
[
"Specifies",
"the",
"underlying",
"output",
"data",
"source",
"."
] |
618d6bff71073c8c93501ab7392c3cc579730f0b
|
https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/sql/readwriter.py#L609-L617
|
19,208
|
apache/spark
|
python/pyspark/sql/readwriter.py
|
DataFrameWriter.option
|
def option(self, key, value):
"""Adds an output option for the underlying data source.
You can set the following option(s) for writing files:
* ``timeZone``: sets the string that indicates a timezone to be used to format
timestamps in the JSON/CSV datasources or partition values.
If it isn't set, it uses the default value, session local timezone.
"""
self._jwrite = self._jwrite.option(key, to_str(value))
return self
|
python
|
def option(self, key, value):
"""Adds an output option for the underlying data source.
You can set the following option(s) for writing files:
* ``timeZone``: sets the string that indicates a timezone to be used to format
timestamps in the JSON/CSV datasources or partition values.
If it isn't set, it uses the default value, session local timezone.
"""
self._jwrite = self._jwrite.option(key, to_str(value))
return self
|
[
"def",
"option",
"(",
"self",
",",
"key",
",",
"value",
")",
":",
"self",
".",
"_jwrite",
"=",
"self",
".",
"_jwrite",
".",
"option",
"(",
"key",
",",
"to_str",
"(",
"value",
")",
")",
"return",
"self"
] |
Adds an output option for the underlying data source.
You can set the following option(s) for writing files:
* ``timeZone``: sets the string that indicates a timezone to be used to format
timestamps in the JSON/CSV datasources or partition values.
If it isn't set, it uses the default value, session local timezone.
|
[
"Adds",
"an",
"output",
"option",
"for",
"the",
"underlying",
"data",
"source",
"."
] |
618d6bff71073c8c93501ab7392c3cc579730f0b
|
https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/sql/readwriter.py#L620-L629
|
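On the write side, `option` behaves the same way as its reader counterpart. A sketch that sets write options before saving to a hypothetical output directory:

(df.write
   .option("timeZone", "UTC")
   .option("header", "true")
   .mode("overwrite")
   .csv("/tmp/out_csv"))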
19,209
|
apache/spark
|
python/pyspark/sql/readwriter.py
|
DataFrameWriter.options
|
def options(self, **options):
"""Adds output options for the underlying data source.
You can set the following option(s) for writing files:
* ``timeZone``: sets the string that indicates a timezone to be used to format
timestamps in the JSON/CSV datasources or partition values.
If it isn't set, it uses the default value, session local timezone.
"""
for k in options:
self._jwrite = self._jwrite.option(k, to_str(options[k]))
return self
|
python
|
def options(self, **options):
"""Adds output options for the underlying data source.
You can set the following option(s) for writing files:
* ``timeZone``: sets the string that indicates a timezone to be used to format
timestamps in the JSON/CSV datasources or partition values.
If it isn't set, it uses the default value, session local timezone.
"""
for k in options:
self._jwrite = self._jwrite.option(k, to_str(options[k]))
return self
|
[
"def",
"options",
"(",
"self",
",",
"*",
"*",
"options",
")",
":",
"for",
"k",
"in",
"options",
":",
"self",
".",
"_jwrite",
"=",
"self",
".",
"_jwrite",
".",
"option",
"(",
"k",
",",
"to_str",
"(",
"options",
"[",
"k",
"]",
")",
")",
"return",
"self"
] |
Adds output options for the underlying data source.
You can set the following option(s) for writing files:
* ``timeZone``: sets the string that indicates a timezone to be used to format
timestamps in the JSON/CSV datasources or partition values.
If it isn't set, it uses the default value, session local timezone.
|
[
"Adds",
"output",
"options",
"for",
"the",
"underlying",
"data",
"source",
"."
] |
618d6bff71073c8c93501ab7392c3cc579730f0b
|
https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/sql/readwriter.py#L632-L642
|
19,210
|
apache/spark
|
python/pyspark/sql/readwriter.py
|
DataFrameWriter.partitionBy
|
def partitionBy(self, *cols):
"""Partitions the output by the given columns on the file system.
If specified, the output is laid out on the file system similar
to Hive's partitioning scheme.
:param cols: name of columns
>>> df.write.partitionBy('year', 'month').parquet(os.path.join(tempfile.mkdtemp(), 'data'))
"""
if len(cols) == 1 and isinstance(cols[0], (list, tuple)):
cols = cols[0]
self._jwrite = self._jwrite.partitionBy(_to_seq(self._spark._sc, cols))
return self
|
python
|
def partitionBy(self, *cols):
"""Partitions the output by the given columns on the file system.
If specified, the output is laid out on the file system similar
to Hive's partitioning scheme.
:param cols: name of columns
>>> df.write.partitionBy('year', 'month').parquet(os.path.join(tempfile.mkdtemp(), 'data'))
"""
if len(cols) == 1 and isinstance(cols[0], (list, tuple)):
cols = cols[0]
self._jwrite = self._jwrite.partitionBy(_to_seq(self._spark._sc, cols))
return self
|
[
"def",
"partitionBy",
"(",
"self",
",",
"*",
"cols",
")",
":",
"if",
"len",
"(",
"cols",
")",
"==",
"1",
"and",
"isinstance",
"(",
"cols",
"[",
"0",
"]",
",",
"(",
"list",
",",
"tuple",
")",
")",
":",
"cols",
"=",
"cols",
"[",
"0",
"]",
"self",
".",
"_jwrite",
"=",
"self",
".",
"_jwrite",
".",
"partitionBy",
"(",
"_to_seq",
"(",
"self",
".",
"_spark",
".",
"_sc",
",",
"cols",
")",
")",
"return",
"self"
] |
Partitions the output by the given columns on the file system.
If specified, the output is laid out on the file system similar
to Hive's partitioning scheme.
:param cols: name of columns
>>> df.write.partitionBy('year', 'month').parquet(os.path.join(tempfile.mkdtemp(), 'data'))
|
[
"Partitions",
"the",
"output",
"by",
"the",
"given",
"columns",
"on",
"the",
"file",
"system",
"."
] |
618d6bff71073c8c93501ab7392c3cc579730f0b
|
https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/sql/readwriter.py#L645-L658
|
19,211
|
apache/spark
|
python/pyspark/sql/readwriter.py
|
DataFrameWriter.sortBy
|
def sortBy(self, col, *cols):
"""Sorts the output in each bucket by the given columns on the file system.
:param col: a name of a column, or a list of names.
:param cols: additional names (optional). If `col` is a list it should be empty.
>>> (df.write.format('parquet') # doctest: +SKIP
... .bucketBy(100, 'year', 'month')
... .sortBy('day')
... .mode("overwrite")
... .saveAsTable('sorted_bucketed_table'))
"""
if isinstance(col, (list, tuple)):
if cols:
raise ValueError("col is a {0} but cols are not empty".format(type(col)))
col, cols = col[0], col[1:]
if not all(isinstance(c, basestring) for c in cols) or not(isinstance(col, basestring)):
raise TypeError("all names should be `str`")
self._jwrite = self._jwrite.sortBy(col, _to_seq(self._spark._sc, cols))
return self
|
python
|
def sortBy(self, col, *cols):
"""Sorts the output in each bucket by the given columns on the file system.
:param col: a name of a column, or a list of names.
:param cols: additional names (optional). If `col` is a list it should be empty.
>>> (df.write.format('parquet') # doctest: +SKIP
... .bucketBy(100, 'year', 'month')
... .sortBy('day')
... .mode("overwrite")
... .saveAsTable('sorted_bucketed_table'))
"""
if isinstance(col, (list, tuple)):
if cols:
raise ValueError("col is a {0} but cols are not empty".format(type(col)))
col, cols = col[0], col[1:]
if not all(isinstance(c, basestring) for c in cols) or not(isinstance(col, basestring)):
raise TypeError("all names should be `str`")
self._jwrite = self._jwrite.sortBy(col, _to_seq(self._spark._sc, cols))
return self
|
[
"def",
"sortBy",
"(",
"self",
",",
"col",
",",
"*",
"cols",
")",
":",
"if",
"isinstance",
"(",
"col",
",",
"(",
"list",
",",
"tuple",
")",
")",
":",
"if",
"cols",
":",
"raise",
"ValueError",
"(",
"\"col is a {0} but cols are not empty\"",
".",
"format",
"(",
"type",
"(",
"col",
")",
")",
")",
"col",
",",
"cols",
"=",
"col",
"[",
"0",
"]",
",",
"col",
"[",
"1",
":",
"]",
"if",
"not",
"all",
"(",
"isinstance",
"(",
"c",
",",
"basestring",
")",
"for",
"c",
"in",
"cols",
")",
"or",
"not",
"(",
"isinstance",
"(",
"col",
",",
"basestring",
")",
")",
":",
"raise",
"TypeError",
"(",
"\"all names should be `str`\"",
")",
"self",
".",
"_jwrite",
"=",
"self",
".",
"_jwrite",
".",
"sortBy",
"(",
"col",
",",
"_to_seq",
"(",
"self",
".",
"_spark",
".",
"_sc",
",",
"cols",
")",
")",
"return",
"self"
] |
Sorts the output in each bucket by the given columns on the file system.
:param col: a name of a column, or a list of names.
:param cols: additional names (optional). If `col` is a list it should be empty.
>>> (df.write.format('parquet') # doctest: +SKIP
... .bucketBy(100, 'year', 'month')
... .sortBy('day')
... .mode("overwrite")
... .saveAsTable('sorted_bucketed_table'))
|
[
"Sorts",
"the",
"output",
"in",
"each",
"bucket",
"by",
"the",
"given",
"columns",
"on",
"the",
"file",
"system",
"."
] |
618d6bff71073c8c93501ab7392c3cc579730f0b
|
https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/sql/readwriter.py#L693-L715
|
19,212
|
apache/spark
|
python/pyspark/sql/readwriter.py
|
DataFrameWriter.text
|
def text(self, path, compression=None, lineSep=None):
"""Saves the content of the DataFrame in a text file at the specified path.
The text files will be encoded as UTF-8.
:param path: the path in any Hadoop supported file system
:param compression: compression codec to use when saving to file. This can be one of the
known case-insensitive shorten names (none, bzip2, gzip, lz4,
snappy and deflate).
:param lineSep: defines the line separator that should be used for writing. If None is
set, it uses the default value, ``\\n``.
The DataFrame must have only one column that is of string type.
Each row becomes a new line in the output file.
"""
self._set_opts(compression=compression, lineSep=lineSep)
self._jwrite.text(path)
|
python
|
def text(self, path, compression=None, lineSep=None):
"""Saves the content of the DataFrame in a text file at the specified path.
The text files will be encoded as UTF-8.
:param path: the path in any Hadoop supported file system
:param compression: compression codec to use when saving to file. This can be one of the
known case-insensitive shorten names (none, bzip2, gzip, lz4,
snappy and deflate).
:param lineSep: defines the line separator that should be used for writing. If None is
set, it uses the default value, ``\\n``.
The DataFrame must have only one column that is of string type.
Each row becomes a new line in the output file.
"""
self._set_opts(compression=compression, lineSep=lineSep)
self._jwrite.text(path)
|
[
"def",
"text",
"(",
"self",
",",
"path",
",",
"compression",
"=",
"None",
",",
"lineSep",
"=",
"None",
")",
":",
"self",
".",
"_set_opts",
"(",
"compression",
"=",
"compression",
",",
"lineSep",
"=",
"lineSep",
")",
"self",
".",
"_jwrite",
".",
"text",
"(",
"path",
")"
] |
Saves the content of the DataFrame in a text file at the specified path.
The text files will be encoded as UTF-8.
:param path: the path in any Hadoop supported file system
:param compression: compression codec to use when saving to file. This can be one of the
known case-insensitive shorten names (none, bzip2, gzip, lz4,
snappy and deflate).
:param lineSep: defines the line separator that should be used for writing. If None is
set, it uses the default value, ``\\n``.
The DataFrame must have only one column that is of string type.
Each row becomes a new line in the output file.
|
[
"Saves",
"the",
"content",
"of",
"the",
"DataFrame",
"in",
"a",
"text",
"file",
"at",
"the",
"specified",
"path",
".",
"The",
"text",
"files",
"will",
"be",
"encoded",
"as",
"UTF",
"-",
"8",
"."
] |
618d6bff71073c8c93501ab7392c3cc579730f0b
|
https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/sql/readwriter.py#L856-L871
|
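The `text` row above carries no doctest. A sketch, assuming `df` has a single string column named `name` and a hypothetical output directory; `compression` and `lineSep` are forwarded through `_set_opts` exactly as the row's code shows:

(df.select("name")
   .write
   .text("/tmp/out_text", compression="gzip", lineSep="\n"))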
19,213
|
apache/spark
|
dev/merge_spark_pr.py
|
choose_jira_assignee
|
def choose_jira_assignee(issue, asf_jira):
"""
Prompt the user to choose who to assign the issue to in jira, given a list of candidates,
including the original reporter and all commentors
"""
while True:
try:
reporter = issue.fields.reporter
commentors = map(lambda x: x.author, issue.fields.comment.comments)
candidates = set(commentors)
candidates.add(reporter)
candidates = list(candidates)
print("JIRA is unassigned, choose assignee")
for idx, author in enumerate(candidates):
if author.key == "apachespark":
continue
annotations = ["Reporter"] if author == reporter else []
if author in commentors:
annotations.append("Commentor")
print("[%d] %s (%s)" % (idx, author.displayName, ",".join(annotations)))
raw_assignee = input(
"Enter number of user, or userid, to assign to (blank to leave unassigned):")
if raw_assignee == "":
return None
else:
try:
id = int(raw_assignee)
assignee = candidates[id]
except:
# assume it's a user id, and try to assign (might fail, we just prompt again)
assignee = asf_jira.user(raw_assignee)
asf_jira.assign_issue(issue.key, assignee.key)
return assignee
except KeyboardInterrupt:
raise
except:
traceback.print_exc()
print("Error assigning JIRA, try again (or leave blank and fix manually)")
|
python
|
def choose_jira_assignee(issue, asf_jira):
"""
Prompt the user to choose who to assign the issue to in jira, given a list of candidates,
including the original reporter and all commentors
"""
while True:
try:
reporter = issue.fields.reporter
commentors = map(lambda x: x.author, issue.fields.comment.comments)
candidates = set(commentors)
candidates.add(reporter)
candidates = list(candidates)
print("JIRA is unassigned, choose assignee")
for idx, author in enumerate(candidates):
if author.key == "apachespark":
continue
annotations = ["Reporter"] if author == reporter else []
if author in commentors:
annotations.append("Commentor")
print("[%d] %s (%s)" % (idx, author.displayName, ",".join(annotations)))
raw_assignee = input(
"Enter number of user, or userid, to assign to (blank to leave unassigned):")
if raw_assignee == "":
return None
else:
try:
id = int(raw_assignee)
assignee = candidates[id]
except:
# assume it's a user id, and try to assign (might fail, we just prompt again)
assignee = asf_jira.user(raw_assignee)
asf_jira.assign_issue(issue.key, assignee.key)
return assignee
except KeyboardInterrupt:
raise
except:
traceback.print_exc()
print("Error assigning JIRA, try again (or leave blank and fix manually)")
|
[
"def",
"choose_jira_assignee",
"(",
"issue",
",",
"asf_jira",
")",
":",
"while",
"True",
":",
"try",
":",
"reporter",
"=",
"issue",
".",
"fields",
".",
"reporter",
"commentors",
"=",
"map",
"(",
"lambda",
"x",
":",
"x",
".",
"author",
",",
"issue",
".",
"fields",
".",
"comment",
".",
"comments",
")",
"candidates",
"=",
"set",
"(",
"commentors",
")",
"candidates",
".",
"add",
"(",
"reporter",
")",
"candidates",
"=",
"list",
"(",
"candidates",
")",
"print",
"(",
"\"JIRA is unassigned, choose assignee\"",
")",
"for",
"idx",
",",
"author",
"in",
"enumerate",
"(",
"candidates",
")",
":",
"if",
"author",
".",
"key",
"==",
"\"apachespark\"",
":",
"continue",
"annotations",
"=",
"[",
"\"Reporter\"",
"]",
"if",
"author",
"==",
"reporter",
"else",
"[",
"]",
"if",
"author",
"in",
"commentors",
":",
"annotations",
".",
"append",
"(",
"\"Commentor\"",
")",
"print",
"(",
"\"[%d] %s (%s)\"",
"%",
"(",
"idx",
",",
"author",
".",
"displayName",
",",
"\",\"",
".",
"join",
"(",
"annotations",
")",
")",
")",
"raw_assignee",
"=",
"input",
"(",
"\"Enter number of user, or userid, to assign to (blank to leave unassigned):\"",
")",
"if",
"raw_assignee",
"==",
"\"\"",
":",
"return",
"None",
"else",
":",
"try",
":",
"id",
"=",
"int",
"(",
"raw_assignee",
")",
"assignee",
"=",
"candidates",
"[",
"id",
"]",
"except",
":",
"# assume it's a user id, and try to assign (might fail, we just prompt again)",
"assignee",
"=",
"asf_jira",
".",
"user",
"(",
"raw_assignee",
")",
"asf_jira",
".",
"assign_issue",
"(",
"issue",
".",
"key",
",",
"assignee",
".",
"key",
")",
"return",
"assignee",
"except",
"KeyboardInterrupt",
":",
"raise",
"except",
":",
"traceback",
".",
"print_exc",
"(",
")",
"print",
"(",
"\"Error assigning JIRA, try again (or leave blank and fix manually)\"",
")"
] |
Prompt the user to choose who to assign the issue to in jira, given a list of candidates,
including the original reporter and all commentors
|
[
"Prompt",
"the",
"user",
"to",
"choose",
"who",
"to",
"assign",
"the",
"issue",
"to",
"in",
"jira",
"given",
"a",
"list",
"of",
"candidates",
"including",
"the",
"original",
"reporter",
"and",
"all",
"commentors"
] |
618d6bff71073c8c93501ab7392c3cc579730f0b
|
https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/dev/merge_spark_pr.py#L325-L362
|
19,214
|
apache/spark
|
python/pyspark/mllib/util.py
|
MLUtils._convert_labeled_point_to_libsvm
|
def _convert_labeled_point_to_libsvm(p):
"""Converts a LabeledPoint to a string in LIBSVM format."""
from pyspark.mllib.regression import LabeledPoint
assert isinstance(p, LabeledPoint)
items = [str(p.label)]
v = _convert_to_vector(p.features)
if isinstance(v, SparseVector):
nnz = len(v.indices)
for i in xrange(nnz):
items.append(str(v.indices[i] + 1) + ":" + str(v.values[i]))
else:
for i in xrange(len(v)):
items.append(str(i + 1) + ":" + str(v[i]))
return " ".join(items)
|
python
|
def _convert_labeled_point_to_libsvm(p):
"""Converts a LabeledPoint to a string in LIBSVM format."""
from pyspark.mllib.regression import LabeledPoint
assert isinstance(p, LabeledPoint)
items = [str(p.label)]
v = _convert_to_vector(p.features)
if isinstance(v, SparseVector):
nnz = len(v.indices)
for i in xrange(nnz):
items.append(str(v.indices[i] + 1) + ":" + str(v.values[i]))
else:
for i in xrange(len(v)):
items.append(str(i + 1) + ":" + str(v[i]))
return " ".join(items)
|
[
"def",
"_convert_labeled_point_to_libsvm",
"(",
"p",
")",
":",
"from",
"pyspark",
".",
"mllib",
".",
"regression",
"import",
"LabeledPoint",
"assert",
"isinstance",
"(",
"p",
",",
"LabeledPoint",
")",
"items",
"=",
"[",
"str",
"(",
"p",
".",
"label",
")",
"]",
"v",
"=",
"_convert_to_vector",
"(",
"p",
".",
"features",
")",
"if",
"isinstance",
"(",
"v",
",",
"SparseVector",
")",
":",
"nnz",
"=",
"len",
"(",
"v",
".",
"indices",
")",
"for",
"i",
"in",
"xrange",
"(",
"nnz",
")",
":",
"items",
".",
"append",
"(",
"str",
"(",
"v",
".",
"indices",
"[",
"i",
"]",
"+",
"1",
")",
"+",
"\":\"",
"+",
"str",
"(",
"v",
".",
"values",
"[",
"i",
"]",
")",
")",
"else",
":",
"for",
"i",
"in",
"xrange",
"(",
"len",
"(",
"v",
")",
")",
":",
"items",
".",
"append",
"(",
"str",
"(",
"i",
"+",
"1",
")",
"+",
"\":\"",
"+",
"str",
"(",
"v",
"[",
"i",
"]",
")",
")",
"return",
"\" \"",
".",
"join",
"(",
"items",
")"
] |
Converts a LabeledPoint to a string in LIBSVM format.
|
[
"Converts",
"a",
"LabeledPoint",
"to",
"a",
"string",
"in",
"LIBSVM",
"format",
"."
] |
618d6bff71073c8c93501ab7392c3cc579730f0b
|
https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/mllib/util.py#L56-L69
|
19,215
|
apache/spark
|
python/pyspark/mllib/util.py
|
MLUtils.saveAsLibSVMFile
|
def saveAsLibSVMFile(data, dir):
"""
Save labeled data in LIBSVM format.
:param data: an RDD of LabeledPoint to be saved
:param dir: directory to save the data
>>> from tempfile import NamedTemporaryFile
>>> from fileinput import input
>>> from pyspark.mllib.regression import LabeledPoint
>>> from glob import glob
>>> from pyspark.mllib.util import MLUtils
>>> examples = [LabeledPoint(1.1, Vectors.sparse(3, [(0, 1.23), (2, 4.56)])),
... LabeledPoint(0.0, Vectors.dense([1.01, 2.02, 3.03]))]
>>> tempFile = NamedTemporaryFile(delete=True)
>>> tempFile.close()
>>> MLUtils.saveAsLibSVMFile(sc.parallelize(examples), tempFile.name)
>>> ''.join(sorted(input(glob(tempFile.name + "/part-0000*"))))
'0.0 1:1.01 2:2.02 3:3.03\\n1.1 1:1.23 3:4.56\\n'
"""
lines = data.map(lambda p: MLUtils._convert_labeled_point_to_libsvm(p))
lines.saveAsTextFile(dir)
|
python
|
def saveAsLibSVMFile(data, dir):
"""
Save labeled data in LIBSVM format.
:param data: an RDD of LabeledPoint to be saved
:param dir: directory to save the data
>>> from tempfile import NamedTemporaryFile
>>> from fileinput import input
>>> from pyspark.mllib.regression import LabeledPoint
>>> from glob import glob
>>> from pyspark.mllib.util import MLUtils
>>> examples = [LabeledPoint(1.1, Vectors.sparse(3, [(0, 1.23), (2, 4.56)])),
... LabeledPoint(0.0, Vectors.dense([1.01, 2.02, 3.03]))]
>>> tempFile = NamedTemporaryFile(delete=True)
>>> tempFile.close()
>>> MLUtils.saveAsLibSVMFile(sc.parallelize(examples), tempFile.name)
>>> ''.join(sorted(input(glob(tempFile.name + "/part-0000*"))))
'0.0 1:1.01 2:2.02 3:3.03\\n1.1 1:1.23 3:4.56\\n'
"""
lines = data.map(lambda p: MLUtils._convert_labeled_point_to_libsvm(p))
lines.saveAsTextFile(dir)
|
[
"def",
"saveAsLibSVMFile",
"(",
"data",
",",
"dir",
")",
":",
"lines",
"=",
"data",
".",
"map",
"(",
"lambda",
"p",
":",
"MLUtils",
".",
"_convert_labeled_point_to_libsvm",
"(",
"p",
")",
")",
"lines",
".",
"saveAsTextFile",
"(",
"dir",
")"
] |
Save labeled data in LIBSVM format.
:param data: an RDD of LabeledPoint to be saved
:param dir: directory to save the data
>>> from tempfile import NamedTemporaryFile
>>> from fileinput import input
>>> from pyspark.mllib.regression import LabeledPoint
>>> from glob import glob
>>> from pyspark.mllib.util import MLUtils
>>> examples = [LabeledPoint(1.1, Vectors.sparse(3, [(0, 1.23), (2, 4.56)])),
... LabeledPoint(0.0, Vectors.dense([1.01, 2.02, 3.03]))]
>>> tempFile = NamedTemporaryFile(delete=True)
>>> tempFile.close()
>>> MLUtils.saveAsLibSVMFile(sc.parallelize(examples), tempFile.name)
>>> ''.join(sorted(input(glob(tempFile.name + "/part-0000*"))))
'0.0 1:1.01 2:2.02 3:3.03\\n1.1 1:1.23 3:4.56\\n'
|
[
"Save",
"labeled",
"data",
"in",
"LIBSVM",
"format",
"."
] |
618d6bff71073c8c93501ab7392c3cc579730f0b
|
https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/mllib/util.py#L126-L147
|
19,216
|
apache/spark
|
python/pyspark/mllib/util.py
|
MLUtils.loadLabeledPoints
|
def loadLabeledPoints(sc, path, minPartitions=None):
"""
Load labeled points saved using RDD.saveAsTextFile.
:param sc: Spark context
:param path: file or directory path in any Hadoop-supported file
system URI
:param minPartitions: min number of partitions
@return: labeled data stored as an RDD of LabeledPoint
>>> from tempfile import NamedTemporaryFile
>>> from pyspark.mllib.util import MLUtils
>>> from pyspark.mllib.regression import LabeledPoint
>>> examples = [LabeledPoint(1.1, Vectors.sparse(3, [(0, -1.23), (2, 4.56e-7)])),
... LabeledPoint(0.0, Vectors.dense([1.01, 2.02, 3.03]))]
>>> tempFile = NamedTemporaryFile(delete=True)
>>> tempFile.close()
>>> sc.parallelize(examples, 1).saveAsTextFile(tempFile.name)
>>> MLUtils.loadLabeledPoints(sc, tempFile.name).collect()
[LabeledPoint(1.1, (3,[0,2],[-1.23,4.56e-07])), LabeledPoint(0.0, [1.01,2.02,3.03])]
"""
minPartitions = minPartitions or min(sc.defaultParallelism, 2)
return callMLlibFunc("loadLabeledPoints", sc, path, minPartitions)
|
python
|
def loadLabeledPoints(sc, path, minPartitions=None):
"""
Load labeled points saved using RDD.saveAsTextFile.
:param sc: Spark context
:param path: file or directory path in any Hadoop-supported file
system URI
:param minPartitions: min number of partitions
@return: labeled data stored as an RDD of LabeledPoint
>>> from tempfile import NamedTemporaryFile
>>> from pyspark.mllib.util import MLUtils
>>> from pyspark.mllib.regression import LabeledPoint
>>> examples = [LabeledPoint(1.1, Vectors.sparse(3, [(0, -1.23), (2, 4.56e-7)])),
... LabeledPoint(0.0, Vectors.dense([1.01, 2.02, 3.03]))]
>>> tempFile = NamedTemporaryFile(delete=True)
>>> tempFile.close()
>>> sc.parallelize(examples, 1).saveAsTextFile(tempFile.name)
>>> MLUtils.loadLabeledPoints(sc, tempFile.name).collect()
[LabeledPoint(1.1, (3,[0,2],[-1.23,4.56e-07])), LabeledPoint(0.0, [1.01,2.02,3.03])]
"""
minPartitions = minPartitions or min(sc.defaultParallelism, 2)
return callMLlibFunc("loadLabeledPoints", sc, path, minPartitions)
|
[
"def",
"loadLabeledPoints",
"(",
"sc",
",",
"path",
",",
"minPartitions",
"=",
"None",
")",
":",
"minPartitions",
"=",
"minPartitions",
"or",
"min",
"(",
"sc",
".",
"defaultParallelism",
",",
"2",
")",
"return",
"callMLlibFunc",
"(",
"\"loadLabeledPoints\"",
",",
"sc",
",",
"path",
",",
"minPartitions",
")"
] |
Load labeled points saved using RDD.saveAsTextFile.
:param sc: Spark context
:param path: file or directory path in any Hadoop-supported file
system URI
:param minPartitions: min number of partitions
@return: labeled data stored as an RDD of LabeledPoint
>>> from tempfile import NamedTemporaryFile
>>> from pyspark.mllib.util import MLUtils
>>> from pyspark.mllib.regression import LabeledPoint
>>> examples = [LabeledPoint(1.1, Vectors.sparse(3, [(0, -1.23), (2, 4.56e-7)])),
... LabeledPoint(0.0, Vectors.dense([1.01, 2.02, 3.03]))]
>>> tempFile = NamedTemporaryFile(delete=True)
>>> tempFile.close()
>>> sc.parallelize(examples, 1).saveAsTextFile(tempFile.name)
>>> MLUtils.loadLabeledPoints(sc, tempFile.name).collect()
[LabeledPoint(1.1, (3,[0,2],[-1.23,4.56e-07])), LabeledPoint(0.0, [1.01,2.02,3.03])]
|
[
"Load",
"labeled",
"points",
"saved",
"using",
"RDD",
".",
"saveAsTextFile",
"."
] |
618d6bff71073c8c93501ab7392c3cc579730f0b
|
https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/mllib/util.py#L151-L173
|
19,217
|
apache/spark
|
python/pyspark/mllib/util.py
|
LinearDataGenerator.generateLinearRDD
|
def generateLinearRDD(sc, nexamples, nfeatures, eps,
nParts=2, intercept=0.0):
"""
Generate an RDD of LabeledPoints.
"""
return callMLlibFunc(
"generateLinearRDDWrapper", sc, int(nexamples), int(nfeatures),
float(eps), int(nParts), float(intercept))
|
python
|
def generateLinearRDD(sc, nexamples, nfeatures, eps,
nParts=2, intercept=0.0):
"""
Generate an RDD of LabeledPoints.
"""
return callMLlibFunc(
"generateLinearRDDWrapper", sc, int(nexamples), int(nfeatures),
float(eps), int(nParts), float(intercept))
|
[
"def",
"generateLinearRDD",
"(",
"sc",
",",
"nexamples",
",",
"nfeatures",
",",
"eps",
",",
"nParts",
"=",
"2",
",",
"intercept",
"=",
"0.0",
")",
":",
"return",
"callMLlibFunc",
"(",
"\"generateLinearRDDWrapper\"",
",",
"sc",
",",
"int",
"(",
"nexamples",
")",
",",
"int",
"(",
"nfeatures",
")",
",",
"float",
"(",
"eps",
")",
",",
"int",
"(",
"nParts",
")",
",",
"float",
"(",
"intercept",
")",
")"
] |
Generate an RDD of LabeledPoints.
|
[
"Generate",
"an",
"RDD",
"of",
"LabeledPoints",
"."
] |
618d6bff71073c8c93501ab7392c3cc579730f0b
|
https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/mllib/util.py#L494-L501
|
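The one-line docstring above gives no usage example. A sketch, assuming an active SparkContext `sc` as in the surrounding doctests; the parameter values are arbitrary:

from pyspark.mllib.util import LinearDataGenerator

# 100 LabeledPoints with 3 features, noise scale 0.1, spread over 2 partitions
rdd = LinearDataGenerator.generateLinearRDD(sc, nexamples=100, nfeatures=3,
                                            eps=0.1, nParts=2, intercept=0.5)
rdd.first()   # a LabeledPoint with a dense 3-element feature vector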
19,218
|
apache/spark
|
python/pyspark/mllib/regression.py
|
IsotonicRegressionModel.save
|
def save(self, sc, path):
"""Save an IsotonicRegressionModel."""
java_boundaries = _py2java(sc, self.boundaries.tolist())
java_predictions = _py2java(sc, self.predictions.tolist())
java_model = sc._jvm.org.apache.spark.mllib.regression.IsotonicRegressionModel(
java_boundaries, java_predictions, self.isotonic)
java_model.save(sc._jsc.sc(), path)
|
python
|
def save(self, sc, path):
"""Save an IsotonicRegressionModel."""
java_boundaries = _py2java(sc, self.boundaries.tolist())
java_predictions = _py2java(sc, self.predictions.tolist())
java_model = sc._jvm.org.apache.spark.mllib.regression.IsotonicRegressionModel(
java_boundaries, java_predictions, self.isotonic)
java_model.save(sc._jsc.sc(), path)
|
[
"def",
"save",
"(",
"self",
",",
"sc",
",",
"path",
")",
":",
"java_boundaries",
"=",
"_py2java",
"(",
"sc",
",",
"self",
".",
"boundaries",
".",
"tolist",
"(",
")",
")",
"java_predictions",
"=",
"_py2java",
"(",
"sc",
",",
"self",
".",
"predictions",
".",
"tolist",
"(",
")",
")",
"java_model",
"=",
"sc",
".",
"_jvm",
".",
"org",
".",
"apache",
".",
"spark",
".",
"mllib",
".",
"regression",
".",
"IsotonicRegressionModel",
"(",
"java_boundaries",
",",
"java_predictions",
",",
"self",
".",
"isotonic",
")",
"java_model",
".",
"save",
"(",
"sc",
".",
"_jsc",
".",
"sc",
"(",
")",
",",
"path",
")"
] |
Save an IsotonicRegressionModel.
|
[
"Save",
"an",
"IsotonicRegressionModel",
"."
] |
618d6bff71073c8c93501ab7392c3cc579730f0b
|
https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/mllib/regression.py#L654-L660
|
19,219
|
apache/spark
|
python/pyspark/mllib/regression.py
|
IsotonicRegressionModel.load
|
def load(cls, sc, path):
"""Load an IsotonicRegressionModel."""
java_model = sc._jvm.org.apache.spark.mllib.regression.IsotonicRegressionModel.load(
sc._jsc.sc(), path)
py_boundaries = _java2py(sc, java_model.boundaryVector()).toArray()
py_predictions = _java2py(sc, java_model.predictionVector()).toArray()
return IsotonicRegressionModel(py_boundaries, py_predictions, java_model.isotonic)
|
python
|
def load(cls, sc, path):
"""Load an IsotonicRegressionModel."""
java_model = sc._jvm.org.apache.spark.mllib.regression.IsotonicRegressionModel.load(
sc._jsc.sc(), path)
py_boundaries = _java2py(sc, java_model.boundaryVector()).toArray()
py_predictions = _java2py(sc, java_model.predictionVector()).toArray()
return IsotonicRegressionModel(py_boundaries, py_predictions, java_model.isotonic)
|
[
"def",
"load",
"(",
"cls",
",",
"sc",
",",
"path",
")",
":",
"java_model",
"=",
"sc",
".",
"_jvm",
".",
"org",
".",
"apache",
".",
"spark",
".",
"mllib",
".",
"regression",
".",
"IsotonicRegressionModel",
".",
"load",
"(",
"sc",
".",
"_jsc",
".",
"sc",
"(",
")",
",",
"path",
")",
"py_boundaries",
"=",
"_java2py",
"(",
"sc",
",",
"java_model",
".",
"boundaryVector",
"(",
")",
")",
".",
"toArray",
"(",
")",
"py_predictions",
"=",
"_java2py",
"(",
"sc",
",",
"java_model",
".",
"predictionVector",
"(",
")",
")",
".",
"toArray",
"(",
")",
"return",
"IsotonicRegressionModel",
"(",
"py_boundaries",
",",
"py_predictions",
",",
"java_model",
".",
"isotonic",
")"
] |
Load an IsotonicRegressionModel.
|
[
"Load",
"an",
"IsotonicRegressionModel",
"."
] |
618d6bff71073c8c93501ab7392c3cc579730f0b
|
https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/mllib/regression.py#L664-L670
|
19,220
|
apache/spark
|
python/pyspark/mllib/regression.py
|
IsotonicRegression.train
|
def train(cls, data, isotonic=True):
"""
Train an isotonic regression model on the given data.
:param data:
RDD of (label, feature, weight) tuples.
:param isotonic:
Whether this is isotonic (which is default) or antitonic.
(default: True)
"""
boundaries, predictions = callMLlibFunc("trainIsotonicRegressionModel",
data.map(_convert_to_vector), bool(isotonic))
return IsotonicRegressionModel(boundaries.toArray(), predictions.toArray(), isotonic)
|
python
|
def train(cls, data, isotonic=True):
"""
Train an isotonic regression model on the given data.
:param data:
RDD of (label, feature, weight) tuples.
:param isotonic:
Whether this is isotonic (which is default) or antitonic.
(default: True)
"""
boundaries, predictions = callMLlibFunc("trainIsotonicRegressionModel",
data.map(_convert_to_vector), bool(isotonic))
return IsotonicRegressionModel(boundaries.toArray(), predictions.toArray(), isotonic)
|
[
"def",
"train",
"(",
"cls",
",",
"data",
",",
"isotonic",
"=",
"True",
")",
":",
"boundaries",
",",
"predictions",
"=",
"callMLlibFunc",
"(",
"\"trainIsotonicRegressionModel\"",
",",
"data",
".",
"map",
"(",
"_convert_to_vector",
")",
",",
"bool",
"(",
"isotonic",
")",
")",
"return",
"IsotonicRegressionModel",
"(",
"boundaries",
".",
"toArray",
"(",
")",
",",
"predictions",
".",
"toArray",
"(",
")",
",",
"isotonic",
")"
] |
Train an isotonic regression model on the given data.
:param data:
RDD of (label, feature, weight) tuples.
:param isotonic:
Whether this is isotonic (which is default) or antitonic.
(default: True)
|
[
"Train",
"an",
"isotonic",
"regression",
"model",
"on",
"the",
"given",
"data",
"."
] |
618d6bff71073c8c93501ab7392c3cc579730f0b
|
https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/mllib/regression.py#L699-L711
|
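The `train` row above documents the (label, feature, weight) input format but has no doctest here. A sketch, assuming an active SparkContext `sc`; the three tuples are made up for illustration:

from pyspark.mllib.regression import IsotonicRegression

data = sc.parallelize([(1.0, 1.0, 1.0), (3.0, 2.0, 1.0), (2.0, 3.0, 1.0)])
model = IsotonicRegression.train(data)   # isotonic=True by default
model.predict(2.5)                       # interpolates between the fitted boundaries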
19,221
|
apache/spark
|
python/pyspark/mllib/linalg/distributed.py
|
RowMatrix.columnSimilarities
|
def columnSimilarities(self, threshold=0.0):
"""
Compute similarities between columns of this matrix.
The threshold parameter is a trade-off knob between estimate
quality and computational cost.
The default threshold setting of 0 guarantees deterministically
correct results, but uses the brute-force approach of computing
normalized dot products.
Setting the threshold to positive values uses a sampling
approach and incurs strictly less computational cost than the
brute-force approach. However the similarities computed will
be estimates.
The sampling guarantees relative-error correctness for those
pairs of columns that have similarity greater than the given
similarity threshold.
To describe the guarantee, we set some notation:
* Let A be the smallest in magnitude non-zero element of
this matrix.
* Let B be the largest in magnitude non-zero element of
this matrix.
* Let L be the maximum number of non-zeros per row.
For example, for {0,1} matrices: A=B=1.
Another example, for the Netflix matrix: A=1, B=5
For those column pairs that are above the threshold, the
computed similarity is correct to within 20% relative error
with probability at least 1 - (0.981)^10/B^
The shuffle size is bounded by the *smaller* of the following
two expressions:
* O(n log(n) L / (threshold * A))
* O(m L^2^)
The latter is the cost of the brute-force approach, so for
non-zero thresholds, the cost is always cheaper than the
brute-force approach.
:param: threshold: Set to 0 for deterministic guaranteed
correctness. Similarities above this
threshold are estimated with the cost vs
estimate quality trade-off described above.
:return: An n x n sparse upper-triangular CoordinateMatrix of
cosine similarities between columns of this matrix.
>>> rows = sc.parallelize([[1, 2], [1, 5]])
>>> mat = RowMatrix(rows)
>>> sims = mat.columnSimilarities()
>>> sims.entries.first().value
0.91914503...
"""
java_sims_mat = self._java_matrix_wrapper.call("columnSimilarities", float(threshold))
return CoordinateMatrix(java_sims_mat)
|
python
|
def columnSimilarities(self, threshold=0.0):
"""
Compute similarities between columns of this matrix.
The threshold parameter is a trade-off knob between estimate
quality and computational cost.
The default threshold setting of 0 guarantees deterministically
correct results, but uses the brute-force approach of computing
normalized dot products.
Setting the threshold to positive values uses a sampling
approach and incurs strictly less computational cost than the
brute-force approach. However the similarities computed will
be estimates.
The sampling guarantees relative-error correctness for those
pairs of columns that have similarity greater than the given
similarity threshold.
To describe the guarantee, we set some notation:
* Let A be the smallest in magnitude non-zero element of
this matrix.
* Let B be the largest in magnitude non-zero element of
this matrix.
* Let L be the maximum number of non-zeros per row.
For example, for {0,1} matrices: A=B=1.
Another example, for the Netflix matrix: A=1, B=5
For those column pairs that are above the threshold, the
computed similarity is correct to within 20% relative error
with probability at least 1 - (0.981)^10/B^
The shuffle size is bounded by the *smaller* of the following
two expressions:
* O(n log(n) L / (threshold * A))
* O(m L^2^)
The latter is the cost of the brute-force approach, so for
non-zero thresholds, the cost is always cheaper than the
brute-force approach.
:param: threshold: Set to 0 for deterministic guaranteed
correctness. Similarities above this
threshold are estimated with the cost vs
estimate quality trade-off described above.
:return: An n x n sparse upper-triangular CoordinateMatrix of
cosine similarities between columns of this matrix.
>>> rows = sc.parallelize([[1, 2], [1, 5]])
>>> mat = RowMatrix(rows)
>>> sims = mat.columnSimilarities()
>>> sims.entries.first().value
0.91914503...
"""
java_sims_mat = self._java_matrix_wrapper.call("columnSimilarities", float(threshold))
return CoordinateMatrix(java_sims_mat)
|
[
"def",
"columnSimilarities",
"(",
"self",
",",
"threshold",
"=",
"0.0",
")",
":",
"java_sims_mat",
"=",
"self",
".",
"_java_matrix_wrapper",
".",
"call",
"(",
"\"columnSimilarities\"",
",",
"float",
"(",
"threshold",
")",
")",
"return",
"CoordinateMatrix",
"(",
"java_sims_mat",
")"
] |
Compute similarities between columns of this matrix.
The threshold parameter is a trade-off knob between estimate
quality and computational cost.
The default threshold setting of 0 guarantees deterministically
correct results, but uses the brute-force approach of computing
normalized dot products.
Setting the threshold to positive values uses a sampling
approach and incurs strictly less computational cost than the
brute-force approach. However the similarities computed will
be estimates.
The sampling guarantees relative-error correctness for those
pairs of columns that have similarity greater than the given
similarity threshold.
To describe the guarantee, we set some notation:
* Let A be the smallest in magnitude non-zero element of
this matrix.
* Let B be the largest in magnitude non-zero element of
this matrix.
* Let L be the maximum number of non-zeros per row.
For example, for {0,1} matrices: A=B=1.
Another example, for the Netflix matrix: A=1, B=5
For those column pairs that are above the threshold, the
computed similarity is correct to within 20% relative error
with probability at least 1 - (0.981)^10/B^
The shuffle size is bounded by the *smaller* of the following
two expressions:
* O(n log(n) L / (threshold * A))
* O(m L^2^)
The latter is the cost of the brute-force approach, so for
non-zero thresholds, the cost is always cheaper than the
brute-force approach.
:param: threshold: Set to 0 for deterministic guaranteed
correctness. Similarities above this
threshold are estimated with the cost vs
estimate quality trade-off described above.
:return: An n x n sparse upper-triangular CoordinateMatrix of
cosine similarities between columns of this matrix.
>>> rows = sc.parallelize([[1, 2], [1, 5]])
>>> mat = RowMatrix(rows)
>>> sims = mat.columnSimilarities()
>>> sims.entries.first().value
0.91914503...
|
[
"Compute",
"similarities",
"between",
"columns",
"of",
"this",
"matrix",
"."
] |
618d6bff71073c8c93501ab7392c3cc579730f0b
|
https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/mllib/linalg/distributed.py#L201-L260
|
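For the brute-force path (threshold = 0), the similarity reported above is just the cosine of the angle between the two columns. A minimal local sketch with NumPy (not part of the dataset row; assumes only that numpy is installed) reproduces the doctest value:

import numpy as np

# Columns of the 2 x 2 matrix used in the columnSimilarities doctest above.
A = np.array([[1.0, 2.0],
              [1.0, 5.0]])
c1, c2 = A[:, 0], A[:, 1]
cosine = c1.dot(c2) / (np.linalg.norm(c1) * np.linalg.norm(c2))
print(cosine)  # ~0.9191450300180578, matching the doctest output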
19,222
|
apache/spark
|
python/pyspark/mllib/linalg/distributed.py
|
RowMatrix.tallSkinnyQR
|
def tallSkinnyQR(self, computeQ=False):
"""
Compute the QR decomposition of this RowMatrix.
The implementation is designed to optimize the QR decomposition
(factorization) for the RowMatrix of a tall and skinny shape.
Reference:
Paul G. Constantine, David F. Gleich. "Tall and skinny QR
factorizations in MapReduce architectures"
([[https://doi.org/10.1145/1996092.1996103]])
:param: computeQ: whether to compute Q
:return: QRDecomposition(Q: RowMatrix, R: Matrix), where
Q = None if computeQ = false.
>>> rows = sc.parallelize([[3, -6], [4, -8], [0, 1]])
>>> mat = RowMatrix(rows)
>>> decomp = mat.tallSkinnyQR(True)
>>> Q = decomp.Q
>>> R = decomp.R
>>> # Test with absolute values
>>> absQRows = Q.rows.map(lambda row: abs(row.toArray()).tolist())
>>> absQRows.collect()
[[0.6..., 0.0], [0.8..., 0.0], [0.0, 1.0]]
>>> # Test with absolute values
>>> abs(R.toArray()).tolist()
[[5.0, 10.0], [0.0, 1.0]]
"""
decomp = JavaModelWrapper(self._java_matrix_wrapper.call("tallSkinnyQR", computeQ))
if computeQ:
java_Q = decomp.call("Q")
Q = RowMatrix(java_Q)
else:
Q = None
R = decomp.call("R")
return QRDecomposition(Q, R)
|
python
|
def tallSkinnyQR(self, computeQ=False):
"""
Compute the QR decomposition of this RowMatrix.
The implementation is designed to optimize the QR decomposition
(factorization) for the RowMatrix of a tall and skinny shape.
Reference:
Paul G. Constantine, David F. Gleich. "Tall and skinny QR
factorizations in MapReduce architectures"
([[https://doi.org/10.1145/1996092.1996103]])
:param: computeQ: whether to compute Q
:return: QRDecomposition(Q: RowMatrix, R: Matrix), where
Q = None if computeQ = false.
>>> rows = sc.parallelize([[3, -6], [4, -8], [0, 1]])
>>> mat = RowMatrix(rows)
>>> decomp = mat.tallSkinnyQR(True)
>>> Q = decomp.Q
>>> R = decomp.R
>>> # Test with absolute values
>>> absQRows = Q.rows.map(lambda row: abs(row.toArray()).tolist())
>>> absQRows.collect()
[[0.6..., 0.0], [0.8..., 0.0], [0.0, 1.0]]
>>> # Test with absolute values
>>> abs(R.toArray()).tolist()
[[5.0, 10.0], [0.0, 1.0]]
"""
decomp = JavaModelWrapper(self._java_matrix_wrapper.call("tallSkinnyQR", computeQ))
if computeQ:
java_Q = decomp.call("Q")
Q = RowMatrix(java_Q)
else:
Q = None
R = decomp.call("R")
return QRDecomposition(Q, R)
|
[
"def",
"tallSkinnyQR",
"(",
"self",
",",
"computeQ",
"=",
"False",
")",
":",
"decomp",
"=",
"JavaModelWrapper",
"(",
"self",
".",
"_java_matrix_wrapper",
".",
"call",
"(",
"\"tallSkinnyQR\"",
",",
"computeQ",
")",
")",
"if",
"computeQ",
":",
"java_Q",
"=",
"decomp",
".",
"call",
"(",
"\"Q\"",
")",
"Q",
"=",
"RowMatrix",
"(",
"java_Q",
")",
"else",
":",
"Q",
"=",
"None",
"R",
"=",
"decomp",
".",
"call",
"(",
"\"R\"",
")",
"return",
"QRDecomposition",
"(",
"Q",
",",
"R",
")"
] |
Compute the QR decomposition of this RowMatrix.
The implementation is designed to optimize the QR decomposition
(factorization) for the RowMatrix of a tall and skinny shape.
Reference:
Paul G. Constantine, David F. Gleich. "Tall and skinny QR
factorizations in MapReduce architectures"
([[https://doi.org/10.1145/1996092.1996103]])
:param: computeQ: whether to compute Q
:return: QRDecomposition(Q: RowMatrix, R: Matrix), where
Q = None if computeQ = false.
>>> rows = sc.parallelize([[3, -6], [4, -8], [0, 1]])
>>> mat = RowMatrix(rows)
>>> decomp = mat.tallSkinnyQR(True)
>>> Q = decomp.Q
>>> R = decomp.R
>>> # Test with absolute values
>>> absQRows = Q.rows.map(lambda row: abs(row.toArray()).tolist())
>>> absQRows.collect()
[[0.6..., 0.0], [0.8..., 0.0], [0.0, 1.0]]
>>> # Test with absolute values
>>> abs(R.toArray()).tolist()
[[5.0, 10.0], [0.0, 1.0]]
|
[
"Compute",
"the",
"QR",
"decomposition",
"of",
"this",
"RowMatrix",
"."
] |
618d6bff71073c8c93501ab7392c3cc579730f0b
|
https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/mllib/linalg/distributed.py#L263-L301
|
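As a sanity check on the factorization itself, a local NumPy QR of the same 3 x 2 matrix gives the same |R| (up to sign) as the distributed doctest. This is an illustration only and assumes numpy is available:

import numpy as np

A = np.array([[3.0, -6.0],
              [4.0, -8.0],
              [0.0, 1.0]])
Q, R = np.linalg.qr(A)           # local QR, same shapes as the distributed result
print(np.abs(R))                 # ~[[ 5. 10.] [ 0.  1.]], matching the doctest up to sign
print(np.allclose(Q.dot(R), A))  # True: Q * R reconstructs A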
19,223
|
apache/spark
|
python/pyspark/mllib/linalg/distributed.py
|
RowMatrix.computeSVD
|
def computeSVD(self, k, computeU=False, rCond=1e-9):
"""
Computes the singular value decomposition of the RowMatrix.
The given row matrix A of dimension (m X n) is decomposed into
U * s * V^T where
* U: (m X k) (left singular vectors) is a RowMatrix whose
columns are the eigenvectors of (A X A')
* s: DenseVector consisting of square root of the eigenvalues
(singular values) in descending order.
* v: (n X k) (right singular vectors) is a Matrix whose columns
are the eigenvectors of (A' X A)
For more specific details on implementation, please refer to
the Scala documentation.
:param k: Number of leading singular values to keep (`0 < k <= n`).
It might return less than k if there are numerically zero singular values
or there are not enough Ritz values converged before the maximum number of
Arnoldi update iterations is reached (in case that matrix A is ill-conditioned).
:param computeU: Whether or not to compute U. If set to be
True, then U is computed by A * V * s^-1
:param rCond: Reciprocal condition number. All singular values
smaller than rCond * s[0] are treated as zero
where s[0] is the largest singular value.
:returns: :py:class:`SingularValueDecomposition`
>>> rows = sc.parallelize([[3, 1, 1], [-1, 3, 1]])
>>> rm = RowMatrix(rows)
>>> svd_model = rm.computeSVD(2, True)
>>> svd_model.U.rows.collect()
[DenseVector([-0.7071, 0.7071]), DenseVector([-0.7071, -0.7071])]
>>> svd_model.s
DenseVector([3.4641, 3.1623])
>>> svd_model.V
DenseMatrix(3, 2, [-0.4082, -0.8165, -0.4082, 0.8944, -0.4472, 0.0], 0)
"""
j_model = self._java_matrix_wrapper.call(
"computeSVD", int(k), bool(computeU), float(rCond))
return SingularValueDecomposition(j_model)
|
python
|
def computeSVD(self, k, computeU=False, rCond=1e-9):
"""
Computes the singular value decomposition of the RowMatrix.
The given row matrix A of dimension (m X n) is decomposed into
U * s * V^T where
* U: (m X k) (left singular vectors) is a RowMatrix whose
columns are the eigenvectors of (A X A')
* s: DenseVector consisting of square root of the eigenvalues
(singular values) in descending order.
* v: (n X k) (right singular vectors) is a Matrix whose columns
are the eigenvectors of (A' X A)
For more specific details on implementation, please refer to
the Scala documentation.
:param k: Number of leading singular values to keep (`0 < k <= n`).
It might return less than k if there are numerically zero singular values
or there are not enough Ritz values converged before the maximum number of
Arnoldi update iterations is reached (in case that matrix A is ill-conditioned).
:param computeU: Whether or not to compute U. If set to be
True, then U is computed by A * V * s^-1
:param rCond: Reciprocal condition number. All singular values
smaller than rCond * s[0] are treated as zero
where s[0] is the largest singular value.
:returns: :py:class:`SingularValueDecomposition`
>>> rows = sc.parallelize([[3, 1, 1], [-1, 3, 1]])
>>> rm = RowMatrix(rows)
>>> svd_model = rm.computeSVD(2, True)
>>> svd_model.U.rows.collect()
[DenseVector([-0.7071, 0.7071]), DenseVector([-0.7071, -0.7071])]
>>> svd_model.s
DenseVector([3.4641, 3.1623])
>>> svd_model.V
DenseMatrix(3, 2, [-0.4082, -0.8165, -0.4082, 0.8944, -0.4472, 0.0], 0)
"""
j_model = self._java_matrix_wrapper.call(
"computeSVD", int(k), bool(computeU), float(rCond))
return SingularValueDecomposition(j_model)
|
[
"def",
"computeSVD",
"(",
"self",
",",
"k",
",",
"computeU",
"=",
"False",
",",
"rCond",
"=",
"1e-9",
")",
":",
"j_model",
"=",
"self",
".",
"_java_matrix_wrapper",
".",
"call",
"(",
"\"computeSVD\"",
",",
"int",
"(",
"k",
")",
",",
"bool",
"(",
"computeU",
")",
",",
"float",
"(",
"rCond",
")",
")",
"return",
"SingularValueDecomposition",
"(",
"j_model",
")"
] |
Computes the singular value decomposition of the RowMatrix.
The given row matrix A of dimension (m X n) is decomposed into
U * s * V^T where
* U: (m X k) (left singular vectors) is a RowMatrix whose
columns are the eigenvectors of (A X A')
* s: DenseVector consisting of square root of the eigenvalues
(singular values) in descending order.
* v: (n X k) (right singular vectors) is a Matrix whose columns
are the eigenvectors of (A' X A)
For more specific details on implementation, please refer to
the Scala documentation.
:param k: Number of leading singular values to keep (`0 < k <= n`).
It might return less than k if there are numerically zero singular values
or there are not enough Ritz values converged before the maximum number of
Arnoldi update iterations is reached (in case that matrix A is ill-conditioned).
:param computeU: Whether or not to compute U. If set to be
True, then U is computed by A * V * s^-1
:param rCond: Reciprocal condition number. All singular values
smaller than rCond * s[0] are treated as zero
where s[0] is the largest singular value.
:returns: :py:class:`SingularValueDecomposition`
>>> rows = sc.parallelize([[3, 1, 1], [-1, 3, 1]])
>>> rm = RowMatrix(rows)
>>> svd_model = rm.computeSVD(2, True)
>>> svd_model.U.rows.collect()
[DenseVector([-0.7071, 0.7071]), DenseVector([-0.7071, -0.7071])]
>>> svd_model.s
DenseVector([3.4641, 3.1623])
>>> svd_model.V
DenseMatrix(3, 2, [-0.4082, -0.8165, -0.4082, 0.8944, -0.4472, 0.0], 0)
|
[
"Computes",
"the",
"singular",
"value",
"decomposition",
"of",
"the",
"RowMatrix",
"."
] |
618d6bff71073c8c93501ab7392c3cc579730f0b
|
https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/mllib/linalg/distributed.py#L304-L345
|
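The same decomposition can be checked locally with NumPy. This sketch (an illustration only, assuming numpy) reproduces the singular values from the doctest and the relation U = A * V * s^-1 described for the computeU parameter:

import numpy as np

A = np.array([[3.0, 1.0, 1.0],
              [-1.0, 3.0, 1.0]])
U, s, Vt = np.linalg.svd(A, full_matrices=False)
print(np.round(s, 4))  # [3.4641 3.1623], the singular values in the doctest
# U follows from A, V and s exactly as the computeU note says: U = A V diag(1/s)
print(np.allclose(U, A.dot(Vt.T).dot(np.diag(1.0 / s))))  # True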
19,224
|
apache/spark
|
python/pyspark/mllib/linalg/distributed.py
|
SingularValueDecomposition.U
|
def U(self):
"""
Returns a distributed matrix whose columns are the left
singular vectors of the SingularValueDecomposition if computeU was set to be True.
"""
u = self.call("U")
if u is not None:
mat_name = u.getClass().getSimpleName()
if mat_name == "RowMatrix":
return RowMatrix(u)
elif mat_name == "IndexedRowMatrix":
return IndexedRowMatrix(u)
else:
raise TypeError("Expected RowMatrix/IndexedRowMatrix got %s" % mat_name)
|
python
|
def U(self):
"""
Returns a distributed matrix whose columns are the left
singular vectors of the SingularValueDecomposition if computeU was set to be True.
"""
u = self.call("U")
if u is not None:
mat_name = u.getClass().getSimpleName()
if mat_name == "RowMatrix":
return RowMatrix(u)
elif mat_name == "IndexedRowMatrix":
return IndexedRowMatrix(u)
else:
raise TypeError("Expected RowMatrix/IndexedRowMatrix got %s" % mat_name)
|
[
"def",
"U",
"(",
"self",
")",
":",
"u",
"=",
"self",
".",
"call",
"(",
"\"U\"",
")",
"if",
"u",
"is",
"not",
"None",
":",
"mat_name",
"=",
"u",
".",
"getClass",
"(",
")",
".",
"getSimpleName",
"(",
")",
"if",
"mat_name",
"==",
"\"RowMatrix\"",
":",
"return",
"RowMatrix",
"(",
"u",
")",
"elif",
"mat_name",
"==",
"\"IndexedRowMatrix\"",
":",
"return",
"IndexedRowMatrix",
"(",
"u",
")",
"else",
":",
"raise",
"TypeError",
"(",
"\"Expected RowMatrix/IndexedRowMatrix got %s\"",
"%",
"mat_name",
")"
] |
Returns a distributed matrix whose columns are the left
singular vectors of the SingularValueDecomposition if computeU was set to be True.
|
[
"Returns",
"a",
"distributed",
"matrix",
"whose",
"columns",
"are",
"the",
"left",
"singular",
"vectors",
"of",
"the",
"SingularValueDecomposition",
"if",
"computeU",
"was",
"set",
"to",
"be",
"True",
"."
] |
618d6bff71073c8c93501ab7392c3cc579730f0b
|
https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/mllib/linalg/distributed.py#L401-L414
|
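A small hypothetical run (assumes a local pyspark installation; the master and app name here are illustrative) shows the behaviour described above: U is only materialized when computeU=True was passed to computeSVD, otherwise the property returns None.

from pyspark import SparkContext
from pyspark.mllib.linalg.distributed import RowMatrix

sc = SparkContext("local[2]", "svd-u-demo")
mat = RowMatrix(sc.parallelize([[3.0, 1.0, 1.0], [-1.0, 3.0, 1.0]]))
print(mat.computeSVD(2, computeU=False).U)                # None
print(type(mat.computeSVD(2, computeU=True).U).__name__)  # RowMatrix
sc.stop()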
19,225
|
apache/spark
|
python/pyspark/mllib/linalg/distributed.py
|
IndexedRowMatrix.rows
|
def rows(self):
"""
Rows of the IndexedRowMatrix stored as an RDD of IndexedRows.
>>> mat = IndexedRowMatrix(sc.parallelize([IndexedRow(0, [1, 2, 3]),
... IndexedRow(1, [4, 5, 6])]))
>>> rows = mat.rows
>>> rows.first()
IndexedRow(0, [1.0,2.0,3.0])
"""
# We use DataFrames for serialization of IndexedRows from
# Java, so we first convert the RDD of rows to a DataFrame
# on the Scala/Java side. Then we map each Row in the
# DataFrame back to an IndexedRow on this side.
rows_df = callMLlibFunc("getIndexedRows", self._java_matrix_wrapper._java_model)
rows = rows_df.rdd.map(lambda row: IndexedRow(row[0], row[1]))
return rows
|
python
|
def rows(self):
"""
Rows of the IndexedRowMatrix stored as an RDD of IndexedRows.
>>> mat = IndexedRowMatrix(sc.parallelize([IndexedRow(0, [1, 2, 3]),
... IndexedRow(1, [4, 5, 6])]))
>>> rows = mat.rows
>>> rows.first()
IndexedRow(0, [1.0,2.0,3.0])
"""
# We use DataFrames for serialization of IndexedRows from
# Java, so we first convert the RDD of rows to a DataFrame
# on the Scala/Java side. Then we map each Row in the
# DataFrame back to an IndexedRow on this side.
rows_df = callMLlibFunc("getIndexedRows", self._java_matrix_wrapper._java_model)
rows = rows_df.rdd.map(lambda row: IndexedRow(row[0], row[1]))
return rows
|
[
"def",
"rows",
"(",
"self",
")",
":",
"# We use DataFrames for serialization of IndexedRows from",
"# Java, so we first convert the RDD of rows to a DataFrame",
"# on the Scala/Java side. Then we map each Row in the",
"# DataFrame back to an IndexedRow on this side.",
"rows_df",
"=",
"callMLlibFunc",
"(",
"\"getIndexedRows\"",
",",
"self",
".",
"_java_matrix_wrapper",
".",
"_java_model",
")",
"rows",
"=",
"rows_df",
".",
"rdd",
".",
"map",
"(",
"lambda",
"row",
":",
"IndexedRow",
"(",
"row",
"[",
"0",
"]",
",",
"row",
"[",
"1",
"]",
")",
")",
"return",
"rows"
] |
Rows of the IndexedRowMatrix stored as an RDD of IndexedRows.
>>> mat = IndexedRowMatrix(sc.parallelize([IndexedRow(0, [1, 2, 3]),
... IndexedRow(1, [4, 5, 6])]))
>>> rows = mat.rows
>>> rows.first()
IndexedRow(0, [1.0,2.0,3.0])
|
[
"Rows",
"of",
"the",
"IndexedRowMatrix",
"stored",
"as",
"an",
"RDD",
"of",
"IndexedRows",
"."
] |
618d6bff71073c8c93501ab7392c3cc579730f0b
|
https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/mllib/linalg/distributed.py#L519-L535
|
19,226
|
apache/spark
|
python/pyspark/mllib/linalg/distributed.py
|
IndexedRowMatrix.toBlockMatrix
|
def toBlockMatrix(self, rowsPerBlock=1024, colsPerBlock=1024):
"""
Convert this matrix to a BlockMatrix.
:param rowsPerBlock: Number of rows that make up each block.
The blocks forming the final rows are not
required to have the given number of rows.
:param colsPerBlock: Number of columns that make up each block.
The blocks forming the final columns are not
required to have the given number of columns.
>>> rows = sc.parallelize([IndexedRow(0, [1, 2, 3]),
... IndexedRow(6, [4, 5, 6])])
>>> mat = IndexedRowMatrix(rows).toBlockMatrix()
>>> # This IndexedRowMatrix will have 7 effective rows, due to
>>> # the highest row index being 6, and the ensuing
>>> # BlockMatrix will have 7 rows as well.
>>> print(mat.numRows())
7
>>> print(mat.numCols())
3
"""
java_block_matrix = self._java_matrix_wrapper.call("toBlockMatrix",
rowsPerBlock,
colsPerBlock)
return BlockMatrix(java_block_matrix, rowsPerBlock, colsPerBlock)
|
python
|
def toBlockMatrix(self, rowsPerBlock=1024, colsPerBlock=1024):
"""
Convert this matrix to a BlockMatrix.
:param rowsPerBlock: Number of rows that make up each block.
The blocks forming the final rows are not
required to have the given number of rows.
:param colsPerBlock: Number of columns that make up each block.
The blocks forming the final columns are not
required to have the given number of columns.
>>> rows = sc.parallelize([IndexedRow(0, [1, 2, 3]),
... IndexedRow(6, [4, 5, 6])])
>>> mat = IndexedRowMatrix(rows).toBlockMatrix()
>>> # This IndexedRowMatrix will have 7 effective rows, due to
>>> # the highest row index being 6, and the ensuing
>>> # BlockMatrix will have 7 rows as well.
>>> print(mat.numRows())
7
>>> print(mat.numCols())
3
"""
java_block_matrix = self._java_matrix_wrapper.call("toBlockMatrix",
rowsPerBlock,
colsPerBlock)
return BlockMatrix(java_block_matrix, rowsPerBlock, colsPerBlock)
|
[
"def",
"toBlockMatrix",
"(",
"self",
",",
"rowsPerBlock",
"=",
"1024",
",",
"colsPerBlock",
"=",
"1024",
")",
":",
"java_block_matrix",
"=",
"self",
".",
"_java_matrix_wrapper",
".",
"call",
"(",
"\"toBlockMatrix\"",
",",
"rowsPerBlock",
",",
"colsPerBlock",
")",
"return",
"BlockMatrix",
"(",
"java_block_matrix",
",",
"rowsPerBlock",
",",
"colsPerBlock",
")"
] |
Convert this matrix to a BlockMatrix.
:param rowsPerBlock: Number of rows that make up each block.
The blocks forming the final rows are not
required to have the given number of rows.
:param colsPerBlock: Number of columns that make up each block.
The blocks forming the final columns are not
required to have the given number of columns.
>>> rows = sc.parallelize([IndexedRow(0, [1, 2, 3]),
... IndexedRow(6, [4, 5, 6])])
>>> mat = IndexedRowMatrix(rows).toBlockMatrix()
>>> # This IndexedRowMatrix will have 7 effective rows, due to
>>> # the highest row index being 6, and the ensuing
>>> # BlockMatrix will have 7 rows as well.
>>> print(mat.numRows())
7
>>> print(mat.numCols())
3
|
[
"Convert",
"this",
"matrix",
"to",
"a",
"BlockMatrix",
"."
] |
618d6bff71073c8c93501ab7392c3cc579730f0b
|
https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/mllib/linalg/distributed.py#L631-L658
|
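The block grid is a ceiling division of the matrix dimensions by the block sizes. A tiny pure-Python sketch (illustrative only) for the 7 x 3 matrix in the doctest with the default 1024 x 1024 blocks:

import math

num_rows, num_cols = 7, 3             # effective size from the doctest above
rows_per_block, cols_per_block = 1024, 1024
row_blocks = math.ceil(num_rows / rows_per_block)
col_blocks = math.ceil(num_cols / cols_per_block)
print(row_blocks, col_blocks, row_blocks * col_blocks)  # 1 1 1 -- one block holds everything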
19,227
|
apache/spark
|
python/pyspark/mllib/linalg/distributed.py
|
CoordinateMatrix.entries
|
def entries(self):
"""
Entries of the CoordinateMatrix stored as an RDD of
MatrixEntries.
>>> mat = CoordinateMatrix(sc.parallelize([MatrixEntry(0, 0, 1.2),
... MatrixEntry(6, 4, 2.1)]))
>>> entries = mat.entries
>>> entries.first()
MatrixEntry(0, 0, 1.2)
"""
# We use DataFrames for serialization of MatrixEntry entries
# from Java, so we first convert the RDD of entries to a
# DataFrame on the Scala/Java side. Then we map each Row in
# the DataFrame back to a MatrixEntry on this side.
entries_df = callMLlibFunc("getMatrixEntries", self._java_matrix_wrapper._java_model)
entries = entries_df.rdd.map(lambda row: MatrixEntry(row[0], row[1], row[2]))
return entries
|
python
|
def entries(self):
"""
Entries of the CoordinateMatrix stored as an RDD of
MatrixEntries.
>>> mat = CoordinateMatrix(sc.parallelize([MatrixEntry(0, 0, 1.2),
... MatrixEntry(6, 4, 2.1)]))
>>> entries = mat.entries
>>> entries.first()
MatrixEntry(0, 0, 1.2)
"""
# We use DataFrames for serialization of MatrixEntry entries
# from Java, so we first convert the RDD of entries to a
# DataFrame on the Scala/Java side. Then we map each Row in
# the DataFrame back to a MatrixEntry on this side.
entries_df = callMLlibFunc("getMatrixEntries", self._java_matrix_wrapper._java_model)
entries = entries_df.rdd.map(lambda row: MatrixEntry(row[0], row[1], row[2]))
return entries
|
[
"def",
"entries",
"(",
"self",
")",
":",
"# We use DataFrames for serialization of MatrixEntry entries",
"# from Java, so we first convert the RDD of entries to a",
"# DataFrame on the Scala/Java side. Then we map each Row in",
"# the DataFrame back to a MatrixEntry on this side.",
"entries_df",
"=",
"callMLlibFunc",
"(",
"\"getMatrixEntries\"",
",",
"self",
".",
"_java_matrix_wrapper",
".",
"_java_model",
")",
"entries",
"=",
"entries_df",
".",
"rdd",
".",
"map",
"(",
"lambda",
"row",
":",
"MatrixEntry",
"(",
"row",
"[",
"0",
"]",
",",
"row",
"[",
"1",
"]",
",",
"row",
"[",
"2",
"]",
")",
")",
"return",
"entries"
] |
Entries of the CoordinateMatrix stored as an RDD of
MatrixEntries.
>>> mat = CoordinateMatrix(sc.parallelize([MatrixEntry(0, 0, 1.2),
... MatrixEntry(6, 4, 2.1)]))
>>> entries = mat.entries
>>> entries.first()
MatrixEntry(0, 0, 1.2)
|
[
"Entries",
"of",
"the",
"CoordinateMatrix",
"stored",
"as",
"an",
"RDD",
"of",
"MatrixEntries",
"."
] |
618d6bff71073c8c93501ab7392c3cc579730f0b
|
https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/mllib/linalg/distributed.py#L811-L828
|
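Conceptually each MatrixEntry is an (i, j, value) triple. A local NumPy sketch (illustration only) densifies the two entries from the doctest:

import numpy as np

entries = [(0, 0, 1.2), (6, 4, 2.1)]   # the MatrixEntry triples in the doctest
dense = np.zeros((7, 5))               # numRows = max i + 1, numCols = max j + 1
for i, j, v in entries:
    dense[i, j] = v
print(dense[0, 0], dense[6, 4])        # 1.2 2.1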
19,228
|
apache/spark
|
python/pyspark/mllib/linalg/distributed.py
|
BlockMatrix.persist
|
def persist(self, storageLevel):
"""
Persists the underlying RDD with the specified storage level.
"""
if not isinstance(storageLevel, StorageLevel):
raise TypeError("`storageLevel` should be a StorageLevel, got %s" % type(storageLevel))
javaStorageLevel = self._java_matrix_wrapper._sc._getJavaStorageLevel(storageLevel)
self._java_matrix_wrapper.call("persist", javaStorageLevel)
return self
|
python
|
def persist(self, storageLevel):
"""
Persists the underlying RDD with the specified storage level.
"""
if not isinstance(storageLevel, StorageLevel):
raise TypeError("`storageLevel` should be a StorageLevel, got %s" % type(storageLevel))
javaStorageLevel = self._java_matrix_wrapper._sc._getJavaStorageLevel(storageLevel)
self._java_matrix_wrapper.call("persist", javaStorageLevel)
return self
|
[
"def",
"persist",
"(",
"self",
",",
"storageLevel",
")",
":",
"if",
"not",
"isinstance",
"(",
"storageLevel",
",",
"StorageLevel",
")",
":",
"raise",
"TypeError",
"(",
"\"`storageLevel` should be a StorageLevel, got %s\"",
"%",
"type",
"(",
"storageLevel",
")",
")",
"javaStorageLevel",
"=",
"self",
".",
"_java_matrix_wrapper",
".",
"_sc",
".",
"_getJavaStorageLevel",
"(",
"storageLevel",
")",
"self",
".",
"_java_matrix_wrapper",
".",
"call",
"(",
"\"persist\"",
",",
"javaStorageLevel",
")",
"return",
"self"
] |
Persists the underlying RDD with the specified storage level.
|
[
"Persists",
"the",
"underlying",
"RDD",
"with",
"the",
"specified",
"storage",
"level",
"."
] |
618d6bff71073c8c93501ab7392c3cc579730f0b
|
https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/mllib/linalg/distributed.py#L1168-L1176
|
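A short hypothetical usage sketch (assumes a local pyspark installation; the master and app name are illustrative): persist returns the matrix itself, so the call can be chained, and a non-StorageLevel argument raises the TypeError shown in the code.

from pyspark import SparkContext, StorageLevel
from pyspark.mllib.linalg import Matrices
from pyspark.mllib.linalg.distributed import BlockMatrix

sc = SparkContext("local[2]", "persist-demo")
blocks = sc.parallelize([((0, 0), Matrices.dense(3, 2, [1, 2, 3, 4, 5, 6]))])
mat = BlockMatrix(blocks, 3, 2).persist(StorageLevel.MEMORY_AND_DISK)
print(mat.numRows(), mat.numCols())   # 3 2
try:
    mat.persist("MEMORY_ONLY")        # wrong type on purpose
except TypeError as e:
    print(e)
sc.stop()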
19,229
|
apache/spark
|
python/pyspark/mllib/linalg/distributed.py
|
BlockMatrix.add
|
def add(self, other):
"""
Adds two block matrices together. The matrices must have the
same size and matching `rowsPerBlock` and `colsPerBlock` values.
If one of the sub matrix blocks that are being added is a
SparseMatrix, the resulting sub matrix block will also be a
SparseMatrix, even if it is being added to a DenseMatrix. If
two dense sub matrix blocks are added, the output block will
also be a DenseMatrix.
>>> dm1 = Matrices.dense(3, 2, [1, 2, 3, 4, 5, 6])
>>> dm2 = Matrices.dense(3, 2, [7, 8, 9, 10, 11, 12])
>>> sm = Matrices.sparse(3, 2, [0, 1, 3], [0, 1, 2], [7, 11, 12])
>>> blocks1 = sc.parallelize([((0, 0), dm1), ((1, 0), dm2)])
>>> blocks2 = sc.parallelize([((0, 0), dm1), ((1, 0), dm2)])
>>> blocks3 = sc.parallelize([((0, 0), sm), ((1, 0), dm2)])
>>> mat1 = BlockMatrix(blocks1, 3, 2)
>>> mat2 = BlockMatrix(blocks2, 3, 2)
>>> mat3 = BlockMatrix(blocks3, 3, 2)
>>> mat1.add(mat2).toLocalMatrix()
DenseMatrix(6, 2, [2.0, 4.0, 6.0, 14.0, 16.0, 18.0, 8.0, 10.0, 12.0, 20.0, 22.0, 24.0], 0)
>>> mat1.add(mat3).toLocalMatrix()
DenseMatrix(6, 2, [8.0, 2.0, 3.0, 14.0, 16.0, 18.0, 4.0, 16.0, 18.0, 20.0, 22.0, 24.0], 0)
"""
if not isinstance(other, BlockMatrix):
raise TypeError("Other should be a BlockMatrix, got %s" % type(other))
other_java_block_matrix = other._java_matrix_wrapper._java_model
java_block_matrix = self._java_matrix_wrapper.call("add", other_java_block_matrix)
return BlockMatrix(java_block_matrix, self.rowsPerBlock, self.colsPerBlock)
|
python
|
def add(self, other):
"""
Adds two block matrices together. The matrices must have the
same size and matching `rowsPerBlock` and `colsPerBlock` values.
If one of the sub matrix blocks that are being added is a
SparseMatrix, the resulting sub matrix block will also be a
SparseMatrix, even if it is being added to a DenseMatrix. If
two dense sub matrix blocks are added, the output block will
also be a DenseMatrix.
>>> dm1 = Matrices.dense(3, 2, [1, 2, 3, 4, 5, 6])
>>> dm2 = Matrices.dense(3, 2, [7, 8, 9, 10, 11, 12])
>>> sm = Matrices.sparse(3, 2, [0, 1, 3], [0, 1, 2], [7, 11, 12])
>>> blocks1 = sc.parallelize([((0, 0), dm1), ((1, 0), dm2)])
>>> blocks2 = sc.parallelize([((0, 0), dm1), ((1, 0), dm2)])
>>> blocks3 = sc.parallelize([((0, 0), sm), ((1, 0), dm2)])
>>> mat1 = BlockMatrix(blocks1, 3, 2)
>>> mat2 = BlockMatrix(blocks2, 3, 2)
>>> mat3 = BlockMatrix(blocks3, 3, 2)
>>> mat1.add(mat2).toLocalMatrix()
DenseMatrix(6, 2, [2.0, 4.0, 6.0, 14.0, 16.0, 18.0, 8.0, 10.0, 12.0, 20.0, 22.0, 24.0], 0)
>>> mat1.add(mat3).toLocalMatrix()
DenseMatrix(6, 2, [8.0, 2.0, 3.0, 14.0, 16.0, 18.0, 4.0, 16.0, 18.0, 20.0, 22.0, 24.0], 0)
"""
if not isinstance(other, BlockMatrix):
raise TypeError("Other should be a BlockMatrix, got %s" % type(other))
other_java_block_matrix = other._java_matrix_wrapper._java_model
java_block_matrix = self._java_matrix_wrapper.call("add", other_java_block_matrix)
return BlockMatrix(java_block_matrix, self.rowsPerBlock, self.colsPerBlock)
|
[
"def",
"add",
"(",
"self",
",",
"other",
")",
":",
"if",
"not",
"isinstance",
"(",
"other",
",",
"BlockMatrix",
")",
":",
"raise",
"TypeError",
"(",
"\"Other should be a BlockMatrix, got %s\"",
"%",
"type",
"(",
"other",
")",
")",
"other_java_block_matrix",
"=",
"other",
".",
"_java_matrix_wrapper",
".",
"_java_model",
"java_block_matrix",
"=",
"self",
".",
"_java_matrix_wrapper",
".",
"call",
"(",
"\"add\"",
",",
"other_java_block_matrix",
")",
"return",
"BlockMatrix",
"(",
"java_block_matrix",
",",
"self",
".",
"rowsPerBlock",
",",
"self",
".",
"colsPerBlock",
")"
] |
Adds two block matrices together. The matrices must have the
same size and matching `rowsPerBlock` and `colsPerBlock` values.
If one of the sub matrix blocks that are being added is a
SparseMatrix, the resulting sub matrix block will also be a
SparseMatrix, even if it is being added to a DenseMatrix. If
two dense sub matrix blocks are added, the output block will
also be a DenseMatrix.
>>> dm1 = Matrices.dense(3, 2, [1, 2, 3, 4, 5, 6])
>>> dm2 = Matrices.dense(3, 2, [7, 8, 9, 10, 11, 12])
>>> sm = Matrices.sparse(3, 2, [0, 1, 3], [0, 1, 2], [7, 11, 12])
>>> blocks1 = sc.parallelize([((0, 0), dm1), ((1, 0), dm2)])
>>> blocks2 = sc.parallelize([((0, 0), dm1), ((1, 0), dm2)])
>>> blocks3 = sc.parallelize([((0, 0), sm), ((1, 0), dm2)])
>>> mat1 = BlockMatrix(blocks1, 3, 2)
>>> mat2 = BlockMatrix(blocks2, 3, 2)
>>> mat3 = BlockMatrix(blocks3, 3, 2)
>>> mat1.add(mat2).toLocalMatrix()
DenseMatrix(6, 2, [2.0, 4.0, 6.0, 14.0, 16.0, 18.0, 8.0, 10.0, 12.0, 20.0, 22.0, 24.0], 0)
>>> mat1.add(mat3).toLocalMatrix()
DenseMatrix(6, 2, [8.0, 2.0, 3.0, 14.0, 16.0, 18.0, 4.0, 16.0, 18.0, 20.0, 22.0, 24.0], 0)
|
[
"Adds",
"two",
"block",
"matrices",
"together",
".",
"The",
"matrices",
"must",
"have",
"the",
"same",
"size",
"and",
"matching",
"rowsPerBlock",
"and",
"colsPerBlock",
"values",
".",
"If",
"one",
"of",
"the",
"sub",
"matrix",
"blocks",
"that",
"are",
"being",
"added",
"is",
"a",
"SparseMatrix",
"the",
"resulting",
"sub",
"matrix",
"block",
"will",
"also",
"be",
"a",
"SparseMatrix",
"even",
"if",
"it",
"is",
"being",
"added",
"to",
"a",
"DenseMatrix",
".",
"If",
"two",
"dense",
"sub",
"matrix",
"blocks",
"are",
"added",
"the",
"output",
"block",
"will",
"also",
"be",
"a",
"DenseMatrix",
"."
] |
618d6bff71073c8c93501ab7392c3cc579730f0b
|
https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/mllib/linalg/distributed.py#L1186-L1217
|
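Blockwise addition is elementwise addition applied block by block. A local NumPy sketch (illustration only, using the two dense blocks from the doctest in column-major layout) checks that reassembling the summed blocks equals summing the assembled matrix:

import numpy as np

top = np.array([[1.0, 4.0], [2.0, 5.0], [3.0, 6.0]])        # block (0, 0): dm1
bottom = np.array([[7.0, 10.0], [8.0, 11.0], [9.0, 12.0]])  # block (1, 0): dm2
full = np.vstack([top, bottom])
block_sum = np.vstack([top + top, bottom + bottom])
print(np.array_equal(block_sum, full + full))               # True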
19,230
|
apache/spark
|
python/pyspark/mllib/linalg/distributed.py
|
BlockMatrix.transpose
|
def transpose(self):
"""
Transpose this BlockMatrix. Returns a new BlockMatrix
instance sharing the same underlying data. This is a lazy operation.
>>> blocks = sc.parallelize([((0, 0), Matrices.dense(3, 2, [1, 2, 3, 4, 5, 6])),
... ((1, 0), Matrices.dense(3, 2, [7, 8, 9, 10, 11, 12]))])
>>> mat = BlockMatrix(blocks, 3, 2)
>>> mat_transposed = mat.transpose()
>>> mat_transposed.toLocalMatrix()
DenseMatrix(2, 6, [1.0, 4.0, 2.0, 5.0, 3.0, 6.0, 7.0, 10.0, 8.0, 11.0, 9.0, 12.0], 0)
"""
java_transposed_matrix = self._java_matrix_wrapper.call("transpose")
return BlockMatrix(java_transposed_matrix, self.colsPerBlock, self.rowsPerBlock)
|
python
|
def transpose(self):
"""
Transpose this BlockMatrix. Returns a new BlockMatrix
instance sharing the same underlying data. This is a lazy operation.
>>> blocks = sc.parallelize([((0, 0), Matrices.dense(3, 2, [1, 2, 3, 4, 5, 6])),
... ((1, 0), Matrices.dense(3, 2, [7, 8, 9, 10, 11, 12]))])
>>> mat = BlockMatrix(blocks, 3, 2)
>>> mat_transposed = mat.transpose()
>>> mat_transposed.toLocalMatrix()
DenseMatrix(2, 6, [1.0, 4.0, 2.0, 5.0, 3.0, 6.0, 7.0, 10.0, 8.0, 11.0, 9.0, 12.0], 0)
"""
java_transposed_matrix = self._java_matrix_wrapper.call("transpose")
return BlockMatrix(java_transposed_matrix, self.colsPerBlock, self.rowsPerBlock)
|
[
"def",
"transpose",
"(",
"self",
")",
":",
"java_transposed_matrix",
"=",
"self",
".",
"_java_matrix_wrapper",
".",
"call",
"(",
"\"transpose\"",
")",
"return",
"BlockMatrix",
"(",
"java_transposed_matrix",
",",
"self",
".",
"colsPerBlock",
",",
"self",
".",
"rowsPerBlock",
")"
] |
Transpose this BlockMatrix. Returns a new BlockMatrix
instance sharing the same underlying data. This is a lazy operation.
>>> blocks = sc.parallelize([((0, 0), Matrices.dense(3, 2, [1, 2, 3, 4, 5, 6])),
... ((1, 0), Matrices.dense(3, 2, [7, 8, 9, 10, 11, 12]))])
>>> mat = BlockMatrix(blocks, 3, 2)
>>> mat_transposed = mat.transpose()
>>> mat_transposed.toLocalMatrix()
DenseMatrix(2, 6, [1.0, 4.0, 2.0, 5.0, 3.0, 6.0, 7.0, 10.0, 8.0, 11.0, 9.0, 12.0], 0)
|
[
"Transpose",
"this",
"BlockMatrix",
".",
"Returns",
"a",
"new",
"BlockMatrix",
"instance",
"sharing",
"the",
"same",
"underlying",
"data",
".",
"Is",
"a",
"lazy",
"operation",
"."
] |
618d6bff71073c8c93501ab7392c3cc579730f0b
|
https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/mllib/linalg/distributed.py#L1290-L1304
|
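Transposing a BlockMatrix amounts to swapping each block index (i, j) -> (j, i) and transposing the block itself. A local NumPy sketch (illustration only) checks this against a plain transpose of the assembled matrix:

import numpy as np

blocks = {(0, 0): np.array([[1.0, 4.0], [2.0, 5.0], [3.0, 6.0]]),
          (1, 0): np.array([[7.0, 10.0], [8.0, 11.0], [9.0, 12.0]])}
transposed = {(j, i): block.T for (i, j), block in blocks.items()}
full = np.vstack([blocks[(0, 0)], blocks[(1, 0)]])
full_t = np.hstack([transposed[(0, 0)], transposed[(0, 1)]])
print(np.array_equal(full_t, full.T))   # True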
19,231
|
apache/spark
|
python/pyspark/mllib/linalg/__init__.py
|
_vector_size
|
def _vector_size(v):
"""
Returns the size of the vector.
>>> _vector_size([1., 2., 3.])
3
>>> _vector_size((1., 2., 3.))
3
>>> _vector_size(array.array('d', [1., 2., 3.]))
3
>>> _vector_size(np.zeros(3))
3
>>> _vector_size(np.zeros((3, 1)))
3
>>> _vector_size(np.zeros((1, 3)))
Traceback (most recent call last):
...
ValueError: Cannot treat an ndarray of shape (1, 3) as a vector
"""
if isinstance(v, Vector):
return len(v)
elif type(v) in (array.array, list, tuple, xrange):
return len(v)
elif type(v) == np.ndarray:
if v.ndim == 1 or (v.ndim == 2 and v.shape[1] == 1):
return len(v)
else:
raise ValueError("Cannot treat an ndarray of shape %s as a vector" % str(v.shape))
elif _have_scipy and scipy.sparse.issparse(v):
assert v.shape[1] == 1, "Expected column vector"
return v.shape[0]
else:
raise TypeError("Cannot treat type %s as a vector" % type(v))
|
python
|
def _vector_size(v):
"""
Returns the size of the vector.
>>> _vector_size([1., 2., 3.])
3
>>> _vector_size((1., 2., 3.))
3
>>> _vector_size(array.array('d', [1., 2., 3.]))
3
>>> _vector_size(np.zeros(3))
3
>>> _vector_size(np.zeros((3, 1)))
3
>>> _vector_size(np.zeros((1, 3)))
Traceback (most recent call last):
...
ValueError: Cannot treat an ndarray of shape (1, 3) as a vector
"""
if isinstance(v, Vector):
return len(v)
elif type(v) in (array.array, list, tuple, xrange):
return len(v)
elif type(v) == np.ndarray:
if v.ndim == 1 or (v.ndim == 2 and v.shape[1] == 1):
return len(v)
else:
raise ValueError("Cannot treat an ndarray of shape %s as a vector" % str(v.shape))
elif _have_scipy and scipy.sparse.issparse(v):
assert v.shape[1] == 1, "Expected column vector"
return v.shape[0]
else:
raise TypeError("Cannot treat type %s as a vector" % type(v))
|
[
"def",
"_vector_size",
"(",
"v",
")",
":",
"if",
"isinstance",
"(",
"v",
",",
"Vector",
")",
":",
"return",
"len",
"(",
"v",
")",
"elif",
"type",
"(",
"v",
")",
"in",
"(",
"array",
".",
"array",
",",
"list",
",",
"tuple",
",",
"xrange",
")",
":",
"return",
"len",
"(",
"v",
")",
"elif",
"type",
"(",
"v",
")",
"==",
"np",
".",
"ndarray",
":",
"if",
"v",
".",
"ndim",
"==",
"1",
"or",
"(",
"v",
".",
"ndim",
"==",
"2",
"and",
"v",
".",
"shape",
"[",
"1",
"]",
"==",
"1",
")",
":",
"return",
"len",
"(",
"v",
")",
"else",
":",
"raise",
"ValueError",
"(",
"\"Cannot treat an ndarray of shape %s as a vector\"",
"%",
"str",
"(",
"v",
".",
"shape",
")",
")",
"elif",
"_have_scipy",
"and",
"scipy",
".",
"sparse",
".",
"issparse",
"(",
"v",
")",
":",
"assert",
"v",
".",
"shape",
"[",
"1",
"]",
"==",
"1",
",",
"\"Expected column vector\"",
"return",
"v",
".",
"shape",
"[",
"0",
"]",
"else",
":",
"raise",
"TypeError",
"(",
"\"Cannot treat type %s as a vector\"",
"%",
"type",
"(",
"v",
")",
")"
] |
Returns the size of the vector.
>>> _vector_size([1., 2., 3.])
3
>>> _vector_size((1., 2., 3.))
3
>>> _vector_size(array.array('d', [1., 2., 3.]))
3
>>> _vector_size(np.zeros(3))
3
>>> _vector_size(np.zeros((3, 1)))
3
>>> _vector_size(np.zeros((1, 3)))
Traceback (most recent call last):
...
ValueError: Cannot treat an ndarray of shape (1, 3) as a vector
|
[
"Returns",
"the",
"size",
"of",
"the",
"vector",
"."
] |
618d6bff71073c8c93501ab7392c3cc579730f0b
|
https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/mllib/linalg/__init__.py#L86-L118
|
19,232
|
apache/spark
|
python/pyspark/mllib/linalg/__init__.py
|
DenseVector.parse
|
def parse(s):
"""
Parse string representation back into the DenseVector.
>>> DenseVector.parse(' [ 0.0,1.0,2.0, 3.0]')
DenseVector([0.0, 1.0, 2.0, 3.0])
"""
start = s.find('[')
if start == -1:
raise ValueError("Array should start with '['.")
end = s.find(']')
if end == -1:
raise ValueError("Array should end with ']'.")
s = s[start + 1: end]
try:
values = [float(val) for val in s.split(',') if val]
except ValueError:
raise ValueError("Unable to parse values from %s" % s)
return DenseVector(values)
|
python
|
def parse(s):
"""
Parse string representation back into the DenseVector.
>>> DenseVector.parse(' [ 0.0,1.0,2.0, 3.0]')
DenseVector([0.0, 1.0, 2.0, 3.0])
"""
start = s.find('[')
if start == -1:
raise ValueError("Array should start with '['.")
end = s.find(']')
if end == -1:
raise ValueError("Array should end with ']'.")
s = s[start + 1: end]
try:
values = [float(val) for val in s.split(',') if val]
except ValueError:
raise ValueError("Unable to parse values from %s" % s)
return DenseVector(values)
|
[
"def",
"parse",
"(",
"s",
")",
":",
"start",
"=",
"s",
".",
"find",
"(",
"'['",
")",
"if",
"start",
"==",
"-",
"1",
":",
"raise",
"ValueError",
"(",
"\"Array should start with '['.\"",
")",
"end",
"=",
"s",
".",
"find",
"(",
"']'",
")",
"if",
"end",
"==",
"-",
"1",
":",
"raise",
"ValueError",
"(",
"\"Array should end with ']'.\"",
")",
"s",
"=",
"s",
"[",
"start",
"+",
"1",
":",
"end",
"]",
"try",
":",
"values",
"=",
"[",
"float",
"(",
"val",
")",
"for",
"val",
"in",
"s",
".",
"split",
"(",
"','",
")",
"if",
"val",
"]",
"except",
"ValueError",
":",
"raise",
"ValueError",
"(",
"\"Unable to parse values from %s\"",
"%",
"s",
")",
"return",
"DenseVector",
"(",
"values",
")"
] |
Parse string representation back into the DenseVector.
>>> DenseVector.parse(' [ 0.0,1.0,2.0, 3.0]')
DenseVector([0.0, 1.0, 2.0, 3.0])
|
[
"Parse",
"string",
"representation",
"back",
"into",
"the",
"DenseVector",
"."
] |
618d6bff71073c8c93501ab7392c3cc579730f0b
|
https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/mllib/linalg/__init__.py#L297-L316
|
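parse() accepts the same bracketed text that str() on a DenseVector produces, so the two round-trip. A small sketch (pyspark.mllib.linalg can be imported without a running SparkContext, assuming pyspark is installed):

from pyspark.mllib.linalg import DenseVector

v = DenseVector([0.0, 1.0, 2.0, 3.0])
s = str(v)                        # '[0.0,1.0,2.0,3.0]'
print(DenseVector.parse(s))       # DenseVector([0.0, 1.0, 2.0, 3.0])
print(DenseVector.parse(s) == v)  # True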
19,233
|
apache/spark
|
python/pyspark/mllib/linalg/__init__.py
|
DenseVector.squared_distance
|
def squared_distance(self, other):
"""
Squared distance of two Vectors.
>>> dense1 = DenseVector(array.array('d', [1., 2.]))
>>> dense1.squared_distance(dense1)
0.0
>>> dense2 = np.array([2., 1.])
>>> dense1.squared_distance(dense2)
2.0
>>> dense3 = [2., 1.]
>>> dense1.squared_distance(dense3)
2.0
>>> sparse1 = SparseVector(2, [0, 1], [2., 1.])
>>> dense1.squared_distance(sparse1)
2.0
>>> dense1.squared_distance([1.,])
Traceback (most recent call last):
...
AssertionError: dimension mismatch
>>> dense1.squared_distance(SparseVector(1, [0,], [1.,]))
Traceback (most recent call last):
...
AssertionError: dimension mismatch
"""
assert len(self) == _vector_size(other), "dimension mismatch"
if isinstance(other, SparseVector):
return other.squared_distance(self)
elif _have_scipy and scipy.sparse.issparse(other):
return _convert_to_vector(other).squared_distance(self)
if isinstance(other, Vector):
other = other.toArray()
elif not isinstance(other, np.ndarray):
other = np.array(other)
diff = self.toArray() - other
return np.dot(diff, diff)
|
python
|
def squared_distance(self, other):
"""
Squared distance of two Vectors.
>>> dense1 = DenseVector(array.array('d', [1., 2.]))
>>> dense1.squared_distance(dense1)
0.0
>>> dense2 = np.array([2., 1.])
>>> dense1.squared_distance(dense2)
2.0
>>> dense3 = [2., 1.]
>>> dense1.squared_distance(dense3)
2.0
>>> sparse1 = SparseVector(2, [0, 1], [2., 1.])
>>> dense1.squared_distance(sparse1)
2.0
>>> dense1.squared_distance([1.,])
Traceback (most recent call last):
...
AssertionError: dimension mismatch
>>> dense1.squared_distance(SparseVector(1, [0,], [1.,]))
Traceback (most recent call last):
...
AssertionError: dimension mismatch
"""
assert len(self) == _vector_size(other), "dimension mismatch"
if isinstance(other, SparseVector):
return other.squared_distance(self)
elif _have_scipy and scipy.sparse.issparse(other):
return _convert_to_vector(other).squared_distance(self)
if isinstance(other, Vector):
other = other.toArray()
elif not isinstance(other, np.ndarray):
other = np.array(other)
diff = self.toArray() - other
return np.dot(diff, diff)
|
[
"def",
"squared_distance",
"(",
"self",
",",
"other",
")",
":",
"assert",
"len",
"(",
"self",
")",
"==",
"_vector_size",
"(",
"other",
")",
",",
"\"dimension mismatch\"",
"if",
"isinstance",
"(",
"other",
",",
"SparseVector",
")",
":",
"return",
"other",
".",
"squared_distance",
"(",
"self",
")",
"elif",
"_have_scipy",
"and",
"scipy",
".",
"sparse",
".",
"issparse",
"(",
"other",
")",
":",
"return",
"_convert_to_vector",
"(",
"other",
")",
".",
"squared_distance",
"(",
"self",
")",
"if",
"isinstance",
"(",
"other",
",",
"Vector",
")",
":",
"other",
"=",
"other",
".",
"toArray",
"(",
")",
"elif",
"not",
"isinstance",
"(",
"other",
",",
"np",
".",
"ndarray",
")",
":",
"other",
"=",
"np",
".",
"array",
"(",
"other",
")",
"diff",
"=",
"self",
".",
"toArray",
"(",
")",
"-",
"other",
"return",
"np",
".",
"dot",
"(",
"diff",
",",
"diff",
")"
] |
Squared distance of two Vectors.
>>> dense1 = DenseVector(array.array('d', [1., 2.]))
>>> dense1.squared_distance(dense1)
0.0
>>> dense2 = np.array([2., 1.])
>>> dense1.squared_distance(dense2)
2.0
>>> dense3 = [2., 1.]
>>> dense1.squared_distance(dense3)
2.0
>>> sparse1 = SparseVector(2, [0, 1], [2., 1.])
>>> dense1.squared_distance(sparse1)
2.0
>>> dense1.squared_distance([1.,])
Traceback (most recent call last):
...
AssertionError: dimension mismatch
>>> dense1.squared_distance(SparseVector(1, [0,], [1.,]))
Traceback (most recent call last):
...
AssertionError: dimension mismatch
|
[
"Squared",
"distance",
"of",
"two",
"Vectors",
"."
] |
618d6bff71073c8c93501ab7392c3cc579730f0b
|
https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/mllib/linalg/__init__.py#L382-L418
|
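The dense/dense case is just the squared Euclidean norm of the difference. A NumPy one-liner (illustration only) reproduces the doctest value 2.0:

import numpy as np

x = np.array([1.0, 2.0])
y = np.array([2.0, 1.0])
diff = x - y
print(np.dot(diff, diff))   # 2.0, as in dense1.squared_distance(dense2) above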
19,234
|
apache/spark
|
python/pyspark/mllib/linalg/__init__.py
|
SparseVector.parse
|
def parse(s):
"""
Parse string representation back into the SparseVector.
>>> SparseVector.parse(' (4, [0,1 ],[ 4.0,5.0] )')
SparseVector(4, {0: 4.0, 1: 5.0})
"""
start = s.find('(')
if start == -1:
raise ValueError("Tuple should start with '('")
end = s.find(')')
if end == -1:
raise ValueError("Tuple should end with ')'")
s = s[start + 1: end].strip()
size = s[: s.find(',')]
try:
size = int(size)
except ValueError:
raise ValueError("Cannot parse size %s." % size)
ind_start = s.find('[')
if ind_start == -1:
raise ValueError("Indices array should start with '['.")
ind_end = s.find(']')
if ind_end == -1:
raise ValueError("Indices array should end with ']'")
new_s = s[ind_start + 1: ind_end]
ind_list = new_s.split(',')
try:
indices = [int(ind) for ind in ind_list if ind]
except ValueError:
raise ValueError("Unable to parse indices from %s." % new_s)
s = s[ind_end + 1:].strip()
val_start = s.find('[')
if val_start == -1:
raise ValueError("Values array should start with '['.")
val_end = s.find(']')
if val_end == -1:
raise ValueError("Values array should end with ']'.")
val_list = s[val_start + 1: val_end].split(',')
try:
values = [float(val) for val in val_list if val]
except ValueError:
raise ValueError("Unable to parse values from %s." % s)
return SparseVector(size, indices, values)
|
python
|
def parse(s):
"""
Parse string representation back into the SparseVector.
>>> SparseVector.parse(' (4, [0,1 ],[ 4.0,5.0] )')
SparseVector(4, {0: 4.0, 1: 5.0})
"""
start = s.find('(')
if start == -1:
raise ValueError("Tuple should start with '('")
end = s.find(')')
if end == -1:
raise ValueError("Tuple should end with ')'")
s = s[start + 1: end].strip()
size = s[: s.find(',')]
try:
size = int(size)
except ValueError:
raise ValueError("Cannot parse size %s." % size)
ind_start = s.find('[')
if ind_start == -1:
raise ValueError("Indices array should start with '['.")
ind_end = s.find(']')
if ind_end == -1:
raise ValueError("Indices array should end with ']'")
new_s = s[ind_start + 1: ind_end]
ind_list = new_s.split(',')
try:
indices = [int(ind) for ind in ind_list if ind]
except ValueError:
raise ValueError("Unable to parse indices from %s." % new_s)
s = s[ind_end + 1:].strip()
val_start = s.find('[')
if val_start == -1:
raise ValueError("Values array should start with '['.")
val_end = s.find(']')
if val_end == -1:
raise ValueError("Values array should end with ']'.")
val_list = s[val_start + 1: val_end].split(',')
try:
values = [float(val) for val in val_list if val]
except ValueError:
raise ValueError("Unable to parse values from %s." % s)
return SparseVector(size, indices, values)
|
[
"def",
"parse",
"(",
"s",
")",
":",
"start",
"=",
"s",
".",
"find",
"(",
"'('",
")",
"if",
"start",
"==",
"-",
"1",
":",
"raise",
"ValueError",
"(",
"\"Tuple should start with '('\"",
")",
"end",
"=",
"s",
".",
"find",
"(",
"')'",
")",
"if",
"end",
"==",
"-",
"1",
":",
"raise",
"ValueError",
"(",
"\"Tuple should end with ')'\"",
")",
"s",
"=",
"s",
"[",
"start",
"+",
"1",
":",
"end",
"]",
".",
"strip",
"(",
")",
"size",
"=",
"s",
"[",
":",
"s",
".",
"find",
"(",
"','",
")",
"]",
"try",
":",
"size",
"=",
"int",
"(",
"size",
")",
"except",
"ValueError",
":",
"raise",
"ValueError",
"(",
"\"Cannot parse size %s.\"",
"%",
"size",
")",
"ind_start",
"=",
"s",
".",
"find",
"(",
"'['",
")",
"if",
"ind_start",
"==",
"-",
"1",
":",
"raise",
"ValueError",
"(",
"\"Indices array should start with '['.\"",
")",
"ind_end",
"=",
"s",
".",
"find",
"(",
"']'",
")",
"if",
"ind_end",
"==",
"-",
"1",
":",
"raise",
"ValueError",
"(",
"\"Indices array should end with ']'\"",
")",
"new_s",
"=",
"s",
"[",
"ind_start",
"+",
"1",
":",
"ind_end",
"]",
"ind_list",
"=",
"new_s",
".",
"split",
"(",
"','",
")",
"try",
":",
"indices",
"=",
"[",
"int",
"(",
"ind",
")",
"for",
"ind",
"in",
"ind_list",
"if",
"ind",
"]",
"except",
"ValueError",
":",
"raise",
"ValueError",
"(",
"\"Unable to parse indices from %s.\"",
"%",
"new_s",
")",
"s",
"=",
"s",
"[",
"ind_end",
"+",
"1",
":",
"]",
".",
"strip",
"(",
")",
"val_start",
"=",
"s",
".",
"find",
"(",
"'['",
")",
"if",
"val_start",
"==",
"-",
"1",
":",
"raise",
"ValueError",
"(",
"\"Values array should start with '['.\"",
")",
"val_end",
"=",
"s",
".",
"find",
"(",
"']'",
")",
"if",
"val_end",
"==",
"-",
"1",
":",
"raise",
"ValueError",
"(",
"\"Values array should end with ']'.\"",
")",
"val_list",
"=",
"s",
"[",
"val_start",
"+",
"1",
":",
"val_end",
"]",
".",
"split",
"(",
"','",
")",
"try",
":",
"values",
"=",
"[",
"float",
"(",
"val",
")",
"for",
"val",
"in",
"val_list",
"if",
"val",
"]",
"except",
"ValueError",
":",
"raise",
"ValueError",
"(",
"\"Unable to parse values from %s.\"",
"%",
"s",
")",
"return",
"SparseVector",
"(",
"size",
",",
"indices",
",",
"values",
")"
] |
Parse string representation back into the SparseVector.
>>> SparseVector.parse(' (4, [0,1 ],[ 4.0,5.0] )')
SparseVector(4, {0: 4.0, 1: 5.0})
|
[
"Parse",
"string",
"representation",
"back",
"into",
"the",
"SparseVector",
"."
] |
618d6bff71073c8c93501ab7392c3cc579730f0b
|
https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/mllib/linalg/__init__.py#L589-L635
|
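As with DenseVector.parse, the accepted format is the one str() emits for a SparseVector, so parsing is a round trip. Sketch assumes pyspark is installed; no SparkContext is needed for the linalg module:

from pyspark.mllib.linalg import SparseVector

sv = SparseVector(4, [1, 3], [3.0, 4.0])
s = str(sv)                         # '(4,[1,3],[3.0,4.0])'
print(SparseVector.parse(s))        # SparseVector(4, {1: 3.0, 3: 4.0})
print(SparseVector.parse(s) == sv)  # True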
19,235
|
apache/spark
|
python/pyspark/mllib/linalg/__init__.py
|
SparseVector.dot
|
def dot(self, other):
"""
Dot product with a SparseVector or 1- or 2-dimensional Numpy array.
>>> a = SparseVector(4, [1, 3], [3.0, 4.0])
>>> a.dot(a)
25.0
>>> a.dot(array.array('d', [1., 2., 3., 4.]))
22.0
>>> b = SparseVector(4, [2], [1.0])
>>> a.dot(b)
0.0
>>> a.dot(np.array([[1, 1], [2, 2], [3, 3], [4, 4]]))
array([ 22., 22.])
>>> a.dot([1., 2., 3.])
Traceback (most recent call last):
...
AssertionError: dimension mismatch
>>> a.dot(np.array([1., 2.]))
Traceback (most recent call last):
...
AssertionError: dimension mismatch
>>> a.dot(DenseVector([1., 2.]))
Traceback (most recent call last):
...
AssertionError: dimension mismatch
>>> a.dot(np.zeros((3, 2)))
Traceback (most recent call last):
...
AssertionError: dimension mismatch
"""
if isinstance(other, np.ndarray):
if other.ndim not in [2, 1]:
raise ValueError("Cannot call dot with %d-dimensional array" % other.ndim)
assert len(self) == other.shape[0], "dimension mismatch"
return np.dot(self.values, other[self.indices])
assert len(self) == _vector_size(other), "dimension mismatch"
if isinstance(other, DenseVector):
return np.dot(other.array[self.indices], self.values)
elif isinstance(other, SparseVector):
# Find out common indices.
self_cmind = np.in1d(self.indices, other.indices, assume_unique=True)
self_values = self.values[self_cmind]
if self_values.size == 0:
return 0.0
else:
other_cmind = np.in1d(other.indices, self.indices, assume_unique=True)
return np.dot(self_values, other.values[other_cmind])
else:
return self.dot(_convert_to_vector(other))
|
python
|
def dot(self, other):
"""
Dot product with a SparseVector or 1- or 2-dimensional Numpy array.
>>> a = SparseVector(4, [1, 3], [3.0, 4.0])
>>> a.dot(a)
25.0
>>> a.dot(array.array('d', [1., 2., 3., 4.]))
22.0
>>> b = SparseVector(4, [2], [1.0])
>>> a.dot(b)
0.0
>>> a.dot(np.array([[1, 1], [2, 2], [3, 3], [4, 4]]))
array([ 22., 22.])
>>> a.dot([1., 2., 3.])
Traceback (most recent call last):
...
AssertionError: dimension mismatch
>>> a.dot(np.array([1., 2.]))
Traceback (most recent call last):
...
AssertionError: dimension mismatch
>>> a.dot(DenseVector([1., 2.]))
Traceback (most recent call last):
...
AssertionError: dimension mismatch
>>> a.dot(np.zeros((3, 2)))
Traceback (most recent call last):
...
AssertionError: dimension mismatch
"""
if isinstance(other, np.ndarray):
if other.ndim not in [2, 1]:
raise ValueError("Cannot call dot with %d-dimensional array" % other.ndim)
assert len(self) == other.shape[0], "dimension mismatch"
return np.dot(self.values, other[self.indices])
assert len(self) == _vector_size(other), "dimension mismatch"
if isinstance(other, DenseVector):
return np.dot(other.array[self.indices], self.values)
elif isinstance(other, SparseVector):
# Find out common indices.
self_cmind = np.in1d(self.indices, other.indices, assume_unique=True)
self_values = self.values[self_cmind]
if self_values.size == 0:
return 0.0
else:
other_cmind = np.in1d(other.indices, self.indices, assume_unique=True)
return np.dot(self_values, other.values[other_cmind])
else:
return self.dot(_convert_to_vector(other))
|
[
"def",
"dot",
"(",
"self",
",",
"other",
")",
":",
"if",
"isinstance",
"(",
"other",
",",
"np",
".",
"ndarray",
")",
":",
"if",
"other",
".",
"ndim",
"not",
"in",
"[",
"2",
",",
"1",
"]",
":",
"raise",
"ValueError",
"(",
"\"Cannot call dot with %d-dimensional array\"",
"%",
"other",
".",
"ndim",
")",
"assert",
"len",
"(",
"self",
")",
"==",
"other",
".",
"shape",
"[",
"0",
"]",
",",
"\"dimension mismatch\"",
"return",
"np",
".",
"dot",
"(",
"self",
".",
"values",
",",
"other",
"[",
"self",
".",
"indices",
"]",
")",
"assert",
"len",
"(",
"self",
")",
"==",
"_vector_size",
"(",
"other",
")",
",",
"\"dimension mismatch\"",
"if",
"isinstance",
"(",
"other",
",",
"DenseVector",
")",
":",
"return",
"np",
".",
"dot",
"(",
"other",
".",
"array",
"[",
"self",
".",
"indices",
"]",
",",
"self",
".",
"values",
")",
"elif",
"isinstance",
"(",
"other",
",",
"SparseVector",
")",
":",
"# Find out common indices.",
"self_cmind",
"=",
"np",
".",
"in1d",
"(",
"self",
".",
"indices",
",",
"other",
".",
"indices",
",",
"assume_unique",
"=",
"True",
")",
"self_values",
"=",
"self",
".",
"values",
"[",
"self_cmind",
"]",
"if",
"self_values",
".",
"size",
"==",
"0",
":",
"return",
"0.0",
"else",
":",
"other_cmind",
"=",
"np",
".",
"in1d",
"(",
"other",
".",
"indices",
",",
"self",
".",
"indices",
",",
"assume_unique",
"=",
"True",
")",
"return",
"np",
".",
"dot",
"(",
"self_values",
",",
"other",
".",
"values",
"[",
"other_cmind",
"]",
")",
"else",
":",
"return",
"self",
".",
"dot",
"(",
"_convert_to_vector",
"(",
"other",
")",
")"
] |
Dot product with a SparseVector or 1- or 2-dimensional Numpy array.
>>> a = SparseVector(4, [1, 3], [3.0, 4.0])
>>> a.dot(a)
25.0
>>> a.dot(array.array('d', [1., 2., 3., 4.]))
22.0
>>> b = SparseVector(4, [2], [1.0])
>>> a.dot(b)
0.0
>>> a.dot(np.array([[1, 1], [2, 2], [3, 3], [4, 4]]))
array([ 22., 22.])
>>> a.dot([1., 2., 3.])
Traceback (most recent call last):
...
AssertionError: dimension mismatch
>>> a.dot(np.array([1., 2.]))
Traceback (most recent call last):
...
AssertionError: dimension mismatch
>>> a.dot(DenseVector([1., 2.]))
Traceback (most recent call last):
...
AssertionError: dimension mismatch
>>> a.dot(np.zeros((3, 2)))
Traceback (most recent call last):
...
AssertionError: dimension mismatch
|
[
"Dot",
"product",
"with",
"a",
"SparseVector",
"or",
"1",
"-",
"or",
"2",
"-",
"dimensional",
"Numpy",
"array",
"."
] |
618d6bff71073c8c93501ab7392c3cc579730f0b
|
https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/mllib/linalg/__init__.py#L637-L691
|
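For the sparse/sparse branch, only indices present in both vectors contribute. A NumPy sketch (illustration only) mirrors the in1d-based intersection used above and reproduces the doctest value a.dot(b) == 0.0:

import numpy as np

a_idx, a_val = np.array([1, 3]), np.array([3.0, 4.0])   # a = SparseVector(4, [1, 3], [3.0, 4.0])
b_idx, b_val = np.array([2]), np.array([1.0])           # b = SparseVector(4, [2], [1.0])
a_common = np.in1d(a_idx, b_idx, assume_unique=True)
b_common = np.in1d(b_idx, a_idx, assume_unique=True)
print(np.dot(a_val[a_common], b_val[b_common]))         # 0.0 -- no overlapping indices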
19,236
|
apache/spark
|
python/pyspark/mllib/linalg/__init__.py
|
SparseVector.squared_distance
|
def squared_distance(self, other):
"""
Squared distance from a SparseVector or 1-dimensional NumPy array.
>>> a = SparseVector(4, [1, 3], [3.0, 4.0])
>>> a.squared_distance(a)
0.0
>>> a.squared_distance(array.array('d', [1., 2., 3., 4.]))
11.0
>>> a.squared_distance(np.array([1., 2., 3., 4.]))
11.0
>>> b = SparseVector(4, [2], [1.0])
>>> a.squared_distance(b)
26.0
>>> b.squared_distance(a)
26.0
>>> b.squared_distance([1., 2.])
Traceback (most recent call last):
...
AssertionError: dimension mismatch
>>> b.squared_distance(SparseVector(3, [1,], [1.0,]))
Traceback (most recent call last):
...
AssertionError: dimension mismatch
"""
assert len(self) == _vector_size(other), "dimension mismatch"
if isinstance(other, np.ndarray) or isinstance(other, DenseVector):
if isinstance(other, np.ndarray) and other.ndim != 1:
raise Exception("Cannot call squared_distance with %d-dimensional array" %
other.ndim)
if isinstance(other, DenseVector):
other = other.array
sparse_ind = np.zeros(other.size, dtype=bool)
sparse_ind[self.indices] = True
dist = other[sparse_ind] - self.values
result = np.dot(dist, dist)
other_ind = other[~sparse_ind]
result += np.dot(other_ind, other_ind)
return result
elif isinstance(other, SparseVector):
result = 0.0
i, j = 0, 0
while i < len(self.indices) and j < len(other.indices):
if self.indices[i] == other.indices[j]:
diff = self.values[i] - other.values[j]
result += diff * diff
i += 1
j += 1
elif self.indices[i] < other.indices[j]:
result += self.values[i] * self.values[i]
i += 1
else:
result += other.values[j] * other.values[j]
j += 1
while i < len(self.indices):
result += self.values[i] * self.values[i]
i += 1
while j < len(other.indices):
result += other.values[j] * other.values[j]
j += 1
return result
else:
return self.squared_distance(_convert_to_vector(other))
|
python
|
def squared_distance(self, other):
"""
Squared distance from a SparseVector or 1-dimensional NumPy array.
>>> a = SparseVector(4, [1, 3], [3.0, 4.0])
>>> a.squared_distance(a)
0.0
>>> a.squared_distance(array.array('d', [1., 2., 3., 4.]))
11.0
>>> a.squared_distance(np.array([1., 2., 3., 4.]))
11.0
>>> b = SparseVector(4, [2], [1.0])
>>> a.squared_distance(b)
26.0
>>> b.squared_distance(a)
26.0
>>> b.squared_distance([1., 2.])
Traceback (most recent call last):
...
AssertionError: dimension mismatch
>>> b.squared_distance(SparseVector(3, [1,], [1.0,]))
Traceback (most recent call last):
...
AssertionError: dimension mismatch
"""
assert len(self) == _vector_size(other), "dimension mismatch"
if isinstance(other, np.ndarray) or isinstance(other, DenseVector):
if isinstance(other, np.ndarray) and other.ndim != 1:
raise Exception("Cannot call squared_distance with %d-dimensional array" %
other.ndim)
if isinstance(other, DenseVector):
other = other.array
sparse_ind = np.zeros(other.size, dtype=bool)
sparse_ind[self.indices] = True
dist = other[sparse_ind] - self.values
result = np.dot(dist, dist)
other_ind = other[~sparse_ind]
result += np.dot(other_ind, other_ind)
return result
elif isinstance(other, SparseVector):
result = 0.0
i, j = 0, 0
while i < len(self.indices) and j < len(other.indices):
if self.indices[i] == other.indices[j]:
diff = self.values[i] - other.values[j]
result += diff * diff
i += 1
j += 1
elif self.indices[i] < other.indices[j]:
result += self.values[i] * self.values[i]
i += 1
else:
result += other.values[j] * other.values[j]
j += 1
while i < len(self.indices):
result += self.values[i] * self.values[i]
i += 1
while j < len(other.indices):
result += other.values[j] * other.values[j]
j += 1
return result
else:
return self.squared_distance(_convert_to_vector(other))
|
[
"def",
"squared_distance",
"(",
"self",
",",
"other",
")",
":",
"assert",
"len",
"(",
"self",
")",
"==",
"_vector_size",
"(",
"other",
")",
",",
"\"dimension mismatch\"",
"if",
"isinstance",
"(",
"other",
",",
"np",
".",
"ndarray",
")",
"or",
"isinstance",
"(",
"other",
",",
"DenseVector",
")",
":",
"if",
"isinstance",
"(",
"other",
",",
"np",
".",
"ndarray",
")",
"and",
"other",
".",
"ndim",
"!=",
"1",
":",
"raise",
"Exception",
"(",
"\"Cannot call squared_distance with %d-dimensional array\"",
"%",
"other",
".",
"ndim",
")",
"if",
"isinstance",
"(",
"other",
",",
"DenseVector",
")",
":",
"other",
"=",
"other",
".",
"array",
"sparse_ind",
"=",
"np",
".",
"zeros",
"(",
"other",
".",
"size",
",",
"dtype",
"=",
"bool",
")",
"sparse_ind",
"[",
"self",
".",
"indices",
"]",
"=",
"True",
"dist",
"=",
"other",
"[",
"sparse_ind",
"]",
"-",
"self",
".",
"values",
"result",
"=",
"np",
".",
"dot",
"(",
"dist",
",",
"dist",
")",
"other_ind",
"=",
"other",
"[",
"~",
"sparse_ind",
"]",
"result",
"+=",
"np",
".",
"dot",
"(",
"other_ind",
",",
"other_ind",
")",
"return",
"result",
"elif",
"isinstance",
"(",
"other",
",",
"SparseVector",
")",
":",
"result",
"=",
"0.0",
"i",
",",
"j",
"=",
"0",
",",
"0",
"while",
"i",
"<",
"len",
"(",
"self",
".",
"indices",
")",
"and",
"j",
"<",
"len",
"(",
"other",
".",
"indices",
")",
":",
"if",
"self",
".",
"indices",
"[",
"i",
"]",
"==",
"other",
".",
"indices",
"[",
"j",
"]",
":",
"diff",
"=",
"self",
".",
"values",
"[",
"i",
"]",
"-",
"other",
".",
"values",
"[",
"j",
"]",
"result",
"+=",
"diff",
"*",
"diff",
"i",
"+=",
"1",
"j",
"+=",
"1",
"elif",
"self",
".",
"indices",
"[",
"i",
"]",
"<",
"other",
".",
"indices",
"[",
"j",
"]",
":",
"result",
"+=",
"self",
".",
"values",
"[",
"i",
"]",
"*",
"self",
".",
"values",
"[",
"i",
"]",
"i",
"+=",
"1",
"else",
":",
"result",
"+=",
"other",
".",
"values",
"[",
"j",
"]",
"*",
"other",
".",
"values",
"[",
"j",
"]",
"j",
"+=",
"1",
"while",
"i",
"<",
"len",
"(",
"self",
".",
"indices",
")",
":",
"result",
"+=",
"self",
".",
"values",
"[",
"i",
"]",
"*",
"self",
".",
"values",
"[",
"i",
"]",
"i",
"+=",
"1",
"while",
"j",
"<",
"len",
"(",
"other",
".",
"indices",
")",
":",
"result",
"+=",
"other",
".",
"values",
"[",
"j",
"]",
"*",
"other",
".",
"values",
"[",
"j",
"]",
"j",
"+=",
"1",
"return",
"result",
"else",
":",
"return",
"self",
".",
"squared_distance",
"(",
"_convert_to_vector",
"(",
"other",
")",
")"
] |
Squared distance from a SparseVector or 1-dimensional NumPy array.
>>> a = SparseVector(4, [1, 3], [3.0, 4.0])
>>> a.squared_distance(a)
0.0
>>> a.squared_distance(array.array('d', [1., 2., 3., 4.]))
11.0
>>> a.squared_distance(np.array([1., 2., 3., 4.]))
11.0
>>> b = SparseVector(4, [2], [1.0])
>>> a.squared_distance(b)
26.0
>>> b.squared_distance(a)
26.0
>>> b.squared_distance([1., 2.])
Traceback (most recent call last):
...
AssertionError: dimension mismatch
>>> b.squared_distance(SparseVector(3, [1,], [1.0,]))
Traceback (most recent call last):
...
AssertionError: dimension mismatch
|
[
"Squared",
"distance",
"from",
"a",
"SparseVector",
"or",
"1",
"-",
"dimensional",
"NumPy",
"array",
"."
] |
618d6bff71073c8c93501ab7392c3cc579730f0b
|
https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/mllib/linalg/__init__.py#L693-L758
|
19,237
|
apache/spark
|
python/pyspark/mllib/linalg/__init__.py
|
SparseVector.toArray
|
def toArray(self):
"""
Returns a copy of this SparseVector as a 1-dimensional NumPy array.
"""
arr = np.zeros((self.size,), dtype=np.float64)
arr[self.indices] = self.values
return arr
|
python
|
def toArray(self):
"""
Returns a copy of this SparseVector as a 1-dimensional NumPy array.
"""
arr = np.zeros((self.size,), dtype=np.float64)
arr[self.indices] = self.values
return arr
|
[
"def",
"toArray",
"(",
"self",
")",
":",
"arr",
"=",
"np",
".",
"zeros",
"(",
"(",
"self",
".",
"size",
",",
")",
",",
"dtype",
"=",
"np",
".",
"float64",
")",
"arr",
"[",
"self",
".",
"indices",
"]",
"=",
"self",
".",
"values",
"return",
"arr"
] |
Returns a copy of this SparseVector as a 1-dimensional NumPy array.
|
[
"Returns",
"a",
"copy",
"of",
"this",
"SparseVector",
"as",
"a",
"1",
"-",
"dimensional",
"NumPy",
"array",
"."
] |
618d6bff71073c8c93501ab7392c3cc579730f0b
|
https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/mllib/linalg/__init__.py#L760-L766
|
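A minimal usage sketch for the SparseVector.toArray entry above. It is illustrative only and assumes a local pyspark and NumPy installation; the vector values are arbitrary.

# Sketch: materialize a SparseVector as a dense NumPy array (illustrative, not part of the entry above).
import numpy as np
from pyspark.mllib.linalg import SparseVector

sv = SparseVector(4, [1, 3], [3.0, 4.0])      # size 4, non-zeros at indices 1 and 3
arr = sv.toArray()                            # dense copy: [0.0, 3.0, 0.0, 4.0]
assert isinstance(arr, np.ndarray) and arr.dtype == np.float64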
19,238
|
apache/spark
|
python/pyspark/mllib/linalg/__init__.py
|
SparseVector.asML
|
def asML(self):
"""
Convert this vector to the new mllib-local representation.
This does NOT copy the data; it copies references.
:return: :py:class:`pyspark.ml.linalg.SparseVector`
.. versionadded:: 2.0.0
"""
return newlinalg.SparseVector(self.size, self.indices, self.values)
|
python
|
def asML(self):
"""
Convert this vector to the new mllib-local representation.
This does NOT copy the data; it copies references.
:return: :py:class:`pyspark.ml.linalg.SparseVector`
.. versionadded:: 2.0.0
"""
return newlinalg.SparseVector(self.size, self.indices, self.values)
|
[
"def",
"asML",
"(",
"self",
")",
":",
"return",
"newlinalg",
".",
"SparseVector",
"(",
"self",
".",
"size",
",",
"self",
".",
"indices",
",",
"self",
".",
"values",
")"
] |
Convert this vector to the new mllib-local representation.
This does NOT copy the data; it copies references.
:return: :py:class:`pyspark.ml.linalg.SparseVector`
.. versionadded:: 2.0.0
|
[
"Convert",
"this",
"vector",
"to",
"the",
"new",
"mllib",
"-",
"local",
"representation",
".",
"This",
"does",
"NOT",
"copy",
"the",
"data",
";",
"it",
"copies",
"references",
"."
] |
618d6bff71073c8c93501ab7392c3cc579730f0b
|
https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/mllib/linalg/__init__.py#L768-L777
|
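A hedged sketch for the SparseVector.asML entry above, assuming Spark 2.0 or later; it only shows that the converted object lives in pyspark.ml.linalg while sharing size, indices and values.

# Sketch: convert an mllib SparseVector to its ml counterpart (illustrative).
from pyspark.mllib.linalg import SparseVector

old = SparseVector(3, [0, 2], [1.0, 5.0])
new = old.asML()                              # pyspark.ml.linalg.SparseVector with the same contents
print(type(new).__module__, new.size, list(new.indices), list(new.values))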
19,239
|
apache/spark
|
python/pyspark/mllib/linalg/__init__.py
|
Vectors.dense
|
def dense(*elements):
"""
Create a dense vector of 64-bit floats from a Python list or numbers.
>>> Vectors.dense([1, 2, 3])
DenseVector([1.0, 2.0, 3.0])
>>> Vectors.dense(1.0, 2.0)
DenseVector([1.0, 2.0])
"""
if len(elements) == 1 and not isinstance(elements[0], (float, int, long)):
# it's list, numpy.array or other iterable object.
elements = elements[0]
return DenseVector(elements)
|
python
|
def dense(*elements):
"""
Create a dense vector of 64-bit floats from a Python list or numbers.
>>> Vectors.dense([1, 2, 3])
DenseVector([1.0, 2.0, 3.0])
>>> Vectors.dense(1.0, 2.0)
DenseVector([1.0, 2.0])
"""
if len(elements) == 1 and not isinstance(elements[0], (float, int, long)):
# it's list, numpy.array or other iterable object.
elements = elements[0]
return DenseVector(elements)
|
[
"def",
"dense",
"(",
"*",
"elements",
")",
":",
"if",
"len",
"(",
"elements",
")",
"==",
"1",
"and",
"not",
"isinstance",
"(",
"elements",
"[",
"0",
"]",
",",
"(",
"float",
",",
"int",
",",
"long",
")",
")",
":",
"# it's list, numpy.array or other iterable object.",
"elements",
"=",
"elements",
"[",
"0",
"]",
"return",
"DenseVector",
"(",
"elements",
")"
] |
Create a dense vector of 64-bit floats from a Python list or numbers.
>>> Vectors.dense([1, 2, 3])
DenseVector([1.0, 2.0, 3.0])
>>> Vectors.dense(1.0, 2.0)
DenseVector([1.0, 2.0])
|
[
"Create",
"a",
"dense",
"vector",
"of",
"64",
"-",
"bit",
"floats",
"from",
"a",
"Python",
"list",
"or",
"numbers",
"."
] |
618d6bff71073c8c93501ab7392c3cc579730f0b
|
https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/mllib/linalg/__init__.py#L874-L886
|
19,240
|
apache/spark
|
python/pyspark/mllib/linalg/__init__.py
|
Vectors.fromML
|
def fromML(vec):
"""
Convert a vector from the new mllib-local representation.
This does NOT copy the data; it copies references.
:param vec: a :py:class:`pyspark.ml.linalg.Vector`
:return: a :py:class:`pyspark.mllib.linalg.Vector`
.. versionadded:: 2.0.0
"""
if isinstance(vec, newlinalg.DenseVector):
return DenseVector(vec.array)
elif isinstance(vec, newlinalg.SparseVector):
return SparseVector(vec.size, vec.indices, vec.values)
else:
raise TypeError("Unsupported vector type %s" % type(vec))
|
python
|
def fromML(vec):
"""
Convert a vector from the new mllib-local representation.
This does NOT copy the data; it copies references.
:param vec: a :py:class:`pyspark.ml.linalg.Vector`
:return: a :py:class:`pyspark.mllib.linalg.Vector`
.. versionadded:: 2.0.0
"""
if isinstance(vec, newlinalg.DenseVector):
return DenseVector(vec.array)
elif isinstance(vec, newlinalg.SparseVector):
return SparseVector(vec.size, vec.indices, vec.values)
else:
raise TypeError("Unsupported vector type %s" % type(vec))
|
[
"def",
"fromML",
"(",
"vec",
")",
":",
"if",
"isinstance",
"(",
"vec",
",",
"newlinalg",
".",
"DenseVector",
")",
":",
"return",
"DenseVector",
"(",
"vec",
".",
"array",
")",
"elif",
"isinstance",
"(",
"vec",
",",
"newlinalg",
".",
"SparseVector",
")",
":",
"return",
"SparseVector",
"(",
"vec",
".",
"size",
",",
"vec",
".",
"indices",
",",
"vec",
".",
"values",
")",
"else",
":",
"raise",
"TypeError",
"(",
"\"Unsupported vector type %s\"",
"%",
"type",
"(",
"vec",
")",
")"
] |
Convert a vector from the new mllib-local representation.
This does NOT copy the data; it copies references.
:param vec: a :py:class:`pyspark.ml.linalg.Vector`
:return: a :py:class:`pyspark.mllib.linalg.Vector`
.. versionadded:: 2.0.0
|
[
"Convert",
"a",
"vector",
"from",
"the",
"new",
"mllib",
"-",
"local",
"representation",
".",
"This",
"does",
"NOT",
"copy",
"the",
"data",
";",
"it",
"copies",
"references",
"."
] |
618d6bff71073c8c93501ab7392c3cc579730f0b
|
https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/mllib/linalg/__init__.py#L889-L904
|
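A round-trip sketch for the Vectors.fromML entry above (Spark 2.0+ assumed): build a vector with the ml API, convert it to the mllib type, and convert it back.

# Sketch: round trip between pyspark.ml and pyspark.mllib vectors (illustrative).
from pyspark.ml.linalg import Vectors as MLVectors
from pyspark.mllib.linalg import Vectors as MLlibVectors

ml_vec = MLVectors.dense([1.0, 2.0, 3.0])
mllib_vec = MLlibVectors.fromML(ml_vec)       # pyspark.mllib.linalg.DenseVector([1.0, 2.0, 3.0])
back = mllib_vec.asML()                       # back to a pyspark.ml.linalg.DenseVector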
19,241
|
apache/spark
|
python/pyspark/mllib/linalg/__init__.py
|
Vectors.squared_distance
|
def squared_distance(v1, v2):
"""
Squared distance between two vectors.
a and b can be of type SparseVector, DenseVector, np.ndarray
or array.array.
>>> a = Vectors.sparse(4, [(0, 1), (3, 4)])
>>> b = Vectors.dense([2, 5, 4, 1])
>>> a.squared_distance(b)
51.0
"""
v1, v2 = _convert_to_vector(v1), _convert_to_vector(v2)
return v1.squared_distance(v2)
|
python
|
def squared_distance(v1, v2):
"""
Squared distance between two vectors.
a and b can be of type SparseVector, DenseVector, np.ndarray
or array.array.
>>> a = Vectors.sparse(4, [(0, 1), (3, 4)])
>>> b = Vectors.dense([2, 5, 4, 1])
>>> a.squared_distance(b)
51.0
"""
v1, v2 = _convert_to_vector(v1), _convert_to_vector(v2)
return v1.squared_distance(v2)
|
[
"def",
"squared_distance",
"(",
"v1",
",",
"v2",
")",
":",
"v1",
",",
"v2",
"=",
"_convert_to_vector",
"(",
"v1",
")",
",",
"_convert_to_vector",
"(",
"v2",
")",
"return",
"v1",
".",
"squared_distance",
"(",
"v2",
")"
] |
Squared distance between two vectors.
a and b can be of type SparseVector, DenseVector, np.ndarray
or array.array.
>>> a = Vectors.sparse(4, [(0, 1), (3, 4)])
>>> b = Vectors.dense([2, 5, 4, 1])
>>> a.squared_distance(b)
51.0
|
[
"Squared",
"distance",
"between",
"two",
"vectors",
".",
"a",
"and",
"b",
"can",
"be",
"of",
"type",
"SparseVector",
"DenseVector",
"np",
".",
"ndarray",
"or",
"array",
".",
"array",
"."
] |
618d6bff71073c8c93501ab7392c3cc579730f0b
|
https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/mllib/linalg/__init__.py#L920-L932
|
19,242
|
apache/spark
|
python/pyspark/mllib/linalg/__init__.py
|
Vectors.parse
|
def parse(s):
"""Parse a string representation back into the Vector.
>>> Vectors.parse('[2,1,2 ]')
DenseVector([2.0, 1.0, 2.0])
>>> Vectors.parse(' ( 100, [0], [2])')
SparseVector(100, {0: 2.0})
"""
if s.find('(') == -1 and s.find('[') != -1:
return DenseVector.parse(s)
elif s.find('(') != -1:
return SparseVector.parse(s)
else:
raise ValueError(
"Cannot find tokens '[' or '(' from the input string.")
|
python
|
def parse(s):
"""Parse a string representation back into the Vector.
>>> Vectors.parse('[2,1,2 ]')
DenseVector([2.0, 1.0, 2.0])
>>> Vectors.parse(' ( 100, [0], [2])')
SparseVector(100, {0: 2.0})
"""
if s.find('(') == -1 and s.find('[') != -1:
return DenseVector.parse(s)
elif s.find('(') != -1:
return SparseVector.parse(s)
else:
raise ValueError(
"Cannot find tokens '[' or '(' from the input string.")
|
[
"def",
"parse",
"(",
"s",
")",
":",
"if",
"s",
".",
"find",
"(",
"'('",
")",
"==",
"-",
"1",
"and",
"s",
".",
"find",
"(",
"'['",
")",
"!=",
"-",
"1",
":",
"return",
"DenseVector",
".",
"parse",
"(",
"s",
")",
"elif",
"s",
".",
"find",
"(",
"'('",
")",
"!=",
"-",
"1",
":",
"return",
"SparseVector",
".",
"parse",
"(",
"s",
")",
"else",
":",
"raise",
"ValueError",
"(",
"\"Cannot find tokens '[' or '(' from the input string.\"",
")"
] |
Parse a string representation back into the Vector.
>>> Vectors.parse('[2,1,2 ]')
DenseVector([2.0, 1.0, 2.0])
>>> Vectors.parse(' ( 100, [0], [2])')
SparseVector(100, {0: 2.0})
|
[
"Parse",
"a",
"string",
"representation",
"back",
"into",
"the",
"Vector",
"."
] |
618d6bff71073c8c93501ab7392c3cc579730f0b
|
https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/mllib/linalg/__init__.py#L942-L956
|
19,243
|
apache/spark
|
python/pyspark/mllib/linalg/__init__.py
|
Matrix._convert_to_array
|
def _convert_to_array(array_like, dtype):
"""
Convert Matrix attributes which are array-like or buffer to array.
"""
if isinstance(array_like, bytes):
return np.frombuffer(array_like, dtype=dtype)
return np.asarray(array_like, dtype=dtype)
|
python
|
def _convert_to_array(array_like, dtype):
"""
Convert Matrix attributes which are array-like or buffer to array.
"""
if isinstance(array_like, bytes):
return np.frombuffer(array_like, dtype=dtype)
return np.asarray(array_like, dtype=dtype)
|
[
"def",
"_convert_to_array",
"(",
"array_like",
",",
"dtype",
")",
":",
"if",
"isinstance",
"(",
"array_like",
",",
"bytes",
")",
":",
"return",
"np",
".",
"frombuffer",
"(",
"array_like",
",",
"dtype",
"=",
"dtype",
")",
"return",
"np",
".",
"asarray",
"(",
"array_like",
",",
"dtype",
"=",
"dtype",
")"
] |
Convert Matrix attributes which are array-like or buffer to array.
|
[
"Convert",
"Matrix",
"attributes",
"which",
"are",
"array",
"-",
"like",
"or",
"buffer",
"to",
"array",
"."
] |
618d6bff71073c8c93501ab7392c3cc579730f0b
|
https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/mllib/linalg/__init__.py#L1014-L1020
|
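The helper above accepts either a raw bytes buffer or any array-like; the following is a plain-NumPy sketch of that same pattern, outside Spark, with made-up data.

# Sketch of the buffer-or-array-like pattern: np.frombuffer for bytes, np.asarray otherwise.
import numpy as np

raw = np.arange(4, dtype=np.float64).tobytes()        # a bytes buffer
from_bytes = np.frombuffer(raw, dtype=np.float64)     # -> array([0., 1., 2., 3.])
from_list = np.asarray([0.0, 1.0, 2.0, 3.0], dtype=np.float64)
assert (from_bytes == from_list).all()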
19,244
|
apache/spark
|
python/pyspark/mllib/linalg/__init__.py
|
DenseMatrix.toSparse
|
def toSparse(self):
"""Convert to SparseMatrix"""
if self.isTransposed:
values = np.ravel(self.toArray(), order='F')
else:
values = self.values
indices = np.nonzero(values)[0]
colCounts = np.bincount(indices // self.numRows)
colPtrs = np.cumsum(np.hstack(
(0, colCounts, np.zeros(self.numCols - colCounts.size))))
values = values[indices]
rowIndices = indices % self.numRows
return SparseMatrix(self.numRows, self.numCols, colPtrs, rowIndices, values)
|
python
|
def toSparse(self):
"""Convert to SparseMatrix"""
if self.isTransposed:
values = np.ravel(self.toArray(), order='F')
else:
values = self.values
indices = np.nonzero(values)[0]
colCounts = np.bincount(indices // self.numRows)
colPtrs = np.cumsum(np.hstack(
(0, colCounts, np.zeros(self.numCols - colCounts.size))))
values = values[indices]
rowIndices = indices % self.numRows
return SparseMatrix(self.numRows, self.numCols, colPtrs, rowIndices, values)
|
[
"def",
"toSparse",
"(",
"self",
")",
":",
"if",
"self",
".",
"isTransposed",
":",
"values",
"=",
"np",
".",
"ravel",
"(",
"self",
".",
"toArray",
"(",
")",
",",
"order",
"=",
"'F'",
")",
"else",
":",
"values",
"=",
"self",
".",
"values",
"indices",
"=",
"np",
".",
"nonzero",
"(",
"values",
")",
"[",
"0",
"]",
"colCounts",
"=",
"np",
".",
"bincount",
"(",
"indices",
"//",
"self",
".",
"numRows",
")",
"colPtrs",
"=",
"np",
".",
"cumsum",
"(",
"np",
".",
"hstack",
"(",
"(",
"0",
",",
"colCounts",
",",
"np",
".",
"zeros",
"(",
"self",
".",
"numCols",
"-",
"colCounts",
".",
"size",
")",
")",
")",
")",
"values",
"=",
"values",
"[",
"indices",
"]",
"rowIndices",
"=",
"indices",
"%",
"self",
".",
"numRows",
"return",
"SparseMatrix",
"(",
"self",
".",
"numRows",
",",
"self",
".",
"numCols",
",",
"colPtrs",
",",
"rowIndices",
",",
"values",
")"
] |
Convert to SparseMatrix
|
[
"Convert",
"to",
"SparseMatrix"
] |
618d6bff71073c8c93501ab7392c3cc579730f0b
|
https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/mllib/linalg/__init__.py#L1097-L1110
|
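A small sketch for the DenseMatrix.toSparse entry above; the values are stored column-major, so the numbers below describe [[1, 0], [0, 3]]. Illustrative only.

# Sketch: convert a dense column-major matrix to compressed sparse column (CSC) form.
from pyspark.mllib.linalg import DenseMatrix

dm = DenseMatrix(2, 2, [1.0, 0.0, 0.0, 3.0])  # columns [1, 0] and [0, 3]
sm = dm.toSparse()                            # keeps only the two non-zero entries
print(sm.colPtrs, sm.rowIndices, sm.values)   # roughly [0 1 2], [0 1], [1. 3.]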
19,245
|
apache/spark
|
python/pyspark/mllib/linalg/__init__.py
|
Matrices.sparse
|
def sparse(numRows, numCols, colPtrs, rowIndices, values):
"""
Create a SparseMatrix
"""
return SparseMatrix(numRows, numCols, colPtrs, rowIndices, values)
|
python
|
def sparse(numRows, numCols, colPtrs, rowIndices, values):
"""
Create a SparseMatrix
"""
return SparseMatrix(numRows, numCols, colPtrs, rowIndices, values)
|
[
"def",
"sparse",
"(",
"numRows",
",",
"numCols",
",",
"colPtrs",
",",
"rowIndices",
",",
"values",
")",
":",
"return",
"SparseMatrix",
"(",
"numRows",
",",
"numCols",
",",
"colPtrs",
",",
"rowIndices",
",",
"values",
")"
] |
Create a SparseMatrix
|
[
"Create",
"a",
"SparseMatrix"
] |
618d6bff71073c8c93501ab7392c3cc579730f0b
|
https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/mllib/linalg/__init__.py#L1321-L1325
|
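A sketch of the CSC arguments taken by the Matrices.sparse entry above: colPtrs has numCols + 1 entries marking where each column starts, and rowIndices/values list the non-zeros column by column. The concrete numbers are arbitrary.

# Sketch: the 2x2 matrix [[9, 0], [0, 8]] in compressed sparse column form.
from pyspark.mllib.linalg import Matrices

m = Matrices.sparse(2, 2, colPtrs=[0, 1, 2], rowIndices=[0, 1], values=[9.0, 8.0])
print(m.toArray())                            # [[9. 0.]
                                              #  [0. 8.]]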
19,246
|
apache/spark
|
python/pyspark/mllib/linalg/__init__.py
|
Matrices.fromML
|
def fromML(mat):
"""
Convert a matrix from the new mllib-local representation.
This does NOT copy the data; it copies references.
:param mat: a :py:class:`pyspark.ml.linalg.Matrix`
:return: a :py:class:`pyspark.mllib.linalg.Matrix`
.. versionadded:: 2.0.0
"""
if isinstance(mat, newlinalg.DenseMatrix):
return DenseMatrix(mat.numRows, mat.numCols, mat.values, mat.isTransposed)
elif isinstance(mat, newlinalg.SparseMatrix):
return SparseMatrix(mat.numRows, mat.numCols, mat.colPtrs, mat.rowIndices,
mat.values, mat.isTransposed)
else:
raise TypeError("Unsupported matrix type %s" % type(mat))
|
python
|
def fromML(mat):
"""
Convert a matrix from the new mllib-local representation.
This does NOT copy the data; it copies references.
:param mat: a :py:class:`pyspark.ml.linalg.Matrix`
:return: a :py:class:`pyspark.mllib.linalg.Matrix`
.. versionadded:: 2.0.0
"""
if isinstance(mat, newlinalg.DenseMatrix):
return DenseMatrix(mat.numRows, mat.numCols, mat.values, mat.isTransposed)
elif isinstance(mat, newlinalg.SparseMatrix):
return SparseMatrix(mat.numRows, mat.numCols, mat.colPtrs, mat.rowIndices,
mat.values, mat.isTransposed)
else:
raise TypeError("Unsupported matrix type %s" % type(mat))
|
[
"def",
"fromML",
"(",
"mat",
")",
":",
"if",
"isinstance",
"(",
"mat",
",",
"newlinalg",
".",
"DenseMatrix",
")",
":",
"return",
"DenseMatrix",
"(",
"mat",
".",
"numRows",
",",
"mat",
".",
"numCols",
",",
"mat",
".",
"values",
",",
"mat",
".",
"isTransposed",
")",
"elif",
"isinstance",
"(",
"mat",
",",
"newlinalg",
".",
"SparseMatrix",
")",
":",
"return",
"SparseMatrix",
"(",
"mat",
".",
"numRows",
",",
"mat",
".",
"numCols",
",",
"mat",
".",
"colPtrs",
",",
"mat",
".",
"rowIndices",
",",
"mat",
".",
"values",
",",
"mat",
".",
"isTransposed",
")",
"else",
":",
"raise",
"TypeError",
"(",
"\"Unsupported matrix type %s\"",
"%",
"type",
"(",
"mat",
")",
")"
] |
Convert a matrix from the new mllib-local representation.
This does NOT copy the data; it copies references.
:param mat: a :py:class:`pyspark.ml.linalg.Matrix`
:return: a :py:class:`pyspark.mllib.linalg.Matrix`
.. versionadded:: 2.0.0
|
[
"Convert",
"a",
"matrix",
"from",
"the",
"new",
"mllib",
"-",
"local",
"representation",
".",
"This",
"does",
"NOT",
"copy",
"the",
"data",
";",
"it",
"copies",
"references",
"."
] |
618d6bff71073c8c93501ab7392c3cc579730f0b
|
https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/mllib/linalg/__init__.py#L1328-L1344
|
19,247
|
apache/spark
|
python/pyspark/ml/feature.py
|
StringIndexerModel.from_labels
|
def from_labels(cls, labels, inputCol, outputCol=None, handleInvalid=None):
"""
Construct the model directly from an array of label strings,
requires an active SparkContext.
"""
sc = SparkContext._active_spark_context
java_class = sc._gateway.jvm.java.lang.String
jlabels = StringIndexerModel._new_java_array(labels, java_class)
model = StringIndexerModel._create_from_java_class(
"org.apache.spark.ml.feature.StringIndexerModel", jlabels)
model.setInputCol(inputCol)
if outputCol is not None:
model.setOutputCol(outputCol)
if handleInvalid is not None:
model.setHandleInvalid(handleInvalid)
return model
|
python
|
def from_labels(cls, labels, inputCol, outputCol=None, handleInvalid=None):
"""
Construct the model directly from an array of label strings,
requires an active SparkContext.
"""
sc = SparkContext._active_spark_context
java_class = sc._gateway.jvm.java.lang.String
jlabels = StringIndexerModel._new_java_array(labels, java_class)
model = StringIndexerModel._create_from_java_class(
"org.apache.spark.ml.feature.StringIndexerModel", jlabels)
model.setInputCol(inputCol)
if outputCol is not None:
model.setOutputCol(outputCol)
if handleInvalid is not None:
model.setHandleInvalid(handleInvalid)
return model
|
[
"def",
"from_labels",
"(",
"cls",
",",
"labels",
",",
"inputCol",
",",
"outputCol",
"=",
"None",
",",
"handleInvalid",
"=",
"None",
")",
":",
"sc",
"=",
"SparkContext",
".",
"_active_spark_context",
"java_class",
"=",
"sc",
".",
"_gateway",
".",
"jvm",
".",
"java",
".",
"lang",
".",
"String",
"jlabels",
"=",
"StringIndexerModel",
".",
"_new_java_array",
"(",
"labels",
",",
"java_class",
")",
"model",
"=",
"StringIndexerModel",
".",
"_create_from_java_class",
"(",
"\"org.apache.spark.ml.feature.StringIndexerModel\"",
",",
"jlabels",
")",
"model",
".",
"setInputCol",
"(",
"inputCol",
")",
"if",
"outputCol",
"is",
"not",
"None",
":",
"model",
".",
"setOutputCol",
"(",
"outputCol",
")",
"if",
"handleInvalid",
"is",
"not",
"None",
":",
"model",
".",
"setHandleInvalid",
"(",
"handleInvalid",
")",
"return",
"model"
] |
Construct the model directly from an array of label strings,
requires an active SparkContext.
|
[
"Construct",
"the",
"model",
"directly",
"from",
"an",
"array",
"of",
"label",
"strings",
"requires",
"an",
"active",
"SparkContext",
"."
] |
618d6bff71073c8c93501ab7392c3cc579730f0b
|
https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/ml/feature.py#L2503-L2518
|
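A usage sketch for the StringIndexerModel.from_labels entry above, assuming Spark 2.4+ and an active SparkSession; the column names and labels are made up.

# Sketch: build a StringIndexerModel directly from a known label list.
from pyspark.sql import SparkSession
from pyspark.ml.feature import StringIndexerModel

spark = SparkSession.builder.master("local[1]").getOrCreate()
model = StringIndexerModel.from_labels(
    ["a", "b", "c"], inputCol="category", outputCol="categoryIndex", handleInvalid="error")
df = spark.createDataFrame([("a",), ("c",), ("b",)], ["category"])
model.transform(df).show()                    # "a" -> 0.0, "b" -> 1.0, "c" -> 2.0 with this label order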
19,248
|
apache/spark
|
python/pyspark/ml/feature.py
|
StringIndexerModel.from_arrays_of_labels
|
def from_arrays_of_labels(cls, arrayOfLabels, inputCols, outputCols=None,
handleInvalid=None):
"""
Construct the model directly from an array of array of label strings,
requires an active SparkContext.
"""
sc = SparkContext._active_spark_context
java_class = sc._gateway.jvm.java.lang.String
jlabels = StringIndexerModel._new_java_array(arrayOfLabels, java_class)
model = StringIndexerModel._create_from_java_class(
"org.apache.spark.ml.feature.StringIndexerModel", jlabels)
model.setInputCols(inputCols)
if outputCols is not None:
model.setOutputCols(outputCols)
if handleInvalid is not None:
model.setHandleInvalid(handleInvalid)
return model
|
python
|
def from_arrays_of_labels(cls, arrayOfLabels, inputCols, outputCols=None,
handleInvalid=None):
"""
Construct the model directly from an array of array of label strings,
requires an active SparkContext.
"""
sc = SparkContext._active_spark_context
java_class = sc._gateway.jvm.java.lang.String
jlabels = StringIndexerModel._new_java_array(arrayOfLabels, java_class)
model = StringIndexerModel._create_from_java_class(
"org.apache.spark.ml.feature.StringIndexerModel", jlabels)
model.setInputCols(inputCols)
if outputCols is not None:
model.setOutputCols(outputCols)
if handleInvalid is not None:
model.setHandleInvalid(handleInvalid)
return model
|
[
"def",
"from_arrays_of_labels",
"(",
"cls",
",",
"arrayOfLabels",
",",
"inputCols",
",",
"outputCols",
"=",
"None",
",",
"handleInvalid",
"=",
"None",
")",
":",
"sc",
"=",
"SparkContext",
".",
"_active_spark_context",
"java_class",
"=",
"sc",
".",
"_gateway",
".",
"jvm",
".",
"java",
".",
"lang",
".",
"String",
"jlabels",
"=",
"StringIndexerModel",
".",
"_new_java_array",
"(",
"arrayOfLabels",
",",
"java_class",
")",
"model",
"=",
"StringIndexerModel",
".",
"_create_from_java_class",
"(",
"\"org.apache.spark.ml.feature.StringIndexerModel\"",
",",
"jlabels",
")",
"model",
".",
"setInputCols",
"(",
"inputCols",
")",
"if",
"outputCols",
"is",
"not",
"None",
":",
"model",
".",
"setOutputCols",
"(",
"outputCols",
")",
"if",
"handleInvalid",
"is",
"not",
"None",
":",
"model",
".",
"setHandleInvalid",
"(",
"handleInvalid",
")",
"return",
"model"
] |
Construct the model directly from an array of array of label strings,
requires an active SparkContext.
|
[
"Construct",
"the",
"model",
"directly",
"from",
"an",
"array",
"of",
"array",
"of",
"label",
"strings",
"requires",
"an",
"active",
"SparkContext",
"."
] |
618d6bff71073c8c93501ab7392c3cc579730f0b
|
https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/ml/feature.py#L2522-L2538
|
19,249
|
apache/spark
|
python/pyspark/sql/utils.py
|
install_exception_handler
|
def install_exception_handler():
"""
Hook an exception handler into Py4j, which could capture some SQL exceptions in Java.
When calling Java API, it will call `get_return_value` to parse the returned object.
If any exception happened in JVM, the result will be Java exception object, it raise
py4j.protocol.Py4JJavaError. We replace the original `get_return_value` with one that
could capture the Java exception and throw a Python one (with the same error message).
It's idempotent, could be called multiple times.
"""
original = py4j.protocol.get_return_value
# The original `get_return_value` is not patched, it's idempotent.
patched = capture_sql_exception(original)
# only patch the one used in py4j.java_gateway (call Java API)
py4j.java_gateway.get_return_value = patched
|
python
|
def install_exception_handler():
"""
Hook an exception handler into Py4j, which could capture some SQL exceptions in Java.
When calling Java API, it will call `get_return_value` to parse the returned object.
If any exception happened in JVM, the result will be Java exception object, it raise
py4j.protocol.Py4JJavaError. We replace the original `get_return_value` with one that
could capture the Java exception and throw a Python one (with the same error message).
It's idempotent, could be called multiple times.
"""
original = py4j.protocol.get_return_value
# The original `get_return_value` is not patched, it's idempotent.
patched = capture_sql_exception(original)
# only patch the one used in py4j.java_gateway (call Java API)
py4j.java_gateway.get_return_value = patched
|
[
"def",
"install_exception_handler",
"(",
")",
":",
"original",
"=",
"py4j",
".",
"protocol",
".",
"get_return_value",
"# The original `get_return_value` is not patched, it's idempotent.",
"patched",
"=",
"capture_sql_exception",
"(",
"original",
")",
"# only patch the one used in py4j.java_gateway (call Java API)",
"py4j",
".",
"java_gateway",
".",
"get_return_value",
"=",
"patched"
] |
Hook an exception handler into Py4j, which could capture some SQL exceptions in Java.
When calling Java API, it will call `get_return_value` to parse the returned object.
If any exception happened in JVM, the result will be Java exception object, it raise
py4j.protocol.Py4JJavaError. We replace the original `get_return_value` with one that
could capture the Java exception and throw a Python one (with the same error message).
It's idempotent, could be called multiple times.
|
[
"Hook",
"an",
"exception",
"handler",
"into",
"Py4j",
"which",
"could",
"capture",
"some",
"SQL",
"exceptions",
"in",
"Java",
"."
] |
618d6bff71073c8c93501ab7392c3cc579730f0b
|
https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/sql/utils.py#L99-L114
|
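The entry above wraps py4j's get_return_value and re-installs the wrapped version. Below is a generic, self-contained sketch of that wrap-and-patch idea; the names and the RuntimeError/ValueError pair are stand-ins, not pyspark or py4j APIs.

# Generic sketch of the wrap-and-patch pattern (illustrative stand-ins only).
import functools

def get_return_value(answer):                 # stand-in for the function being patched
    if answer.startswith("x"):
        raise RuntimeError("backend failed: " + answer)   # stand-in for Py4JJavaError
    return answer

def capture_exception(original):
    @functools.wraps(original)
    def deco(*args, **kwargs):
        try:
            return original(*args, **kwargs)
        except RuntimeError as e:
            raise ValueError(str(e))          # re-raise as a friendlier exception type
    return deco

# Patching from the *original* function each time keeps the operation idempotent.
patched_get_return_value = capture_exception(get_return_value)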
19,250
|
apache/spark
|
python/pyspark/sql/utils.py
|
require_minimum_pandas_version
|
def require_minimum_pandas_version():
""" Raise ImportError if minimum version of Pandas is not installed
"""
# TODO(HyukjinKwon): Relocate and deduplicate the version specification.
minimum_pandas_version = "0.19.2"
from distutils.version import LooseVersion
try:
import pandas
have_pandas = True
except ImportError:
have_pandas = False
if not have_pandas:
raise ImportError("Pandas >= %s must be installed; however, "
"it was not found." % minimum_pandas_version)
if LooseVersion(pandas.__version__) < LooseVersion(minimum_pandas_version):
raise ImportError("Pandas >= %s must be installed; however, "
"your version was %s." % (minimum_pandas_version, pandas.__version__))
|
python
|
def require_minimum_pandas_version():
""" Raise ImportError if minimum version of Pandas is not installed
"""
# TODO(HyukjinKwon): Relocate and deduplicate the version specification.
minimum_pandas_version = "0.19.2"
from distutils.version import LooseVersion
try:
import pandas
have_pandas = True
except ImportError:
have_pandas = False
if not have_pandas:
raise ImportError("Pandas >= %s must be installed; however, "
"it was not found." % minimum_pandas_version)
if LooseVersion(pandas.__version__) < LooseVersion(minimum_pandas_version):
raise ImportError("Pandas >= %s must be installed; however, "
"your version was %s." % (minimum_pandas_version, pandas.__version__))
|
[
"def",
"require_minimum_pandas_version",
"(",
")",
":",
"# TODO(HyukjinKwon): Relocate and deduplicate the version specification.",
"minimum_pandas_version",
"=",
"\"0.19.2\"",
"from",
"distutils",
".",
"version",
"import",
"LooseVersion",
"try",
":",
"import",
"pandas",
"have_pandas",
"=",
"True",
"except",
"ImportError",
":",
"have_pandas",
"=",
"False",
"if",
"not",
"have_pandas",
":",
"raise",
"ImportError",
"(",
"\"Pandas >= %s must be installed; however, \"",
"\"it was not found.\"",
"%",
"minimum_pandas_version",
")",
"if",
"LooseVersion",
"(",
"pandas",
".",
"__version__",
")",
"<",
"LooseVersion",
"(",
"minimum_pandas_version",
")",
":",
"raise",
"ImportError",
"(",
"\"Pandas >= %s must be installed; however, \"",
"\"your version was %s.\"",
"%",
"(",
"minimum_pandas_version",
",",
"pandas",
".",
"__version__",
")",
")"
] |
Raise ImportError if minimum version of Pandas is not installed
|
[
"Raise",
"ImportError",
"if",
"minimum",
"version",
"of",
"Pandas",
"is",
"not",
"installed"
] |
618d6bff71073c8c93501ab7392c3cc579730f0b
|
https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/sql/utils.py#L130-L147
|
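The version gate above follows a reusable "try to import, then compare versions" pattern; here is a generalized sketch of it. The function name and the numpy/1.9 example are illustrative, not part of pyspark.

# Sketch: generic minimum-version gate (illustrative helper, not a pyspark API).
from distutils.version import LooseVersion

def require_minimum_version(package_name, minimum):
    try:
        package = __import__(package_name)
    except ImportError:
        raise ImportError("%s >= %s must be installed; it was not found."
                          % (package_name, minimum))
    if LooseVersion(package.__version__) < LooseVersion(minimum):
        raise ImportError("%s >= %s must be installed; your version was %s."
                          % (package_name, minimum, package.__version__))

require_minimum_version("numpy", "1.9")       # raises ImportError if numpy is missing or too old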
19,251
|
apache/spark
|
python/pyspark/sql/utils.py
|
require_minimum_pyarrow_version
|
def require_minimum_pyarrow_version():
""" Raise ImportError if minimum version of pyarrow is not installed
"""
# TODO(HyukjinKwon): Relocate and deduplicate the version specification.
minimum_pyarrow_version = "0.12.1"
from distutils.version import LooseVersion
try:
import pyarrow
have_arrow = True
except ImportError:
have_arrow = False
if not have_arrow:
raise ImportError("PyArrow >= %s must be installed; however, "
"it was not found." % minimum_pyarrow_version)
if LooseVersion(pyarrow.__version__) < LooseVersion(minimum_pyarrow_version):
raise ImportError("PyArrow >= %s must be installed; however, "
"your version was %s." % (minimum_pyarrow_version, pyarrow.__version__))
|
python
|
def require_minimum_pyarrow_version():
""" Raise ImportError if minimum version of pyarrow is not installed
"""
# TODO(HyukjinKwon): Relocate and deduplicate the version specification.
minimum_pyarrow_version = "0.12.1"
from distutils.version import LooseVersion
try:
import pyarrow
have_arrow = True
except ImportError:
have_arrow = False
if not have_arrow:
raise ImportError("PyArrow >= %s must be installed; however, "
"it was not found." % minimum_pyarrow_version)
if LooseVersion(pyarrow.__version__) < LooseVersion(minimum_pyarrow_version):
raise ImportError("PyArrow >= %s must be installed; however, "
"your version was %s." % (minimum_pyarrow_version, pyarrow.__version__))
|
[
"def",
"require_minimum_pyarrow_version",
"(",
")",
":",
"# TODO(HyukjinKwon): Relocate and deduplicate the version specification.",
"minimum_pyarrow_version",
"=",
"\"0.12.1\"",
"from",
"distutils",
".",
"version",
"import",
"LooseVersion",
"try",
":",
"import",
"pyarrow",
"have_arrow",
"=",
"True",
"except",
"ImportError",
":",
"have_arrow",
"=",
"False",
"if",
"not",
"have_arrow",
":",
"raise",
"ImportError",
"(",
"\"PyArrow >= %s must be installed; however, \"",
"\"it was not found.\"",
"%",
"minimum_pyarrow_version",
")",
"if",
"LooseVersion",
"(",
"pyarrow",
".",
"__version__",
")",
"<",
"LooseVersion",
"(",
"minimum_pyarrow_version",
")",
":",
"raise",
"ImportError",
"(",
"\"PyArrow >= %s must be installed; however, \"",
"\"your version was %s.\"",
"%",
"(",
"minimum_pyarrow_version",
",",
"pyarrow",
".",
"__version__",
")",
")"
] |
Raise ImportError if minimum version of pyarrow is not installed
|
[
"Raise",
"ImportError",
"if",
"minimum",
"version",
"of",
"pyarrow",
"is",
"not",
"installed"
] |
618d6bff71073c8c93501ab7392c3cc579730f0b
|
https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/sql/utils.py#L150-L167
|
19,252
|
apache/spark
|
python/pyspark/java_gateway.py
|
_do_server_auth
|
def _do_server_auth(conn, auth_secret):
"""
Performs the authentication protocol defined by the SocketAuthHelper class on the given
file-like object 'conn'.
"""
write_with_length(auth_secret.encode("utf-8"), conn)
conn.flush()
reply = UTF8Deserializer().loads(conn)
if reply != "ok":
conn.close()
raise Exception("Unexpected reply from iterator server.")
|
python
|
def _do_server_auth(conn, auth_secret):
"""
Performs the authentication protocol defined by the SocketAuthHelper class on the given
file-like object 'conn'.
"""
write_with_length(auth_secret.encode("utf-8"), conn)
conn.flush()
reply = UTF8Deserializer().loads(conn)
if reply != "ok":
conn.close()
raise Exception("Unexpected reply from iterator server.")
|
[
"def",
"_do_server_auth",
"(",
"conn",
",",
"auth_secret",
")",
":",
"write_with_length",
"(",
"auth_secret",
".",
"encode",
"(",
"\"utf-8\"",
")",
",",
"conn",
")",
"conn",
".",
"flush",
"(",
")",
"reply",
"=",
"UTF8Deserializer",
"(",
")",
".",
"loads",
"(",
"conn",
")",
"if",
"reply",
"!=",
"\"ok\"",
":",
"conn",
".",
"close",
"(",
")",
"raise",
"Exception",
"(",
"\"Unexpected reply from iterator server.\"",
")"
] |
Performs the authentication protocol defined by the SocketAuthHelper class on the given
file-like object 'conn'.
|
[
"Performs",
"the",
"authentication",
"protocol",
"defined",
"by",
"the",
"SocketAuthHelper",
"class",
"on",
"the",
"given",
"file",
"-",
"like",
"object",
"conn",
"."
] |
618d6bff71073c8c93501ab7392c3cc579730f0b
|
https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/java_gateway.py#L150-L160
|
19,253
|
apache/spark
|
python/pyspark/java_gateway.py
|
ensure_callback_server_started
|
def ensure_callback_server_started(gw):
"""
Start callback server if not already started. The callback server is needed if the Java
driver process needs to callback into the Python driver process to execute Python code.
"""
# getattr will fallback to JVM, so we cannot test by hasattr()
if "_callback_server" not in gw.__dict__ or gw._callback_server is None:
gw.callback_server_parameters.eager_load = True
gw.callback_server_parameters.daemonize = True
gw.callback_server_parameters.daemonize_connections = True
gw.callback_server_parameters.port = 0
gw.start_callback_server(gw.callback_server_parameters)
cbport = gw._callback_server.server_socket.getsockname()[1]
gw._callback_server.port = cbport
# gateway with real port
gw._python_proxy_port = gw._callback_server.port
# get the GatewayServer object in JVM by ID
jgws = JavaObject("GATEWAY_SERVER", gw._gateway_client)
# update the port of CallbackClient with real port
jgws.resetCallbackClient(jgws.getCallbackClient().getAddress(), gw._python_proxy_port)
|
python
|
def ensure_callback_server_started(gw):
"""
Start callback server if not already started. The callback server is needed if the Java
driver process needs to callback into the Python driver process to execute Python code.
"""
# getattr will fallback to JVM, so we cannot test by hasattr()
if "_callback_server" not in gw.__dict__ or gw._callback_server is None:
gw.callback_server_parameters.eager_load = True
gw.callback_server_parameters.daemonize = True
gw.callback_server_parameters.daemonize_connections = True
gw.callback_server_parameters.port = 0
gw.start_callback_server(gw.callback_server_parameters)
cbport = gw._callback_server.server_socket.getsockname()[1]
gw._callback_server.port = cbport
# gateway with real port
gw._python_proxy_port = gw._callback_server.port
# get the GatewayServer object in JVM by ID
jgws = JavaObject("GATEWAY_SERVER", gw._gateway_client)
# update the port of CallbackClient with real port
jgws.resetCallbackClient(jgws.getCallbackClient().getAddress(), gw._python_proxy_port)
|
[
"def",
"ensure_callback_server_started",
"(",
"gw",
")",
":",
"# getattr will fallback to JVM, so we cannot test by hasattr()",
"if",
"\"_callback_server\"",
"not",
"in",
"gw",
".",
"__dict__",
"or",
"gw",
".",
"_callback_server",
"is",
"None",
":",
"gw",
".",
"callback_server_parameters",
".",
"eager_load",
"=",
"True",
"gw",
".",
"callback_server_parameters",
".",
"daemonize",
"=",
"True",
"gw",
".",
"callback_server_parameters",
".",
"daemonize_connections",
"=",
"True",
"gw",
".",
"callback_server_parameters",
".",
"port",
"=",
"0",
"gw",
".",
"start_callback_server",
"(",
"gw",
".",
"callback_server_parameters",
")",
"cbport",
"=",
"gw",
".",
"_callback_server",
".",
"server_socket",
".",
"getsockname",
"(",
")",
"[",
"1",
"]",
"gw",
".",
"_callback_server",
".",
"port",
"=",
"cbport",
"# gateway with real port",
"gw",
".",
"_python_proxy_port",
"=",
"gw",
".",
"_callback_server",
".",
"port",
"# get the GatewayServer object in JVM by ID",
"jgws",
"=",
"JavaObject",
"(",
"\"GATEWAY_SERVER\"",
",",
"gw",
".",
"_gateway_client",
")",
"# update the port of CallbackClient with real port",
"jgws",
".",
"resetCallbackClient",
"(",
"jgws",
".",
"getCallbackClient",
"(",
")",
".",
"getAddress",
"(",
")",
",",
"gw",
".",
"_python_proxy_port",
")"
] |
Start callback server if not already started. The callback server is needed if the Java
driver process needs to callback into the Python driver process to execute Python code.
|
[
"Start",
"callback",
"server",
"if",
"not",
"already",
"started",
".",
"The",
"callback",
"server",
"is",
"needed",
"if",
"the",
"Java",
"driver",
"process",
"needs",
"to",
"callback",
"into",
"the",
"Python",
"driver",
"process",
"to",
"execute",
"Python",
"code",
"."
] |
618d6bff71073c8c93501ab7392c3cc579730f0b
|
https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/java_gateway.py#L192-L212
|
19,254
|
apache/spark
|
python/pyspark/find_spark_home.py
|
_find_spark_home
|
def _find_spark_home():
"""Find the SPARK_HOME."""
# If the environment has SPARK_HOME set trust it.
if "SPARK_HOME" in os.environ:
return os.environ["SPARK_HOME"]
def is_spark_home(path):
"""Takes a path and returns true if the provided path could be a reasonable SPARK_HOME"""
return (os.path.isfile(os.path.join(path, "bin/spark-submit")) and
(os.path.isdir(os.path.join(path, "jars")) or
os.path.isdir(os.path.join(path, "assembly"))))
paths = ["../", os.path.dirname(os.path.realpath(__file__))]
# Add the path of the PySpark module if it exists
if sys.version < "3":
import imp
try:
module_home = imp.find_module("pyspark")[1]
paths.append(module_home)
# If we are installed in edit mode also look two dirs up
paths.append(os.path.join(module_home, "../../"))
except ImportError:
# Not pip installed no worries
pass
else:
from importlib.util import find_spec
try:
module_home = os.path.dirname(find_spec("pyspark").origin)
paths.append(module_home)
# If we are installed in edit mode also look two dirs up
paths.append(os.path.join(module_home, "../../"))
except ImportError:
# Not pip installed no worries
pass
# Normalize the paths
paths = [os.path.abspath(p) for p in paths]
try:
return next(path for path in paths if is_spark_home(path))
except StopIteration:
print("Could not find valid SPARK_HOME while searching {0}".format(paths), file=sys.stderr)
sys.exit(-1)
|
python
|
def _find_spark_home():
"""Find the SPARK_HOME."""
# If the environment has SPARK_HOME set trust it.
if "SPARK_HOME" in os.environ:
return os.environ["SPARK_HOME"]
def is_spark_home(path):
"""Takes a path and returns true if the provided path could be a reasonable SPARK_HOME"""
return (os.path.isfile(os.path.join(path, "bin/spark-submit")) and
(os.path.isdir(os.path.join(path, "jars")) or
os.path.isdir(os.path.join(path, "assembly"))))
paths = ["../", os.path.dirname(os.path.realpath(__file__))]
# Add the path of the PySpark module if it exists
if sys.version < "3":
import imp
try:
module_home = imp.find_module("pyspark")[1]
paths.append(module_home)
# If we are installed in edit mode also look two dirs up
paths.append(os.path.join(module_home, "../../"))
except ImportError:
# Not pip installed no worries
pass
else:
from importlib.util import find_spec
try:
module_home = os.path.dirname(find_spec("pyspark").origin)
paths.append(module_home)
# If we are installed in edit mode also look two dirs up
paths.append(os.path.join(module_home, "../../"))
except ImportError:
# Not pip installed no worries
pass
# Normalize the paths
paths = [os.path.abspath(p) for p in paths]
try:
return next(path for path in paths if is_spark_home(path))
except StopIteration:
print("Could not find valid SPARK_HOME while searching {0}".format(paths), file=sys.stderr)
sys.exit(-1)
|
[
"def",
"_find_spark_home",
"(",
")",
":",
"# If the environment has SPARK_HOME set trust it.",
"if",
"\"SPARK_HOME\"",
"in",
"os",
".",
"environ",
":",
"return",
"os",
".",
"environ",
"[",
"\"SPARK_HOME\"",
"]",
"def",
"is_spark_home",
"(",
"path",
")",
":",
"\"\"\"Takes a path and returns true if the provided path could be a reasonable SPARK_HOME\"\"\"",
"return",
"(",
"os",
".",
"path",
".",
"isfile",
"(",
"os",
".",
"path",
".",
"join",
"(",
"path",
",",
"\"bin/spark-submit\"",
")",
")",
"and",
"(",
"os",
".",
"path",
".",
"isdir",
"(",
"os",
".",
"path",
".",
"join",
"(",
"path",
",",
"\"jars\"",
")",
")",
"or",
"os",
".",
"path",
".",
"isdir",
"(",
"os",
".",
"path",
".",
"join",
"(",
"path",
",",
"\"assembly\"",
")",
")",
")",
")",
"paths",
"=",
"[",
"\"../\"",
",",
"os",
".",
"path",
".",
"dirname",
"(",
"os",
".",
"path",
".",
"realpath",
"(",
"__file__",
")",
")",
"]",
"# Add the path of the PySpark module if it exists",
"if",
"sys",
".",
"version",
"<",
"\"3\"",
":",
"import",
"imp",
"try",
":",
"module_home",
"=",
"imp",
".",
"find_module",
"(",
"\"pyspark\"",
")",
"[",
"1",
"]",
"paths",
".",
"append",
"(",
"module_home",
")",
"# If we are installed in edit mode also look two dirs up",
"paths",
".",
"append",
"(",
"os",
".",
"path",
".",
"join",
"(",
"module_home",
",",
"\"../../\"",
")",
")",
"except",
"ImportError",
":",
"# Not pip installed no worries",
"pass",
"else",
":",
"from",
"importlib",
".",
"util",
"import",
"find_spec",
"try",
":",
"module_home",
"=",
"os",
".",
"path",
".",
"dirname",
"(",
"find_spec",
"(",
"\"pyspark\"",
")",
".",
"origin",
")",
"paths",
".",
"append",
"(",
"module_home",
")",
"# If we are installed in edit mode also look two dirs up",
"paths",
".",
"append",
"(",
"os",
".",
"path",
".",
"join",
"(",
"module_home",
",",
"\"../../\"",
")",
")",
"except",
"ImportError",
":",
"# Not pip installed no worries",
"pass",
"# Normalize the paths",
"paths",
"=",
"[",
"os",
".",
"path",
".",
"abspath",
"(",
"p",
")",
"for",
"p",
"in",
"paths",
"]",
"try",
":",
"return",
"next",
"(",
"path",
"for",
"path",
"in",
"paths",
"if",
"is_spark_home",
"(",
"path",
")",
")",
"except",
"StopIteration",
":",
"print",
"(",
"\"Could not find valid SPARK_HOME while searching {0}\"",
".",
"format",
"(",
"paths",
")",
",",
"file",
"=",
"sys",
".",
"stderr",
")",
"sys",
".",
"exit",
"(",
"-",
"1",
")"
] |
Find the SPARK_HOME.
|
[
"Find",
"the",
"SPARK_HOME",
"."
] |
618d6bff71073c8c93501ab7392c3cc579730f0b
|
https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/find_spark_home.py#L28-L71
|
19,255
|
apache/spark
|
examples/src/main/python/pagerank.py
|
computeContribs
|
def computeContribs(urls, rank):
"""Calculates URL contributions to the rank of other URLs."""
num_urls = len(urls)
for url in urls:
yield (url, rank / num_urls)
|
python
|
def computeContribs(urls, rank):
"""Calculates URL contributions to the rank of other URLs."""
num_urls = len(urls)
for url in urls:
yield (url, rank / num_urls)
|
[
"def",
"computeContribs",
"(",
"urls",
",",
"rank",
")",
":",
"num_urls",
"=",
"len",
"(",
"urls",
")",
"for",
"url",
"in",
"urls",
":",
"yield",
"(",
"url",
",",
"rank",
"/",
"num_urls",
")"
] |
Calculates URL contributions to the rank of other URLs.
|
[
"Calculates",
"URL",
"contributions",
"to",
"the",
"rank",
"of",
"other",
"URLs",
"."
] |
618d6bff71073c8c93501ab7392c3cc579730f0b
|
https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/examples/src/main/python/pagerank.py#L34-L38
|
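computeContribs above emits (url, rank / num_links) pairs; the sketch below runs one PageRank iteration on top of it in plain Python (no Spark), using the usual 0.15/0.85 damping that the surrounding example applies. The toy link graph is made up.

# Sketch: one PageRank iteration driven by computeContribs (plain Python, toy graph).
from collections import defaultdict

links = {"a": ["b", "c"], "b": ["c"], "c": ["a"]}
ranks = {url: 1.0 for url in links}

def computeContribs(urls, rank):
    num_urls = len(urls)
    for url in urls:
        yield (url, rank / num_urls)

new_ranks = defaultdict(float)
for url, neighbors in links.items():
    for target, contrib in computeContribs(neighbors, ranks[url]):
        new_ranks[target] += contrib
ranks = {url: 0.15 + 0.85 * total for url, total in new_ranks.items()}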
19,256
|
apache/spark
|
python/pyspark/ml/image.py
|
_ImageSchema.imageSchema
|
def imageSchema(self):
"""
Returns the image schema.
:return: a :class:`StructType` with a single column of images
named "image" (nullable) and having the same type returned by :meth:`columnSchema`.
.. versionadded:: 2.3.0
"""
if self._imageSchema is None:
ctx = SparkContext._active_spark_context
jschema = ctx._jvm.org.apache.spark.ml.image.ImageSchema.imageSchema()
self._imageSchema = _parse_datatype_json_string(jschema.json())
return self._imageSchema
|
python
|
def imageSchema(self):
"""
Returns the image schema.
:return: a :class:`StructType` with a single column of images
named "image" (nullable) and having the same type returned by :meth:`columnSchema`.
.. versionadded:: 2.3.0
"""
if self._imageSchema is None:
ctx = SparkContext._active_spark_context
jschema = ctx._jvm.org.apache.spark.ml.image.ImageSchema.imageSchema()
self._imageSchema = _parse_datatype_json_string(jschema.json())
return self._imageSchema
|
[
"def",
"imageSchema",
"(",
"self",
")",
":",
"if",
"self",
".",
"_imageSchema",
"is",
"None",
":",
"ctx",
"=",
"SparkContext",
".",
"_active_spark_context",
"jschema",
"=",
"ctx",
".",
"_jvm",
".",
"org",
".",
"apache",
".",
"spark",
".",
"ml",
".",
"image",
".",
"ImageSchema",
".",
"imageSchema",
"(",
")",
"self",
".",
"_imageSchema",
"=",
"_parse_datatype_json_string",
"(",
"jschema",
".",
"json",
"(",
")",
")",
"return",
"self",
".",
"_imageSchema"
] |
Returns the image schema.
:return: a :class:`StructType` with a single column of images
named "image" (nullable) and having the same type returned by :meth:`columnSchema`.
.. versionadded:: 2.3.0
|
[
"Returns",
"the",
"image",
"schema",
"."
] |
618d6bff71073c8c93501ab7392c3cc579730f0b
|
https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/ml/image.py#L55-L69
|
19,257
|
apache/spark
|
python/pyspark/ml/image.py
|
_ImageSchema.ocvTypes
|
def ocvTypes(self):
"""
Returns the OpenCV type mapping supported.
:return: a dictionary containing the OpenCV type mapping supported.
.. versionadded:: 2.3.0
"""
if self._ocvTypes is None:
ctx = SparkContext._active_spark_context
self._ocvTypes = dict(ctx._jvm.org.apache.spark.ml.image.ImageSchema.javaOcvTypes())
return self._ocvTypes
|
python
|
def ocvTypes(self):
"""
Returns the OpenCV type mapping supported.
:return: a dictionary containing the OpenCV type mapping supported.
.. versionadded:: 2.3.0
"""
if self._ocvTypes is None:
ctx = SparkContext._active_spark_context
self._ocvTypes = dict(ctx._jvm.org.apache.spark.ml.image.ImageSchema.javaOcvTypes())
return self._ocvTypes
|
[
"def",
"ocvTypes",
"(",
"self",
")",
":",
"if",
"self",
".",
"_ocvTypes",
"is",
"None",
":",
"ctx",
"=",
"SparkContext",
".",
"_active_spark_context",
"self",
".",
"_ocvTypes",
"=",
"dict",
"(",
"ctx",
".",
"_jvm",
".",
"org",
".",
"apache",
".",
"spark",
".",
"ml",
".",
"image",
".",
"ImageSchema",
".",
"javaOcvTypes",
"(",
")",
")",
"return",
"self",
".",
"_ocvTypes"
] |
Returns the OpenCV type mapping supported.
:return: a dictionary containing the OpenCV type mapping supported.
.. versionadded:: 2.3.0
|
[
"Returns",
"the",
"OpenCV",
"type",
"mapping",
"supported",
"."
] |
618d6bff71073c8c93501ab7392c3cc579730f0b
|
https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/ml/image.py#L72-L84
|
19,258
|
apache/spark
|
python/pyspark/ml/image.py
|
_ImageSchema.columnSchema
|
def columnSchema(self):
"""
Returns the schema for the image column.
:return: a :class:`StructType` for image column,
``struct<origin:string, height:int, width:int, nChannels:int, mode:int, data:binary>``.
.. versionadded:: 2.4.0
"""
if self._columnSchema is None:
ctx = SparkContext._active_spark_context
jschema = ctx._jvm.org.apache.spark.ml.image.ImageSchema.columnSchema()
self._columnSchema = _parse_datatype_json_string(jschema.json())
return self._columnSchema
|
python
|
def columnSchema(self):
"""
Returns the schema for the image column.
:return: a :class:`StructType` for image column,
``struct<origin:string, height:int, width:int, nChannels:int, mode:int, data:binary>``.
.. versionadded:: 2.4.0
"""
if self._columnSchema is None:
ctx = SparkContext._active_spark_context
jschema = ctx._jvm.org.apache.spark.ml.image.ImageSchema.columnSchema()
self._columnSchema = _parse_datatype_json_string(jschema.json())
return self._columnSchema
|
[
"def",
"columnSchema",
"(",
"self",
")",
":",
"if",
"self",
".",
"_columnSchema",
"is",
"None",
":",
"ctx",
"=",
"SparkContext",
".",
"_active_spark_context",
"jschema",
"=",
"ctx",
".",
"_jvm",
".",
"org",
".",
"apache",
".",
"spark",
".",
"ml",
".",
"image",
".",
"ImageSchema",
".",
"columnSchema",
"(",
")",
"self",
".",
"_columnSchema",
"=",
"_parse_datatype_json_string",
"(",
"jschema",
".",
"json",
"(",
")",
")",
"return",
"self",
".",
"_columnSchema"
] |
Returns the schema for the image column.
:return: a :class:`StructType` for image column,
``struct<origin:string, height:int, width:int, nChannels:int, mode:int, data:binary>``.
.. versionadded:: 2.4.0
|
[
"Returns",
"the",
"schema",
"for",
"the",
"image",
"column",
"."
] |
618d6bff71073c8c93501ab7392c3cc579730f0b
|
https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/ml/image.py#L87-L101
|
19,259
|
apache/spark
|
python/pyspark/ml/image.py
|
_ImageSchema.imageFields
|
def imageFields(self):
"""
Returns field names of image columns.
:return: a list of field names.
.. versionadded:: 2.3.0
"""
if self._imageFields is None:
ctx = SparkContext._active_spark_context
self._imageFields = list(ctx._jvm.org.apache.spark.ml.image.ImageSchema.imageFields())
return self._imageFields
|
python
|
def imageFields(self):
"""
Returns field names of image columns.
:return: a list of field names.
.. versionadded:: 2.3.0
"""
if self._imageFields is None:
ctx = SparkContext._active_spark_context
self._imageFields = list(ctx._jvm.org.apache.spark.ml.image.ImageSchema.imageFields())
return self._imageFields
|
[
"def",
"imageFields",
"(",
"self",
")",
":",
"if",
"self",
".",
"_imageFields",
"is",
"None",
":",
"ctx",
"=",
"SparkContext",
".",
"_active_spark_context",
"self",
".",
"_imageFields",
"=",
"list",
"(",
"ctx",
".",
"_jvm",
".",
"org",
".",
"apache",
".",
"spark",
".",
"ml",
".",
"image",
".",
"ImageSchema",
".",
"imageFields",
"(",
")",
")",
"return",
"self",
".",
"_imageFields"
] |
Returns field names of image columns.
:return: a list of field names.
.. versionadded:: 2.3.0
|
[
"Returns",
"field",
"names",
"of",
"image",
"columns",
"."
] |
618d6bff71073c8c93501ab7392c3cc579730f0b
|
https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/ml/image.py#L104-L116
|
19,260
|
apache/spark
|
python/pyspark/ml/image.py
|
_ImageSchema.undefinedImageType
|
def undefinedImageType(self):
"""
Returns the name of undefined image type for the invalid image.
.. versionadded:: 2.3.0
"""
if self._undefinedImageType is None:
ctx = SparkContext._active_spark_context
self._undefinedImageType = \
ctx._jvm.org.apache.spark.ml.image.ImageSchema.undefinedImageType()
return self._undefinedImageType
|
python
|
def undefinedImageType(self):
"""
Returns the name of undefined image type for the invalid image.
.. versionadded:: 2.3.0
"""
if self._undefinedImageType is None:
ctx = SparkContext._active_spark_context
self._undefinedImageType = \
ctx._jvm.org.apache.spark.ml.image.ImageSchema.undefinedImageType()
return self._undefinedImageType
|
[
"def",
"undefinedImageType",
"(",
"self",
")",
":",
"if",
"self",
".",
"_undefinedImageType",
"is",
"None",
":",
"ctx",
"=",
"SparkContext",
".",
"_active_spark_context",
"self",
".",
"_undefinedImageType",
"=",
"ctx",
".",
"_jvm",
".",
"org",
".",
"apache",
".",
"spark",
".",
"ml",
".",
"image",
".",
"ImageSchema",
".",
"undefinedImageType",
"(",
")",
"return",
"self",
".",
"_undefinedImageType"
] |
Returns the name of undefined image type for the invalid image.
.. versionadded:: 2.3.0
|
[
"Returns",
"the",
"name",
"of",
"undefined",
"image",
"type",
"for",
"the",
"invalid",
"image",
"."
] |
618d6bff71073c8c93501ab7392c3cc579730f0b
|
https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/ml/image.py#L119-L130
|
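A combined inspection sketch for the ImageSchema properties documented in the entries above (imageSchema, ocvTypes, columnSchema, imageFields, undefinedImageType). It assumes Spark 2.3+ and an active SparkSession, since each property lazily calls into the JVM.

# Sketch: inspect the lazily-initialized ImageSchema properties.
from pyspark.sql import SparkSession
from pyspark.ml.image import ImageSchema

spark = SparkSession.builder.master("local[1]").getOrCreate()
print(ImageSchema.imageFields)                # origin, height, width, nChannels, mode, data
print(ImageSchema.undefinedImageType)         # name used for invalid images
print(ImageSchema.columnSchema)               # StructType describing a single image struct
print(sorted(ImageSchema.ocvTypes))           # OpenCV mode names such as CV_8UC1, CV_8UC3, ...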
19,261
|
apache/spark
|
python/pyspark/ml/image.py
|
_ImageSchema.toNDArray
|
def toNDArray(self, image):
"""
Converts an image to an array with metadata.
:param `Row` image: A row that contains the image to be converted. It should
have the attributes specified in `ImageSchema.imageSchema`.
:return: a `numpy.ndarray` that is an image.
.. versionadded:: 2.3.0
"""
if not isinstance(image, Row):
raise TypeError(
"image argument should be pyspark.sql.types.Row; however, "
"it got [%s]." % type(image))
if any(not hasattr(image, f) for f in self.imageFields):
raise ValueError(
"image argument should have attributes specified in "
"ImageSchema.imageSchema [%s]." % ", ".join(self.imageFields))
height = image.height
width = image.width
nChannels = image.nChannels
return np.ndarray(
shape=(height, width, nChannels),
dtype=np.uint8,
buffer=image.data,
strides=(width * nChannels, nChannels, 1))
|
python
|
def toNDArray(self, image):
"""
Converts an image to an array with metadata.
:param `Row` image: A row that contains the image to be converted. It should
have the attributes specified in `ImageSchema.imageSchema`.
:return: a `numpy.ndarray` that is an image.
.. versionadded:: 2.3.0
"""
if not isinstance(image, Row):
raise TypeError(
"image argument should be pyspark.sql.types.Row; however, "
"it got [%s]." % type(image))
if any(not hasattr(image, f) for f in self.imageFields):
raise ValueError(
"image argument should have attributes specified in "
"ImageSchema.imageSchema [%s]." % ", ".join(self.imageFields))
height = image.height
width = image.width
nChannels = image.nChannels
return np.ndarray(
shape=(height, width, nChannels),
dtype=np.uint8,
buffer=image.data,
strides=(width * nChannels, nChannels, 1))
|
[
"def",
"toNDArray",
"(",
"self",
",",
"image",
")",
":",
"if",
"not",
"isinstance",
"(",
"image",
",",
"Row",
")",
":",
"raise",
"TypeError",
"(",
"\"image argument should be pyspark.sql.types.Row; however, \"",
"\"it got [%s].\"",
"%",
"type",
"(",
"image",
")",
")",
"if",
"any",
"(",
"not",
"hasattr",
"(",
"image",
",",
"f",
")",
"for",
"f",
"in",
"self",
".",
"imageFields",
")",
":",
"raise",
"ValueError",
"(",
"\"image argument should have attributes specified in \"",
"\"ImageSchema.imageSchema [%s].\"",
"%",
"\", \"",
".",
"join",
"(",
"self",
".",
"imageFields",
")",
")",
"height",
"=",
"image",
".",
"height",
"width",
"=",
"image",
".",
"width",
"nChannels",
"=",
"image",
".",
"nChannels",
"return",
"np",
".",
"ndarray",
"(",
"shape",
"=",
"(",
"height",
",",
"width",
",",
"nChannels",
")",
",",
"dtype",
"=",
"np",
".",
"uint8",
",",
"buffer",
"=",
"image",
".",
"data",
",",
"strides",
"=",
"(",
"width",
"*",
"nChannels",
",",
"nChannels",
",",
"1",
")",
")"
] |
Converts an image to an array with metadata.
:param `Row` image: A row that contains the image to be converted. It should
have the attributes specified in `ImageSchema.imageSchema`.
:return: a `numpy.ndarray` that is an image.
.. versionadded:: 2.3.0
|
[
"Converts",
"an",
"image",
"to",
"an",
"array",
"with",
"metadata",
"."
] |
618d6bff71073c8c93501ab7392c3cc579730f0b
|
https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/ml/image.py#L132-L160
|
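A usage sketch for the toNDArray entry above: load an image DataFrame with the built-in image source (Spark 2.4+) and view one row's pixels as a NumPy array. The kittens path is the sample directory shipped in the Spark repository; any image folder works.

# Sketch: image row -> NumPy array (requires an active SparkSession).
from pyspark.sql import SparkSession
from pyspark.ml.image import ImageSchema

spark = SparkSession.builder.master("local[1]").getOrCreate()
df = spark.read.format("image").load("data/mllib/images/origin/kittens")
row = df.select("image.*").first()            # Row(origin, height, width, nChannels, mode, data)
arr = ImageSchema.toNDArray(row)              # shape (height, width, nChannels), dtype uint8
print(arr.shape, arr.dtype)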
19,262
|
apache/spark
|
python/pyspark/ml/image.py
|
_ImageSchema.toImage
|
def toImage(self, array, origin=""):
"""
Converts an array with metadata to a two-dimensional image.
:param `numpy.ndarray` array: The array to convert to image.
:param str origin: Path to the image, optional.
:return: a :class:`Row` that is a two dimensional image.
.. versionadded:: 2.3.0
"""
if not isinstance(array, np.ndarray):
raise TypeError(
"array argument should be numpy.ndarray; however, it got [%s]." % type(array))
if array.ndim != 3:
raise ValueError("Invalid array shape")
height, width, nChannels = array.shape
ocvTypes = ImageSchema.ocvTypes
if nChannels == 1:
mode = ocvTypes["CV_8UC1"]
elif nChannels == 3:
mode = ocvTypes["CV_8UC3"]
elif nChannels == 4:
mode = ocvTypes["CV_8UC4"]
else:
raise ValueError("Invalid number of channels")
# Running `bytearray(numpy.array([1]))` fails in specific Python versions
# with a specific Numpy version, for example in Python 3.6.0 and NumPy 1.13.3.
# Here, it avoids it by converting it to bytes.
if LooseVersion(np.__version__) >= LooseVersion('1.9'):
data = bytearray(array.astype(dtype=np.uint8).ravel().tobytes())
else:
# Numpy prior to 1.9 don't have `tobytes` method.
data = bytearray(array.astype(dtype=np.uint8).ravel())
# Creating new Row with _create_row(), because Row(name = value, ... )
# orders fields by name, which conflicts with expected schema order
# when the new DataFrame is created by UDF
return _create_row(self.imageFields,
[origin, height, width, nChannels, mode, data])
|
python
|
def toImage(self, array, origin=""):
"""
Converts an array with metadata to a two-dimensional image.
:param `numpy.ndarray` array: The array to convert to image.
:param str origin: Path to the image, optional.
:return: a :class:`Row` that is a two dimensional image.
.. versionadded:: 2.3.0
"""
if not isinstance(array, np.ndarray):
raise TypeError(
"array argument should be numpy.ndarray; however, it got [%s]." % type(array))
if array.ndim != 3:
raise ValueError("Invalid array shape")
height, width, nChannels = array.shape
ocvTypes = ImageSchema.ocvTypes
if nChannels == 1:
mode = ocvTypes["CV_8UC1"]
elif nChannels == 3:
mode = ocvTypes["CV_8UC3"]
elif nChannels == 4:
mode = ocvTypes["CV_8UC4"]
else:
raise ValueError("Invalid number of channels")
# Running `bytearray(numpy.array([1]))` fails in specific Python versions
# with a specific Numpy version, for example in Python 3.6.0 and NumPy 1.13.3.
# Here, it avoids it by converting it to bytes.
if LooseVersion(np.__version__) >= LooseVersion('1.9'):
data = bytearray(array.astype(dtype=np.uint8).ravel().tobytes())
else:
# Numpy prior to 1.9 don't have `tobytes` method.
data = bytearray(array.astype(dtype=np.uint8).ravel())
# Creating new Row with _create_row(), because Row(name = value, ... )
# orders fields by name, which conflicts with expected schema order
# when the new DataFrame is created by UDF
return _create_row(self.imageFields,
[origin, height, width, nChannels, mode, data])
|
[
"def",
"toImage",
"(",
"self",
",",
"array",
",",
"origin",
"=",
"\"\"",
")",
":",
"if",
"not",
"isinstance",
"(",
"array",
",",
"np",
".",
"ndarray",
")",
":",
"raise",
"TypeError",
"(",
"\"array argument should be numpy.ndarray; however, it got [%s].\"",
"%",
"type",
"(",
"array",
")",
")",
"if",
"array",
".",
"ndim",
"!=",
"3",
":",
"raise",
"ValueError",
"(",
"\"Invalid array shape\"",
")",
"height",
",",
"width",
",",
"nChannels",
"=",
"array",
".",
"shape",
"ocvTypes",
"=",
"ImageSchema",
".",
"ocvTypes",
"if",
"nChannels",
"==",
"1",
":",
"mode",
"=",
"ocvTypes",
"[",
"\"CV_8UC1\"",
"]",
"elif",
"nChannels",
"==",
"3",
":",
"mode",
"=",
"ocvTypes",
"[",
"\"CV_8UC3\"",
"]",
"elif",
"nChannels",
"==",
"4",
":",
"mode",
"=",
"ocvTypes",
"[",
"\"CV_8UC4\"",
"]",
"else",
":",
"raise",
"ValueError",
"(",
"\"Invalid number of channels\"",
")",
"# Running `bytearray(numpy.array([1]))` fails in specific Python versions",
"# with a specific Numpy version, for example in Python 3.6.0 and NumPy 1.13.3.",
"# Here, it avoids it by converting it to bytes.",
"if",
"LooseVersion",
"(",
"np",
".",
"__version__",
")",
">=",
"LooseVersion",
"(",
"'1.9'",
")",
":",
"data",
"=",
"bytearray",
"(",
"array",
".",
"astype",
"(",
"dtype",
"=",
"np",
".",
"uint8",
")",
".",
"ravel",
"(",
")",
".",
"tobytes",
"(",
")",
")",
"else",
":",
"# Numpy prior to 1.9 don't have `tobytes` method.",
"data",
"=",
"bytearray",
"(",
"array",
".",
"astype",
"(",
"dtype",
"=",
"np",
".",
"uint8",
")",
".",
"ravel",
"(",
")",
")",
"# Creating new Row with _create_row(), because Row(name = value, ... )",
"# orders fields by name, which conflicts with expected schema order",
"# when the new DataFrame is created by UDF",
"return",
"_create_row",
"(",
"self",
".",
"imageFields",
",",
"[",
"origin",
",",
"height",
",",
"width",
",",
"nChannels",
",",
"mode",
",",
"data",
"]",
")"
] |
Converts an array with metadata to a two-dimensional image.
:param `numpy.ndarray` array: The array to convert to image.
:param str origin: Path to the image, optional.
:return: a :class:`Row` that is a two dimensional image.
.. versionadded:: 2.3.0
|
[
"Converts",
"an",
"array",
"with",
"metadata",
"to",
"a",
"two",
"-",
"dimensional",
"image",
"."
] |
618d6bff71073c8c93501ab7392c3cc579730f0b
|
https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/ml/image.py#L162-L204
|
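A minimal round-trip sketch for the toImage helper above. It assumes an active SparkSession (the image schema fields are resolved through the JVM) and NumPy installed; the origin string and array values are made up for illustration, and the array follows the (height, width, nChannels) uint8 layout the function checks for.

from pyspark.sql import SparkSession
from pyspark.ml.image import ImageSchema
import numpy as np

spark = SparkSession.builder.getOrCreate()

# A 2x2 image with 3 channels (OpenCV BGR order), dtype uint8 as expected.
arr = np.zeros((2, 2, 3), dtype=np.uint8)
arr[0, 0] = [255, 0, 0]

# toImage returns a Row matching ImageSchema.imageSchema:
# origin, height, width, nChannels, mode, data.
img_row = ImageSchema.toImage(arr, origin="memory://example")
print(img_row.height, img_row.width, img_row.nChannels)

# toNDArray, the companion converter, restores the array from the Row.
restored = ImageSchema.toNDArray(img_row)
assert restored.shape == (2, 2, 3)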
19,263
|
apache/spark
|
python/pyspark/ml/image.py
|
_ImageSchema.readImages
|
def readImages(self, path, recursive=False, numPartitions=-1,
dropImageFailures=False, sampleRatio=1.0, seed=0):
"""
Reads the directory of images from the local or remote source.
.. note:: If multiple jobs are run in parallel with different sampleRatio or recursive flag,
there may be a race condition where one job overwrites the hadoop configs of another.
.. note:: If sample ratio is less than 1, sampling uses a PathFilter that is efficient but
potentially non-deterministic.
.. note:: Deprecated in 2.4.0. Use `spark.read.format("image").load(path)` instead and
this `readImages` will be removed in 3.0.0.
:param str path: Path to the image directory.
:param bool recursive: Recursive search flag.
:param int numPartitions: Number of DataFrame partitions.
:param bool dropImageFailures: Drop the files that are not valid images.
:param float sampleRatio: Fraction of the images loaded.
:param int seed: Random number seed.
:return: a :class:`DataFrame` with a single column of "images",
see ImageSchema for details.
>>> df = ImageSchema.readImages('data/mllib/images/origin/kittens', recursive=True)
>>> df.count()
5
.. versionadded:: 2.3.0
"""
warnings.warn("`ImageSchema.readImages` is deprecated. " +
"Use `spark.read.format(\"image\").load(path)` instead.", DeprecationWarning)
spark = SparkSession.builder.getOrCreate()
image_schema = spark._jvm.org.apache.spark.ml.image.ImageSchema
jsession = spark._jsparkSession
jresult = image_schema.readImages(path, jsession, recursive, numPartitions,
dropImageFailures, float(sampleRatio), seed)
return DataFrame(jresult, spark._wrapped)
|
python
|
def readImages(self, path, recursive=False, numPartitions=-1,
dropImageFailures=False, sampleRatio=1.0, seed=0):
"""
Reads the directory of images from the local or remote source.
.. note:: If multiple jobs are run in parallel with different sampleRatio or recursive flag,
there may be a race condition where one job overwrites the hadoop configs of another.
.. note:: If sample ratio is less than 1, sampling uses a PathFilter that is efficient but
potentially non-deterministic.
.. note:: Deprecated in 2.4.0. Use `spark.read.format("image").load(path)` instead and
this `readImages` will be removed in 3.0.0.
:param str path: Path to the image directory.
:param bool recursive: Recursive search flag.
:param int numPartitions: Number of DataFrame partitions.
:param bool dropImageFailures: Drop the files that are not valid images.
:param float sampleRatio: Fraction of the images loaded.
:param int seed: Random number seed.
:return: a :class:`DataFrame` with a single column of "images",
see ImageSchema for details.
>>> df = ImageSchema.readImages('data/mllib/images/origin/kittens', recursive=True)
>>> df.count()
5
.. versionadded:: 2.3.0
"""
warnings.warn("`ImageSchema.readImages` is deprecated. " +
"Use `spark.read.format(\"image\").load(path)` instead.", DeprecationWarning)
spark = SparkSession.builder.getOrCreate()
image_schema = spark._jvm.org.apache.spark.ml.image.ImageSchema
jsession = spark._jsparkSession
jresult = image_schema.readImages(path, jsession, recursive, numPartitions,
dropImageFailures, float(sampleRatio), seed)
return DataFrame(jresult, spark._wrapped)
|
[
"def",
"readImages",
"(",
"self",
",",
"path",
",",
"recursive",
"=",
"False",
",",
"numPartitions",
"=",
"-",
"1",
",",
"dropImageFailures",
"=",
"False",
",",
"sampleRatio",
"=",
"1.0",
",",
"seed",
"=",
"0",
")",
":",
"warnings",
".",
"warn",
"(",
"\"`ImageSchema.readImage` is deprecated. \"",
"+",
"\"Use `spark.read.format(\\\"image\\\").load(path)` instead.\"",
",",
"DeprecationWarning",
")",
"spark",
"=",
"SparkSession",
".",
"builder",
".",
"getOrCreate",
"(",
")",
"image_schema",
"=",
"spark",
".",
"_jvm",
".",
"org",
".",
"apache",
".",
"spark",
".",
"ml",
".",
"image",
".",
"ImageSchema",
"jsession",
"=",
"spark",
".",
"_jsparkSession",
"jresult",
"=",
"image_schema",
".",
"readImages",
"(",
"path",
",",
"jsession",
",",
"recursive",
",",
"numPartitions",
",",
"dropImageFailures",
",",
"float",
"(",
"sampleRatio",
")",
",",
"seed",
")",
"return",
"DataFrame",
"(",
"jresult",
",",
"spark",
".",
"_wrapped",
")"
] |
Reads the directory of images from the local or remote source.
.. note:: If multiple jobs are run in parallel with different sampleRatio or recursive flag,
there may be a race condition where one job overwrites the hadoop configs of another.
.. note:: If sample ratio is less than 1, sampling uses a PathFilter that is efficient but
potentially non-deterministic.
.. note:: Deprecated in 2.4.0. Use `spark.read.format("image").load(path)` instead and
this `readImages` will be removed in 3.0.0.
:param str path: Path to the image directory.
:param bool recursive: Recursive search flag.
:param int numPartitions: Number of DataFrame partitions.
:param bool dropImageFailures: Drop the files that are not valid images.
:param float sampleRatio: Fraction of the images loaded.
:param int seed: Random number seed.
:return: a :class:`DataFrame` with a single column of "images",
see ImageSchema for details.
>>> df = ImageSchema.readImages('data/mllib/images/origin/kittens', recursive=True)
>>> df.count()
5
.. versionadded:: 2.3.0
|
[
"Reads",
"the",
"directory",
"of",
"images",
"from",
"the",
"local",
"or",
"remote",
"source",
"."
] |
618d6bff71073c8c93501ab7392c3cc579730f0b
|
https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/ml/image.py#L206-L242
|
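A sketch of the replacement API named in the deprecation note above. It assumes the sample image directory from the doctest is available; the `dropInvalid` option is the image data source's rough analogue of `dropImageFailures`.

from pyspark.sql import SparkSession

spark = SparkSession.builder.getOrCreate()

# Deprecated form (still works here, but emits a DeprecationWarning):
# df = ImageSchema.readImages("data/mllib/images/origin/kittens", recursive=True)

# Recommended form; the result has a single struct column named "image"
# with origin/height/width/nChannels/mode/data fields.
df = (spark.read.format("image")
      .option("dropInvalid", True)
      .load("data/mllib/images/origin/kittens"))
df.printSchema()
print(df.count())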
19,264
|
apache/spark
|
python/pyspark/ml/wrapper.py
|
JavaWrapper._create_from_java_class
|
def _create_from_java_class(cls, java_class, *args):
"""
Construct this object from given Java classname and arguments
"""
java_obj = JavaWrapper._new_java_obj(java_class, *args)
return cls(java_obj)
|
python
|
def _create_from_java_class(cls, java_class, *args):
"""
Construct this object from given Java classname and arguments
"""
java_obj = JavaWrapper._new_java_obj(java_class, *args)
return cls(java_obj)
|
[
"def",
"_create_from_java_class",
"(",
"cls",
",",
"java_class",
",",
"*",
"args",
")",
":",
"java_obj",
"=",
"JavaWrapper",
".",
"_new_java_obj",
"(",
"java_class",
",",
"*",
"args",
")",
"return",
"cls",
"(",
"java_obj",
")"
] |
Construct this object from given Java classname and arguments
|
[
"Construct",
"this",
"object",
"from",
"given",
"Java",
"classname",
"and",
"arguments"
] |
618d6bff71073c8c93501ab7392c3cc579730f0b
|
https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/ml/wrapper.py#L44-L49
|
19,265
|
apache/spark
|
python/pyspark/ml/wrapper.py
|
JavaWrapper._new_java_array
|
def _new_java_array(pylist, java_class):
"""
Create a Java array of given java_class type. Useful for
calling a method with a Scala Array from Python with Py4J.
If the param pylist is a 2D array, then a 2D java array will be returned.
The returned 2D java array is a square, non-jagged 2D array that is big
enough for all elements. The empty slots in the inner Java arrays will
be filled with null to make the non-jagged 2D array.
:param pylist:
Python list to convert to a Java Array.
:param java_class:
Java class to specify the type of Array. Should be in the
form of sc._gateway.jvm.* (sc is a valid Spark Context).
:return:
Java Array of converted pylist.
Example primitive Java classes:
- basestring -> sc._gateway.jvm.java.lang.String
- int -> sc._gateway.jvm.java.lang.Integer
- float -> sc._gateway.jvm.java.lang.Double
- bool -> sc._gateway.jvm.java.lang.Boolean
"""
sc = SparkContext._active_spark_context
java_array = None
if len(pylist) > 0 and isinstance(pylist[0], list):
# If pylist is a 2D array, then a 2D java array will be created.
# The 2D array is a square, non-jagged 2D array that is big enough for all elements.
inner_array_length = 0
for i in xrange(len(pylist)):
inner_array_length = max(inner_array_length, len(pylist[i]))
java_array = sc._gateway.new_array(java_class, len(pylist), inner_array_length)
for i in xrange(len(pylist)):
for j in xrange(len(pylist[i])):
java_array[i][j] = pylist[i][j]
else:
java_array = sc._gateway.new_array(java_class, len(pylist))
for i in xrange(len(pylist)):
java_array[i] = pylist[i]
return java_array
|
python
|
def _new_java_array(pylist, java_class):
"""
Create a Java array of given java_class type. Useful for
calling a method with a Scala Array from Python with Py4J.
If the param pylist is a 2D array, then a 2D java array will be returned.
The returned 2D java array is a square, non-jagged 2D array that is big
enough for all elements. The empty slots in the inner Java arrays will
be filled with null to make the non-jagged 2D array.
:param pylist:
Python list to convert to a Java Array.
:param java_class:
Java class to specify the type of Array. Should be in the
form of sc._gateway.jvm.* (sc is a valid Spark Context).
:return:
Java Array of converted pylist.
Example primitive Java classes:
- basestring -> sc._gateway.jvm.java.lang.String
- int -> sc._gateway.jvm.java.lang.Integer
- float -> sc._gateway.jvm.java.lang.Double
- bool -> sc._gateway.jvm.java.lang.Boolean
"""
sc = SparkContext._active_spark_context
java_array = None
if len(pylist) > 0 and isinstance(pylist[0], list):
# If pylist is a 2D array, then a 2D java array will be created.
# The 2D array is a square, non-jagged 2D array that is big enough for all elements.
inner_array_length = 0
for i in xrange(len(pylist)):
inner_array_length = max(inner_array_length, len(pylist[i]))
java_array = sc._gateway.new_array(java_class, len(pylist), inner_array_length)
for i in xrange(len(pylist)):
for j in xrange(len(pylist[i])):
java_array[i][j] = pylist[i][j]
else:
java_array = sc._gateway.new_array(java_class, len(pylist))
for i in xrange(len(pylist)):
java_array[i] = pylist[i]
return java_array
|
[
"def",
"_new_java_array",
"(",
"pylist",
",",
"java_class",
")",
":",
"sc",
"=",
"SparkContext",
".",
"_active_spark_context",
"java_array",
"=",
"None",
"if",
"len",
"(",
"pylist",
")",
">",
"0",
"and",
"isinstance",
"(",
"pylist",
"[",
"0",
"]",
",",
"list",
")",
":",
"# If pylist is a 2D array, then a 2D java array will be created.",
"# The 2D array is a square, non-jagged 2D array that is big enough for all elements.",
"inner_array_length",
"=",
"0",
"for",
"i",
"in",
"xrange",
"(",
"len",
"(",
"pylist",
")",
")",
":",
"inner_array_length",
"=",
"max",
"(",
"inner_array_length",
",",
"len",
"(",
"pylist",
"[",
"i",
"]",
")",
")",
"java_array",
"=",
"sc",
".",
"_gateway",
".",
"new_array",
"(",
"java_class",
",",
"len",
"(",
"pylist",
")",
",",
"inner_array_length",
")",
"for",
"i",
"in",
"xrange",
"(",
"len",
"(",
"pylist",
")",
")",
":",
"for",
"j",
"in",
"xrange",
"(",
"len",
"(",
"pylist",
"[",
"i",
"]",
")",
")",
":",
"java_array",
"[",
"i",
"]",
"[",
"j",
"]",
"=",
"pylist",
"[",
"i",
"]",
"[",
"j",
"]",
"else",
":",
"java_array",
"=",
"sc",
".",
"_gateway",
".",
"new_array",
"(",
"java_class",
",",
"len",
"(",
"pylist",
")",
")",
"for",
"i",
"in",
"xrange",
"(",
"len",
"(",
"pylist",
")",
")",
":",
"java_array",
"[",
"i",
"]",
"=",
"pylist",
"[",
"i",
"]",
"return",
"java_array"
] |
Create a Java array of given java_class type. Useful for
calling a method with a Scala Array from Python with Py4J.
If the param pylist is a 2D array, then a 2D java array will be returned.
The returned 2D java array is a square, non-jagged 2D array that is big
enough for all elements. The empty slots in the inner Java arrays will
be filled with null to make the non-jagged 2D array.
:param pylist:
Python list to convert to a Java Array.
:param java_class:
Java class to specify the type of Array. Should be in the
form of sc._gateway.jvm.* (sc is a valid Spark Context).
:return:
Java Array of converted pylist.
Example primitive Java classes:
- basestring -> sc._gateway.jvm.java.lang.String
- int -> sc._gateway.jvm.java.lang.Integer
- float -> sc._gateway.jvm.java.lang.Double
- bool -> sc._gateway.jvm.java.lang.Boolean
|
[
"Create",
"a",
"Java",
"array",
"of",
"given",
"java_class",
"type",
".",
"Useful",
"for",
"calling",
"a",
"method",
"with",
"a",
"Scala",
"Array",
"from",
"Python",
"with",
"Py4J",
".",
"If",
"the",
"param",
"pylist",
"is",
"a",
"2D",
"array",
"then",
"a",
"2D",
"java",
"array",
"will",
"be",
"returned",
".",
"The",
"returned",
"2D",
"java",
"array",
"is",
"a",
"square",
"non",
"-",
"jagged",
"2D",
"array",
"that",
"is",
"big",
"enough",
"for",
"all",
"elements",
".",
"The",
"empty",
"slots",
"in",
"the",
"inner",
"Java",
"arrays",
"will",
"be",
"filled",
"with",
"null",
"to",
"make",
"the",
"non",
"-",
"jagged",
"2D",
"array",
"."
] |
618d6bff71073c8c93501ab7392c3cc579730f0b
|
https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/ml/wrapper.py#L70-L109
|
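A short sketch against the _new_java_array helper above. It is an underscore-prefixed internal, so treat this as illustration rather than a stable API; an active SparkContext is assumed so the Py4J gateway is available.

from pyspark import SparkContext
from pyspark.ml.wrapper import JavaWrapper

sc = SparkContext.getOrCreate()

# 1D case: builds a java.lang.String[] from a Python list.
jarr = JavaWrapper._new_java_array(["a", "b", "c"],
                                   sc._gateway.jvm.java.lang.String)

# 2D case: inner lists of different lengths are padded with null so the
# resulting Java array is rectangular (non-jagged), as the docstring says.
jarr2d = JavaWrapper._new_java_array([[1, 2, 3], [4]],
                                     sc._gateway.jvm.java.lang.Integer)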
19,266
|
apache/spark
|
python/pyspark/profiler.py
|
ProfilerCollector.add_profiler
|
def add_profiler(self, id, profiler):
""" Add a profiler for RDD `id` """
if not self.profilers:
if self.profile_dump_path:
atexit.register(self.dump_profiles, self.profile_dump_path)
else:
atexit.register(self.show_profiles)
self.profilers.append([id, profiler, False])
|
python
|
def add_profiler(self, id, profiler):
""" Add a profiler for RDD `id` """
if not self.profilers:
if self.profile_dump_path:
atexit.register(self.dump_profiles, self.profile_dump_path)
else:
atexit.register(self.show_profiles)
self.profilers.append([id, profiler, False])
|
[
"def",
"add_profiler",
"(",
"self",
",",
"id",
",",
"profiler",
")",
":",
"if",
"not",
"self",
".",
"profilers",
":",
"if",
"self",
".",
"profile_dump_path",
":",
"atexit",
".",
"register",
"(",
"self",
".",
"dump_profiles",
",",
"self",
".",
"profile_dump_path",
")",
"else",
":",
"atexit",
".",
"register",
"(",
"self",
".",
"show_profiles",
")",
"self",
".",
"profilers",
".",
"append",
"(",
"[",
"id",
",",
"profiler",
",",
"False",
"]",
")"
] |
Add a profiler for RDD `id`
|
[
"Add",
"a",
"profiler",
"for",
"RDD",
"id"
] |
618d6bff71073c8c93501ab7392c3cc579730f0b
|
https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/profiler.py#L43-L51
|
19,267
|
apache/spark
|
python/pyspark/profiler.py
|
ProfilerCollector.show_profiles
|
def show_profiles(self):
""" Print the profile stats to stdout """
for i, (id, profiler, showed) in enumerate(self.profilers):
if not showed and profiler:
profiler.show(id)
# mark it as showed
self.profilers[i][2] = True
|
python
|
def show_profiles(self):
""" Print the profile stats to stdout """
for i, (id, profiler, showed) in enumerate(self.profilers):
if not showed and profiler:
profiler.show(id)
# mark it as showed
self.profilers[i][2] = True
|
[
"def",
"show_profiles",
"(",
"self",
")",
":",
"for",
"i",
",",
"(",
"id",
",",
"profiler",
",",
"showed",
")",
"in",
"enumerate",
"(",
"self",
".",
"profilers",
")",
":",
"if",
"not",
"showed",
"and",
"profiler",
":",
"profiler",
".",
"show",
"(",
"id",
")",
"# mark it as showed",
"self",
".",
"profilers",
"[",
"i",
"]",
"[",
"2",
"]",
"=",
"True"
] |
Print the profile stats to stdout
|
[
"Print",
"the",
"profile",
"stats",
"to",
"stdout"
] |
618d6bff71073c8c93501ab7392c3cc579730f0b
|
https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/profiler.py#L59-L65
|
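The collector above is driven indirectly through SparkContext; a minimal sketch of turning profiling on and flushing the collected stats, assuming no SparkContext is already running in the process and that a local Spark installation is available.

from pyspark import SparkConf, SparkContext

# Profiling must be enabled before the context is created.
conf = SparkConf().set("spark.python.profile", "true")
sc = SparkContext(conf=conf)

sc.parallelize(range(1000)).map(lambda x: x * x).count()

# Delegates to ProfilerCollector.show_profiles(); each RDD's stats are
# printed once and then marked as shown.
sc.show_profiles()

# Alternatively, dump one rdd_<id>.pstats file per RDD into a directory:
# sc.dump_profiles("/tmp/spark_profiles")

sc.stop()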
19,268
|
apache/spark
|
python/pyspark/profiler.py
|
Profiler.show
|
def show(self, id):
""" Print the profile stats to stdout, id is the RDD id """
stats = self.stats()
if stats:
print("=" * 60)
print("Profile of RDD<id=%d>" % id)
print("=" * 60)
stats.sort_stats("time", "cumulative").print_stats()
|
python
|
def show(self, id):
""" Print the profile stats to stdout, id is the RDD id """
stats = self.stats()
if stats:
print("=" * 60)
print("Profile of RDD<id=%d>" % id)
print("=" * 60)
stats.sort_stats("time", "cumulative").print_stats()
|
[
"def",
"show",
"(",
"self",
",",
"id",
")",
":",
"stats",
"=",
"self",
".",
"stats",
"(",
")",
"if",
"stats",
":",
"print",
"(",
"\"=\"",
"*",
"60",
")",
"print",
"(",
"\"Profile of RDD<id=%d>\"",
"%",
"id",
")",
"print",
"(",
"\"=\"",
"*",
"60",
")",
"stats",
".",
"sort_stats",
"(",
"\"time\"",
",",
"\"cumulative\"",
")",
".",
"print_stats",
"(",
")"
] |
Print the profile stats to stdout, id is the RDD id
|
[
"Print",
"the",
"profile",
"stats",
"to",
"stdout",
"id",
"is",
"the",
"RDD",
"id"
] |
618d6bff71073c8c93501ab7392c3cc579730f0b
|
https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/profiler.py#L113-L120
|
19,269
|
apache/spark
|
python/pyspark/profiler.py
|
Profiler.dump
|
def dump(self, id, path):
""" Dump the profile into path, id is the RDD id """
if not os.path.exists(path):
os.makedirs(path)
stats = self.stats()
if stats:
p = os.path.join(path, "rdd_%d.pstats" % id)
stats.dump_stats(p)
|
python
|
def dump(self, id, path):
""" Dump the profile into path, id is the RDD id """
if not os.path.exists(path):
os.makedirs(path)
stats = self.stats()
if stats:
p = os.path.join(path, "rdd_%d.pstats" % id)
stats.dump_stats(p)
|
[
"def",
"dump",
"(",
"self",
",",
"id",
",",
"path",
")",
":",
"if",
"not",
"os",
".",
"path",
".",
"exists",
"(",
"path",
")",
":",
"os",
".",
"makedirs",
"(",
"path",
")",
"stats",
"=",
"self",
".",
"stats",
"(",
")",
"if",
"stats",
":",
"p",
"=",
"os",
".",
"path",
".",
"join",
"(",
"path",
",",
"\"rdd_%d.pstats\"",
"%",
"id",
")",
"stats",
".",
"dump_stats",
"(",
"p",
")"
] |
Dump the profile into path, id is the RDD id
|
[
"Dump",
"the",
"profile",
"into",
"path",
"id",
"is",
"the",
"RDD",
"id"
] |
618d6bff71073c8c93501ab7392c3cc579730f0b
|
https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/profiler.py#L122-L129
|
19,270
|
apache/spark
|
python/pyspark/profiler.py
|
BasicProfiler.profile
|
def profile(self, func):
""" Runs and profiles the method to_profile passed in. A profile object is returned. """
pr = cProfile.Profile()
pr.runcall(func)
st = pstats.Stats(pr)
st.stream = None # make it picklable
st.strip_dirs()
# Adds a new profile to the existing accumulated value
self._accumulator.add(st)
|
python
|
def profile(self, func):
""" Runs and profiles the method to_profile passed in. A profile object is returned. """
pr = cProfile.Profile()
pr.runcall(func)
st = pstats.Stats(pr)
st.stream = None # make it picklable
st.strip_dirs()
# Adds a new profile to the existing accumulated value
self._accumulator.add(st)
|
[
"def",
"profile",
"(",
"self",
",",
"func",
")",
":",
"pr",
"=",
"cProfile",
".",
"Profile",
"(",
")",
"pr",
".",
"runcall",
"(",
"func",
")",
"st",
"=",
"pstats",
".",
"Stats",
"(",
"pr",
")",
"st",
".",
"stream",
"=",
"None",
"# make it picklable",
"st",
".",
"strip_dirs",
"(",
")",
"# Adds a new profile to the existing accumulated value",
"self",
".",
"_accumulator",
".",
"add",
"(",
"st",
")"
] |
Runs and profiles the function passed in. The profile stats are added to the accumulator.
|
[
"Runs",
"and",
"profiles",
"the",
"method",
"to_profile",
"passed",
"in",
".",
"A",
"profile",
"object",
"is",
"returned",
"."
] |
618d6bff71073c8c93501ab7392c3cc579730f0b
|
https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/profiler.py#L158-L167
|
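A sketch of plugging a custom profiler built on BasicProfiler above into a SparkContext via the profiler_cls argument; the class name and report text are hypothetical, and no other SparkContext is assumed to be running.

from pyspark import SparkConf, SparkContext, BasicProfiler

class MyCustomProfiler(BasicProfiler):
    def show(self, id):
        # Override how the collected stats are reported for each RDD.
        print("Custom profile report for RDD<id=%d>" % id)

conf = SparkConf().set("spark.python.profile", "true")
sc = SparkContext("local", "profiler-demo", conf=conf,
                  profiler_cls=MyCustomProfiler)
sc.parallelize(range(1000)).map(lambda x: 2 * x).take(10)
sc.show_profiles()
sc.stop()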
19,271
|
apache/spark
|
python/pyspark/sql/context.py
|
SQLContext.getOrCreate
|
def getOrCreate(cls, sc):
"""
Get the existing SQLContext or create a new one with given SparkContext.
:param sc: SparkContext
"""
if cls._instantiatedContext is None:
jsqlContext = sc._jvm.SQLContext.getOrCreate(sc._jsc.sc())
sparkSession = SparkSession(sc, jsqlContext.sparkSession())
cls(sc, sparkSession, jsqlContext)
return cls._instantiatedContext
|
python
|
def getOrCreate(cls, sc):
"""
Get the existing SQLContext or create a new one with given SparkContext.
:param sc: SparkContext
"""
if cls._instantiatedContext is None:
jsqlContext = sc._jvm.SQLContext.getOrCreate(sc._jsc.sc())
sparkSession = SparkSession(sc, jsqlContext.sparkSession())
cls(sc, sparkSession, jsqlContext)
return cls._instantiatedContext
|
[
"def",
"getOrCreate",
"(",
"cls",
",",
"sc",
")",
":",
"if",
"cls",
".",
"_instantiatedContext",
"is",
"None",
":",
"jsqlContext",
"=",
"sc",
".",
"_jvm",
".",
"SQLContext",
".",
"getOrCreate",
"(",
"sc",
".",
"_jsc",
".",
"sc",
"(",
")",
")",
"sparkSession",
"=",
"SparkSession",
"(",
"sc",
",",
"jsqlContext",
".",
"sparkSession",
"(",
")",
")",
"cls",
"(",
"sc",
",",
"sparkSession",
",",
"jsqlContext",
")",
"return",
"cls",
".",
"_instantiatedContext"
] |
Get the existing SQLContext or create a new one with given SparkContext.
:param sc: SparkContext
|
[
"Get",
"the",
"existing",
"SQLContext",
"or",
"create",
"a",
"new",
"one",
"with",
"given",
"SparkContext",
"."
] |
618d6bff71073c8c93501ab7392c3cc579730f0b
|
https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/sql/context.py#L103-L113
|
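A minimal sketch for the getOrCreate classmethod above; repeated calls hand back the same instantiated context. The small DataFrame is made up for illustration.

from pyspark import SparkContext
from pyspark.sql import SQLContext

sc = SparkContext.getOrCreate()

sqlContext = SQLContext.getOrCreate(sc)
df = sqlContext.createDataFrame([(1, "a"), (2, "b")], ["id", "value"])
df.show()

# A second call returns the already-instantiated context.
assert SQLContext.getOrCreate(sc) is sqlContext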
19,272
|
apache/spark
|
python/pyspark/sql/context.py
|
SQLContext.setConf
|
def setConf(self, key, value):
"""Sets the given Spark SQL configuration property.
"""
self.sparkSession.conf.set(key, value)
|
python
|
def setConf(self, key, value):
"""Sets the given Spark SQL configuration property.
"""
self.sparkSession.conf.set(key, value)
|
[
"def",
"setConf",
"(",
"self",
",",
"key",
",",
"value",
")",
":",
"self",
".",
"sparkSession",
".",
"conf",
".",
"set",
"(",
"key",
",",
"value",
")"
] |
Sets the given Spark SQL configuration property.
|
[
"Sets",
"the",
"given",
"Spark",
"SQL",
"configuration",
"property",
"."
] |
618d6bff71073c8c93501ab7392c3cc579730f0b
|
https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/sql/context.py#L125-L128
|
19,273
|
apache/spark
|
python/pyspark/sql/context.py
|
SQLContext.getConf
|
def getConf(self, key, defaultValue=_NoValue):
"""Returns the value of Spark SQL configuration property for the given key.
If the key is not set and defaultValue is set, return
defaultValue. If the key is not set and defaultValue is not set, return
the system default value.
>>> sqlContext.getConf("spark.sql.shuffle.partitions")
u'200'
>>> sqlContext.getConf("spark.sql.shuffle.partitions", u"10")
u'10'
>>> sqlContext.setConf("spark.sql.shuffle.partitions", u"50")
>>> sqlContext.getConf("spark.sql.shuffle.partitions", u"10")
u'50'
"""
return self.sparkSession.conf.get(key, defaultValue)
|
python
|
def getConf(self, key, defaultValue=_NoValue):
"""Returns the value of Spark SQL configuration property for the given key.
If the key is not set and defaultValue is set, return
defaultValue. If the key is not set and defaultValue is not set, return
the system default value.
>>> sqlContext.getConf("spark.sql.shuffle.partitions")
u'200'
>>> sqlContext.getConf("spark.sql.shuffle.partitions", u"10")
u'10'
>>> sqlContext.setConf("spark.sql.shuffle.partitions", u"50")
>>> sqlContext.getConf("spark.sql.shuffle.partitions", u"10")
u'50'
"""
return self.sparkSession.conf.get(key, defaultValue)
|
[
"def",
"getConf",
"(",
"self",
",",
"key",
",",
"defaultValue",
"=",
"_NoValue",
")",
":",
"return",
"self",
".",
"sparkSession",
".",
"conf",
".",
"get",
"(",
"key",
",",
"defaultValue",
")"
] |
Returns the value of Spark SQL configuration property for the given key.
If the key is not set and defaultValue is set, return
defaultValue. If the key is not set and defaultValue is not set, return
the system default value.
>>> sqlContext.getConf("spark.sql.shuffle.partitions")
u'200'
>>> sqlContext.getConf("spark.sql.shuffle.partitions", u"10")
u'10'
>>> sqlContext.setConf("spark.sql.shuffle.partitions", u"50")
>>> sqlContext.getConf("spark.sql.shuffle.partitions", u"10")
u'50'
|
[
"Returns",
"the",
"value",
"of",
"Spark",
"SQL",
"configuration",
"property",
"for",
"the",
"given",
"key",
"."
] |
618d6bff71073c8c93501ab7392c3cc579730f0b
|
https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/sql/context.py#L132-L147
|
19,274
|
apache/spark
|
python/pyspark/sql/context.py
|
SQLContext.createExternalTable
|
def createExternalTable(self, tableName, path=None, source=None, schema=None, **options):
"""Creates an external table based on the dataset in a data source.
It returns the DataFrame associated with the external table.
The data source is specified by the ``source`` and a set of ``options``.
If ``source`` is not specified, the default data source configured by
``spark.sql.sources.default`` will be used.
Optionally, a schema can be provided as the schema of the returned :class:`DataFrame` and
created external table.
:return: :class:`DataFrame`
"""
return self.sparkSession.catalog.createExternalTable(
tableName, path, source, schema, **options)
|
python
|
def createExternalTable(self, tableName, path=None, source=None, schema=None, **options):
"""Creates an external table based on the dataset in a data source.
It returns the DataFrame associated with the external table.
The data source is specified by the ``source`` and a set of ``options``.
If ``source`` is not specified, the default data source configured by
``spark.sql.sources.default`` will be used.
Optionally, a schema can be provided as the schema of the returned :class:`DataFrame` and
created external table.
:return: :class:`DataFrame`
"""
return self.sparkSession.catalog.createExternalTable(
tableName, path, source, schema, **options)
|
[
"def",
"createExternalTable",
"(",
"self",
",",
"tableName",
",",
"path",
"=",
"None",
",",
"source",
"=",
"None",
",",
"schema",
"=",
"None",
",",
"*",
"*",
"options",
")",
":",
"return",
"self",
".",
"sparkSession",
".",
"catalog",
".",
"createExternalTable",
"(",
"tableName",
",",
"path",
",",
"source",
",",
"schema",
",",
"*",
"*",
"options",
")"
] |
Creates an external table based on the dataset in a data source.
It returns the DataFrame associated with the external table.
The data source is specified by the ``source`` and a set of ``options``.
If ``source`` is not specified, the default data source configured by
``spark.sql.sources.default`` will be used.
Optionally, a schema can be provided as the schema of the returned :class:`DataFrame` and
created external table.
:return: :class:`DataFrame`
|
[
"Creates",
"an",
"external",
"table",
"based",
"on",
"the",
"dataset",
"in",
"a",
"data",
"source",
"."
] |
618d6bff71073c8c93501ab7392c3cc579730f0b
|
https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/sql/context.py#L329-L344
|
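A usage sketch for createExternalTable above; the table name and JSON path are hypothetical and just need to point at data readable by the chosen data source.

from pyspark import SparkContext
from pyspark.sql import SQLContext

sqlContext = SQLContext.getOrCreate(SparkContext.getOrCreate())

# Registers "people_ext" in the catalog, backed by the external files,
# and returns a DataFrame over the same data.
df = sqlContext.createExternalTable("people_ext",
                                    path="/tmp/people.json",
                                    source="json")
sqlContext.sql("SELECT COUNT(*) FROM people_ext").show()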
19,275
|
apache/spark
|
python/pyspark/sql/context.py
|
SQLContext.tableNames
|
def tableNames(self, dbName=None):
"""Returns a list of names of tables in the database ``dbName``.
:param dbName: string, name of the database to use. Defaults to the current database.
:return: list of table names, in string
>>> sqlContext.registerDataFrameAsTable(df, "table1")
>>> "table1" in sqlContext.tableNames()
True
>>> "table1" in sqlContext.tableNames("default")
True
"""
if dbName is None:
return [name for name in self._ssql_ctx.tableNames()]
else:
return [name for name in self._ssql_ctx.tableNames(dbName)]
|
python
|
def tableNames(self, dbName=None):
"""Returns a list of names of tables in the database ``dbName``.
:param dbName: string, name of the database to use. Defaults to the current database.
:return: list of table names, in string
>>> sqlContext.registerDataFrameAsTable(df, "table1")
>>> "table1" in sqlContext.tableNames()
True
>>> "table1" in sqlContext.tableNames("default")
True
"""
if dbName is None:
return [name for name in self._ssql_ctx.tableNames()]
else:
return [name for name in self._ssql_ctx.tableNames(dbName)]
|
[
"def",
"tableNames",
"(",
"self",
",",
"dbName",
"=",
"None",
")",
":",
"if",
"dbName",
"is",
"None",
":",
"return",
"[",
"name",
"for",
"name",
"in",
"self",
".",
"_ssql_ctx",
".",
"tableNames",
"(",
")",
"]",
"else",
":",
"return",
"[",
"name",
"for",
"name",
"in",
"self",
".",
"_ssql_ctx",
".",
"tableNames",
"(",
"dbName",
")",
"]"
] |
Returns a list of names of tables in the database ``dbName``.
:param dbName: string, name of the database to use. Defaults to the current database.
:return: list of table names, in string
>>> sqlContext.registerDataFrameAsTable(df, "table1")
>>> "table1" in sqlContext.tableNames()
True
>>> "table1" in sqlContext.tableNames("default")
True
|
[
"Returns",
"a",
"list",
"of",
"names",
"of",
"tables",
"in",
"the",
"database",
"dbName",
"."
] |
618d6bff71073c8c93501ab7392c3cc579730f0b
|
https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/sql/context.py#L397-L412
|
19,276
|
apache/spark
|
python/pyspark/ml/classification.py
|
OneVsRestModel.copy
|
def copy(self, extra=None):
"""
Creates a copy of this instance with a randomly generated uid
and some extra params. This creates a deep copy of the embedded paramMap,
and copies the embedded and extra parameters over.
:param extra: Extra parameters to copy to the new instance
:return: Copy of this instance
"""
if extra is None:
extra = dict()
newModel = Params.copy(self, extra)
newModel.models = [model.copy(extra) for model in self.models]
return newModel
|
python
|
def copy(self, extra=None):
"""
Creates a copy of this instance with a randomly generated uid
and some extra params. This creates a deep copy of the embedded paramMap,
and copies the embedded and extra parameters over.
:param extra: Extra parameters to copy to the new instance
:return: Copy of this instance
"""
if extra is None:
extra = dict()
newModel = Params.copy(self, extra)
newModel.models = [model.copy(extra) for model in self.models]
return newModel
|
[
"def",
"copy",
"(",
"self",
",",
"extra",
"=",
"None",
")",
":",
"if",
"extra",
"is",
"None",
":",
"extra",
"=",
"dict",
"(",
")",
"newModel",
"=",
"Params",
".",
"copy",
"(",
"self",
",",
"extra",
")",
"newModel",
".",
"models",
"=",
"[",
"model",
".",
"copy",
"(",
"extra",
")",
"for",
"model",
"in",
"self",
".",
"models",
"]",
"return",
"newModel"
] |
Creates a copy of this instance with a randomly generated uid
and some extra params. This creates a deep copy of the embedded paramMap,
and copies the embedded and extra parameters over.
:param extra: Extra parameters to copy to the new instance
:return: Copy of this instance
|
[
"Creates",
"a",
"copy",
"of",
"this",
"instance",
"with",
"a",
"randomly",
"generated",
"uid",
"and",
"some",
"extra",
"params",
".",
"This",
"creates",
"a",
"deep",
"copy",
"of",
"the",
"embedded",
"paramMap",
"and",
"copies",
"the",
"embedded",
"and",
"extra",
"parameters",
"over",
"."
] |
618d6bff71073c8c93501ab7392c3cc579730f0b
|
https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/ml/classification.py#L2046-L2059
|
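A sketch exercising copy() on a fitted OneVsRestModel; the tiny training frame is made up for illustration, and the param override shows that the copy is independent of the original.

from pyspark.sql import SparkSession
from pyspark.ml.linalg import Vectors
from pyspark.ml.classification import LogisticRegression, OneVsRest

spark = SparkSession.builder.getOrCreate()
train = spark.createDataFrame(
    [(0.0, Vectors.dense(0.0, 1.0)),
     (1.0, Vectors.dense(1.0, 0.0)),
     (2.0, Vectors.dense(1.0, 1.0))],
    ["label", "features"])

model = OneVsRest(classifier=LogisticRegression(maxIter=5)).fit(train)

# copy() clones the param map and every per-class sub-model; the extra
# dict overrides params on the copy only.
model2 = model.copy({model.predictionCol: "ovr_prediction"})
print(model2.getPredictionCol())   # expected: ovr_prediction
print(model.getPredictionCol())    # original keeps its own value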
19,277
|
apache/spark
|
python/pyspark/ml/classification.py
|
OneVsRestModel._from_java
|
def _from_java(cls, java_stage):
"""
Given a Java OneVsRestModel, create and return a Python wrapper of it.
Used for ML persistence.
"""
featuresCol = java_stage.getFeaturesCol()
labelCol = java_stage.getLabelCol()
predictionCol = java_stage.getPredictionCol()
classifier = JavaParams._from_java(java_stage.getClassifier())
models = [JavaParams._from_java(model) for model in java_stage.models()]
py_stage = cls(models=models).setPredictionCol(predictionCol).setLabelCol(labelCol)\
.setFeaturesCol(featuresCol).setClassifier(classifier)
py_stage._resetUid(java_stage.uid())
return py_stage
|
python
|
def _from_java(cls, java_stage):
"""
Given a Java OneVsRestModel, create and return a Python wrapper of it.
Used for ML persistence.
"""
featuresCol = java_stage.getFeaturesCol()
labelCol = java_stage.getLabelCol()
predictionCol = java_stage.getPredictionCol()
classifier = JavaParams._from_java(java_stage.getClassifier())
models = [JavaParams._from_java(model) for model in java_stage.models()]
py_stage = cls(models=models).setPredictionCol(predictionCol).setLabelCol(labelCol)\
.setFeaturesCol(featuresCol).setClassifier(classifier)
py_stage._resetUid(java_stage.uid())
return py_stage
|
[
"def",
"_from_java",
"(",
"cls",
",",
"java_stage",
")",
":",
"featuresCol",
"=",
"java_stage",
".",
"getFeaturesCol",
"(",
")",
"labelCol",
"=",
"java_stage",
".",
"getLabelCol",
"(",
")",
"predictionCol",
"=",
"java_stage",
".",
"getPredictionCol",
"(",
")",
"classifier",
"=",
"JavaParams",
".",
"_from_java",
"(",
"java_stage",
".",
"getClassifier",
"(",
")",
")",
"models",
"=",
"[",
"JavaParams",
".",
"_from_java",
"(",
"model",
")",
"for",
"model",
"in",
"java_stage",
".",
"models",
"(",
")",
"]",
"py_stage",
"=",
"cls",
"(",
"models",
"=",
"models",
")",
".",
"setPredictionCol",
"(",
"predictionCol",
")",
".",
"setLabelCol",
"(",
"labelCol",
")",
".",
"setFeaturesCol",
"(",
"featuresCol",
")",
".",
"setClassifier",
"(",
"classifier",
")",
"py_stage",
".",
"_resetUid",
"(",
"java_stage",
".",
"uid",
"(",
")",
")",
"return",
"py_stage"
] |
Given a Java OneVsRestModel, create and return a Python wrapper of it.
Used for ML persistence.
|
[
"Given",
"a",
"Java",
"OneVsRestModel",
"create",
"and",
"return",
"a",
"Python",
"wrapper",
"of",
"it",
".",
"Used",
"for",
"ML",
"persistence",
"."
] |
618d6bff71073c8c93501ab7392c3cc579730f0b
|
https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/ml/classification.py#L2062-L2075
|
19,278
|
apache/spark
|
python/pyspark/ml/classification.py
|
OneVsRestModel._to_java
|
def _to_java(self):
"""
Transfer this instance to a Java OneVsRestModel. Used for ML persistence.
:return: Java object equivalent to this instance.
"""
sc = SparkContext._active_spark_context
java_models = [model._to_java() for model in self.models]
java_models_array = JavaWrapper._new_java_array(
java_models, sc._gateway.jvm.org.apache.spark.ml.classification.ClassificationModel)
metadata = JavaParams._new_java_obj("org.apache.spark.sql.types.Metadata")
_java_obj = JavaParams._new_java_obj("org.apache.spark.ml.classification.OneVsRestModel",
self.uid, metadata.empty(), java_models_array)
_java_obj.set("classifier", self.getClassifier()._to_java())
_java_obj.set("featuresCol", self.getFeaturesCol())
_java_obj.set("labelCol", self.getLabelCol())
_java_obj.set("predictionCol", self.getPredictionCol())
return _java_obj
|
python
|
def _to_java(self):
"""
Transfer this instance to a Java OneVsRestModel. Used for ML persistence.
:return: Java object equivalent to this instance.
"""
sc = SparkContext._active_spark_context
java_models = [model._to_java() for model in self.models]
java_models_array = JavaWrapper._new_java_array(
java_models, sc._gateway.jvm.org.apache.spark.ml.classification.ClassificationModel)
metadata = JavaParams._new_java_obj("org.apache.spark.sql.types.Metadata")
_java_obj = JavaParams._new_java_obj("org.apache.spark.ml.classification.OneVsRestModel",
self.uid, metadata.empty(), java_models_array)
_java_obj.set("classifier", self.getClassifier()._to_java())
_java_obj.set("featuresCol", self.getFeaturesCol())
_java_obj.set("labelCol", self.getLabelCol())
_java_obj.set("predictionCol", self.getPredictionCol())
return _java_obj
|
[
"def",
"_to_java",
"(",
"self",
")",
":",
"sc",
"=",
"SparkContext",
".",
"_active_spark_context",
"java_models",
"=",
"[",
"model",
".",
"_to_java",
"(",
")",
"for",
"model",
"in",
"self",
".",
"models",
"]",
"java_models_array",
"=",
"JavaWrapper",
".",
"_new_java_array",
"(",
"java_models",
",",
"sc",
".",
"_gateway",
".",
"jvm",
".",
"org",
".",
"apache",
".",
"spark",
".",
"ml",
".",
"classification",
".",
"ClassificationModel",
")",
"metadata",
"=",
"JavaParams",
".",
"_new_java_obj",
"(",
"\"org.apache.spark.sql.types.Metadata\"",
")",
"_java_obj",
"=",
"JavaParams",
".",
"_new_java_obj",
"(",
"\"org.apache.spark.ml.classification.OneVsRestModel\"",
",",
"self",
".",
"uid",
",",
"metadata",
".",
"empty",
"(",
")",
",",
"java_models_array",
")",
"_java_obj",
".",
"set",
"(",
"\"classifier\"",
",",
"self",
".",
"getClassifier",
"(",
")",
".",
"_to_java",
"(",
")",
")",
"_java_obj",
".",
"set",
"(",
"\"featuresCol\"",
",",
"self",
".",
"getFeaturesCol",
"(",
")",
")",
"_java_obj",
".",
"set",
"(",
"\"labelCol\"",
",",
"self",
".",
"getLabelCol",
"(",
")",
")",
"_java_obj",
".",
"set",
"(",
"\"predictionCol\"",
",",
"self",
".",
"getPredictionCol",
"(",
")",
")",
"return",
"_java_obj"
] |
Transfer this instance to a Java OneVsRestModel. Used for ML persistence.
:return: Java object equivalent to this instance.
|
[
"Transfer",
"this",
"instance",
"to",
"a",
"Java",
"OneVsRestModel",
".",
"Used",
"for",
"ML",
"persistence",
"."
] |
618d6bff71073c8c93501ab7392c3cc579730f0b
|
https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/ml/classification.py#L2077-L2094
|
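The two converters above are what ML persistence goes through; a sketch of the save/load round-trip that exercises them, reusing the fitted `model` from the copy() sketch earlier and a hypothetical output path.

from pyspark.ml.classification import OneVsRestModel

model.write().overwrite().save("/tmp/ovr_model")     # serializes via _to_java
restored = OneVsRestModel.load("/tmp/ovr_model")     # rebuilds via _from_java
print(len(restored.models), restored.getPredictionCol())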
19,279
|
apache/spark
|
python/pyspark/util.py
|
_exception_message
|
def _exception_message(excp):
"""Return the message from an exception as either a str or unicode object. Supports both
Python 2 and Python 3.
>>> msg = "Exception message"
>>> excp = Exception(msg)
>>> msg == _exception_message(excp)
True
>>> msg = u"unicöde"
>>> excp = Exception(msg)
>>> msg == _exception_message(excp)
True
"""
if isinstance(excp, Py4JJavaError):
# 'Py4JJavaError' doesn't contain the stack trace available on the Java side in 'message'
# attribute in Python 2. We should call 'str' function on this exception in general but
# 'Py4JJavaError' has an issue about addressing non-ascii strings. So, here we work
# around by the direct call, '__str__()'. Please see SPARK-23517.
return excp.__str__()
if hasattr(excp, "message"):
return excp.message
return str(excp)
|
python
|
def _exception_message(excp):
"""Return the message from an exception as either a str or unicode object. Supports both
Python 2 and Python 3.
>>> msg = "Exception message"
>>> excp = Exception(msg)
>>> msg == _exception_message(excp)
True
>>> msg = u"unicöde"
>>> excp = Exception(msg)
>>> msg == _exception_message(excp)
True
"""
if isinstance(excp, Py4JJavaError):
# 'Py4JJavaError' doesn't contain the stack trace available on the Java side in 'message'
# attribute in Python 2. We should call 'str' function on this exception in general but
# 'Py4JJavaError' has an issue about addressing non-ascii strings. So, here we work
# around by the direct call, '__str__()'. Please see SPARK-23517.
return excp.__str__()
if hasattr(excp, "message"):
return excp.message
return str(excp)
|
[
"def",
"_exception_message",
"(",
"excp",
")",
":",
"if",
"isinstance",
"(",
"excp",
",",
"Py4JJavaError",
")",
":",
"# 'Py4JJavaError' doesn't contain the stack trace available on the Java side in 'message'",
"# attribute in Python 2. We should call 'str' function on this exception in general but",
"# 'Py4JJavaError' has an issue about addressing non-ascii strings. So, here we work",
"# around by the direct call, '__str__()'. Please see SPARK-23517.",
"return",
"excp",
".",
"__str__",
"(",
")",
"if",
"hasattr",
"(",
"excp",
",",
"\"message\"",
")",
":",
"return",
"excp",
".",
"message",
"return",
"str",
"(",
"excp",
")"
] |
Return the message from an exception as either a str or unicode object. Supports both
Python 2 and Python 3.
>>> msg = "Exception message"
>>> excp = Exception(msg)
>>> msg == _exception_message(excp)
True
>>> msg = u"unicöde"
>>> excp = Exception(msg)
>>> msg == _exception_message(excp)
True
|
[
"Return",
"the",
"message",
"from",
"an",
"exception",
"as",
"either",
"a",
"str",
"or",
"unicode",
"object",
".",
"Supports",
"both",
"Python",
"2",
"and",
"Python",
"3",
"."
] |
618d6bff71073c8c93501ab7392c3cc579730f0b
|
https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/util.py#L27-L49
|
19,280
|
apache/spark
|
python/pyspark/util.py
|
_get_argspec
|
def _get_argspec(f):
"""
Get argspec of a function. Supports both Python 2 and Python 3.
"""
if sys.version_info[0] < 3:
argspec = inspect.getargspec(f)
else:
# `getargspec` is deprecated since python3.0 (incompatible with function annotations).
# See SPARK-23569.
argspec = inspect.getfullargspec(f)
return argspec
|
python
|
def _get_argspec(f):
"""
Get argspec of a function. Supports both Python 2 and Python 3.
"""
if sys.version_info[0] < 3:
argspec = inspect.getargspec(f)
else:
# `getargspec` is deprecated since python3.0 (incompatible with function annotations).
# See SPARK-23569.
argspec = inspect.getfullargspec(f)
return argspec
|
[
"def",
"_get_argspec",
"(",
"f",
")",
":",
"if",
"sys",
".",
"version_info",
"[",
"0",
"]",
"<",
"3",
":",
"argspec",
"=",
"inspect",
".",
"getargspec",
"(",
"f",
")",
"else",
":",
"# `getargspec` is deprecated since python3.0 (incompatible with function annotations).",
"# See SPARK-23569.",
"argspec",
"=",
"inspect",
".",
"getfullargspec",
"(",
"f",
")",
"return",
"argspec"
] |
Get argspec of a function. Supports both Python 2 and Python 3.
|
[
"Get",
"argspec",
"of",
"a",
"function",
".",
"Supports",
"both",
"Python",
"2",
"and",
"Python",
"3",
"."
] |
618d6bff71073c8c93501ab7392c3cc579730f0b
|
https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/util.py#L52-L62
|
19,281
|
apache/spark
|
python/pyspark/util.py
|
fail_on_stopiteration
|
def fail_on_stopiteration(f):
"""
Wraps the input function to fail on 'StopIteration' by raising a 'RuntimeError'.
This prevents silent loss of data when 'f' is used in a for loop in Spark code.
"""
def wrapper(*args, **kwargs):
try:
return f(*args, **kwargs)
except StopIteration as exc:
raise RuntimeError(
"Caught StopIteration thrown from user's code; failing the task",
exc
)
return wrapper
|
python
|
def fail_on_stopiteration(f):
"""
Wraps the input function to fail on 'StopIteration' by raising a 'RuntimeError'.
This prevents silent loss of data when 'f' is used in a for loop in Spark code.
"""
def wrapper(*args, **kwargs):
try:
return f(*args, **kwargs)
except StopIteration as exc:
raise RuntimeError(
"Caught StopIteration thrown from user's code; failing the task",
exc
)
return wrapper
|
[
"def",
"fail_on_stopiteration",
"(",
"f",
")",
":",
"def",
"wrapper",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"try",
":",
"return",
"f",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
"except",
"StopIteration",
"as",
"exc",
":",
"raise",
"RuntimeError",
"(",
"\"Caught StopIteration thrown from user's code; failing the task\"",
",",
"exc",
")",
"return",
"wrapper"
] |
Wraps the input function to fail on 'StopIteration' by raising a 'RuntimeError'.
This prevents silent loss of data when 'f' is used in a for loop in Spark code.
|
[
"Wraps",
"the",
"input",
"function",
"to",
"fail",
"on",
"StopIteration",
"by",
"raising",
"a",
"RuntimeError",
"prevents",
"silent",
"loss",
"of",
"data",
"when",
"f",
"is",
"used",
"in",
"a",
"for",
"loop",
"in",
"Spark",
"code"
] |
618d6bff71073c8c93501ab7392c3cc579730f0b
|
https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/util.py#L92-L106
|
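A small sketch of the wrapper above; it is an internal helper (applied to user functions before they are shipped to workers), so the direct import and the function name are for illustration only.

from pyspark.util import fail_on_stopiteration

def flaky(x):
    if x > 2:
        raise StopIteration("user bug")   # would otherwise end a for loop silently
    return x

safe = fail_on_stopiteration(flaky)
print(safe(1))          # 1
try:
    safe(3)             # StopIteration surfaces as RuntimeError instead
except RuntimeError as e:
    print("caught:", e)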
19,282
|
apache/spark
|
python/pyspark/context.py
|
SparkContext._ensure_initialized
|
def _ensure_initialized(cls, instance=None, gateway=None, conf=None):
"""
Checks whether a SparkContext is initialized or not.
Throws error if a SparkContext is already running.
"""
with SparkContext._lock:
if not SparkContext._gateway:
SparkContext._gateway = gateway or launch_gateway(conf)
SparkContext._jvm = SparkContext._gateway.jvm
if instance:
if (SparkContext._active_spark_context and
SparkContext._active_spark_context != instance):
currentMaster = SparkContext._active_spark_context.master
currentAppName = SparkContext._active_spark_context.appName
callsite = SparkContext._active_spark_context._callsite
# Raise error if there is already a running Spark context
raise ValueError(
"Cannot run multiple SparkContexts at once; "
"existing SparkContext(app=%s, master=%s)"
" created by %s at %s:%s "
% (currentAppName, currentMaster,
callsite.function, callsite.file, callsite.linenum))
else:
SparkContext._active_spark_context = instance
|
python
|
def _ensure_initialized(cls, instance=None, gateway=None, conf=None):
"""
Checks whether a SparkContext is initialized or not.
Throws error if a SparkContext is already running.
"""
with SparkContext._lock:
if not SparkContext._gateway:
SparkContext._gateway = gateway or launch_gateway(conf)
SparkContext._jvm = SparkContext._gateway.jvm
if instance:
if (SparkContext._active_spark_context and
SparkContext._active_spark_context != instance):
currentMaster = SparkContext._active_spark_context.master
currentAppName = SparkContext._active_spark_context.appName
callsite = SparkContext._active_spark_context._callsite
# Raise error if there is already a running Spark context
raise ValueError(
"Cannot run multiple SparkContexts at once; "
"existing SparkContext(app=%s, master=%s)"
" created by %s at %s:%s "
% (currentAppName, currentMaster,
callsite.function, callsite.file, callsite.linenum))
else:
SparkContext._active_spark_context = instance
|
[
"def",
"_ensure_initialized",
"(",
"cls",
",",
"instance",
"=",
"None",
",",
"gateway",
"=",
"None",
",",
"conf",
"=",
"None",
")",
":",
"with",
"SparkContext",
".",
"_lock",
":",
"if",
"not",
"SparkContext",
".",
"_gateway",
":",
"SparkContext",
".",
"_gateway",
"=",
"gateway",
"or",
"launch_gateway",
"(",
"conf",
")",
"SparkContext",
".",
"_jvm",
"=",
"SparkContext",
".",
"_gateway",
".",
"jvm",
"if",
"instance",
":",
"if",
"(",
"SparkContext",
".",
"_active_spark_context",
"and",
"SparkContext",
".",
"_active_spark_context",
"!=",
"instance",
")",
":",
"currentMaster",
"=",
"SparkContext",
".",
"_active_spark_context",
".",
"master",
"currentAppName",
"=",
"SparkContext",
".",
"_active_spark_context",
".",
"appName",
"callsite",
"=",
"SparkContext",
".",
"_active_spark_context",
".",
"_callsite",
"# Raise error if there is already a running Spark context",
"raise",
"ValueError",
"(",
"\"Cannot run multiple SparkContexts at once; \"",
"\"existing SparkContext(app=%s, master=%s)\"",
"\" created by %s at %s:%s \"",
"%",
"(",
"currentAppName",
",",
"currentMaster",
",",
"callsite",
".",
"function",
",",
"callsite",
".",
"file",
",",
"callsite",
".",
"linenum",
")",
")",
"else",
":",
"SparkContext",
".",
"_active_spark_context",
"=",
"instance"
] |
Checks whether a SparkContext is initialized or not.
Throws error if a SparkContext is already running.
|
[
"Checks",
"whether",
"a",
"SparkContext",
"is",
"initialized",
"or",
"not",
".",
"Throws",
"error",
"if",
"a",
"SparkContext",
"is",
"already",
"running",
"."
] |
618d6bff71073c8c93501ab7392c3cc579730f0b
|
https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/context.py#L303-L328
|
19,283
|
apache/spark
|
python/pyspark/context.py
|
SparkContext.getOrCreate
|
def getOrCreate(cls, conf=None):
"""
Get or instantiate a SparkContext and register it as a singleton object.
:param conf: SparkConf (optional)
"""
with SparkContext._lock:
if SparkContext._active_spark_context is None:
SparkContext(conf=conf or SparkConf())
return SparkContext._active_spark_context
|
python
|
def getOrCreate(cls, conf=None):
"""
Get or instantiate a SparkContext and register it as a singleton object.
:param conf: SparkConf (optional)
"""
with SparkContext._lock:
if SparkContext._active_spark_context is None:
SparkContext(conf=conf or SparkConf())
return SparkContext._active_spark_context
|
[
"def",
"getOrCreate",
"(",
"cls",
",",
"conf",
"=",
"None",
")",
":",
"with",
"SparkContext",
".",
"_lock",
":",
"if",
"SparkContext",
".",
"_active_spark_context",
"is",
"None",
":",
"SparkContext",
"(",
"conf",
"=",
"conf",
"or",
"SparkConf",
"(",
")",
")",
"return",
"SparkContext",
".",
"_active_spark_context"
] |
Get or instantiate a SparkContext and register it as a singleton object.
:param conf: SparkConf (optional)
|
[
"Get",
"or",
"instantiate",
"a",
"SparkContext",
"and",
"register",
"it",
"as",
"a",
"singleton",
"object",
"."
] |
618d6bff71073c8c93501ab7392c3cc579730f0b
|
https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/context.py#L353-L362
|
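A minimal sketch for getOrCreate above; the conf only takes effect on the first call, and later calls return the existing singleton.

from pyspark import SparkConf, SparkContext

sc1 = SparkContext.getOrCreate(SparkConf().setAppName("example"))
sc2 = SparkContext.getOrCreate()   # conf ignored, same instance returned
assert sc1 is sc2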
19,284
|
apache/spark
|
python/pyspark/context.py
|
SparkContext.setSystemProperty
|
def setSystemProperty(cls, key, value):
"""
Set a Java system property, such as spark.executor.memory. This must
be invoked before instantiating SparkContext.
"""
SparkContext._ensure_initialized()
SparkContext._jvm.java.lang.System.setProperty(key, value)
|
python
|
def setSystemProperty(cls, key, value):
"""
Set a Java system property, such as spark.executor.memory. This must
be invoked before instantiating SparkContext.
"""
SparkContext._ensure_initialized()
SparkContext._jvm.java.lang.System.setProperty(key, value)
|
[
"def",
"setSystemProperty",
"(",
"cls",
",",
"key",
",",
"value",
")",
":",
"SparkContext",
".",
"_ensure_initialized",
"(",
")",
"SparkContext",
".",
"_jvm",
".",
"java",
".",
"lang",
".",
"System",
".",
"setProperty",
"(",
"key",
",",
"value",
")"
] |
Set a Java system property, such as spark.executor.memory. This must
be invoked before instantiating SparkContext.
|
[
"Set",
"a",
"Java",
"system",
"property",
"such",
"as",
"spark",
".",
"executor",
".",
"memory",
".",
"This",
"must",
"must",
"be",
"invoked",
"before",
"instantiating",
"SparkContext",
"."
] |
618d6bff71073c8c93501ab7392c3cc579730f0b
|
https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/context.py#L372-L378
|
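A short sketch for setSystemProperty above; the call has to happen before any SparkContext exists in the process, otherwise the already-running JVM will not pick the property up. The memory value is arbitrary.

from pyspark import SparkContext

# Must run before the (first) SparkContext is created in this process.
SparkContext.setSystemProperty("spark.executor.memory", "2g")
sc = SparkContext.getOrCreate()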
19,285
|
apache/spark
|
python/pyspark/context.py
|
SparkContext.stop
|
def stop(self):
"""
Shut down the SparkContext.
"""
if getattr(self, "_jsc", None):
try:
self._jsc.stop()
except Py4JError:
# Case: SPARK-18523
warnings.warn(
'Unable to cleanly shutdown Spark JVM process.'
' It is possible that the process has crashed,'
' been killed or may also be in a zombie state.',
RuntimeWarning
)
finally:
self._jsc = None
if getattr(self, "_accumulatorServer", None):
self._accumulatorServer.shutdown()
self._accumulatorServer = None
with SparkContext._lock:
SparkContext._active_spark_context = None
|
python
|
def stop(self):
"""
Shut down the SparkContext.
"""
if getattr(self, "_jsc", None):
try:
self._jsc.stop()
except Py4JError:
# Case: SPARK-18523
warnings.warn(
'Unable to cleanly shutdown Spark JVM process.'
' It is possible that the process has crashed,'
' been killed or may also be in a zombie state.',
RuntimeWarning
)
finally:
self._jsc = None
if getattr(self, "_accumulatorServer", None):
self._accumulatorServer.shutdown()
self._accumulatorServer = None
with SparkContext._lock:
SparkContext._active_spark_context = None
|
[
"def",
"stop",
"(",
"self",
")",
":",
"if",
"getattr",
"(",
"self",
",",
"\"_jsc\"",
",",
"None",
")",
":",
"try",
":",
"self",
".",
"_jsc",
".",
"stop",
"(",
")",
"except",
"Py4JError",
":",
"# Case: SPARK-18523",
"warnings",
".",
"warn",
"(",
"'Unable to cleanly shutdown Spark JVM process.'",
"' It is possible that the process has crashed,'",
"' been killed or may also be in a zombie state.'",
",",
"RuntimeWarning",
")",
"finally",
":",
"self",
".",
"_jsc",
"=",
"None",
"if",
"getattr",
"(",
"self",
",",
"\"_accumulatorServer\"",
",",
"None",
")",
":",
"self",
".",
"_accumulatorServer",
".",
"shutdown",
"(",
")",
"self",
".",
"_accumulatorServer",
"=",
"None",
"with",
"SparkContext",
".",
"_lock",
":",
"SparkContext",
".",
"_active_spark_context",
"=",
"None"
] |
Shut down the SparkContext.
|
[
"Shut",
"down",
"the",
"SparkContext",
"."
] |
618d6bff71073c8c93501ab7392c3cc579730f0b
|
https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/context.py#L427-L448
|
19,286
|
apache/spark
|
python/pyspark/context.py
|
SparkContext.parallelize
|
def parallelize(self, c, numSlices=None):
"""
Distribute a local Python collection to form an RDD. Using xrange
is recommended if the input represents a range for performance.
>>> sc.parallelize([0, 2, 3, 4, 6], 5).glom().collect()
[[0], [2], [3], [4], [6]]
>>> sc.parallelize(xrange(0, 6, 2), 5).glom().collect()
[[], [0], [], [2], [4]]
"""
numSlices = int(numSlices) if numSlices is not None else self.defaultParallelism
if isinstance(c, xrange):
size = len(c)
if size == 0:
return self.parallelize([], numSlices)
step = c[1] - c[0] if size > 1 else 1
start0 = c[0]
def getStart(split):
return start0 + int((split * size / numSlices)) * step
def f(split, iterator):
# it's an empty iterator here but we need this line for triggering the
# logic of signal handling in FramedSerializer.load_stream, for instance,
# SpecialLengths.END_OF_DATA_SECTION in _read_with_length. Since
# FramedSerializer.load_stream produces a generator, the control should
# at least be in that function once. Here we do it by explicitly converting
# the empty iterator to a list, thus make sure worker reuse takes effect.
# See more details in SPARK-26549.
assert len(list(iterator)) == 0
return xrange(getStart(split), getStart(split + 1), step)
return self.parallelize([], numSlices).mapPartitionsWithIndex(f)
# Make sure we distribute data evenly if it's smaller than self.batchSize
if "__len__" not in dir(c):
c = list(c) # Make it a list so we can compute its length
batchSize = max(1, min(len(c) // numSlices, self._batchSize or 1024))
serializer = BatchedSerializer(self._unbatched_serializer, batchSize)
def reader_func(temp_filename):
return self._jvm.PythonRDD.readRDDFromFile(self._jsc, temp_filename, numSlices)
def createRDDServer():
return self._jvm.PythonParallelizeServer(self._jsc.sc(), numSlices)
jrdd = self._serialize_to_jvm(c, serializer, reader_func, createRDDServer)
return RDD(jrdd, self, serializer)
|
python
|
def parallelize(self, c, numSlices=None):
"""
Distribute a local Python collection to form an RDD. Using xrange
is recommended if the input represents a range for performance.
>>> sc.parallelize([0, 2, 3, 4, 6], 5).glom().collect()
[[0], [2], [3], [4], [6]]
>>> sc.parallelize(xrange(0, 6, 2), 5).glom().collect()
[[], [0], [], [2], [4]]
"""
numSlices = int(numSlices) if numSlices is not None else self.defaultParallelism
if isinstance(c, xrange):
size = len(c)
if size == 0:
return self.parallelize([], numSlices)
step = c[1] - c[0] if size > 1 else 1
start0 = c[0]
def getStart(split):
return start0 + int((split * size / numSlices)) * step
def f(split, iterator):
# it's an empty iterator here but we need this line for triggering the
# logic of signal handling in FramedSerializer.load_stream, for instance,
# SpecialLengths.END_OF_DATA_SECTION in _read_with_length. Since
# FramedSerializer.load_stream produces a generator, the control should
# at least be in that function once. Here we do it by explicitly converting
# the empty iterator to a list, thus make sure worker reuse takes effect.
# See more details in SPARK-26549.
assert len(list(iterator)) == 0
return xrange(getStart(split), getStart(split + 1), step)
return self.parallelize([], numSlices).mapPartitionsWithIndex(f)
# Make sure we distribute data evenly if it's smaller than self.batchSize
if "__len__" not in dir(c):
c = list(c) # Make it a list so we can compute its length
batchSize = max(1, min(len(c) // numSlices, self._batchSize or 1024))
serializer = BatchedSerializer(self._unbatched_serializer, batchSize)
def reader_func(temp_filename):
return self._jvm.PythonRDD.readRDDFromFile(self._jsc, temp_filename, numSlices)
def createRDDServer():
return self._jvm.PythonParallelizeServer(self._jsc.sc(), numSlices)
jrdd = self._serialize_to_jvm(c, serializer, reader_func, createRDDServer)
return RDD(jrdd, self, serializer)
|
[
"def",
"parallelize",
"(",
"self",
",",
"c",
",",
"numSlices",
"=",
"None",
")",
":",
"numSlices",
"=",
"int",
"(",
"numSlices",
")",
"if",
"numSlices",
"is",
"not",
"None",
"else",
"self",
".",
"defaultParallelism",
"if",
"isinstance",
"(",
"c",
",",
"xrange",
")",
":",
"size",
"=",
"len",
"(",
"c",
")",
"if",
"size",
"==",
"0",
":",
"return",
"self",
".",
"parallelize",
"(",
"[",
"]",
",",
"numSlices",
")",
"step",
"=",
"c",
"[",
"1",
"]",
"-",
"c",
"[",
"0",
"]",
"if",
"size",
">",
"1",
"else",
"1",
"start0",
"=",
"c",
"[",
"0",
"]",
"def",
"getStart",
"(",
"split",
")",
":",
"return",
"start0",
"+",
"int",
"(",
"(",
"split",
"*",
"size",
"/",
"numSlices",
")",
")",
"*",
"step",
"def",
"f",
"(",
"split",
",",
"iterator",
")",
":",
"# it's an empty iterator here but we need this line for triggering the",
"# logic of signal handling in FramedSerializer.load_stream, for instance,",
"# SpecialLengths.END_OF_DATA_SECTION in _read_with_length. Since",
"# FramedSerializer.load_stream produces a generator, the control should",
"# at least be in that function once. Here we do it by explicitly converting",
"# the empty iterator to a list, thus make sure worker reuse takes effect.",
"# See more details in SPARK-26549.",
"assert",
"len",
"(",
"list",
"(",
"iterator",
")",
")",
"==",
"0",
"return",
"xrange",
"(",
"getStart",
"(",
"split",
")",
",",
"getStart",
"(",
"split",
"+",
"1",
")",
",",
"step",
")",
"return",
"self",
".",
"parallelize",
"(",
"[",
"]",
",",
"numSlices",
")",
".",
"mapPartitionsWithIndex",
"(",
"f",
")",
"# Make sure we distribute data evenly if it's smaller than self.batchSize",
"if",
"\"__len__\"",
"not",
"in",
"dir",
"(",
"c",
")",
":",
"c",
"=",
"list",
"(",
"c",
")",
"# Make it a list so we can compute its length",
"batchSize",
"=",
"max",
"(",
"1",
",",
"min",
"(",
"len",
"(",
"c",
")",
"//",
"numSlices",
",",
"self",
".",
"_batchSize",
"or",
"1024",
")",
")",
"serializer",
"=",
"BatchedSerializer",
"(",
"self",
".",
"_unbatched_serializer",
",",
"batchSize",
")",
"def",
"reader_func",
"(",
"temp_filename",
")",
":",
"return",
"self",
".",
"_jvm",
".",
"PythonRDD",
".",
"readRDDFromFile",
"(",
"self",
".",
"_jsc",
",",
"temp_filename",
",",
"numSlices",
")",
"def",
"createRDDServer",
"(",
")",
":",
"return",
"self",
".",
"_jvm",
".",
"PythonParallelizeServer",
"(",
"self",
".",
"_jsc",
".",
"sc",
"(",
")",
",",
"numSlices",
")",
"jrdd",
"=",
"self",
".",
"_serialize_to_jvm",
"(",
"c",
",",
"serializer",
",",
"reader_func",
",",
"createRDDServer",
")",
"return",
"RDD",
"(",
"jrdd",
",",
"self",
",",
"serializer",
")"
] |
Distribute a local Python collection to form an RDD. Using xrange
is recommended if the input represents a range for performance.
>>> sc.parallelize([0, 2, 3, 4, 6], 5).glom().collect()
[[0], [2], [3], [4], [6]]
>>> sc.parallelize(xrange(0, 6, 2), 5).glom().collect()
[[], [0], [], [2], [4]]
|
[
"Distribute",
"a",
"local",
"Python",
"collection",
"to",
"form",
"an",
"RDD",
".",
"Using",
"xrange",
"is",
"recommended",
"if",
"the",
"input",
"represents",
"a",
"range",
"for",
"performance",
"."
] |
618d6bff71073c8c93501ab7392c3cc579730f0b
|
https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/context.py#L482-L529
|
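A small, pure-Python sketch of the partition-boundary arithmetic that the xrange branch of `parallelize` above relies on. `split_range` is an illustrative name, not a PySpark API; the sketch only re-derives the `getStart` logic so the `glom()` doctest output can be checked by hand.

```python
# Illustrative re-derivation of the getStart() arithmetic used for xrange/range
# inputs; split_range is a made-up helper, not part of PySpark.
def split_range(r, num_slices):
    size = len(r)                       # real code returns early when size == 0
    step = r[1] - r[0] if size > 1 else 1
    start0 = r[0]

    def get_start(split):
        # lower bound of the elements assigned to partition `split`
        return start0 + int(split * size / num_slices) * step

    return [range(get_start(i), get_start(i + 1), step) for i in range(num_slices)]

print([list(p) for p in split_range(range(0, 6, 2), 5)])
# [[], [0], [], [2], [4]]  -- matches the glom() doctest in the row above
```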
19,287
|
apache/spark
|
python/pyspark/context.py
|
SparkContext.union
|
def union(self, rdds):
"""
Build the union of a list of RDDs.
This supports unions() of RDDs with different serialized formats,
although this forces them to be reserialized using the default
serializer:
>>> path = os.path.join(tempdir, "union-text.txt")
>>> with open(path, "w") as testFile:
... _ = testFile.write("Hello")
>>> textFile = sc.textFile(path)
>>> textFile.collect()
[u'Hello']
>>> parallelized = sc.parallelize(["World!"])
>>> sorted(sc.union([textFile, parallelized]).collect())
[u'Hello', 'World!']
"""
first_jrdd_deserializer = rdds[0]._jrdd_deserializer
if any(x._jrdd_deserializer != first_jrdd_deserializer for x in rdds):
rdds = [x._reserialize() for x in rdds]
cls = SparkContext._jvm.org.apache.spark.api.java.JavaRDD
jrdds = SparkContext._gateway.new_array(cls, len(rdds))
for i in range(0, len(rdds)):
jrdds[i] = rdds[i]._jrdd
return RDD(self._jsc.union(jrdds), self, rdds[0]._jrdd_deserializer)
|
python
|
def union(self, rdds):
"""
Build the union of a list of RDDs.
This supports unions() of RDDs with different serialized formats,
although this forces them to be reserialized using the default
serializer:
>>> path = os.path.join(tempdir, "union-text.txt")
>>> with open(path, "w") as testFile:
... _ = testFile.write("Hello")
>>> textFile = sc.textFile(path)
>>> textFile.collect()
[u'Hello']
>>> parallelized = sc.parallelize(["World!"])
>>> sorted(sc.union([textFile, parallelized]).collect())
[u'Hello', 'World!']
"""
first_jrdd_deserializer = rdds[0]._jrdd_deserializer
if any(x._jrdd_deserializer != first_jrdd_deserializer for x in rdds):
rdds = [x._reserialize() for x in rdds]
cls = SparkContext._jvm.org.apache.spark.api.java.JavaRDD
jrdds = SparkContext._gateway.new_array(cls, len(rdds))
for i in range(0, len(rdds)):
jrdds[i] = rdds[i]._jrdd
return RDD(self._jsc.union(jrdds), self, rdds[0]._jrdd_deserializer)
|
[
"def",
"union",
"(",
"self",
",",
"rdds",
")",
":",
"first_jrdd_deserializer",
"=",
"rdds",
"[",
"0",
"]",
".",
"_jrdd_deserializer",
"if",
"any",
"(",
"x",
".",
"_jrdd_deserializer",
"!=",
"first_jrdd_deserializer",
"for",
"x",
"in",
"rdds",
")",
":",
"rdds",
"=",
"[",
"x",
".",
"_reserialize",
"(",
")",
"for",
"x",
"in",
"rdds",
"]",
"cls",
"=",
"SparkContext",
".",
"_jvm",
".",
"org",
".",
"apache",
".",
"spark",
".",
"api",
".",
"java",
".",
"JavaRDD",
"jrdds",
"=",
"SparkContext",
".",
"_gateway",
".",
"new_array",
"(",
"cls",
",",
"len",
"(",
"rdds",
")",
")",
"for",
"i",
"in",
"range",
"(",
"0",
",",
"len",
"(",
"rdds",
")",
")",
":",
"jrdds",
"[",
"i",
"]",
"=",
"rdds",
"[",
"i",
"]",
".",
"_jrdd",
"return",
"RDD",
"(",
"self",
".",
"_jsc",
".",
"union",
"(",
"jrdds",
")",
",",
"self",
",",
"rdds",
"[",
"0",
"]",
".",
"_jrdd_deserializer",
")"
] |
Build the union of a list of RDDs.
This supports unions() of RDDs with different serialized formats,
although this forces them to be reserialized using the default
serializer:
>>> path = os.path.join(tempdir, "union-text.txt")
>>> with open(path, "w") as testFile:
... _ = testFile.write("Hello")
>>> textFile = sc.textFile(path)
>>> textFile.collect()
[u'Hello']
>>> parallelized = sc.parallelize(["World!"])
>>> sorted(sc.union([textFile, parallelized]).collect())
[u'Hello', 'World!']
|
[
"Build",
"the",
"union",
"of",
"a",
"list",
"of",
"RDDs",
"."
] |
618d6bff71073c8c93501ab7392c3cc579730f0b
|
https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/context.py#L837-L862
|
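A quick complement to the doctest in this row, assuming a running SparkContext `sc` (for example from a local PySpark shell): `sc.union` is a bag union that keeps duplicates, so chain `.distinct()` when set semantics are wanted.

```python
# sc.union() concatenates RDDs and keeps duplicates. Assumes an existing `sc`.
a = sc.parallelize([1, 2, 3])
b = sc.parallelize([3, 4])
print(sorted(sc.union([a, b]).collect()))             # [1, 2, 3, 3, 4]
print(sorted(sc.union([a, b]).distinct().collect()))  # [1, 2, 3, 4]
```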
19,288
|
apache/spark
|
python/pyspark/context.py
|
SparkContext._getJavaStorageLevel
|
def _getJavaStorageLevel(self, storageLevel):
"""
Returns a Java StorageLevel based on a pyspark.StorageLevel.
"""
if not isinstance(storageLevel, StorageLevel):
raise Exception("storageLevel must be of type pyspark.StorageLevel")
newStorageLevel = self._jvm.org.apache.spark.storage.StorageLevel
return newStorageLevel(storageLevel.useDisk,
storageLevel.useMemory,
storageLevel.useOffHeap,
storageLevel.deserialized,
storageLevel.replication)
|
python
|
def _getJavaStorageLevel(self, storageLevel):
"""
Returns a Java StorageLevel based on a pyspark.StorageLevel.
"""
if not isinstance(storageLevel, StorageLevel):
raise Exception("storageLevel must be of type pyspark.StorageLevel")
newStorageLevel = self._jvm.org.apache.spark.storage.StorageLevel
return newStorageLevel(storageLevel.useDisk,
storageLevel.useMemory,
storageLevel.useOffHeap,
storageLevel.deserialized,
storageLevel.replication)
|
[
"def",
"_getJavaStorageLevel",
"(",
"self",
",",
"storageLevel",
")",
":",
"if",
"not",
"isinstance",
"(",
"storageLevel",
",",
"StorageLevel",
")",
":",
"raise",
"Exception",
"(",
"\"storageLevel must be of type pyspark.StorageLevel\"",
")",
"newStorageLevel",
"=",
"self",
".",
"_jvm",
".",
"org",
".",
"apache",
".",
"spark",
".",
"storage",
".",
"StorageLevel",
"return",
"newStorageLevel",
"(",
"storageLevel",
".",
"useDisk",
",",
"storageLevel",
".",
"useMemory",
",",
"storageLevel",
".",
"useOffHeap",
",",
"storageLevel",
".",
"deserialized",
",",
"storageLevel",
".",
"replication",
")"
] |
Returns a Java StorageLevel based on a pyspark.StorageLevel.
|
[
"Returns",
"a",
"Java",
"StorageLevel",
"based",
"on",
"a",
"pyspark",
".",
"StorageLevel",
"."
] |
618d6bff71073c8c93501ab7392c3cc579730f0b
|
https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/context.py#L949-L961
|
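The helper above is internal; its public counterpart is persisting an RDD with `pyspark.StorageLevel`, whose five constructor fields are exactly the ones forwarded to the JVM here. A minimal sketch, assuming a running SparkContext `sc`:

```python
# Persist with a predefined level, then with a custom one built from the same
# five fields _getJavaStorageLevel() forwards. Assumes an existing `sc`.
from pyspark import StorageLevel

rdd = sc.parallelize(range(100))
rdd.persist(StorageLevel.MEMORY_AND_DISK)   # predefined level
rdd.unpersist()

# useDisk, useMemory, useOffHeap, deserialized, replication
custom = StorageLevel(True, True, False, False, 2)
rdd.persist(custom)
print(rdd.getStorageLevel())
```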
19,289
|
apache/spark
|
python/pyspark/context.py
|
SparkContext.setJobGroup
|
def setJobGroup(self, groupId, description, interruptOnCancel=False):
"""
Assigns a group ID to all the jobs started by this thread until the group ID is set to a
different value or cleared.
Often, a unit of execution in an application consists of multiple Spark actions or jobs.
Application programmers can use this method to group all those jobs together and give a
group description. Once set, the Spark web UI will associate such jobs with this group.
The application can use L{SparkContext.cancelJobGroup} to cancel all
running jobs in this group.
>>> import threading
>>> from time import sleep
>>> result = "Not Set"
>>> lock = threading.Lock()
>>> def map_func(x):
... sleep(100)
... raise Exception("Task should have been cancelled")
>>> def start_job(x):
... global result
... try:
... sc.setJobGroup("job_to_cancel", "some description")
... result = sc.parallelize(range(x)).map(map_func).collect()
... except Exception as e:
... result = "Cancelled"
... lock.release()
>>> def stop_job():
... sleep(5)
... sc.cancelJobGroup("job_to_cancel")
>>> suppress = lock.acquire()
>>> suppress = threading.Thread(target=start_job, args=(10,)).start()
>>> suppress = threading.Thread(target=stop_job).start()
>>> suppress = lock.acquire()
>>> print(result)
Cancelled
If interruptOnCancel is set to true for the job group, then job cancellation will result
in Thread.interrupt() being called on the job's executor threads. This is useful to help
ensure that the tasks are actually stopped in a timely manner, but is off by default due
to HDFS-1208, where HDFS may respond to Thread.interrupt() by marking nodes as dead.
"""
self._jsc.setJobGroup(groupId, description, interruptOnCancel)
|
python
|
def setJobGroup(self, groupId, description, interruptOnCancel=False):
"""
Assigns a group ID to all the jobs started by this thread until the group ID is set to a
different value or cleared.
Often, a unit of execution in an application consists of multiple Spark actions or jobs.
Application programmers can use this method to group all those jobs together and give a
group description. Once set, the Spark web UI will associate such jobs with this group.
The application can use L{SparkContext.cancelJobGroup} to cancel all
running jobs in this group.
>>> import threading
>>> from time import sleep
>>> result = "Not Set"
>>> lock = threading.Lock()
>>> def map_func(x):
... sleep(100)
... raise Exception("Task should have been cancelled")
>>> def start_job(x):
... global result
... try:
... sc.setJobGroup("job_to_cancel", "some description")
... result = sc.parallelize(range(x)).map(map_func).collect()
... except Exception as e:
... result = "Cancelled"
... lock.release()
>>> def stop_job():
... sleep(5)
... sc.cancelJobGroup("job_to_cancel")
>>> suppress = lock.acquire()
>>> suppress = threading.Thread(target=start_job, args=(10,)).start()
>>> suppress = threading.Thread(target=stop_job).start()
>>> suppress = lock.acquire()
>>> print(result)
Cancelled
If interruptOnCancel is set to true for the job group, then job cancellation will result
in Thread.interrupt() being called on the job's executor threads. This is useful to help
ensure that the tasks are actually stopped in a timely manner, but is off by default due
to HDFS-1208, where HDFS may respond to Thread.interrupt() by marking nodes as dead.
"""
self._jsc.setJobGroup(groupId, description, interruptOnCancel)
|
[
"def",
"setJobGroup",
"(",
"self",
",",
"groupId",
",",
"description",
",",
"interruptOnCancel",
"=",
"False",
")",
":",
"self",
".",
"_jsc",
".",
"setJobGroup",
"(",
"groupId",
",",
"description",
",",
"interruptOnCancel",
")"
] |
Assigns a group ID to all the jobs started by this thread until the group ID is set to a
different value or cleared.
Often, a unit of execution in an application consists of multiple Spark actions or jobs.
Application programmers can use this method to group all those jobs together and give a
group description. Once set, the Spark web UI will associate such jobs with this group.
The application can use L{SparkContext.cancelJobGroup} to cancel all
running jobs in this group.
>>> import threading
>>> from time import sleep
>>> result = "Not Set"
>>> lock = threading.Lock()
>>> def map_func(x):
... sleep(100)
... raise Exception("Task should have been cancelled")
>>> def start_job(x):
... global result
... try:
... sc.setJobGroup("job_to_cancel", "some description")
... result = sc.parallelize(range(x)).map(map_func).collect()
... except Exception as e:
... result = "Cancelled"
... lock.release()
>>> def stop_job():
... sleep(5)
... sc.cancelJobGroup("job_to_cancel")
>>> suppress = lock.acquire()
>>> suppress = threading.Thread(target=start_job, args=(10,)).start()
>>> suppress = threading.Thread(target=stop_job).start()
>>> suppress = lock.acquire()
>>> print(result)
Cancelled
If interruptOnCancel is set to true for the job group, then job cancellation will result
in Thread.interrupt() being called on the job's executor threads. This is useful to help
ensure that the tasks are actually stopped in a timely manner, but is off by default due
to HDFS-1208, where HDFS may respond to Thread.interrupt() by marking nodes as dead.
|
[
"Assigns",
"a",
"group",
"ID",
"to",
"all",
"the",
"jobs",
"started",
"by",
"this",
"thread",
"until",
"the",
"group",
"ID",
"is",
"set",
"to",
"a",
"different",
"value",
"or",
"cleared",
"."
] |
618d6bff71073c8c93501ab7392c3cc579730f0b
|
https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/context.py#L963-L1005
|
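Beyond the cancellation doctest above, the method is often used simply to label work for the Spark web UI; a minimal sketch, assuming a running SparkContext `sc` (the group name here is illustrative).

```python
# Tag all jobs launched from this thread so they show up under one group
# in the Spark web UI. Assumes an existing SparkContext `sc`.
sc.setJobGroup("nightly-etl", "load and count the raw input")
n = sc.parallelize(range(1000), 4).count()
print(n)  # 1000; the count job is attributed to the "nightly-etl" group
```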
19,290
|
apache/spark
|
python/pyspark/context.py
|
SparkContext.runJob
|
def runJob(self, rdd, partitionFunc, partitions=None, allowLocal=False):
"""
Executes the given partitionFunc on the specified set of partitions,
returning the result as an array of elements.
If 'partitions' is not specified, this will run over all partitions.
>>> myRDD = sc.parallelize(range(6), 3)
>>> sc.runJob(myRDD, lambda part: [x * x for x in part])
[0, 1, 4, 9, 16, 25]
>>> myRDD = sc.parallelize(range(6), 3)
>>> sc.runJob(myRDD, lambda part: [x * x for x in part], [0, 2], True)
[0, 1, 16, 25]
"""
if partitions is None:
partitions = range(rdd._jrdd.partitions().size())
# Implementation note: This is implemented as a mapPartitions followed
# by runJob() in order to avoid having to pass a Python lambda into
# SparkContext#runJob.
mappedRDD = rdd.mapPartitions(partitionFunc)
sock_info = self._jvm.PythonRDD.runJob(self._jsc.sc(), mappedRDD._jrdd, partitions)
return list(_load_from_socket(sock_info, mappedRDD._jrdd_deserializer))
|
python
|
def runJob(self, rdd, partitionFunc, partitions=None, allowLocal=False):
"""
Executes the given partitionFunc on the specified set of partitions,
returning the result as an array of elements.
If 'partitions' is not specified, this will run over all partitions.
>>> myRDD = sc.parallelize(range(6), 3)
>>> sc.runJob(myRDD, lambda part: [x * x for x in part])
[0, 1, 4, 9, 16, 25]
>>> myRDD = sc.parallelize(range(6), 3)
>>> sc.runJob(myRDD, lambda part: [x * x for x in part], [0, 2], True)
[0, 1, 16, 25]
"""
if partitions is None:
partitions = range(rdd._jrdd.partitions().size())
# Implementation note: This is implemented as a mapPartitions followed
# by runJob() in order to avoid having to pass a Python lambda into
# SparkContext#runJob.
mappedRDD = rdd.mapPartitions(partitionFunc)
sock_info = self._jvm.PythonRDD.runJob(self._jsc.sc(), mappedRDD._jrdd, partitions)
return list(_load_from_socket(sock_info, mappedRDD._jrdd_deserializer))
|
[
"def",
"runJob",
"(",
"self",
",",
"rdd",
",",
"partitionFunc",
",",
"partitions",
"=",
"None",
",",
"allowLocal",
"=",
"False",
")",
":",
"if",
"partitions",
"is",
"None",
":",
"partitions",
"=",
"range",
"(",
"rdd",
".",
"_jrdd",
".",
"partitions",
"(",
")",
".",
"size",
"(",
")",
")",
"# Implementation note: This is implemented as a mapPartitions followed",
"# by runJob() in order to avoid having to pass a Python lambda into",
"# SparkContext#runJob.",
"mappedRDD",
"=",
"rdd",
".",
"mapPartitions",
"(",
"partitionFunc",
")",
"sock_info",
"=",
"self",
".",
"_jvm",
".",
"PythonRDD",
".",
"runJob",
"(",
"self",
".",
"_jsc",
".",
"sc",
"(",
")",
",",
"mappedRDD",
".",
"_jrdd",
",",
"partitions",
")",
"return",
"list",
"(",
"_load_from_socket",
"(",
"sock_info",
",",
"mappedRDD",
".",
"_jrdd_deserializer",
")",
")"
] |
Executes the given partitionFunc on the specified set of partitions,
returning the result as an array of elements.
If 'partitions' is not specified, this will run over all partitions.
>>> myRDD = sc.parallelize(range(6), 3)
>>> sc.runJob(myRDD, lambda part: [x * x for x in part])
[0, 1, 4, 9, 16, 25]
>>> myRDD = sc.parallelize(range(6), 3)
>>> sc.runJob(myRDD, lambda part: [x * x for x in part], [0, 2], True)
[0, 1, 16, 25]
|
[
"Executes",
"the",
"given",
"partitionFunc",
"on",
"the",
"specified",
"set",
"of",
"partitions",
"returning",
"the",
"result",
"as",
"an",
"array",
"of",
"elements",
"."
] |
618d6bff71073c8c93501ab7392c3cc579730f0b
|
https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/context.py#L1052-L1075
|
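Another common use, assuming a running SparkContext `sc`: evaluate only selected partitions, which is the trick helpers such as `take()` rely on to avoid computing the whole RDD.

```python
# Run the partition function on partition 0 only, leaving the other
# partitions unevaluated. Assumes an existing SparkContext `sc`.
rdd = sc.parallelize(range(100), 10)
first_partition = sc.runJob(rdd, lambda it: list(it), partitions=[0])
print(first_partition)  # the ten elements that landed in partition 0
```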
19,291
|
apache/spark
|
python/pyspark/mllib/fpm.py
|
FPGrowth.train
|
def train(cls, data, minSupport=0.3, numPartitions=-1):
"""
Computes an FP-Growth model that contains frequent itemsets.
:param data:
The input data set, each element contains a transaction.
:param minSupport:
The minimal support level.
(default: 0.3)
:param numPartitions:
The number of partitions used by parallel FP-growth. A value
of -1 will use the same number as input data.
(default: -1)
"""
model = callMLlibFunc("trainFPGrowthModel", data, float(minSupport), int(numPartitions))
return FPGrowthModel(model)
|
python
|
def train(cls, data, minSupport=0.3, numPartitions=-1):
"""
Computes an FP-Growth model that contains frequent itemsets.
:param data:
The input data set, each element contains a transaction.
:param minSupport:
The minimal support level.
(default: 0.3)
:param numPartitions:
The number of partitions used by parallel FP-growth. A value
of -1 will use the same number as input data.
(default: -1)
"""
model = callMLlibFunc("trainFPGrowthModel", data, float(minSupport), int(numPartitions))
return FPGrowthModel(model)
|
[
"def",
"train",
"(",
"cls",
",",
"data",
",",
"minSupport",
"=",
"0.3",
",",
"numPartitions",
"=",
"-",
"1",
")",
":",
"model",
"=",
"callMLlibFunc",
"(",
"\"trainFPGrowthModel\"",
",",
"data",
",",
"float",
"(",
"minSupport",
")",
",",
"int",
"(",
"numPartitions",
")",
")",
"return",
"FPGrowthModel",
"(",
"model",
")"
] |
Computes an FP-Growth model that contains frequent itemsets.
:param data:
The input data set, each element contains a transaction.
:param minSupport:
The minimal support level.
(default: 0.3)
:param numPartitions:
The number of partitions used by parallel FP-growth. A value
of -1 will use the same number as input data.
(default: -1)
|
[
"Computes",
"an",
"FP",
"-",
"Growth",
"model",
"that",
"contains",
"frequent",
"itemsets",
"."
] |
618d6bff71073c8c93501ab7392c3cc579730f0b
|
https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/mllib/fpm.py#L78-L93
|
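The FPGrowth.train docstring above carries no usage example; a minimal sketch, assuming a running SparkContext `sc` and that each transaction is a list of distinct items.

```python
# Mine frequent itemsets from a handful of toy transactions.
# Assumes an existing SparkContext `sc`.
from pyspark.mllib.fpm import FPGrowth

transactions = sc.parallelize([
    ["a", "b", "c"],
    ["a", "b"],
    ["b", "c"],
    ["a", "c"],
])
model = FPGrowth.train(transactions, minSupport=0.5, numPartitions=2)
for itemset in model.freqItemsets().collect():
    print(itemset.items, itemset.freq)
```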
19,292
|
apache/spark
|
python/pyspark/mllib/fpm.py
|
PrefixSpan.train
|
def train(cls, data, minSupport=0.1, maxPatternLength=10, maxLocalProjDBSize=32000000):
"""
Finds the complete set of frequent sequential patterns in the
input sequences of itemsets.
:param data:
The input data set, each element contains a sequence of
itemsets.
:param minSupport:
The minimal support level of the sequential pattern, any
pattern that appears more than (minSupport *
size-of-the-dataset) times will be output.
(default: 0.1)
:param maxPatternLength:
The maximal length of the sequential pattern, any pattern
that appears less than maxPatternLength will be output.
(default: 10)
:param maxLocalProjDBSize:
The maximum number of items (including delimiters used in the
internal storage format) allowed in a projected database before
local processing. If a projected database exceeds this size,
another iteration of distributed prefix growth is run.
(default: 32000000)
"""
model = callMLlibFunc("trainPrefixSpanModel",
data, minSupport, maxPatternLength, maxLocalProjDBSize)
return PrefixSpanModel(model)
|
python
|
def train(cls, data, minSupport=0.1, maxPatternLength=10, maxLocalProjDBSize=32000000):
"""
Finds the complete set of frequent sequential patterns in the
input sequences of itemsets.
:param data:
The input data set, each element contains a sequence of
itemsets.
:param minSupport:
The minimal support level of the sequential pattern, any
pattern that appears more than (minSupport *
size-of-the-dataset) times will be output.
(default: 0.1)
:param maxPatternLength:
The maximal length of the sequential pattern, any pattern
that appears less than maxPatternLength will be output.
(default: 10)
:param maxLocalProjDBSize:
The maximum number of items (including delimiters used in the
internal storage format) allowed in a projected database before
local processing. If a projected database exceeds this size,
another iteration of distributed prefix growth is run.
(default: 32000000)
"""
model = callMLlibFunc("trainPrefixSpanModel",
data, minSupport, maxPatternLength, maxLocalProjDBSize)
return PrefixSpanModel(model)
|
[
"def",
"train",
"(",
"cls",
",",
"data",
",",
"minSupport",
"=",
"0.1",
",",
"maxPatternLength",
"=",
"10",
",",
"maxLocalProjDBSize",
"=",
"32000000",
")",
":",
"model",
"=",
"callMLlibFunc",
"(",
"\"trainPrefixSpanModel\"",
",",
"data",
",",
"minSupport",
",",
"maxPatternLength",
",",
"maxLocalProjDBSize",
")",
"return",
"PrefixSpanModel",
"(",
"model",
")"
] |
Finds the complete set of frequent sequential patterns in the
input sequences of itemsets.
:param data:
The input data set, each element contains a sequence of
itemsets.
:param minSupport:
The minimal support level of the sequential pattern, any
pattern that appears more than (minSupport *
size-of-the-dataset) times will be output.
(default: 0.1)
:param maxPatternLength:
The maximal length of the sequential pattern, any pattern
that appears less than maxPatternLength will be output.
(default: 10)
:param maxLocalProjDBSize:
The maximum number of items (including delimiters used in the
internal storage format) allowed in a projected database before
local processing. If a projected database exceeds this size,
another iteration of distributed prefix growth is run.
(default: 32000000)
|
[
"Finds",
"the",
"complete",
"set",
"of",
"frequent",
"sequential",
"patterns",
"in",
"the",
"input",
"sequences",
"of",
"itemsets",
"."
] |
618d6bff71073c8c93501ab7392c3cc579730f0b
|
https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/mllib/fpm.py#L140-L166
|
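Likewise, a minimal PrefixSpan.train sketch, assuming a running SparkContext `sc`; each element of the input RDD is a sequence of itemsets, i.e. a list of lists.

```python
# Mine frequent sequential patterns from a few toy sequences.
# Assumes an existing SparkContext `sc`.
from pyspark.mllib.fpm import PrefixSpan

sequences = sc.parallelize([
    [["a", "b"], ["c"]],
    [["a"], ["c", "b"], ["a", "b"]],
    [["a", "b"], ["e"]],
    [["f"]],
])
model = PrefixSpan.train(sequences, minSupport=0.5, maxPatternLength=5)
for fs in model.freqSequences().collect():
    print(fs.sequence, fs.freq)
```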
19,293
|
apache/spark
|
python/pyspark/mllib/stat/KernelDensity.py
|
KernelDensity.setSample
|
def setSample(self, sample):
"""Set sample points from the population. Should be a RDD"""
if not isinstance(sample, RDD):
raise TypeError("samples should be a RDD, received %s" % type(sample))
self._sample = sample
|
python
|
def setSample(self, sample):
"""Set sample points from the population. Should be a RDD"""
if not isinstance(sample, RDD):
raise TypeError("samples should be a RDD, received %s" % type(sample))
self._sample = sample
|
[
"def",
"setSample",
"(",
"self",
",",
"sample",
")",
":",
"if",
"not",
"isinstance",
"(",
"sample",
",",
"RDD",
")",
":",
"raise",
"TypeError",
"(",
"\"samples should be a RDD, received %s\"",
"%",
"type",
"(",
"sample",
")",
")",
"self",
".",
"_sample",
"=",
"sample"
] |
Set sample points from the population. Should be a RDD
|
[
"Set",
"sample",
"points",
"from",
"the",
"population",
".",
"Should",
"be",
"a",
"RDD"
] |
618d6bff71073c8c93501ab7392c3cc579730f0b
|
https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/mllib/stat/KernelDensity.py#L48-L52
|
19,294
|
apache/spark
|
python/pyspark/mllib/stat/KernelDensity.py
|
KernelDensity.estimate
|
def estimate(self, points):
"""Estimate the probability density at points"""
points = list(points)
densities = callMLlibFunc(
"estimateKernelDensity", self._sample, self._bandwidth, points)
return np.asarray(densities)
|
python
|
def estimate(self, points):
"""Estimate the probability density at points"""
points = list(points)
densities = callMLlibFunc(
"estimateKernelDensity", self._sample, self._bandwidth, points)
return np.asarray(densities)
|
[
"def",
"estimate",
"(",
"self",
",",
"points",
")",
":",
"points",
"=",
"list",
"(",
"points",
")",
"densities",
"=",
"callMLlibFunc",
"(",
"\"estimateKernelDensity\"",
",",
"self",
".",
"_sample",
",",
"self",
".",
"_bandwidth",
",",
"points",
")",
"return",
"np",
".",
"asarray",
"(",
"densities",
")"
] |
Estimate the probability density at points
|
[
"Estimate",
"the",
"probability",
"density",
"at",
"points"
] |
618d6bff71073c8c93501ab7392c3cc579730f0b
|
https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/mllib/stat/KernelDensity.py#L54-L59
|
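A short end-to-end sketch covering both `setSample()` above and `estimate()`, assuming a running SparkContext `sc`; it also uses `setBandwidth()`, the class's other setter.

```python
# Fit a Gaussian kernel density estimate on a small sample and query it.
# Assumes an existing SparkContext `sc`.
from pyspark.mllib.stat import KernelDensity

sample = sc.parallelize([0.0, 1.0, 2.0, 2.5, 3.0])
kd = KernelDensity()
kd.setSample(sample)     # must be an RDD, otherwise setSample raises TypeError
kd.setBandwidth(1.0)     # standard deviation of the Gaussian kernel
densities = kd.estimate([0.0, 1.5, 3.0])
print(densities)         # numpy array of estimated densities at the three points
```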
19,295
|
apache/spark
|
python/pyspark/accumulators.py
|
_start_update_server
|
def _start_update_server(auth_token):
"""Start a TCP server to receive accumulator updates in a daemon thread, and returns it"""
server = AccumulatorServer(("localhost", 0), _UpdateRequestHandler, auth_token)
thread = threading.Thread(target=server.serve_forever)
thread.daemon = True
thread.start()
return server
|
python
|
def _start_update_server(auth_token):
"""Start a TCP server to receive accumulator updates in a daemon thread, and returns it"""
server = AccumulatorServer(("localhost", 0), _UpdateRequestHandler, auth_token)
thread = threading.Thread(target=server.serve_forever)
thread.daemon = True
thread.start()
return server
|
[
"def",
"_start_update_server",
"(",
"auth_token",
")",
":",
"server",
"=",
"AccumulatorServer",
"(",
"(",
"\"localhost\"",
",",
"0",
")",
",",
"_UpdateRequestHandler",
",",
"auth_token",
")",
"thread",
"=",
"threading",
".",
"Thread",
"(",
"target",
"=",
"server",
".",
"serve_forever",
")",
"thread",
".",
"daemon",
"=",
"True",
"thread",
".",
"start",
"(",
")",
"return",
"server"
] |
Start a TCP server to receive accumulator updates in a daemon thread, and returns it
|
[
"Start",
"a",
"TCP",
"server",
"to",
"receive",
"accumulator",
"updates",
"in",
"a",
"daemon",
"thread",
"and",
"returns",
"it"
] |
618d6bff71073c8c93501ab7392c3cc579730f0b
|
https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/accumulators.py#L289-L295
|
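`AccumulatorServer` and `_UpdateRequestHandler` are PySpark internals, so the sketch below only illustrates the general pattern with the standard library: bind a TCP server to an ephemeral localhost port and serve it from a daemon thread.

```python
# Generic stdlib illustration of the daemon-thread TCP server pattern used by
# _start_update_server(); the echo handler is a stand-in, not PySpark code.
import socketserver
import threading

class EchoHandler(socketserver.StreamRequestHandler):
    def handle(self):
        data = self.rfile.readline()
        self.wfile.write(data)

server = socketserver.TCPServer(("localhost", 0), EchoHandler)  # port 0 = pick a free port
thread = threading.Thread(target=server.serve_forever)
thread.daemon = True  # do not block interpreter shutdown
thread.start()
print("listening on port", server.server_address[1])
```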
19,296
|
apache/spark
|
python/pyspark/accumulators.py
|
Accumulator.add
|
def add(self, term):
"""Adds a term to this accumulator's value"""
self._value = self.accum_param.addInPlace(self._value, term)
|
python
|
def add(self, term):
"""Adds a term to this accumulator's value"""
self._value = self.accum_param.addInPlace(self._value, term)
|
[
"def",
"add",
"(",
"self",
",",
"term",
")",
":",
"self",
".",
"_value",
"=",
"self",
".",
"accum_param",
".",
"addInPlace",
"(",
"self",
".",
"_value",
",",
"term",
")"
] |
Adds a term to this accumulator's value
|
[
"Adds",
"a",
"term",
"to",
"this",
"accumulator",
"s",
"value"
] |
618d6bff71073c8c93501ab7392c3cc579730f0b
|
https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/accumulators.py#L163-L165
|
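A typical accumulator round trip, assuming a running SparkContext `sc`: tasks call `add()` (usually via `+=`) and only the driver reads `.value`.

```python
# Count even numbers with an accumulator. Assumes an existing SparkContext `sc`.
acc = sc.accumulator(0)

def count_evens(x):
    if x % 2 == 0:
        acc.add(1)   # equivalent to `acc += 1` inside a task

sc.parallelize(range(10)).foreach(count_evens)
print(acc.value)     # 5, readable only on the driver
```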
19,297
|
apache/spark
|
python/pyspark/mllib/random.py
|
RandomRDDs.normalRDD
|
def normalRDD(sc, size, numPartitions=None, seed=None):
"""
Generates an RDD comprised of i.i.d. samples from the standard normal
distribution.
To transform the distribution in the generated RDD from standard normal
to some other normal N(mean, sigma^2), use
C{RandomRDDs.normal(sc, n, p, seed)\
.map(lambda v: mean + sigma * v)}
:param sc: SparkContext used to create the RDD.
:param size: Size of the RDD.
:param numPartitions: Number of partitions in the RDD (default: `sc.defaultParallelism`).
:param seed: Random seed (default: a random long integer).
:return: RDD of float comprised of i.i.d. samples ~ N(0.0, 1.0).
>>> x = RandomRDDs.normalRDD(sc, 1000, seed=1)
>>> stats = x.stats()
>>> stats.count()
1000
>>> abs(stats.mean() - 0.0) < 0.1
True
>>> abs(stats.stdev() - 1.0) < 0.1
True
"""
return callMLlibFunc("normalRDD", sc._jsc, size, numPartitions, seed)
|
python
|
def normalRDD(sc, size, numPartitions=None, seed=None):
"""
Generates an RDD comprised of i.i.d. samples from the standard normal
distribution.
To transform the distribution in the generated RDD from standard normal
to some other normal N(mean, sigma^2), use
C{RandomRDDs.normal(sc, n, p, seed)\
.map(lambda v: mean + sigma * v)}
:param sc: SparkContext used to create the RDD.
:param size: Size of the RDD.
:param numPartitions: Number of partitions in the RDD (default: `sc.defaultParallelism`).
:param seed: Random seed (default: a random long integer).
:return: RDD of float comprised of i.i.d. samples ~ N(0.0, 1.0).
>>> x = RandomRDDs.normalRDD(sc, 1000, seed=1)
>>> stats = x.stats()
>>> stats.count()
1000
>>> abs(stats.mean() - 0.0) < 0.1
True
>>> abs(stats.stdev() - 1.0) < 0.1
True
"""
return callMLlibFunc("normalRDD", sc._jsc, size, numPartitions, seed)
|
[
"def",
"normalRDD",
"(",
"sc",
",",
"size",
",",
"numPartitions",
"=",
"None",
",",
"seed",
"=",
"None",
")",
":",
"return",
"callMLlibFunc",
"(",
"\"normalRDD\"",
",",
"sc",
".",
"_jsc",
",",
"size",
",",
"numPartitions",
",",
"seed",
")"
] |
Generates an RDD comprised of i.i.d. samples from the standard normal
distribution.
To transform the distribution in the generated RDD from standard normal
to some other normal N(mean, sigma^2), use
C{RandomRDDs.normal(sc, n, p, seed)\
.map(lambda v: mean + sigma * v)}
:param sc: SparkContext used to create the RDD.
:param size: Size of the RDD.
:param numPartitions: Number of partitions in the RDD (default: `sc.defaultParallelism`).
:param seed: Random seed (default: a random long integer).
:return: RDD of float comprised of i.i.d. samples ~ N(0.0, 1.0).
>>> x = RandomRDDs.normalRDD(sc, 1000, seed=1)
>>> stats = x.stats()
>>> stats.count()
1000
>>> abs(stats.mean() - 0.0) < 0.1
True
>>> abs(stats.stdev() - 1.0) < 0.1
True
|
[
"Generates",
"an",
"RDD",
"comprised",
"of",
"i",
".",
"i",
".",
"d",
".",
"samples",
"from",
"the",
"standard",
"normal",
"distribution",
"."
] |
618d6bff71073c8c93501ab7392c3cc579730f0b
|
https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/mllib/random.py#L81-L106
|
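A sketch of the mean/sigma transformation mentioned in the normalRDD docstring above, assuming a running SparkContext `sc`; the tolerances are loose because the samples are random.

```python
# Shift and scale standard-normal samples into N(10, 2^2).
# Assumes an existing SparkContext `sc`.
from pyspark.mllib.random import RandomRDDs

mean, sigma = 10.0, 2.0
x = RandomRDDs.normalRDD(sc, 10000, seed=1).map(lambda v: mean + sigma * v)
stats = x.stats()
print(abs(stats.mean() - mean) < 0.2)    # True with overwhelming probability
print(abs(stats.stdev() - sigma) < 0.2)  # True with overwhelming probability
```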
19,298
|
apache/spark
|
python/pyspark/mllib/random.py
|
RandomRDDs.logNormalRDD
|
def logNormalRDD(sc, mean, std, size, numPartitions=None, seed=None):
"""
Generates an RDD comprised of i.i.d. samples from the log normal
distribution with the input mean and standard distribution.
:param sc: SparkContext used to create the RDD.
:param mean: mean for the log Normal distribution
:param std: std for the log Normal distribution
:param size: Size of the RDD.
:param numPartitions: Number of partitions in the RDD (default: `sc.defaultParallelism`).
:param seed: Random seed (default: a random long integer).
:return: RDD of float comprised of i.i.d. samples ~ log N(mean, std).
>>> from math import sqrt, exp
>>> mean = 0.0
>>> std = 1.0
>>> expMean = exp(mean + 0.5 * std * std)
>>> expStd = sqrt((exp(std * std) - 1.0) * exp(2.0 * mean + std * std))
>>> x = RandomRDDs.logNormalRDD(sc, mean, std, 1000, seed=2)
>>> stats = x.stats()
>>> stats.count()
1000
>>> abs(stats.mean() - expMean) < 0.5
True
>>> from math import sqrt
>>> abs(stats.stdev() - expStd) < 0.5
True
"""
return callMLlibFunc("logNormalRDD", sc._jsc, float(mean), float(std),
size, numPartitions, seed)
|
python
|
def logNormalRDD(sc, mean, std, size, numPartitions=None, seed=None):
"""
Generates an RDD comprised of i.i.d. samples from the log normal
distribution with the input mean and standard distribution.
:param sc: SparkContext used to create the RDD.
:param mean: mean for the log Normal distribution
:param std: std for the log Normal distribution
:param size: Size of the RDD.
:param numPartitions: Number of partitions in the RDD (default: `sc.defaultParallelism`).
:param seed: Random seed (default: a random long integer).
:return: RDD of float comprised of i.i.d. samples ~ log N(mean, std).
>>> from math import sqrt, exp
>>> mean = 0.0
>>> std = 1.0
>>> expMean = exp(mean + 0.5 * std * std)
>>> expStd = sqrt((exp(std * std) - 1.0) * exp(2.0 * mean + std * std))
>>> x = RandomRDDs.logNormalRDD(sc, mean, std, 1000, seed=2)
>>> stats = x.stats()
>>> stats.count()
1000
>>> abs(stats.mean() - expMean) < 0.5
True
>>> from math import sqrt
>>> abs(stats.stdev() - expStd) < 0.5
True
"""
return callMLlibFunc("logNormalRDD", sc._jsc, float(mean), float(std),
size, numPartitions, seed)
|
[
"def",
"logNormalRDD",
"(",
"sc",
",",
"mean",
",",
"std",
",",
"size",
",",
"numPartitions",
"=",
"None",
",",
"seed",
"=",
"None",
")",
":",
"return",
"callMLlibFunc",
"(",
"\"logNormalRDD\"",
",",
"sc",
".",
"_jsc",
",",
"float",
"(",
"mean",
")",
",",
"float",
"(",
"std",
")",
",",
"size",
",",
"numPartitions",
",",
"seed",
")"
] |
Generates an RDD comprised of i.i.d. samples from the log normal
distribution with the input mean and standard distribution.
:param sc: SparkContext used to create the RDD.
:param mean: mean for the log Normal distribution
:param std: std for the log Normal distribution
:param size: Size of the RDD.
:param numPartitions: Number of partitions in the RDD (default: `sc.defaultParallelism`).
:param seed: Random seed (default: a random long integer).
:return: RDD of float comprised of i.i.d. samples ~ log N(mean, std).
>>> from math import sqrt, exp
>>> mean = 0.0
>>> std = 1.0
>>> expMean = exp(mean + 0.5 * std * std)
>>> expStd = sqrt((exp(std * std) - 1.0) * exp(2.0 * mean + std * std))
>>> x = RandomRDDs.logNormalRDD(sc, mean, std, 1000, seed=2)
>>> stats = x.stats()
>>> stats.count()
1000
>>> abs(stats.mean() - expMean) < 0.5
True
>>> from math import sqrt
>>> abs(stats.stdev() - expStd) < 0.5
True
|
[
"Generates",
"an",
"RDD",
"comprised",
"of",
"i",
".",
"i",
".",
"d",
".",
"samples",
"from",
"the",
"log",
"normal",
"distribution",
"with",
"the",
"input",
"mean",
"and",
"standard",
"distribution",
"."
] |
618d6bff71073c8c93501ab7392c3cc579730f0b
|
https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/mllib/random.py#L110-L139
|
19,299
|
apache/spark
|
python/pyspark/mllib/random.py
|
RandomRDDs.exponentialRDD
|
def exponentialRDD(sc, mean, size, numPartitions=None, seed=None):
"""
Generates an RDD comprised of i.i.d. samples from the Exponential
distribution with the input mean.
:param sc: SparkContext used to create the RDD.
:param mean: Mean, or 1 / lambda, for the Exponential distribution.
:param size: Size of the RDD.
:param numPartitions: Number of partitions in the RDD (default: `sc.defaultParallelism`).
:param seed: Random seed (default: a random long integer).
:return: RDD of float comprised of i.i.d. samples ~ Exp(mean).
>>> mean = 2.0
>>> x = RandomRDDs.exponentialRDD(sc, mean, 1000, seed=2)
>>> stats = x.stats()
>>> stats.count()
1000
>>> abs(stats.mean() - mean) < 0.5
True
>>> from math import sqrt
>>> abs(stats.stdev() - sqrt(mean)) < 0.5
True
"""
return callMLlibFunc("exponentialRDD", sc._jsc, float(mean), size, numPartitions, seed)
|
python
|
def exponentialRDD(sc, mean, size, numPartitions=None, seed=None):
"""
Generates an RDD comprised of i.i.d. samples from the Exponential
distribution with the input mean.
:param sc: SparkContext used to create the RDD.
:param mean: Mean, or 1 / lambda, for the Exponential distribution.
:param size: Size of the RDD.
:param numPartitions: Number of partitions in the RDD (default: `sc.defaultParallelism`).
:param seed: Random seed (default: a random long integer).
:return: RDD of float comprised of i.i.d. samples ~ Exp(mean).
>>> mean = 2.0
>>> x = RandomRDDs.exponentialRDD(sc, mean, 1000, seed=2)
>>> stats = x.stats()
>>> stats.count()
1000
>>> abs(stats.mean() - mean) < 0.5
True
>>> from math import sqrt
>>> abs(stats.stdev() - sqrt(mean)) < 0.5
True
"""
return callMLlibFunc("exponentialRDD", sc._jsc, float(mean), size, numPartitions, seed)
|
[
"def",
"exponentialRDD",
"(",
"sc",
",",
"mean",
",",
"size",
",",
"numPartitions",
"=",
"None",
",",
"seed",
"=",
"None",
")",
":",
"return",
"callMLlibFunc",
"(",
"\"exponentialRDD\"",
",",
"sc",
".",
"_jsc",
",",
"float",
"(",
"mean",
")",
",",
"size",
",",
"numPartitions",
",",
"seed",
")"
] |
Generates an RDD comprised of i.i.d. samples from the Exponential
distribution with the input mean.
:param sc: SparkContext used to create the RDD.
:param mean: Mean, or 1 / lambda, for the Exponential distribution.
:param size: Size of the RDD.
:param numPartitions: Number of partitions in the RDD (default: `sc.defaultParallelism`).
:param seed: Random seed (default: a random long integer).
:return: RDD of float comprised of i.i.d. samples ~ Exp(mean).
>>> mean = 2.0
>>> x = RandomRDDs.exponentialRDD(sc, mean, 1000, seed=2)
>>> stats = x.stats()
>>> stats.count()
1000
>>> abs(stats.mean() - mean) < 0.5
True
>>> from math import sqrt
>>> abs(stats.stdev() - sqrt(mean)) < 0.5
True
|
[
"Generates",
"an",
"RDD",
"comprised",
"of",
"i",
".",
"i",
".",
"d",
".",
"samples",
"from",
"the",
"Exponential",
"distribution",
"with",
"the",
"input",
"mean",
"."
] |
618d6bff71073c8c93501ab7392c3cc579730f0b
|
https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/mllib/random.py#L170-L193
|