| id (int32, 0-252k) | repo (string, 7-55 chars) | path (string, 4-127 chars) | func_name (string, 1-88 chars) | original_string (string, 75-19.8k chars) | language (1 class: python) | code (string, 75-19.8k chars) | code_tokens (list) | docstring (string, 3-17.3k chars) | docstring_tokens (list) | sha (string, 40 chars) | url (string, 87-242 chars) |
|---|---|---|---|---|---|---|---|---|---|---|---|
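Each row below is one record with the columns listed above. As a minimal sketch of how such records could be consumed (assuming a JSON-lines export of this split; the file name and layout are assumptions, not something this page specifies):

```python
import json

# Minimal sketch, assuming the rows below are available as a JSON-lines export
# where each line is one record with the header's columns
# (id, repo, path, func_name, original_string, language, code, code_tokens,
# docstring, docstring_tokens, sha, url). The file name is hypothetical.
with open("spark_python_functions.jsonl") as f:
    for line in f:
        record = json.loads(line)
        # `code` is the raw function source, `docstring` its documentation,
        # and `url` pins the exact lines in the upstream repository at `sha`.
        print(record["func_name"], "-", record["path"])
        print(len(record["code_tokens"]), "code tokens /",
              len(record["docstring_tokens"]), "docstring tokens")
        print(record["url"])
        break  # only show the first record
```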
19,300
|
apache/spark
|
python/pyspark/mllib/random.py
|
RandomRDDs.gammaRDD
|
def gammaRDD(sc, shape, scale, size, numPartitions=None, seed=None):
    """
    Generates an RDD comprised of i.i.d. samples from the Gamma
    distribution with the input shape and scale.
    :param sc: SparkContext used to create the RDD.
    :param shape: shape (> 0) parameter for the Gamma distribution
    :param scale: scale (> 0) parameter for the Gamma distribution
    :param size: Size of the RDD.
    :param numPartitions: Number of partitions in the RDD (default: `sc.defaultParallelism`).
    :param seed: Random seed (default: a random long integer).
    :return: RDD of float comprised of i.i.d. samples ~ Gamma(shape, scale).
    >>> from math import sqrt
    >>> shape = 1.0
    >>> scale = 2.0
    >>> expMean = shape * scale
    >>> expStd = sqrt(shape * scale * scale)
    >>> x = RandomRDDs.gammaRDD(sc, shape, scale, 1000, seed=2)
    >>> stats = x.stats()
    >>> stats.count()
    1000
    >>> abs(stats.mean() - expMean) < 0.5
    True
    >>> abs(stats.stdev() - expStd) < 0.5
    True
    """
    return callMLlibFunc("gammaRDD", sc._jsc, float(shape),
                         float(scale), size, numPartitions, seed)
|
python
|
def gammaRDD(sc, shape, scale, size, numPartitions=None, seed=None):
    """
    Generates an RDD comprised of i.i.d. samples from the Gamma
    distribution with the input shape and scale.
    :param sc: SparkContext used to create the RDD.
    :param shape: shape (> 0) parameter for the Gamma distribution
    :param scale: scale (> 0) parameter for the Gamma distribution
    :param size: Size of the RDD.
    :param numPartitions: Number of partitions in the RDD (default: `sc.defaultParallelism`).
    :param seed: Random seed (default: a random long integer).
    :return: RDD of float comprised of i.i.d. samples ~ Gamma(shape, scale).
    >>> from math import sqrt
    >>> shape = 1.0
    >>> scale = 2.0
    >>> expMean = shape * scale
    >>> expStd = sqrt(shape * scale * scale)
    >>> x = RandomRDDs.gammaRDD(sc, shape, scale, 1000, seed=2)
    >>> stats = x.stats()
    >>> stats.count()
    1000
    >>> abs(stats.mean() - expMean) < 0.5
    True
    >>> abs(stats.stdev() - expStd) < 0.5
    True
    """
    return callMLlibFunc("gammaRDD", sc._jsc, float(shape),
                         float(scale), size, numPartitions, seed)
|
[
"def",
"gammaRDD",
"(",
"sc",
",",
"shape",
",",
"scale",
",",
"size",
",",
"numPartitions",
"=",
"None",
",",
"seed",
"=",
"None",
")",
":",
"return",
"callMLlibFunc",
"(",
"\"gammaRDD\"",
",",
"sc",
".",
"_jsc",
",",
"float",
"(",
"shape",
")",
",",
"float",
"(",
"scale",
")",
",",
"size",
",",
"numPartitions",
",",
"seed",
")"
] |
Generates an RDD comprised of i.i.d. samples from the Gamma
distribution with the input shape and scale.
:param sc: SparkContext used to create the RDD.
:param shape: shape (> 0) parameter for the Gamma distribution
:param scale: scale (> 0) parameter for the Gamma distribution
:param size: Size of the RDD.
:param numPartitions: Number of partitions in the RDD (default: `sc.defaultParallelism`).
:param seed: Random seed (default: a random long integer).
:return: RDD of float comprised of i.i.d. samples ~ Gamma(shape, scale).
>>> from math import sqrt
>>> shape = 1.0
>>> scale = 2.0
>>> expMean = shape * scale
>>> expStd = sqrt(shape * scale * scale)
>>> x = RandomRDDs.gammaRDD(sc, shape, scale, 1000, seed=2)
>>> stats = x.stats()
>>> stats.count()
1000
>>> abs(stats.mean() - expMean) < 0.5
True
>>> abs(stats.stdev() - expStd) < 0.5
True
|
[
"Generates",
"an",
"RDD",
"comprised",
"of",
"i",
".",
"i",
".",
"d",
".",
"samples",
"from",
"the",
"Gamma",
"distribution",
"with",
"the",
"input",
"shape",
"and",
"scale",
"."
] |
618d6bff71073c8c93501ab7392c3cc579730f0b
|
https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/mllib/random.py#L197-L225
|
19,301
|
apache/spark
|
python/pyspark/mllib/random.py
|
RandomRDDs.normalVectorRDD
|
def normalVectorRDD(sc, numRows, numCols, numPartitions=None, seed=None):
    """
    Generates an RDD comprised of vectors containing i.i.d. samples drawn
    from the standard normal distribution.
    :param sc: SparkContext used to create the RDD.
    :param numRows: Number of Vectors in the RDD.
    :param numCols: Number of elements in each Vector.
    :param numPartitions: Number of partitions in the RDD (default: `sc.defaultParallelism`).
    :param seed: Random seed (default: a random long integer).
    :return: RDD of Vector with vectors containing i.i.d. samples ~ `N(0.0, 1.0)`.
    >>> import numpy as np
    >>> mat = np.matrix(RandomRDDs.normalVectorRDD(sc, 100, 100, seed=1).collect())
    >>> mat.shape
    (100, 100)
    >>> abs(mat.mean() - 0.0) < 0.1
    True
    >>> abs(mat.std() - 1.0) < 0.1
    True
    """
    return callMLlibFunc("normalVectorRDD", sc._jsc, numRows, numCols, numPartitions, seed)
|
python
|
def normalVectorRDD(sc, numRows, numCols, numPartitions=None, seed=None):
    """
    Generates an RDD comprised of vectors containing i.i.d. samples drawn
    from the standard normal distribution.
    :param sc: SparkContext used to create the RDD.
    :param numRows: Number of Vectors in the RDD.
    :param numCols: Number of elements in each Vector.
    :param numPartitions: Number of partitions in the RDD (default: `sc.defaultParallelism`).
    :param seed: Random seed (default: a random long integer).
    :return: RDD of Vector with vectors containing i.i.d. samples ~ `N(0.0, 1.0)`.
    >>> import numpy as np
    >>> mat = np.matrix(RandomRDDs.normalVectorRDD(sc, 100, 100, seed=1).collect())
    >>> mat.shape
    (100, 100)
    >>> abs(mat.mean() - 0.0) < 0.1
    True
    >>> abs(mat.std() - 1.0) < 0.1
    True
    """
    return callMLlibFunc("normalVectorRDD", sc._jsc, numRows, numCols, numPartitions, seed)
|
[
"def",
"normalVectorRDD",
"(",
"sc",
",",
"numRows",
",",
"numCols",
",",
"numPartitions",
"=",
"None",
",",
"seed",
"=",
"None",
")",
":",
"return",
"callMLlibFunc",
"(",
"\"normalVectorRDD\"",
",",
"sc",
".",
"_jsc",
",",
"numRows",
",",
"numCols",
",",
"numPartitions",
",",
"seed",
")"
] |
Generates an RDD comprised of vectors containing i.i.d. samples drawn
from the standard normal distribution.
:param sc: SparkContext used to create the RDD.
:param numRows: Number of Vectors in the RDD.
:param numCols: Number of elements in each Vector.
:param numPartitions: Number of partitions in the RDD (default: `sc.defaultParallelism`).
:param seed: Random seed (default: a random long integer).
:return: RDD of Vector with vectors containing i.i.d. samples ~ `N(0.0, 1.0)`.
>>> import numpy as np
>>> mat = np.matrix(RandomRDDs.normalVectorRDD(sc, 100, 100, seed=1).collect())
>>> mat.shape
(100, 100)
>>> abs(mat.mean() - 0.0) < 0.1
True
>>> abs(mat.std() - 1.0) < 0.1
True
|
[
"Generates",
"an",
"RDD",
"comprised",
"of",
"vectors",
"containing",
"i",
".",
"i",
".",
"d",
".",
"samples",
"drawn",
"from",
"the",
"standard",
"normal",
"distribution",
"."
] |
618d6bff71073c8c93501ab7392c3cc579730f0b
|
https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/mllib/random.py#L256-L277
|
19,302
|
apache/spark
|
python/pyspark/mllib/random.py
|
RandomRDDs.logNormalVectorRDD
|
def logNormalVectorRDD(sc, mean, std, numRows, numCols, numPartitions=None, seed=None):
    """
    Generates an RDD comprised of vectors containing i.i.d. samples drawn
    from the log normal distribution.
    :param sc: SparkContext used to create the RDD.
    :param mean: Mean of the log normal distribution
    :param std: Standard Deviation of the log normal distribution
    :param numRows: Number of Vectors in the RDD.
    :param numCols: Number of elements in each Vector.
    :param numPartitions: Number of partitions in the RDD (default: `sc.defaultParallelism`).
    :param seed: Random seed (default: a random long integer).
    :return: RDD of Vector with vectors containing i.i.d. samples ~ log `N(mean, std)`.
    >>> import numpy as np
    >>> from math import sqrt, exp
    >>> mean = 0.0
    >>> std = 1.0
    >>> expMean = exp(mean + 0.5 * std * std)
    >>> expStd = sqrt((exp(std * std) - 1.0) * exp(2.0 * mean + std * std))
    >>> m = RandomRDDs.logNormalVectorRDD(sc, mean, std, 100, 100, seed=1).collect()
    >>> mat = np.matrix(m)
    >>> mat.shape
    (100, 100)
    >>> abs(mat.mean() - expMean) < 0.1
    True
    >>> abs(mat.std() - expStd) < 0.1
    True
    """
    return callMLlibFunc("logNormalVectorRDD", sc._jsc, float(mean), float(std),
                         numRows, numCols, numPartitions, seed)
|
python
|
def logNormalVectorRDD(sc, mean, std, numRows, numCols, numPartitions=None, seed=None):
    """
    Generates an RDD comprised of vectors containing i.i.d. samples drawn
    from the log normal distribution.
    :param sc: SparkContext used to create the RDD.
    :param mean: Mean of the log normal distribution
    :param std: Standard Deviation of the log normal distribution
    :param numRows: Number of Vectors in the RDD.
    :param numCols: Number of elements in each Vector.
    :param numPartitions: Number of partitions in the RDD (default: `sc.defaultParallelism`).
    :param seed: Random seed (default: a random long integer).
    :return: RDD of Vector with vectors containing i.i.d. samples ~ log `N(mean, std)`.
    >>> import numpy as np
    >>> from math import sqrt, exp
    >>> mean = 0.0
    >>> std = 1.0
    >>> expMean = exp(mean + 0.5 * std * std)
    >>> expStd = sqrt((exp(std * std) - 1.0) * exp(2.0 * mean + std * std))
    >>> m = RandomRDDs.logNormalVectorRDD(sc, mean, std, 100, 100, seed=1).collect()
    >>> mat = np.matrix(m)
    >>> mat.shape
    (100, 100)
    >>> abs(mat.mean() - expMean) < 0.1
    True
    >>> abs(mat.std() - expStd) < 0.1
    True
    """
    return callMLlibFunc("logNormalVectorRDD", sc._jsc, float(mean), float(std),
                         numRows, numCols, numPartitions, seed)
|
[
"def",
"logNormalVectorRDD",
"(",
"sc",
",",
"mean",
",",
"std",
",",
"numRows",
",",
"numCols",
",",
"numPartitions",
"=",
"None",
",",
"seed",
"=",
"None",
")",
":",
"return",
"callMLlibFunc",
"(",
"\"logNormalVectorRDD\"",
",",
"sc",
".",
"_jsc",
",",
"float",
"(",
"mean",
")",
",",
"float",
"(",
"std",
")",
",",
"numRows",
",",
"numCols",
",",
"numPartitions",
",",
"seed",
")"
] |
Generates an RDD comprised of vectors containing i.i.d. samples drawn
from the log normal distribution.
:param sc: SparkContext used to create the RDD.
:param mean: Mean of the log normal distribution
:param std: Standard Deviation of the log normal distribution
:param numRows: Number of Vectors in the RDD.
:param numCols: Number of elements in each Vector.
:param numPartitions: Number of partitions in the RDD (default: `sc.defaultParallelism`).
:param seed: Random seed (default: a random long integer).
:return: RDD of Vector with vectors containing i.i.d. samples ~ log `N(mean, std)`.
>>> import numpy as np
>>> from math import sqrt, exp
>>> mean = 0.0
>>> std = 1.0
>>> expMean = exp(mean + 0.5 * std * std)
>>> expStd = sqrt((exp(std * std) - 1.0) * exp(2.0 * mean + std * std))
>>> m = RandomRDDs.logNormalVectorRDD(sc, mean, std, 100, 100, seed=1).collect()
>>> mat = np.matrix(m)
>>> mat.shape
(100, 100)
>>> abs(mat.mean() - expMean) < 0.1
True
>>> abs(mat.std() - expStd) < 0.1
True
|
[
"Generates",
"an",
"RDD",
"comprised",
"of",
"vectors",
"containing",
"i",
".",
"i",
".",
"d",
".",
"samples",
"drawn",
"from",
"the",
"log",
"normal",
"distribution",
"."
] |
618d6bff71073c8c93501ab7392c3cc579730f0b
|
https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/mllib/random.py#L282-L312
|
19,303
|
apache/spark
|
python/pyspark/mllib/random.py
|
RandomRDDs.poissonVectorRDD
|
def poissonVectorRDD(sc, mean, numRows, numCols, numPartitions=None, seed=None):
    """
    Generates an RDD comprised of vectors containing i.i.d. samples drawn
    from the Poisson distribution with the input mean.
    :param sc: SparkContext used to create the RDD.
    :param mean: Mean, or lambda, for the Poisson distribution.
    :param numRows: Number of Vectors in the RDD.
    :param numCols: Number of elements in each Vector.
    :param numPartitions: Number of partitions in the RDD (default: `sc.defaultParallelism`)
    :param seed: Random seed (default: a random long integer).
    :return: RDD of Vector with vectors containing i.i.d. samples ~ Pois(mean).
    >>> import numpy as np
    >>> mean = 100.0
    >>> rdd = RandomRDDs.poissonVectorRDD(sc, mean, 100, 100, seed=1)
    >>> mat = np.mat(rdd.collect())
    >>> mat.shape
    (100, 100)
    >>> abs(mat.mean() - mean) < 0.5
    True
    >>> from math import sqrt
    >>> abs(mat.std() - sqrt(mean)) < 0.5
    True
    """
    return callMLlibFunc("poissonVectorRDD", sc._jsc, float(mean), numRows, numCols,
                         numPartitions, seed)
|
python
|
def poissonVectorRDD(sc, mean, numRows, numCols, numPartitions=None, seed=None):
    """
    Generates an RDD comprised of vectors containing i.i.d. samples drawn
    from the Poisson distribution with the input mean.
    :param sc: SparkContext used to create the RDD.
    :param mean: Mean, or lambda, for the Poisson distribution.
    :param numRows: Number of Vectors in the RDD.
    :param numCols: Number of elements in each Vector.
    :param numPartitions: Number of partitions in the RDD (default: `sc.defaultParallelism`)
    :param seed: Random seed (default: a random long integer).
    :return: RDD of Vector with vectors containing i.i.d. samples ~ Pois(mean).
    >>> import numpy as np
    >>> mean = 100.0
    >>> rdd = RandomRDDs.poissonVectorRDD(sc, mean, 100, 100, seed=1)
    >>> mat = np.mat(rdd.collect())
    >>> mat.shape
    (100, 100)
    >>> abs(mat.mean() - mean) < 0.5
    True
    >>> from math import sqrt
    >>> abs(mat.std() - sqrt(mean)) < 0.5
    True
    """
    return callMLlibFunc("poissonVectorRDD", sc._jsc, float(mean), numRows, numCols,
                         numPartitions, seed)
|
[
"def",
"poissonVectorRDD",
"(",
"sc",
",",
"mean",
",",
"numRows",
",",
"numCols",
",",
"numPartitions",
"=",
"None",
",",
"seed",
"=",
"None",
")",
":",
"return",
"callMLlibFunc",
"(",
"\"poissonVectorRDD\"",
",",
"sc",
".",
"_jsc",
",",
"float",
"(",
"mean",
")",
",",
"numRows",
",",
"numCols",
",",
"numPartitions",
",",
"seed",
")"
] |
Generates an RDD comprised of vectors containing i.i.d. samples drawn
from the Poisson distribution with the input mean.
:param sc: SparkContext used to create the RDD.
:param mean: Mean, or lambda, for the Poisson distribution.
:param numRows: Number of Vectors in the RDD.
:param numCols: Number of elements in each Vector.
:param numPartitions: Number of partitions in the RDD (default: `sc.defaultParallelism`)
:param seed: Random seed (default: a random long integer).
:return: RDD of Vector with vectors containing i.i.d. samples ~ Pois(mean).
>>> import numpy as np
>>> mean = 100.0
>>> rdd = RandomRDDs.poissonVectorRDD(sc, mean, 100, 100, seed=1)
>>> mat = np.mat(rdd.collect())
>>> mat.shape
(100, 100)
>>> abs(mat.mean() - mean) < 0.5
True
>>> from math import sqrt
>>> abs(mat.std() - sqrt(mean)) < 0.5
True
|
[
"Generates",
"an",
"RDD",
"comprised",
"of",
"vectors",
"containing",
"i",
".",
"i",
".",
"d",
".",
"samples",
"drawn",
"from",
"the",
"Poisson",
"distribution",
"with",
"the",
"input",
"mean",
"."
] |
618d6bff71073c8c93501ab7392c3cc579730f0b
|
https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/mllib/random.py#L317-L343
|
19,304
|
apache/spark
|
python/pyspark/mllib/random.py
|
RandomRDDs.gammaVectorRDD
|
def gammaVectorRDD(sc, shape, scale, numRows, numCols, numPartitions=None, seed=None):
    """
    Generates an RDD comprised of vectors containing i.i.d. samples drawn
    from the Gamma distribution.
    :param sc: SparkContext used to create the RDD.
    :param shape: Shape (> 0) of the Gamma distribution
    :param scale: Scale (> 0) of the Gamma distribution
    :param numRows: Number of Vectors in the RDD.
    :param numCols: Number of elements in each Vector.
    :param numPartitions: Number of partitions in the RDD (default: `sc.defaultParallelism`).
    :param seed: Random seed (default: a random long integer).
    :return: RDD of Vector with vectors containing i.i.d. samples ~ Gamma(shape, scale).
    >>> import numpy as np
    >>> from math import sqrt
    >>> shape = 1.0
    >>> scale = 2.0
    >>> expMean = shape * scale
    >>> expStd = sqrt(shape * scale * scale)
    >>> mat = np.matrix(RandomRDDs.gammaVectorRDD(sc, shape, scale, 100, 100, seed=1).collect())
    >>> mat.shape
    (100, 100)
    >>> abs(mat.mean() - expMean) < 0.1
    True
    >>> abs(mat.std() - expStd) < 0.1
    True
    """
    return callMLlibFunc("gammaVectorRDD", sc._jsc, float(shape), float(scale),
                         numRows, numCols, numPartitions, seed)
|
python
|
def gammaVectorRDD(sc, shape, scale, numRows, numCols, numPartitions=None, seed=None):
    """
    Generates an RDD comprised of vectors containing i.i.d. samples drawn
    from the Gamma distribution.
    :param sc: SparkContext used to create the RDD.
    :param shape: Shape (> 0) of the Gamma distribution
    :param scale: Scale (> 0) of the Gamma distribution
    :param numRows: Number of Vectors in the RDD.
    :param numCols: Number of elements in each Vector.
    :param numPartitions: Number of partitions in the RDD (default: `sc.defaultParallelism`).
    :param seed: Random seed (default: a random long integer).
    :return: RDD of Vector with vectors containing i.i.d. samples ~ Gamma(shape, scale).
    >>> import numpy as np
    >>> from math import sqrt
    >>> shape = 1.0
    >>> scale = 2.0
    >>> expMean = shape * scale
    >>> expStd = sqrt(shape * scale * scale)
    >>> mat = np.matrix(RandomRDDs.gammaVectorRDD(sc, shape, scale, 100, 100, seed=1).collect())
    >>> mat.shape
    (100, 100)
    >>> abs(mat.mean() - expMean) < 0.1
    True
    >>> abs(mat.std() - expStd) < 0.1
    True
    """
    return callMLlibFunc("gammaVectorRDD", sc._jsc, float(shape), float(scale),
                         numRows, numCols, numPartitions, seed)
|
[
"def",
"gammaVectorRDD",
"(",
"sc",
",",
"shape",
",",
"scale",
",",
"numRows",
",",
"numCols",
",",
"numPartitions",
"=",
"None",
",",
"seed",
"=",
"None",
")",
":",
"return",
"callMLlibFunc",
"(",
"\"gammaVectorRDD\"",
",",
"sc",
".",
"_jsc",
",",
"float",
"(",
"shape",
")",
",",
"float",
"(",
"scale",
")",
",",
"numRows",
",",
"numCols",
",",
"numPartitions",
",",
"seed",
")"
] |
Generates an RDD comprised of vectors containing i.i.d. samples drawn
from the Gamma distribution.
:param sc: SparkContext used to create the RDD.
:param shape: Shape (> 0) of the Gamma distribution
:param scale: Scale (> 0) of the Gamma distribution
:param numRows: Number of Vectors in the RDD.
:param numCols: Number of elements in each Vector.
:param numPartitions: Number of partitions in the RDD (default: `sc.defaultParallelism`).
:param seed: Random seed (default: a random long integer).
:return: RDD of Vector with vectors containing i.i.d. samples ~ Gamma(shape, scale).
>>> import numpy as np
>>> from math import sqrt
>>> shape = 1.0
>>> scale = 2.0
>>> expMean = shape * scale
>>> expStd = sqrt(shape * scale * scale)
>>> mat = np.matrix(RandomRDDs.gammaVectorRDD(sc, shape, scale, 100, 100, seed=1).collect())
>>> mat.shape
(100, 100)
>>> abs(mat.mean() - expMean) < 0.1
True
>>> abs(mat.std() - expStd) < 0.1
True
|
[
"Generates",
"an",
"RDD",
"comprised",
"of",
"vectors",
"containing",
"i",
".",
"i",
".",
"d",
".",
"samples",
"drawn",
"from",
"the",
"Gamma",
"distribution",
"."
] |
618d6bff71073c8c93501ab7392c3cc579730f0b
|
https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/mllib/random.py#L379-L408
|
19,305
|
apache/spark
|
python/pyspark/sql/session.py
|
SparkSession.conf
|
def conf(self):
    """Runtime configuration interface for Spark.
    This is the interface through which the user can get and set all Spark and Hadoop
    configurations that are relevant to Spark SQL. When getting the value of a config,
    this defaults to the value set in the underlying :class:`SparkContext`, if any.
    """
    if not hasattr(self, "_conf"):
        self._conf = RuntimeConfig(self._jsparkSession.conf())
    return self._conf
|
python
|
def conf(self):
    """Runtime configuration interface for Spark.
    This is the interface through which the user can get and set all Spark and Hadoop
    configurations that are relevant to Spark SQL. When getting the value of a config,
    this defaults to the value set in the underlying :class:`SparkContext`, if any.
    """
    if not hasattr(self, "_conf"):
        self._conf = RuntimeConfig(self._jsparkSession.conf())
    return self._conf
|
[
"def",
"conf",
"(",
"self",
")",
":",
"if",
"not",
"hasattr",
"(",
"self",
",",
"\"_conf\"",
")",
":",
"self",
".",
"_conf",
"=",
"RuntimeConfig",
"(",
"self",
".",
"_jsparkSession",
".",
"conf",
"(",
")",
")",
"return",
"self",
".",
"_conf"
] |
Runtime configuration interface for Spark.
This is the interface through which the user can get and set all Spark and Hadoop
configurations that are relevant to Spark SQL. When getting the value of a config,
this defaults to the value set in the underlying :class:`SparkContext`, if any.
|
[
"Runtime",
"configuration",
"interface",
"for",
"Spark",
"."
] |
618d6bff71073c8c93501ab7392c3cc579730f0b
|
https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/sql/session.py#L298-L307
|
19,306
|
apache/spark
|
python/pyspark/sql/session.py
|
SparkSession.catalog
|
def catalog(self):
    """Interface through which the user may create, drop, alter or query underlying
    databases, tables, functions etc.
    :return: :class:`Catalog`
    """
    from pyspark.sql.catalog import Catalog
    if not hasattr(self, "_catalog"):
        self._catalog = Catalog(self)
    return self._catalog
|
python
|
def catalog(self):
    """Interface through which the user may create, drop, alter or query underlying
    databases, tables, functions etc.
    :return: :class:`Catalog`
    """
    from pyspark.sql.catalog import Catalog
    if not hasattr(self, "_catalog"):
        self._catalog = Catalog(self)
    return self._catalog
|
[
"def",
"catalog",
"(",
"self",
")",
":",
"from",
"pyspark",
".",
"sql",
".",
"catalog",
"import",
"Catalog",
"if",
"not",
"hasattr",
"(",
"self",
",",
"\"_catalog\"",
")",
":",
"self",
".",
"_catalog",
"=",
"Catalog",
"(",
"self",
")",
"return",
"self",
".",
"_catalog"
] |
Interface through which the user may create, drop, alter or query underlying
databases, tables, functions etc.
:return: :class:`Catalog`
|
[
"Interface",
"through",
"which",
"the",
"user",
"may",
"create",
"drop",
"alter",
"or",
"query",
"underlying",
"databases",
"tables",
"functions",
"etc",
"."
] |
618d6bff71073c8c93501ab7392c3cc579730f0b
|
https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/sql/session.py#L311-L320
|
19,307
|
apache/spark
|
python/pyspark/sql/session.py
|
SparkSession._inferSchemaFromList
|
def _inferSchemaFromList(self, data, names=None):
    """
    Infer schema from list of Row or tuple.
    :param data: list of Row or tuple
    :param names: list of column names
    :return: :class:`pyspark.sql.types.StructType`
    """
    if not data:
        raise ValueError("can not infer schema from empty dataset")
    first = data[0]
    if type(first) is dict:
        warnings.warn("inferring schema from dict is deprecated,"
                      "please use pyspark.sql.Row instead")
    schema = reduce(_merge_type, (_infer_schema(row, names) for row in data))
    if _has_nulltype(schema):
        raise ValueError("Some of types cannot be determined after inferring")
    return schema
|
python
|
def _inferSchemaFromList(self, data, names=None):
    """
    Infer schema from list of Row or tuple.
    :param data: list of Row or tuple
    :param names: list of column names
    :return: :class:`pyspark.sql.types.StructType`
    """
    if not data:
        raise ValueError("can not infer schema from empty dataset")
    first = data[0]
    if type(first) is dict:
        warnings.warn("inferring schema from dict is deprecated,"
                      "please use pyspark.sql.Row instead")
    schema = reduce(_merge_type, (_infer_schema(row, names) for row in data))
    if _has_nulltype(schema):
        raise ValueError("Some of types cannot be determined after inferring")
    return schema
|
[
"def",
"_inferSchemaFromList",
"(",
"self",
",",
"data",
",",
"names",
"=",
"None",
")",
":",
"if",
"not",
"data",
":",
"raise",
"ValueError",
"(",
"\"can not infer schema from empty dataset\"",
")",
"first",
"=",
"data",
"[",
"0",
"]",
"if",
"type",
"(",
"first",
")",
"is",
"dict",
":",
"warnings",
".",
"warn",
"(",
"\"inferring schema from dict is deprecated,\"",
"\"please use pyspark.sql.Row instead\"",
")",
"schema",
"=",
"reduce",
"(",
"_merge_type",
",",
"(",
"_infer_schema",
"(",
"row",
",",
"names",
")",
"for",
"row",
"in",
"data",
")",
")",
"if",
"_has_nulltype",
"(",
"schema",
")",
":",
"raise",
"ValueError",
"(",
"\"Some of types cannot be determined after inferring\"",
")",
"return",
"schema"
] |
Infer schema from list of Row or tuple.
:param data: list of Row or tuple
:param names: list of column names
:return: :class:`pyspark.sql.types.StructType`
|
[
"Infer",
"schema",
"from",
"list",
"of",
"Row",
"or",
"tuple",
"."
] |
618d6bff71073c8c93501ab7392c3cc579730f0b
|
https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/sql/session.py#L363-L380
|
19,308
|
apache/spark
|
python/pyspark/sql/session.py
|
SparkSession._inferSchema
|
def _inferSchema(self, rdd, samplingRatio=None, names=None):
    """
    Infer schema from an RDD of Row or tuple.
    :param rdd: an RDD of Row or tuple
    :param samplingRatio: sampling ratio, or no sampling (default)
    :return: :class:`pyspark.sql.types.StructType`
    """
    first = rdd.first()
    if not first:
        raise ValueError("The first row in RDD is empty, "
                         "can not infer schema")
    if type(first) is dict:
        warnings.warn("Using RDD of dict to inferSchema is deprecated. "
                      "Use pyspark.sql.Row instead")
    if samplingRatio is None:
        schema = _infer_schema(first, names=names)
        if _has_nulltype(schema):
            for row in rdd.take(100)[1:]:
                schema = _merge_type(schema, _infer_schema(row, names=names))
                if not _has_nulltype(schema):
                    break
            else:
                raise ValueError("Some of types cannot be determined by the "
                                 "first 100 rows, please try again with sampling")
    else:
        if samplingRatio < 0.99:
            rdd = rdd.sample(False, float(samplingRatio))
        schema = rdd.map(lambda row: _infer_schema(row, names)).reduce(_merge_type)
    return schema
|
python
|
def _inferSchema(self, rdd, samplingRatio=None, names=None):
    """
    Infer schema from an RDD of Row or tuple.
    :param rdd: an RDD of Row or tuple
    :param samplingRatio: sampling ratio, or no sampling (default)
    :return: :class:`pyspark.sql.types.StructType`
    """
    first = rdd.first()
    if not first:
        raise ValueError("The first row in RDD is empty, "
                         "can not infer schema")
    if type(first) is dict:
        warnings.warn("Using RDD of dict to inferSchema is deprecated. "
                      "Use pyspark.sql.Row instead")
    if samplingRatio is None:
        schema = _infer_schema(first, names=names)
        if _has_nulltype(schema):
            for row in rdd.take(100)[1:]:
                schema = _merge_type(schema, _infer_schema(row, names=names))
                if not _has_nulltype(schema):
                    break
            else:
                raise ValueError("Some of types cannot be determined by the "
                                 "first 100 rows, please try again with sampling")
    else:
        if samplingRatio < 0.99:
            rdd = rdd.sample(False, float(samplingRatio))
        schema = rdd.map(lambda row: _infer_schema(row, names)).reduce(_merge_type)
    return schema
|
[
"def",
"_inferSchema",
"(",
"self",
",",
"rdd",
",",
"samplingRatio",
"=",
"None",
",",
"names",
"=",
"None",
")",
":",
"first",
"=",
"rdd",
".",
"first",
"(",
")",
"if",
"not",
"first",
":",
"raise",
"ValueError",
"(",
"\"The first row in RDD is empty, \"",
"\"can not infer schema\"",
")",
"if",
"type",
"(",
"first",
")",
"is",
"dict",
":",
"warnings",
".",
"warn",
"(",
"\"Using RDD of dict to inferSchema is deprecated. \"",
"\"Use pyspark.sql.Row instead\"",
")",
"if",
"samplingRatio",
"is",
"None",
":",
"schema",
"=",
"_infer_schema",
"(",
"first",
",",
"names",
"=",
"names",
")",
"if",
"_has_nulltype",
"(",
"schema",
")",
":",
"for",
"row",
"in",
"rdd",
".",
"take",
"(",
"100",
")",
"[",
"1",
":",
"]",
":",
"schema",
"=",
"_merge_type",
"(",
"schema",
",",
"_infer_schema",
"(",
"row",
",",
"names",
"=",
"names",
")",
")",
"if",
"not",
"_has_nulltype",
"(",
"schema",
")",
":",
"break",
"else",
":",
"raise",
"ValueError",
"(",
"\"Some of types cannot be determined by the \"",
"\"first 100 rows, please try again with sampling\"",
")",
"else",
":",
"if",
"samplingRatio",
"<",
"0.99",
":",
"rdd",
"=",
"rdd",
".",
"sample",
"(",
"False",
",",
"float",
"(",
"samplingRatio",
")",
")",
"schema",
"=",
"rdd",
".",
"map",
"(",
"lambda",
"row",
":",
"_infer_schema",
"(",
"row",
",",
"names",
")",
")",
".",
"reduce",
"(",
"_merge_type",
")",
"return",
"schema"
] |
Infer schema from an RDD of Row or tuple.
:param rdd: an RDD of Row or tuple
:param samplingRatio: sampling ratio, or no sampling (default)
:return: :class:`pyspark.sql.types.StructType`
|
[
"Infer",
"schema",
"from",
"an",
"RDD",
"of",
"Row",
"or",
"tuple",
"."
] |
618d6bff71073c8c93501ab7392c3cc579730f0b
|
https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/sql/session.py#L382-L412
|
19,309
|
apache/spark
|
python/pyspark/sql/session.py
|
SparkSession._createFromRDD
|
def _createFromRDD(self, rdd, schema, samplingRatio):
    """
    Create an RDD for DataFrame from an existing RDD, returns the RDD and schema.
    """
    if schema is None or isinstance(schema, (list, tuple)):
        struct = self._inferSchema(rdd, samplingRatio, names=schema)
        converter = _create_converter(struct)
        rdd = rdd.map(converter)
        if isinstance(schema, (list, tuple)):
            for i, name in enumerate(schema):
                struct.fields[i].name = name
                struct.names[i] = name
        schema = struct
    elif not isinstance(schema, StructType):
        raise TypeError("schema should be StructType or list or None, but got: %s" % schema)
    # convert python objects to sql data
    rdd = rdd.map(schema.toInternal)
    return rdd, schema
|
python
|
def _createFromRDD(self, rdd, schema, samplingRatio):
    """
    Create an RDD for DataFrame from an existing RDD, returns the RDD and schema.
    """
    if schema is None or isinstance(schema, (list, tuple)):
        struct = self._inferSchema(rdd, samplingRatio, names=schema)
        converter = _create_converter(struct)
        rdd = rdd.map(converter)
        if isinstance(schema, (list, tuple)):
            for i, name in enumerate(schema):
                struct.fields[i].name = name
                struct.names[i] = name
        schema = struct
    elif not isinstance(schema, StructType):
        raise TypeError("schema should be StructType or list or None, but got: %s" % schema)
    # convert python objects to sql data
    rdd = rdd.map(schema.toInternal)
    return rdd, schema
|
[
"def",
"_createFromRDD",
"(",
"self",
",",
"rdd",
",",
"schema",
",",
"samplingRatio",
")",
":",
"if",
"schema",
"is",
"None",
"or",
"isinstance",
"(",
"schema",
",",
"(",
"list",
",",
"tuple",
")",
")",
":",
"struct",
"=",
"self",
".",
"_inferSchema",
"(",
"rdd",
",",
"samplingRatio",
",",
"names",
"=",
"schema",
")",
"converter",
"=",
"_create_converter",
"(",
"struct",
")",
"rdd",
"=",
"rdd",
".",
"map",
"(",
"converter",
")",
"if",
"isinstance",
"(",
"schema",
",",
"(",
"list",
",",
"tuple",
")",
")",
":",
"for",
"i",
",",
"name",
"in",
"enumerate",
"(",
"schema",
")",
":",
"struct",
".",
"fields",
"[",
"i",
"]",
".",
"name",
"=",
"name",
"struct",
".",
"names",
"[",
"i",
"]",
"=",
"name",
"schema",
"=",
"struct",
"elif",
"not",
"isinstance",
"(",
"schema",
",",
"StructType",
")",
":",
"raise",
"TypeError",
"(",
"\"schema should be StructType or list or None, but got: %s\"",
"%",
"schema",
")",
"# convert python objects to sql data",
"rdd",
"=",
"rdd",
".",
"map",
"(",
"schema",
".",
"toInternal",
")",
"return",
"rdd",
",",
"schema"
] |
Create an RDD for DataFrame from an existing RDD, returns the RDD and schema.
|
[
"Create",
"an",
"RDD",
"for",
"DataFrame",
"from",
"an",
"existing",
"RDD",
"returns",
"the",
"RDD",
"and",
"schema",
"."
] |
618d6bff71073c8c93501ab7392c3cc579730f0b
|
https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/sql/session.py#L414-L433
|
19,310
|
apache/spark
|
python/pyspark/sql/session.py
|
SparkSession._createFromLocal
|
def _createFromLocal(self, data, schema):
    """
    Create an RDD for DataFrame from a list or pandas.DataFrame, returns
    the RDD and schema.
    """
    # make sure data could consumed multiple times
    if not isinstance(data, list):
        data = list(data)
    if schema is None or isinstance(schema, (list, tuple)):
        struct = self._inferSchemaFromList(data, names=schema)
        converter = _create_converter(struct)
        data = map(converter, data)
        if isinstance(schema, (list, tuple)):
            for i, name in enumerate(schema):
                struct.fields[i].name = name
                struct.names[i] = name
        schema = struct
    elif not isinstance(schema, StructType):
        raise TypeError("schema should be StructType or list or None, but got: %s" % schema)
    # convert python objects to sql data
    data = [schema.toInternal(row) for row in data]
    return self._sc.parallelize(data), schema
|
python
|
def _createFromLocal(self, data, schema):
    """
    Create an RDD for DataFrame from a list or pandas.DataFrame, returns
    the RDD and schema.
    """
    # make sure data could consumed multiple times
    if not isinstance(data, list):
        data = list(data)
    if schema is None or isinstance(schema, (list, tuple)):
        struct = self._inferSchemaFromList(data, names=schema)
        converter = _create_converter(struct)
        data = map(converter, data)
        if isinstance(schema, (list, tuple)):
            for i, name in enumerate(schema):
                struct.fields[i].name = name
                struct.names[i] = name
        schema = struct
    elif not isinstance(schema, StructType):
        raise TypeError("schema should be StructType or list or None, but got: %s" % schema)
    # convert python objects to sql data
    data = [schema.toInternal(row) for row in data]
    return self._sc.parallelize(data), schema
|
[
"def",
"_createFromLocal",
"(",
"self",
",",
"data",
",",
"schema",
")",
":",
"# make sure data could consumed multiple times",
"if",
"not",
"isinstance",
"(",
"data",
",",
"list",
")",
":",
"data",
"=",
"list",
"(",
"data",
")",
"if",
"schema",
"is",
"None",
"or",
"isinstance",
"(",
"schema",
",",
"(",
"list",
",",
"tuple",
")",
")",
":",
"struct",
"=",
"self",
".",
"_inferSchemaFromList",
"(",
"data",
",",
"names",
"=",
"schema",
")",
"converter",
"=",
"_create_converter",
"(",
"struct",
")",
"data",
"=",
"map",
"(",
"converter",
",",
"data",
")",
"if",
"isinstance",
"(",
"schema",
",",
"(",
"list",
",",
"tuple",
")",
")",
":",
"for",
"i",
",",
"name",
"in",
"enumerate",
"(",
"schema",
")",
":",
"struct",
".",
"fields",
"[",
"i",
"]",
".",
"name",
"=",
"name",
"struct",
".",
"names",
"[",
"i",
"]",
"=",
"name",
"schema",
"=",
"struct",
"elif",
"not",
"isinstance",
"(",
"schema",
",",
"StructType",
")",
":",
"raise",
"TypeError",
"(",
"\"schema should be StructType or list or None, but got: %s\"",
"%",
"schema",
")",
"# convert python objects to sql data",
"data",
"=",
"[",
"schema",
".",
"toInternal",
"(",
"row",
")",
"for",
"row",
"in",
"data",
"]",
"return",
"self",
".",
"_sc",
".",
"parallelize",
"(",
"data",
")",
",",
"schema"
] |
Create an RDD for DataFrame from a list or pandas.DataFrame, returns
the RDD and schema.
|
[
"Create",
"an",
"RDD",
"for",
"DataFrame",
"from",
"a",
"list",
"or",
"pandas",
".",
"DataFrame",
"returns",
"the",
"RDD",
"and",
"schema",
"."
] |
618d6bff71073c8c93501ab7392c3cc579730f0b
|
https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/sql/session.py#L435-L459
|
19,311
|
apache/spark
|
python/pyspark/sql/session.py
|
SparkSession._create_from_pandas_with_arrow
|
def _create_from_pandas_with_arrow(self, pdf, schema, timezone):
    """
    Create a DataFrame from a given pandas.DataFrame by slicing it into partitions, converting
    to Arrow data, then sending to the JVM to parallelize. If a schema is passed in, the
    data types will be used to coerce the data in Pandas to Arrow conversion.
    """
    from pyspark.serializers import ArrowStreamPandasSerializer
    from pyspark.sql.types import from_arrow_type, to_arrow_type, TimestampType
    from pyspark.sql.utils import require_minimum_pandas_version, \
        require_minimum_pyarrow_version
    require_minimum_pandas_version()
    require_minimum_pyarrow_version()
    from pandas.api.types import is_datetime64_dtype, is_datetime64tz_dtype
    import pyarrow as pa
    # Create the Spark schema from list of names passed in with Arrow types
    if isinstance(schema, (list, tuple)):
        arrow_schema = pa.Schema.from_pandas(pdf, preserve_index=False)
        struct = StructType()
        for name, field in zip(schema, arrow_schema):
            struct.add(name, from_arrow_type(field.type), nullable=field.nullable)
        schema = struct
    # Determine arrow types to coerce data when creating batches
    if isinstance(schema, StructType):
        arrow_types = [to_arrow_type(f.dataType) for f in schema.fields]
    elif isinstance(schema, DataType):
        raise ValueError("Single data type %s is not supported with Arrow" % str(schema))
    else:
        # Any timestamps must be coerced to be compatible with Spark
        arrow_types = [to_arrow_type(TimestampType())
                       if is_datetime64_dtype(t) or is_datetime64tz_dtype(t) else None
                       for t in pdf.dtypes]
    # Slice the DataFrame to be batched
    step = -(-len(pdf) // self.sparkContext.defaultParallelism)  # round int up
    pdf_slices = (pdf[start:start + step] for start in xrange(0, len(pdf), step))
    # Create list of Arrow (columns, type) for serializer dump_stream
    arrow_data = [[(c, t) for (_, c), t in zip(pdf_slice.iteritems(), arrow_types)]
                  for pdf_slice in pdf_slices]
    jsqlContext = self._wrapped._jsqlContext
    safecheck = self._wrapped._conf.arrowSafeTypeConversion()
    col_by_name = True  # col by name only applies to StructType columns, can't happen here
    ser = ArrowStreamPandasSerializer(timezone, safecheck, col_by_name)
    def reader_func(temp_filename):
        return self._jvm.PythonSQLUtils.readArrowStreamFromFile(jsqlContext, temp_filename)
    def create_RDD_server():
        return self._jvm.ArrowRDDServer(jsqlContext)
    # Create Spark DataFrame from Arrow stream file, using one batch per partition
    jrdd = self._sc._serialize_to_jvm(arrow_data, ser, reader_func, create_RDD_server)
    jdf = self._jvm.PythonSQLUtils.toDataFrame(jrdd, schema.json(), jsqlContext)
    df = DataFrame(jdf, self._wrapped)
    df._schema = schema
    return df
|
python
|
def _create_from_pandas_with_arrow(self, pdf, schema, timezone):
    """
    Create a DataFrame from a given pandas.DataFrame by slicing it into partitions, converting
    to Arrow data, then sending to the JVM to parallelize. If a schema is passed in, the
    data types will be used to coerce the data in Pandas to Arrow conversion.
    """
    from pyspark.serializers import ArrowStreamPandasSerializer
    from pyspark.sql.types import from_arrow_type, to_arrow_type, TimestampType
    from pyspark.sql.utils import require_minimum_pandas_version, \
        require_minimum_pyarrow_version
    require_minimum_pandas_version()
    require_minimum_pyarrow_version()
    from pandas.api.types import is_datetime64_dtype, is_datetime64tz_dtype
    import pyarrow as pa
    # Create the Spark schema from list of names passed in with Arrow types
    if isinstance(schema, (list, tuple)):
        arrow_schema = pa.Schema.from_pandas(pdf, preserve_index=False)
        struct = StructType()
        for name, field in zip(schema, arrow_schema):
            struct.add(name, from_arrow_type(field.type), nullable=field.nullable)
        schema = struct
    # Determine arrow types to coerce data when creating batches
    if isinstance(schema, StructType):
        arrow_types = [to_arrow_type(f.dataType) for f in schema.fields]
    elif isinstance(schema, DataType):
        raise ValueError("Single data type %s is not supported with Arrow" % str(schema))
    else:
        # Any timestamps must be coerced to be compatible with Spark
        arrow_types = [to_arrow_type(TimestampType())
                       if is_datetime64_dtype(t) or is_datetime64tz_dtype(t) else None
                       for t in pdf.dtypes]
    # Slice the DataFrame to be batched
    step = -(-len(pdf) // self.sparkContext.defaultParallelism)  # round int up
    pdf_slices = (pdf[start:start + step] for start in xrange(0, len(pdf), step))
    # Create list of Arrow (columns, type) for serializer dump_stream
    arrow_data = [[(c, t) for (_, c), t in zip(pdf_slice.iteritems(), arrow_types)]
                  for pdf_slice in pdf_slices]
    jsqlContext = self._wrapped._jsqlContext
    safecheck = self._wrapped._conf.arrowSafeTypeConversion()
    col_by_name = True  # col by name only applies to StructType columns, can't happen here
    ser = ArrowStreamPandasSerializer(timezone, safecheck, col_by_name)
    def reader_func(temp_filename):
        return self._jvm.PythonSQLUtils.readArrowStreamFromFile(jsqlContext, temp_filename)
    def create_RDD_server():
        return self._jvm.ArrowRDDServer(jsqlContext)
    # Create Spark DataFrame from Arrow stream file, using one batch per partition
    jrdd = self._sc._serialize_to_jvm(arrow_data, ser, reader_func, create_RDD_server)
    jdf = self._jvm.PythonSQLUtils.toDataFrame(jrdd, schema.json(), jsqlContext)
    df = DataFrame(jdf, self._wrapped)
    df._schema = schema
    return df
|
[
"def",
"_create_from_pandas_with_arrow",
"(",
"self",
",",
"pdf",
",",
"schema",
",",
"timezone",
")",
":",
"from",
"pyspark",
".",
"serializers",
"import",
"ArrowStreamPandasSerializer",
"from",
"pyspark",
".",
"sql",
".",
"types",
"import",
"from_arrow_type",
",",
"to_arrow_type",
",",
"TimestampType",
"from",
"pyspark",
".",
"sql",
".",
"utils",
"import",
"require_minimum_pandas_version",
",",
"require_minimum_pyarrow_version",
"require_minimum_pandas_version",
"(",
")",
"require_minimum_pyarrow_version",
"(",
")",
"from",
"pandas",
".",
"api",
".",
"types",
"import",
"is_datetime64_dtype",
",",
"is_datetime64tz_dtype",
"import",
"pyarrow",
"as",
"pa",
"# Create the Spark schema from list of names passed in with Arrow types",
"if",
"isinstance",
"(",
"schema",
",",
"(",
"list",
",",
"tuple",
")",
")",
":",
"arrow_schema",
"=",
"pa",
".",
"Schema",
".",
"from_pandas",
"(",
"pdf",
",",
"preserve_index",
"=",
"False",
")",
"struct",
"=",
"StructType",
"(",
")",
"for",
"name",
",",
"field",
"in",
"zip",
"(",
"schema",
",",
"arrow_schema",
")",
":",
"struct",
".",
"add",
"(",
"name",
",",
"from_arrow_type",
"(",
"field",
".",
"type",
")",
",",
"nullable",
"=",
"field",
".",
"nullable",
")",
"schema",
"=",
"struct",
"# Determine arrow types to coerce data when creating batches",
"if",
"isinstance",
"(",
"schema",
",",
"StructType",
")",
":",
"arrow_types",
"=",
"[",
"to_arrow_type",
"(",
"f",
".",
"dataType",
")",
"for",
"f",
"in",
"schema",
".",
"fields",
"]",
"elif",
"isinstance",
"(",
"schema",
",",
"DataType",
")",
":",
"raise",
"ValueError",
"(",
"\"Single data type %s is not supported with Arrow\"",
"%",
"str",
"(",
"schema",
")",
")",
"else",
":",
"# Any timestamps must be coerced to be compatible with Spark",
"arrow_types",
"=",
"[",
"to_arrow_type",
"(",
"TimestampType",
"(",
")",
")",
"if",
"is_datetime64_dtype",
"(",
"t",
")",
"or",
"is_datetime64tz_dtype",
"(",
"t",
")",
"else",
"None",
"for",
"t",
"in",
"pdf",
".",
"dtypes",
"]",
"# Slice the DataFrame to be batched",
"step",
"=",
"-",
"(",
"-",
"len",
"(",
"pdf",
")",
"//",
"self",
".",
"sparkContext",
".",
"defaultParallelism",
")",
"# round int up",
"pdf_slices",
"=",
"(",
"pdf",
"[",
"start",
":",
"start",
"+",
"step",
"]",
"for",
"start",
"in",
"xrange",
"(",
"0",
",",
"len",
"(",
"pdf",
")",
",",
"step",
")",
")",
"# Create list of Arrow (columns, type) for serializer dump_stream",
"arrow_data",
"=",
"[",
"[",
"(",
"c",
",",
"t",
")",
"for",
"(",
"_",
",",
"c",
")",
",",
"t",
"in",
"zip",
"(",
"pdf_slice",
".",
"iteritems",
"(",
")",
",",
"arrow_types",
")",
"]",
"for",
"pdf_slice",
"in",
"pdf_slices",
"]",
"jsqlContext",
"=",
"self",
".",
"_wrapped",
".",
"_jsqlContext",
"safecheck",
"=",
"self",
".",
"_wrapped",
".",
"_conf",
".",
"arrowSafeTypeConversion",
"(",
")",
"col_by_name",
"=",
"True",
"# col by name only applies to StructType columns, can't happen here",
"ser",
"=",
"ArrowStreamPandasSerializer",
"(",
"timezone",
",",
"safecheck",
",",
"col_by_name",
")",
"def",
"reader_func",
"(",
"temp_filename",
")",
":",
"return",
"self",
".",
"_jvm",
".",
"PythonSQLUtils",
".",
"readArrowStreamFromFile",
"(",
"jsqlContext",
",",
"temp_filename",
")",
"def",
"create_RDD_server",
"(",
")",
":",
"return",
"self",
".",
"_jvm",
".",
"ArrowRDDServer",
"(",
"jsqlContext",
")",
"# Create Spark DataFrame from Arrow stream file, using one batch per partition",
"jrdd",
"=",
"self",
".",
"_sc",
".",
"_serialize_to_jvm",
"(",
"arrow_data",
",",
"ser",
",",
"reader_func",
",",
"create_RDD_server",
")",
"jdf",
"=",
"self",
".",
"_jvm",
".",
"PythonSQLUtils",
".",
"toDataFrame",
"(",
"jrdd",
",",
"schema",
".",
"json",
"(",
")",
",",
"jsqlContext",
")",
"df",
"=",
"DataFrame",
"(",
"jdf",
",",
"self",
".",
"_wrapped",
")",
"df",
".",
"_schema",
"=",
"schema",
"return",
"df"
] |
Create a DataFrame from a given pandas.DataFrame by slicing it into partitions, converting
to Arrow data, then sending to the JVM to parallelize. If a schema is passed in, the
data types will be used to coerce the data in Pandas to Arrow conversion.
|
[
"Create",
"a",
"DataFrame",
"from",
"a",
"given",
"pandas",
".",
"DataFrame",
"by",
"slicing",
"it",
"into",
"partitions",
"converting",
"to",
"Arrow",
"data",
"then",
"sending",
"to",
"the",
"JVM",
"to",
"parallelize",
".",
"If",
"a",
"schema",
"is",
"passed",
"in",
"the",
"data",
"types",
"will",
"be",
"used",
"to",
"coerce",
"the",
"data",
"in",
"Pandas",
"to",
"Arrow",
"conversion",
"."
] |
618d6bff71073c8c93501ab7392c3cc579730f0b
|
https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/sql/session.py#L527-L588
|
19,312
|
apache/spark
|
python/pyspark/sql/session.py
|
SparkSession._create_shell_session
|
def _create_shell_session():
    """
    Initialize a SparkSession for a pyspark shell session. This is called from shell.py
    to make error handling simpler without needing to declare local variables in that
    script, which would expose those to users.
    """
    import py4j
    from pyspark.conf import SparkConf
    from pyspark.context import SparkContext
    try:
        # Try to access HiveConf, it will raise exception if Hive is not added
        conf = SparkConf()
        if conf.get('spark.sql.catalogImplementation', 'hive').lower() == 'hive':
            SparkContext._jvm.org.apache.hadoop.hive.conf.HiveConf()
            return SparkSession.builder\
                .enableHiveSupport()\
                .getOrCreate()
        else:
            return SparkSession.builder.getOrCreate()
    except (py4j.protocol.Py4JError, TypeError):
        if conf.get('spark.sql.catalogImplementation', '').lower() == 'hive':
            warnings.warn("Fall back to non-hive support because failing to access HiveConf, "
                          "please make sure you build spark with hive")
        return SparkSession.builder.getOrCreate()
|
python
|
def _create_shell_session():
    """
    Initialize a SparkSession for a pyspark shell session. This is called from shell.py
    to make error handling simpler without needing to declare local variables in that
    script, which would expose those to users.
    """
    import py4j
    from pyspark.conf import SparkConf
    from pyspark.context import SparkContext
    try:
        # Try to access HiveConf, it will raise exception if Hive is not added
        conf = SparkConf()
        if conf.get('spark.sql.catalogImplementation', 'hive').lower() == 'hive':
            SparkContext._jvm.org.apache.hadoop.hive.conf.HiveConf()
            return SparkSession.builder\
                .enableHiveSupport()\
                .getOrCreate()
        else:
            return SparkSession.builder.getOrCreate()
    except (py4j.protocol.Py4JError, TypeError):
        if conf.get('spark.sql.catalogImplementation', '').lower() == 'hive':
            warnings.warn("Fall back to non-hive support because failing to access HiveConf, "
                          "please make sure you build spark with hive")
        return SparkSession.builder.getOrCreate()
|
[
"def",
"_create_shell_session",
"(",
")",
":",
"import",
"py4j",
"from",
"pyspark",
".",
"conf",
"import",
"SparkConf",
"from",
"pyspark",
".",
"context",
"import",
"SparkContext",
"try",
":",
"# Try to access HiveConf, it will raise exception if Hive is not added",
"conf",
"=",
"SparkConf",
"(",
")",
"if",
"conf",
".",
"get",
"(",
"'spark.sql.catalogImplementation'",
",",
"'hive'",
")",
".",
"lower",
"(",
")",
"==",
"'hive'",
":",
"SparkContext",
".",
"_jvm",
".",
"org",
".",
"apache",
".",
"hadoop",
".",
"hive",
".",
"conf",
".",
"HiveConf",
"(",
")",
"return",
"SparkSession",
".",
"builder",
".",
"enableHiveSupport",
"(",
")",
".",
"getOrCreate",
"(",
")",
"else",
":",
"return",
"SparkSession",
".",
"builder",
".",
"getOrCreate",
"(",
")",
"except",
"(",
"py4j",
".",
"protocol",
".",
"Py4JError",
",",
"TypeError",
")",
":",
"if",
"conf",
".",
"get",
"(",
"'spark.sql.catalogImplementation'",
",",
"''",
")",
".",
"lower",
"(",
")",
"==",
"'hive'",
":",
"warnings",
".",
"warn",
"(",
"\"Fall back to non-hive support because failing to access HiveConf, \"",
"\"please make sure you build spark with hive\"",
")",
"return",
"SparkSession",
".",
"builder",
".",
"getOrCreate",
"(",
")"
] |
Initialize a SparkSession for a pyspark shell session. This is called from shell.py
to make error handling simpler without needing to declare local variables in that
script, which would expose those to users.
|
[
"Initialize",
"a",
"SparkSession",
"for",
"a",
"pyspark",
"shell",
"session",
".",
"This",
"is",
"called",
"from",
"shell",
".",
"py",
"to",
"make",
"error",
"handling",
"simpler",
"without",
"needing",
"to",
"declare",
"local",
"variables",
"in",
"that",
"script",
"which",
"would",
"expose",
"those",
"to",
"users",
"."
] |
618d6bff71073c8c93501ab7392c3cc579730f0b
|
https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/sql/session.py#L591-L615
|
19,313
|
apache/spark
|
python/pyspark/serializers.py
|
_restore
|
def _restore(name, fields, value):
    """ Restore an object of namedtuple"""
    k = (name, fields)
    cls = __cls.get(k)
    if cls is None:
        cls = collections.namedtuple(name, fields)
        __cls[k] = cls
    return cls(*value)
|
python
|
def _restore(name, fields, value):
    """ Restore an object of namedtuple"""
    k = (name, fields)
    cls = __cls.get(k)
    if cls is None:
        cls = collections.namedtuple(name, fields)
        __cls[k] = cls
    return cls(*value)
|
[
"def",
"_restore",
"(",
"name",
",",
"fields",
",",
"value",
")",
":",
"k",
"=",
"(",
"name",
",",
"fields",
")",
"cls",
"=",
"__cls",
".",
"get",
"(",
"k",
")",
"if",
"cls",
"is",
"None",
":",
"cls",
"=",
"collections",
".",
"namedtuple",
"(",
"name",
",",
"fields",
")",
"__cls",
"[",
"k",
"]",
"=",
"cls",
"return",
"cls",
"(",
"*",
"value",
")"
] |
Restore an object of namedtuple
|
[
"Restore",
"an",
"object",
"of",
"namedtuple"
] |
618d6bff71073c8c93501ab7392c3cc579730f0b
|
https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/serializers.py#L578-L585
|
19,314
|
apache/spark
|
python/pyspark/serializers.py
|
_hack_namedtuple
|
def _hack_namedtuple(cls):
    """ Make class generated by namedtuple picklable """
    name = cls.__name__
    fields = cls._fields
    def __reduce__(self):
        return (_restore, (name, fields, tuple(self)))
    cls.__reduce__ = __reduce__
    cls._is_namedtuple_ = True
    return cls
|
python
|
def _hack_namedtuple(cls):
    """ Make class generated by namedtuple picklable """
    name = cls.__name__
    fields = cls._fields
    def __reduce__(self):
        return (_restore, (name, fields, tuple(self)))
    cls.__reduce__ = __reduce__
    cls._is_namedtuple_ = True
    return cls
|
[
"def",
"_hack_namedtuple",
"(",
"cls",
")",
":",
"name",
"=",
"cls",
".",
"__name__",
"fields",
"=",
"cls",
".",
"_fields",
"def",
"__reduce__",
"(",
"self",
")",
":",
"return",
"(",
"_restore",
",",
"(",
"name",
",",
"fields",
",",
"tuple",
"(",
"self",
")",
")",
")",
"cls",
".",
"__reduce__",
"=",
"__reduce__",
"cls",
".",
"_is_namedtuple_",
"=",
"True",
"return",
"cls"
] |
Make class generated by namedtuple picklable
|
[
"Make",
"class",
"generated",
"by",
"namedtuple",
"picklable"
] |
618d6bff71073c8c93501ab7392c3cc579730f0b
|
https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/serializers.py#L588-L597
|
19,315
|
apache/spark
|
python/pyspark/serializers.py
|
ArrowCollectSerializer.load_stream
|
def load_stream(self, stream):
    """
    Load a stream of un-ordered Arrow RecordBatches, where the last iteration yields
    a list of indices that can be used to put the RecordBatches in the correct order.
    """
    # load the batches
    for batch in self.serializer.load_stream(stream):
        yield batch
    # load the batch order indices
    num = read_int(stream)
    batch_order = []
    for i in xrange(num):
        index = read_int(stream)
        batch_order.append(index)
    yield batch_order
|
python
|
def load_stream(self, stream):
    """
    Load a stream of un-ordered Arrow RecordBatches, where the last iteration yields
    a list of indices that can be used to put the RecordBatches in the correct order.
    """
    # load the batches
    for batch in self.serializer.load_stream(stream):
        yield batch
    # load the batch order indices
    num = read_int(stream)
    batch_order = []
    for i in xrange(num):
        index = read_int(stream)
        batch_order.append(index)
    yield batch_order
|
[
"def",
"load_stream",
"(",
"self",
",",
"stream",
")",
":",
"# load the batches",
"for",
"batch",
"in",
"self",
".",
"serializer",
".",
"load_stream",
"(",
"stream",
")",
":",
"yield",
"batch",
"# load the batch order indices",
"num",
"=",
"read_int",
"(",
"stream",
")",
"batch_order",
"=",
"[",
"]",
"for",
"i",
"in",
"xrange",
"(",
"num",
")",
":",
"index",
"=",
"read_int",
"(",
"stream",
")",
"batch_order",
".",
"append",
"(",
"index",
")",
"yield",
"batch_order"
] |
Load a stream of un-ordered Arrow RecordBatches, where the last iteration yields
a list of indices that can be used to put the RecordBatches in the correct order.
|
[
"Load",
"a",
"stream",
"of",
"un",
"-",
"ordered",
"Arrow",
"RecordBatches",
"where",
"the",
"last",
"iteration",
"yields",
"a",
"list",
"of",
"indices",
"that",
"can",
"be",
"used",
"to",
"put",
"the",
"RecordBatches",
"in",
"the",
"correct",
"order",
"."
] |
618d6bff71073c8c93501ab7392c3cc579730f0b
|
https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/serializers.py#L200-L215
|
19,316
|
apache/spark
|
python/pyspark/serializers.py
|
ArrowStreamPandasSerializer._create_batch
|
def _create_batch(self, series):
    """
    Create an Arrow record batch from the given pandas.Series or list of Series,
    with optional type.
    :param series: A single pandas.Series, list of Series, or list of (series, arrow_type)
    :return: Arrow RecordBatch
    """
    import pandas as pd
    import pyarrow as pa
    from pyspark.sql.types import _check_series_convert_timestamps_internal
    # Make input conform to [(series1, type1), (series2, type2), ...]
    if not isinstance(series, (list, tuple)) or \
            (len(series) == 2 and isinstance(series[1], pa.DataType)):
        series = [series]
    series = ((s, None) if not isinstance(s, (list, tuple)) else s for s in series)
    def create_array(s, t):
        mask = s.isnull()
        # Ensure timestamp series are in expected form for Spark internal representation
        if t is not None and pa.types.is_timestamp(t):
            s = _check_series_convert_timestamps_internal(s.fillna(0), self._timezone)
            # TODO: need cast after Arrow conversion, ns values cause error with pandas 0.19.2
            return pa.Array.from_pandas(s, mask=mask).cast(t, safe=False)
        try:
            array = pa.Array.from_pandas(s, mask=mask, type=t, safe=self._safecheck)
        except pa.ArrowException as e:
            error_msg = "Exception thrown when converting pandas.Series (%s) to Arrow " + \
                        "Array (%s). It can be caused by overflows or other unsafe " + \
                        "conversions warned by Arrow. Arrow safe type check can be " + \
                        "disabled by using SQL config " + \
                        "`spark.sql.execution.pandas.arrowSafeTypeConversion`."
            raise RuntimeError(error_msg % (s.dtype, t), e)
        return array
    arrs = []
    for s, t in series:
        if t is not None and pa.types.is_struct(t):
            if not isinstance(s, pd.DataFrame):
                raise ValueError("A field of type StructType expects a pandas.DataFrame, "
                                 "but got: %s" % str(type(s)))
            # Input partition and result pandas.DataFrame empty, make empty Arrays with struct
            if len(s) == 0 and len(s.columns) == 0:
                arrs_names = [(pa.array([], type=field.type), field.name) for field in t]
            # Assign result columns by schema name if user labeled with strings
            elif self._assign_cols_by_name and any(isinstance(name, basestring)
                                                   for name in s.columns):
                arrs_names = [(create_array(s[field.name], field.type), field.name)
                              for field in t]
            # Assign result columns by position
            else:
                arrs_names = [(create_array(s[s.columns[i]], field.type), field.name)
                              for i, field in enumerate(t)]
            struct_arrs, struct_names = zip(*arrs_names)
            arrs.append(pa.StructArray.from_arrays(struct_arrs, struct_names))
        else:
            arrs.append(create_array(s, t))
    return pa.RecordBatch.from_arrays(arrs, ["_%d" % i for i in xrange(len(arrs))])
|
python
|
def _create_batch(self, series):
    """
    Create an Arrow record batch from the given pandas.Series or list of Series,
    with optional type.
    :param series: A single pandas.Series, list of Series, or list of (series, arrow_type)
    :return: Arrow RecordBatch
    """
    import pandas as pd
    import pyarrow as pa
    from pyspark.sql.types import _check_series_convert_timestamps_internal
    # Make input conform to [(series1, type1), (series2, type2), ...]
    if not isinstance(series, (list, tuple)) or \
            (len(series) == 2 and isinstance(series[1], pa.DataType)):
        series = [series]
    series = ((s, None) if not isinstance(s, (list, tuple)) else s for s in series)
    def create_array(s, t):
        mask = s.isnull()
        # Ensure timestamp series are in expected form for Spark internal representation
        if t is not None and pa.types.is_timestamp(t):
            s = _check_series_convert_timestamps_internal(s.fillna(0), self._timezone)
            # TODO: need cast after Arrow conversion, ns values cause error with pandas 0.19.2
            return pa.Array.from_pandas(s, mask=mask).cast(t, safe=False)
        try:
            array = pa.Array.from_pandas(s, mask=mask, type=t, safe=self._safecheck)
        except pa.ArrowException as e:
            error_msg = "Exception thrown when converting pandas.Series (%s) to Arrow " + \
                        "Array (%s). It can be caused by overflows or other unsafe " + \
                        "conversions warned by Arrow. Arrow safe type check can be " + \
                        "disabled by using SQL config " + \
                        "`spark.sql.execution.pandas.arrowSafeTypeConversion`."
            raise RuntimeError(error_msg % (s.dtype, t), e)
        return array
    arrs = []
    for s, t in series:
        if t is not None and pa.types.is_struct(t):
            if not isinstance(s, pd.DataFrame):
                raise ValueError("A field of type StructType expects a pandas.DataFrame, "
                                 "but got: %s" % str(type(s)))
            # Input partition and result pandas.DataFrame empty, make empty Arrays with struct
            if len(s) == 0 and len(s.columns) == 0:
                arrs_names = [(pa.array([], type=field.type), field.name) for field in t]
            # Assign result columns by schema name if user labeled with strings
            elif self._assign_cols_by_name and any(isinstance(name, basestring)
                                                   for name in s.columns):
                arrs_names = [(create_array(s[field.name], field.type), field.name)
                              for field in t]
            # Assign result columns by position
            else:
                arrs_names = [(create_array(s[s.columns[i]], field.type), field.name)
                              for i, field in enumerate(t)]
            struct_arrs, struct_names = zip(*arrs_names)
            arrs.append(pa.StructArray.from_arrays(struct_arrs, struct_names))
        else:
            arrs.append(create_array(s, t))
    return pa.RecordBatch.from_arrays(arrs, ["_%d" % i for i in xrange(len(arrs))])
|
[
"def",
"_create_batch",
"(",
"self",
",",
"series",
")",
":",
"import",
"pandas",
"as",
"pd",
"import",
"pyarrow",
"as",
"pa",
"from",
"pyspark",
".",
"sql",
".",
"types",
"import",
"_check_series_convert_timestamps_internal",
"# Make input conform to [(series1, type1), (series2, type2), ...]",
"if",
"not",
"isinstance",
"(",
"series",
",",
"(",
"list",
",",
"tuple",
")",
")",
"or",
"(",
"len",
"(",
"series",
")",
"==",
"2",
"and",
"isinstance",
"(",
"series",
"[",
"1",
"]",
",",
"pa",
".",
"DataType",
")",
")",
":",
"series",
"=",
"[",
"series",
"]",
"series",
"=",
"(",
"(",
"s",
",",
"None",
")",
"if",
"not",
"isinstance",
"(",
"s",
",",
"(",
"list",
",",
"tuple",
")",
")",
"else",
"s",
"for",
"s",
"in",
"series",
")",
"def",
"create_array",
"(",
"s",
",",
"t",
")",
":",
"mask",
"=",
"s",
".",
"isnull",
"(",
")",
"# Ensure timestamp series are in expected form for Spark internal representation",
"if",
"t",
"is",
"not",
"None",
"and",
"pa",
".",
"types",
".",
"is_timestamp",
"(",
"t",
")",
":",
"s",
"=",
"_check_series_convert_timestamps_internal",
"(",
"s",
".",
"fillna",
"(",
"0",
")",
",",
"self",
".",
"_timezone",
")",
"# TODO: need cast after Arrow conversion, ns values cause error with pandas 0.19.2",
"return",
"pa",
".",
"Array",
".",
"from_pandas",
"(",
"s",
",",
"mask",
"=",
"mask",
")",
".",
"cast",
"(",
"t",
",",
"safe",
"=",
"False",
")",
"try",
":",
"array",
"=",
"pa",
".",
"Array",
".",
"from_pandas",
"(",
"s",
",",
"mask",
"=",
"mask",
",",
"type",
"=",
"t",
",",
"safe",
"=",
"self",
".",
"_safecheck",
")",
"except",
"pa",
".",
"ArrowException",
"as",
"e",
":",
"error_msg",
"=",
"\"Exception thrown when converting pandas.Series (%s) to Arrow \"",
"+",
"\"Array (%s). It can be caused by overflows or other unsafe \"",
"+",
"\"conversions warned by Arrow. Arrow safe type check can be \"",
"+",
"\"disabled by using SQL config \"",
"+",
"\"`spark.sql.execution.pandas.arrowSafeTypeConversion`.\"",
"raise",
"RuntimeError",
"(",
"error_msg",
"%",
"(",
"s",
".",
"dtype",
",",
"t",
")",
",",
"e",
")",
"return",
"array",
"arrs",
"=",
"[",
"]",
"for",
"s",
",",
"t",
"in",
"series",
":",
"if",
"t",
"is",
"not",
"None",
"and",
"pa",
".",
"types",
".",
"is_struct",
"(",
"t",
")",
":",
"if",
"not",
"isinstance",
"(",
"s",
",",
"pd",
".",
"DataFrame",
")",
":",
"raise",
"ValueError",
"(",
"\"A field of type StructType expects a pandas.DataFrame, \"",
"\"but got: %s\"",
"%",
"str",
"(",
"type",
"(",
"s",
")",
")",
")",
"# Input partition and result pandas.DataFrame empty, make empty Arrays with struct",
"if",
"len",
"(",
"s",
")",
"==",
"0",
"and",
"len",
"(",
"s",
".",
"columns",
")",
"==",
"0",
":",
"arrs_names",
"=",
"[",
"(",
"pa",
".",
"array",
"(",
"[",
"]",
",",
"type",
"=",
"field",
".",
"type",
")",
",",
"field",
".",
"name",
")",
"for",
"field",
"in",
"t",
"]",
"# Assign result columns by schema name if user labeled with strings",
"elif",
"self",
".",
"_assign_cols_by_name",
"and",
"any",
"(",
"isinstance",
"(",
"name",
",",
"basestring",
")",
"for",
"name",
"in",
"s",
".",
"columns",
")",
":",
"arrs_names",
"=",
"[",
"(",
"create_array",
"(",
"s",
"[",
"field",
".",
"name",
"]",
",",
"field",
".",
"type",
")",
",",
"field",
".",
"name",
")",
"for",
"field",
"in",
"t",
"]",
"# Assign result columns by position",
"else",
":",
"arrs_names",
"=",
"[",
"(",
"create_array",
"(",
"s",
"[",
"s",
".",
"columns",
"[",
"i",
"]",
"]",
",",
"field",
".",
"type",
")",
",",
"field",
".",
"name",
")",
"for",
"i",
",",
"field",
"in",
"enumerate",
"(",
"t",
")",
"]",
"struct_arrs",
",",
"struct_names",
"=",
"zip",
"(",
"*",
"arrs_names",
")",
"arrs",
".",
"append",
"(",
"pa",
".",
"StructArray",
".",
"from_arrays",
"(",
"struct_arrs",
",",
"struct_names",
")",
")",
"else",
":",
"arrs",
".",
"append",
"(",
"create_array",
"(",
"s",
",",
"t",
")",
")",
"return",
"pa",
".",
"RecordBatch",
".",
"from_arrays",
"(",
"arrs",
",",
"[",
"\"_%d\"",
"%",
"i",
"for",
"i",
"in",
"xrange",
"(",
"len",
"(",
"arrs",
")",
")",
"]",
")"
] |
Create an Arrow record batch from the given pandas.Series or list of Series,
with optional type.
:param series: A single pandas.Series, list of Series, or list of (series, arrow_type)
:return: Arrow RecordBatch
|
[
"Create",
"an",
"Arrow",
"record",
"batch",
"from",
"the",
"given",
"pandas",
".",
"Series",
"or",
"list",
"of",
"Series",
"with",
"optional",
"type",
"."
] |
618d6bff71073c8c93501ab7392c3cc579730f0b
|
https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/serializers.py#L274-L335
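For orientation, the sketch below (plain pandas + pyarrow, outside of Spark's code path) shows the core of what _create_batch does for non-struct columns: nulls are passed to Arrow as an explicit mask, each column may carry an optional target Arrow type, and columns get positional names _0, _1, ... The helper name to_record_batch is illustrative only.

import pandas as pd
import pyarrow as pa

def to_record_batch(series, types=None):
    # Convert a list of pandas.Series (with optional pyarrow types) into one RecordBatch.
    types = types or [None] * len(series)
    arrays = []
    for s, t in zip(series, types):
        # Nulls are handed to Arrow explicitly as a mask, as create_array() does above.
        arrays.append(pa.Array.from_pandas(s, mask=s.isnull(), type=t))
    return pa.RecordBatch.from_arrays(arrays, ["_%d" % i for i in range(len(arrays))])

batch = to_record_batch([pd.Series([1, 2, None]), pd.Series(["a", "b", "c"])],
                        [pa.int64(), pa.string()])
print(batch.schema)    # _0: int64, _1: string
print(batch.num_rows)  # 3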
|
19,317
|
apache/spark
|
python/pyspark/serializers.py
|
ArrowStreamPandasSerializer.dump_stream
|
def dump_stream(self, iterator, stream):
"""
Make ArrowRecordBatches from Pandas Series and serialize. Input is a single series or
a list of series accompanied by an optional pyarrow type to coerce the data to.
"""
batches = (self._create_batch(series) for series in iterator)
super(ArrowStreamPandasSerializer, self).dump_stream(batches, stream)
|
python
|
def dump_stream(self, iterator, stream):
"""
Make ArrowRecordBatches from Pandas Series and serialize. Input is a single series or
a list of series accompanied by an optional pyarrow type to coerce the data to.
"""
batches = (self._create_batch(series) for series in iterator)
super(ArrowStreamPandasSerializer, self).dump_stream(batches, stream)
|
[
"def",
"dump_stream",
"(",
"self",
",",
"iterator",
",",
"stream",
")",
":",
"batches",
"=",
"(",
"self",
".",
"_create_batch",
"(",
"series",
")",
"for",
"series",
"in",
"iterator",
")",
"super",
"(",
"ArrowStreamPandasSerializer",
",",
"self",
")",
".",
"dump_stream",
"(",
"batches",
",",
"stream",
")"
] |
Make ArrowRecordBatches from Pandas Series and serialize. Input is a single series or
a list of series accompanied by an optional pyarrow type to coerce the data to.
|
[
"Make",
"ArrowRecordBatches",
"from",
"Pandas",
"Series",
"and",
"serialize",
".",
"Input",
"is",
"a",
"single",
"series",
"or",
"a",
"list",
"of",
"series",
"accompanied",
"by",
"an",
"optional",
"pyarrow",
"type",
"to",
"coerce",
"the",
"data",
"to",
"."
] |
618d6bff71073c8c93501ab7392c3cc579730f0b
|
https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/serializers.py#L337-L343
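Because the batches above come from a generator expression, each RecordBatch is built lazily, one per incoming group of Series, and then streamed out through Arrow's IPC stream writer. A minimal standalone sketch of that flow with plain pyarrow (iter_batches and stream_buf are illustrative names, not Spark's):

import pandas as pd
import pyarrow as pa

def iter_batches(series_iter):
    # One RecordBatch per element of the iterator, created only when requested.
    for s in series_iter:
        yield pa.RecordBatch.from_arrays([pa.Array.from_pandas(s)], ["_0"])

sink = pa.BufferOutputStream()
batches = iter_batches(pd.Series(chunk) for chunk in ([1, 2], [3, 4], [5]))
first = next(batches)                    # the schema is known once the first batch exists
writer = pa.RecordBatchStreamWriter(sink, first.schema)
writer.write_batch(first)
for b in batches:
    writer.write_batch(b)
writer.close()
stream_buf = sink.getvalue()   # Arrow buffer that dump_stream would push to the JVM side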
|
19,318
|
apache/spark
|
python/pyspark/serializers.py
|
ArrowStreamPandasSerializer.load_stream
|
def load_stream(self, stream):
"""
Deserialize ArrowRecordBatches to an Arrow table and return as a list of pandas.Series.
"""
batches = super(ArrowStreamPandasSerializer, self).load_stream(stream)
import pyarrow as pa
for batch in batches:
yield [self.arrow_to_pandas(c) for c in pa.Table.from_batches([batch]).itercolumns()]
|
python
|
def load_stream(self, stream):
"""
Deserialize ArrowRecordBatches to an Arrow table and return as a list of pandas.Series.
"""
batches = super(ArrowStreamPandasSerializer, self).load_stream(stream)
import pyarrow as pa
for batch in batches:
yield [self.arrow_to_pandas(c) for c in pa.Table.from_batches([batch]).itercolumns()]
|
[
"def",
"load_stream",
"(",
"self",
",",
"stream",
")",
":",
"batches",
"=",
"super",
"(",
"ArrowStreamPandasSerializer",
",",
"self",
")",
".",
"load_stream",
"(",
"stream",
")",
"import",
"pyarrow",
"as",
"pa",
"for",
"batch",
"in",
"batches",
":",
"yield",
"[",
"self",
".",
"arrow_to_pandas",
"(",
"c",
")",
"for",
"c",
"in",
"pa",
".",
"Table",
".",
"from_batches",
"(",
"[",
"batch",
"]",
")",
".",
"itercolumns",
"(",
")",
"]"
] |
Deserialize ArrowRecordBatches to an Arrow table and return as a list of pandas.Series.
|
[
"Deserialize",
"ArrowRecordBatches",
"to",
"an",
"Arrow",
"table",
"and",
"return",
"as",
"a",
"list",
"of",
"pandas",
".",
"Series",
"."
] |
618d6bff71073c8c93501ab7392c3cc579730f0b
|
https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/serializers.py#L345-L352
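The reading side mirrors that: each incoming RecordBatch is wrapped in a single-batch Table and its columns are converted back to pandas. A hedged, self-contained sketch with plain pyarrow (the real arrow_to_pandas also handles timestamp/timezone adjustments, omitted here):

import pandas as pd
import pyarrow as pa

# Build a tiny Arrow IPC stream to read back (stand-in for what arrives from the JVM).
sink = pa.BufferOutputStream()
batch = pa.RecordBatch.from_arrays([pa.Array.from_pandas(pd.Series([1, 2, 3]))], ["_0"])
writer = pa.RecordBatchStreamWriter(sink, batch.schema)
writer.write_batch(batch)
writer.close()

reader = pa.RecordBatchStreamReader(sink.getvalue())
for batch in reader:
    table = pa.Table.from_batches([batch])
    series_list = [col.to_pandas() for col in table.itercolumns()]
    print([s.tolist() for s in series_list])   # [[1, 2, 3]]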
|
19,319
|
apache/spark
|
python/pyspark/serializers.py
|
ArrowStreamPandasUDFSerializer.dump_stream
|
def dump_stream(self, iterator, stream):
"""
Override because Pandas UDFs require a START_ARROW_STREAM before the Arrow stream is sent.
This should be sent after creating the first record batch so in case of an error, it can
be sent back to the JVM before the Arrow stream starts.
"""
def init_stream_yield_batches():
should_write_start_length = True
for series in iterator:
batch = self._create_batch(series)
if should_write_start_length:
write_int(SpecialLengths.START_ARROW_STREAM, stream)
should_write_start_length = False
yield batch
return ArrowStreamSerializer.dump_stream(self, init_stream_yield_batches(), stream)
|
python
|
def dump_stream(self, iterator, stream):
"""
Override because Pandas UDFs require a START_ARROW_STREAM before the Arrow stream is sent.
This should be sent after creating the first record batch so in case of an error, it can
be sent back to the JVM before the Arrow stream starts.
"""
def init_stream_yield_batches():
should_write_start_length = True
for series in iterator:
batch = self._create_batch(series)
if should_write_start_length:
write_int(SpecialLengths.START_ARROW_STREAM, stream)
should_write_start_length = False
yield batch
return ArrowStreamSerializer.dump_stream(self, init_stream_yield_batches(), stream)
|
[
"def",
"dump_stream",
"(",
"self",
",",
"iterator",
",",
"stream",
")",
":",
"def",
"init_stream_yield_batches",
"(",
")",
":",
"should_write_start_length",
"=",
"True",
"for",
"series",
"in",
"iterator",
":",
"batch",
"=",
"self",
".",
"_create_batch",
"(",
"series",
")",
"if",
"should_write_start_length",
":",
"write_int",
"(",
"SpecialLengths",
".",
"START_ARROW_STREAM",
",",
"stream",
")",
"should_write_start_length",
"=",
"False",
"yield",
"batch",
"return",
"ArrowStreamSerializer",
".",
"dump_stream",
"(",
"self",
",",
"init_stream_yield_batches",
"(",
")",
",",
"stream",
")"
] |
Override because Pandas UDFs require a START_ARROW_STREAM before the Arrow stream is sent.
This should be sent after creating the first record batch so in case of an error, it can
be sent back to the JVM before the Arrow stream starts.
|
[
"Override",
"because",
"Pandas",
"UDFs",
"require",
"a",
"START_ARROW_STREAM",
"before",
"the",
"Arrow",
"stream",
"is",
"sent",
".",
"This",
"should",
"be",
"sent",
"after",
"creating",
"the",
"first",
"record",
"batch",
"so",
"in",
"case",
"of",
"an",
"error",
"it",
"can",
"be",
"sent",
"back",
"to",
"the",
"JVM",
"before",
"the",
"Arrow",
"stream",
"starts",
"."
] |
618d6bff71073c8c93501ab7392c3cc579730f0b
|
https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/serializers.py#L381-L397
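The trick here is more general than Arrow: emit a start marker only once the first batch has been produced successfully, so that an error raised while building it can still be reported over the ordinary error channel before the stream begins. A small standalone sketch of the pattern (the marker value and write_int below are stand-ins for pyspark's SpecialLengths.START_ARROW_STREAM and its helper of the same name):

import struct

START_MARKER = -6   # stand-in; the real constant is SpecialLengths.START_ARROW_STREAM

def write_int(value, stream):
    stream.write(struct.pack("!i", value))

def batches_with_start_marker(batch_iter, stream):
    wrote_marker = False
    for batch in batch_iter:          # building a batch may raise before anything was sent
        if not wrote_marker:
            write_int(START_MARKER, stream)
            wrote_marker = True
        yield batch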
|
19,320
|
apache/spark
|
python/pyspark/sql/streaming.py
|
DataStreamWriter.trigger
|
def trigger(self, processingTime=None, once=None, continuous=None):
"""Set the trigger for the stream query. If this is not set it will run the query as fast
as possible, which is equivalent to setting the trigger to ``processingTime='0 seconds'``.
.. note:: Evolving.
:param processingTime: a processing time interval as a string, e.g. '5 seconds', '1 minute'.
Set a trigger that runs a query periodically based on the processing
time. Only one trigger can be set.
:param once: if set to True, set a trigger that processes only one batch of data in a
streaming query then terminates the query. Only one trigger can be set.
:param continuous: a time interval as a string, e.g. '5 seconds', '1 minute'.
Set a trigger that runs a continuous query with a given checkpoint
interval. Only one trigger can be set.
>>> # trigger the query for execution every 5 seconds
>>> writer = sdf.writeStream.trigger(processingTime='5 seconds')
>>> # trigger the query for just one batch of data
>>> writer = sdf.writeStream.trigger(once=True)
>>> # trigger the query to run continuously with a 5-second checkpoint interval
>>> writer = sdf.writeStream.trigger(continuous='5 seconds')
"""
params = [processingTime, once, continuous]
if params.count(None) == 3:
raise ValueError('No trigger provided')
elif params.count(None) < 2:
raise ValueError('Multiple triggers not allowed.')
jTrigger = None
if processingTime is not None:
if type(processingTime) != str or len(processingTime.strip()) == 0:
raise ValueError('Value for processingTime must be a non empty string. Got: %s' %
processingTime)
interval = processingTime.strip()
jTrigger = self._spark._sc._jvm.org.apache.spark.sql.streaming.Trigger.ProcessingTime(
interval)
elif once is not None:
if once is not True:
raise ValueError('Value for once must be True. Got: %s' % once)
jTrigger = self._spark._sc._jvm.org.apache.spark.sql.streaming.Trigger.Once()
else:
if type(continuous) != str or len(continuous.strip()) == 0:
raise ValueError('Value for continuous must be a non empty string. Got: %s' %
continuous)
interval = continuous.strip()
jTrigger = self._spark._sc._jvm.org.apache.spark.sql.streaming.Trigger.Continuous(
interval)
self._jwrite = self._jwrite.trigger(jTrigger)
return self
|
python
|
def trigger(self, processingTime=None, once=None, continuous=None):
"""Set the trigger for the stream query. If this is not set it will run the query as fast
as possible, which is equivalent to setting the trigger to ``processingTime='0 seconds'``.
.. note:: Evolving.
:param processingTime: a processing time interval as a string, e.g. '5 seconds', '1 minute'.
Set a trigger that runs a query periodically based on the processing
time. Only one trigger can be set.
:param once: if set to True, set a trigger that processes only one batch of data in a
streaming query then terminates the query. Only one trigger can be set.
:param continuous: a time interval as a string, e.g. '5 seconds', '1 minute'.
Set a trigger that runs a continuous query with a given checkpoint
interval. Only one trigger can be set.
>>> # trigger the query for execution every 5 seconds
>>> writer = sdf.writeStream.trigger(processingTime='5 seconds')
>>> # trigger the query for just one batch of data
>>> writer = sdf.writeStream.trigger(once=True)
>>> # trigger the query to run continuously with a 5-second checkpoint interval
>>> writer = sdf.writeStream.trigger(continuous='5 seconds')
"""
params = [processingTime, once, continuous]
if params.count(None) == 3:
raise ValueError('No trigger provided')
elif params.count(None) < 2:
raise ValueError('Multiple triggers not allowed.')
jTrigger = None
if processingTime is not None:
if type(processingTime) != str or len(processingTime.strip()) == 0:
raise ValueError('Value for processingTime must be a non empty string. Got: %s' %
processingTime)
interval = processingTime.strip()
jTrigger = self._spark._sc._jvm.org.apache.spark.sql.streaming.Trigger.ProcessingTime(
interval)
elif once is not None:
if once is not True:
raise ValueError('Value for once must be True. Got: %s' % once)
jTrigger = self._spark._sc._jvm.org.apache.spark.sql.streaming.Trigger.Once()
else:
if type(continuous) != str or len(continuous.strip()) == 0:
raise ValueError('Value for continuous must be a non empty string. Got: %s' %
continuous)
interval = continuous.strip()
jTrigger = self._spark._sc._jvm.org.apache.spark.sql.streaming.Trigger.Continuous(
interval)
self._jwrite = self._jwrite.trigger(jTrigger)
return self
|
[
"def",
"trigger",
"(",
"self",
",",
"processingTime",
"=",
"None",
",",
"once",
"=",
"None",
",",
"continuous",
"=",
"None",
")",
":",
"params",
"=",
"[",
"processingTime",
",",
"once",
",",
"continuous",
"]",
"if",
"params",
".",
"count",
"(",
"None",
")",
"==",
"3",
":",
"raise",
"ValueError",
"(",
"'No trigger provided'",
")",
"elif",
"params",
".",
"count",
"(",
"None",
")",
"<",
"2",
":",
"raise",
"ValueError",
"(",
"'Multiple triggers not allowed.'",
")",
"jTrigger",
"=",
"None",
"if",
"processingTime",
"is",
"not",
"None",
":",
"if",
"type",
"(",
"processingTime",
")",
"!=",
"str",
"or",
"len",
"(",
"processingTime",
".",
"strip",
"(",
")",
")",
"==",
"0",
":",
"raise",
"ValueError",
"(",
"'Value for processingTime must be a non empty string. Got: %s'",
"%",
"processingTime",
")",
"interval",
"=",
"processingTime",
".",
"strip",
"(",
")",
"jTrigger",
"=",
"self",
".",
"_spark",
".",
"_sc",
".",
"_jvm",
".",
"org",
".",
"apache",
".",
"spark",
".",
"sql",
".",
"streaming",
".",
"Trigger",
".",
"ProcessingTime",
"(",
"interval",
")",
"elif",
"once",
"is",
"not",
"None",
":",
"if",
"once",
"is",
"not",
"True",
":",
"raise",
"ValueError",
"(",
"'Value for once must be True. Got: %s'",
"%",
"once",
")",
"jTrigger",
"=",
"self",
".",
"_spark",
".",
"_sc",
".",
"_jvm",
".",
"org",
".",
"apache",
".",
"spark",
".",
"sql",
".",
"streaming",
".",
"Trigger",
".",
"Once",
"(",
")",
"else",
":",
"if",
"type",
"(",
"continuous",
")",
"!=",
"str",
"or",
"len",
"(",
"continuous",
".",
"strip",
"(",
")",
")",
"==",
"0",
":",
"raise",
"ValueError",
"(",
"'Value for continuous must be a non empty string. Got: %s'",
"%",
"continuous",
")",
"interval",
"=",
"continuous",
".",
"strip",
"(",
")",
"jTrigger",
"=",
"self",
".",
"_spark",
".",
"_sc",
".",
"_jvm",
".",
"org",
".",
"apache",
".",
"spark",
".",
"sql",
".",
"streaming",
".",
"Trigger",
".",
"Continuous",
"(",
"interval",
")",
"self",
".",
"_jwrite",
"=",
"self",
".",
"_jwrite",
".",
"trigger",
"(",
"jTrigger",
")",
"return",
"self"
] |
Set the trigger for the stream query. If this is not set it will run the query as fast
as possible, which is equivalent to setting the trigger to ``processingTime='0 seconds'``.
.. note:: Evolving.
:param processingTime: a processing time interval as a string, e.g. '5 seconds', '1 minute'.
Set a trigger that runs a query periodically based on the processing
time. Only one trigger can be set.
:param once: if set to True, set a trigger that processes only one batch of data in a
streaming query then terminates the query. Only one trigger can be set.
:param continuous: a time interval as a string, e.g. '5 seconds', '1 minute'.
Set a trigger that runs a continuous query with a given checkpoint
interval. Only one trigger can be set.
>>> # trigger the query for execution every 5 seconds
>>> writer = sdf.writeStream.trigger(processingTime='5 seconds')
>>> # trigger the query for just one batch of data
>>> writer = sdf.writeStream.trigger(once=True)
>>> # trigger the query to run continuously with a 5-second checkpoint interval
>>> writer = sdf.writeStream.trigger(continuous='5 seconds')
|
[
"Set",
"the",
"trigger",
"for",
"the",
"stream",
"query",
".",
"If",
"this",
"is",
"not",
"set",
"it",
"will",
"run",
"the",
"query",
"as",
"fast",
"as",
"possible",
"which",
"is",
"equivalent",
"to",
"setting",
"the",
"trigger",
"to",
"processingTime",
"=",
"0",
"seconds",
"."
] |
618d6bff71073c8c93501ab7392c3cc579730f0b
|
https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/sql/streaming.py#L829-L878
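Assuming an active SparkSession named spark, a typical end-to-end use of exactly one trigger looks like the sketch below; rate and console are Spark's built-in testing source and sink.

sdf = spark.readStream.format("rate").load()

query = (sdf.writeStream
         .outputMode("append")
         .format("console")
         .trigger(processingTime="10 seconds")   # exactly one of the three trigger options
         .start())

# .trigger(once=True) or .trigger(continuous="1 second") are the alternatives;
# passing more than one of them raises ValueError('Multiple triggers not allowed.')
query.stop()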
|
19,321
|
apache/spark
|
python/pyspark/sql/streaming.py
|
DataStreamWriter.foreach
|
def foreach(self, f):
"""
Sets the output of the streaming query to be processed using the provided writer ``f``.
This is often used to write the output of a streaming query to arbitrary storage systems.
The processing logic can be specified in two ways.
#. A **function** that takes a row as input.
This is a simple way to express your processing logic. Note that this does
not allow you to deduplicate generated data when failures cause reprocessing of
some input data. That would require you to specify the processing logic in the next
way.
#. An **object** with a ``process`` method and optional ``open`` and ``close`` methods.
The object can have the following methods.
* ``open(partition_id, epoch_id)``: *Optional* method that initializes the processing
(for example, open a connection, start a transaction, etc). Additionally, you can
use the `partition_id` and `epoch_id` to deduplicate regenerated data
(discussed later).
* ``process(row)``: *Non-optional* method that processes each :class:`Row`.
* ``close(error)``: *Optional* method that finalizes and cleans up (for example,
close connection, commit transaction, etc.) after all rows have been processed.
The object will be used by Spark in the following way.
* A single copy of this object is responsible for all the data generated by a
single task in a query. In other words, one instance is responsible for
processing one partition of the data generated in a distributed manner.
* This object must be serializable because each task will get a fresh
serialized-deserialized copy of the provided object. Hence, it is strongly
recommended that any initialization for writing data (e.g. opening a
connection or starting a transaction) is done after the `open(...)`
method has been called, which signifies that the task is ready to generate data.
* The lifecycle of the methods is as follows.
For each partition with ``partition_id``:
... For each batch/epoch of streaming data with ``epoch_id``:
....... Method ``open(partitionId, epochId)`` is called.
....... If ``open(...)`` returns true, for each row in the partition and
batch/epoch, method ``process(row)`` is called.
....... Method ``close(errorOrNull)`` is called with error (if any) seen while
processing rows.
Important points to note:
* The `partitionId` and `epochId` can be used to deduplicate generated data when
failures cause reprocessing of some input data. This depends on the execution
mode of the query. If the streaming query is being executed in the micro-batch
mode, then every partition represented by a unique tuple (partition_id, epoch_id)
is guaranteed to have the same data. Hence, (partition_id, epoch_id) can be used
to deduplicate and/or transactionally commit data and achieve exactly-once
guarantees. However, if the streaming query is being executed in the continuous
mode, then this guarantee does not hold and therefore should not be used for
deduplication.
* The ``close()`` method (if it exists) will be called if the `open()` method exists and
returns successfully (irrespective of the return value), except if the Python process
crashes in the middle.
.. note:: Evolving.
>>> # Print every row using a function
>>> def print_row(row):
... print(row)
...
>>> writer = sdf.writeStream.foreach(print_row)
>>> # Print every row using an object with a process() method
>>> class RowPrinter:
... def open(self, partition_id, epoch_id):
... print("Opened %d, %d" % (partition_id, epoch_id))
... return True
... def process(self, row):
... print(row)
... def close(self, error):
... print("Closed with error: %s" % str(error))
...
>>> writer = sdf.writeStream.foreach(RowPrinter())
"""
from pyspark.rdd import _wrap_function
from pyspark.serializers import PickleSerializer, AutoBatchedSerializer
from pyspark.taskcontext import TaskContext
if callable(f):
# The provided object is a callable function that is supposed to be called on each row.
# Construct a function that takes an iterator and calls the provided function on each
# row.
def func_without_process(_, iterator):
for x in iterator:
f(x)
return iter([])
func = func_without_process
else:
# The provided object is not a callable function. Then it is expected to have a
# 'process(row)' method, and optional 'open(partition_id, epoch_id)' and
# 'close(error)' methods.
if not hasattr(f, 'process'):
raise Exception("Provided object does not have a 'process' method")
if not callable(getattr(f, 'process')):
raise Exception("Attribute 'process' in provided object is not callable")
def doesMethodExist(method_name):
exists = hasattr(f, method_name)
if exists and not callable(getattr(f, method_name)):
raise Exception(
"Attribute '%s' in provided object is not callable" % method_name)
return exists
open_exists = doesMethodExist('open')
close_exists = doesMethodExist('close')
def func_with_open_process_close(partition_id, iterator):
epoch_id = TaskContext.get().getLocalProperty('streaming.sql.batchId')
if epoch_id:
epoch_id = int(epoch_id)
else:
raise Exception("Could not get batch id from TaskContext")
# Check if the data should be processed
should_process = True
if open_exists:
should_process = f.open(partition_id, epoch_id)
error = None
try:
if should_process:
for x in iterator:
f.process(x)
except Exception as ex:
error = ex
finally:
if close_exists:
f.close(error)
if error:
raise error
return iter([])
func = func_with_open_process_close
serializer = AutoBatchedSerializer(PickleSerializer())
wrapped_func = _wrap_function(self._spark._sc, func, serializer, serializer)
jForeachWriter = \
self._spark._sc._jvm.org.apache.spark.sql.execution.python.PythonForeachWriter(
wrapped_func, self._df._jdf.schema())
self._jwrite.foreach(jForeachWriter)
return self
|
python
|
def foreach(self, f):
"""
Sets the output of the streaming query to be processed using the provided writer ``f``.
This is often used to write the output of a streaming query to arbitrary storage systems.
The processing logic can be specified in two ways.
#. A **function** that takes a row as input.
This is a simple way to express your processing logic. Note that this does
not allow you to deduplicate generated data when failures cause reprocessing of
some input data. That would require you to specify the processing logic in the next
way.
#. An **object** with a ``process`` method and optional ``open`` and ``close`` methods.
The object can have the following methods.
* ``open(partition_id, epoch_id)``: *Optional* method that initializes the processing
(for example, open a connection, start a transaction, etc). Additionally, you can
use the `partition_id` and `epoch_id` to deduplicate regenerated data
(discussed later).
* ``process(row)``: *Non-optional* method that processes each :class:`Row`.
* ``close(error)``: *Optional* method that finalizes and cleans up (for example,
close connection, commit transaction, etc.) after all rows have been processed.
The object will be used by Spark in the following way.
* A single copy of this object is responsible for all the data generated by a
single task in a query. In other words, one instance is responsible for
processing one partition of the data generated in a distributed manner.
* This object must be serializable because each task will get a fresh
serialized-deserialized copy of the provided object. Hence, it is strongly
recommended that any initialization for writing data (e.g. opening a
connection or starting a transaction) is done after the `open(...)`
method has been called, which signifies that the task is ready to generate data.
* The lifecycle of the methods is as follows.
For each partition with ``partition_id``:
... For each batch/epoch of streaming data with ``epoch_id``:
....... Method ``open(partitionId, epochId)`` is called.
....... If ``open(...)`` returns true, for each row in the partition and
batch/epoch, method ``process(row)`` is called.
....... Method ``close(errorOrNull)`` is called with error (if any) seen while
processing rows.
Important points to note:
* The `partitionId` and `epochId` can be used to deduplicate generated data when
failures cause reprocessing of some input data. This depends on the execution
mode of the query. If the streaming query is being executed in the micro-batch
mode, then every partition represented by a unique tuple (partition_id, epoch_id)
is guaranteed to have the same data. Hence, (partition_id, epoch_id) can be used
to deduplicate and/or transactionally commit data and achieve exactly-once
guarantees. However, if the streaming query is being executed in the continuous
mode, then this guarantee does not hold and therefore should not be used for
deduplication.
* The ``close()`` method (if it exists) will be called if the `open()` method exists and
returns successfully (irrespective of the return value), except if the Python process
crashes in the middle.
.. note:: Evolving.
>>> # Print every row using a function
>>> def print_row(row):
... print(row)
...
>>> writer = sdf.writeStream.foreach(print_row)
>>> # Print every row using an object with a process() method
>>> class RowPrinter:
... def open(self, partition_id, epoch_id):
... print("Opened %d, %d" % (partition_id, epoch_id))
... return True
... def process(self, row):
... print(row)
... def close(self, error):
... print("Closed with error: %s" % str(error))
...
>>> writer = sdf.writeStream.foreach(RowPrinter())
"""
from pyspark.rdd import _wrap_function
from pyspark.serializers import PickleSerializer, AutoBatchedSerializer
from pyspark.taskcontext import TaskContext
if callable(f):
# The provided object is a callable function that is supposed to be called on each row.
# Construct a function that takes an iterator and calls the provided function on each
# row.
def func_without_process(_, iterator):
for x in iterator:
f(x)
return iter([])
func = func_without_process
else:
# The provided object is not a callable function. Then it is expected to have a
# 'process(row)' method, and optional 'open(partition_id, epoch_id)' and
# 'close(error)' methods.
if not hasattr(f, 'process'):
raise Exception("Provided object does not have a 'process' method")
if not callable(getattr(f, 'process')):
raise Exception("Attribute 'process' in provided object is not callable")
def doesMethodExist(method_name):
exists = hasattr(f, method_name)
if exists and not callable(getattr(f, method_name)):
raise Exception(
"Attribute '%s' in provided object is not callable" % method_name)
return exists
open_exists = doesMethodExist('open')
close_exists = doesMethodExist('close')
def func_with_open_process_close(partition_id, iterator):
epoch_id = TaskContext.get().getLocalProperty('streaming.sql.batchId')
if epoch_id:
epoch_id = int(epoch_id)
else:
raise Exception("Could not get batch id from TaskContext")
# Check if the data should be processed
should_process = True
if open_exists:
should_process = f.open(partition_id, epoch_id)
error = None
try:
if should_process:
for x in iterator:
f.process(x)
except Exception as ex:
error = ex
finally:
if close_exists:
f.close(error)
if error:
raise error
return iter([])
func = func_with_open_process_close
serializer = AutoBatchedSerializer(PickleSerializer())
wrapped_func = _wrap_function(self._spark._sc, func, serializer, serializer)
jForeachWriter = \
self._spark._sc._jvm.org.apache.spark.sql.execution.python.PythonForeachWriter(
wrapped_func, self._df._jdf.schema())
self._jwrite.foreach(jForeachWriter)
return self
|
[
"def",
"foreach",
"(",
"self",
",",
"f",
")",
":",
"from",
"pyspark",
".",
"rdd",
"import",
"_wrap_function",
"from",
"pyspark",
".",
"serializers",
"import",
"PickleSerializer",
",",
"AutoBatchedSerializer",
"from",
"pyspark",
".",
"taskcontext",
"import",
"TaskContext",
"if",
"callable",
"(",
"f",
")",
":",
"# The provided object is a callable function that is supposed to be called on each row.",
"# Construct a function that takes an iterator and calls the provided function on each",
"# row.",
"def",
"func_without_process",
"(",
"_",
",",
"iterator",
")",
":",
"for",
"x",
"in",
"iterator",
":",
"f",
"(",
"x",
")",
"return",
"iter",
"(",
"[",
"]",
")",
"func",
"=",
"func_without_process",
"else",
":",
"# The provided object is not a callable function. Then it is expected to have a",
"# 'process(row)' method, and optional 'open(partition_id, epoch_id)' and",
"# 'close(error)' methods.",
"if",
"not",
"hasattr",
"(",
"f",
",",
"'process'",
")",
":",
"raise",
"Exception",
"(",
"\"Provided object does not have a 'process' method\"",
")",
"if",
"not",
"callable",
"(",
"getattr",
"(",
"f",
",",
"'process'",
")",
")",
":",
"raise",
"Exception",
"(",
"\"Attribute 'process' in provided object is not callable\"",
")",
"def",
"doesMethodExist",
"(",
"method_name",
")",
":",
"exists",
"=",
"hasattr",
"(",
"f",
",",
"method_name",
")",
"if",
"exists",
"and",
"not",
"callable",
"(",
"getattr",
"(",
"f",
",",
"method_name",
")",
")",
":",
"raise",
"Exception",
"(",
"\"Attribute '%s' in provided object is not callable\"",
"%",
"method_name",
")",
"return",
"exists",
"open_exists",
"=",
"doesMethodExist",
"(",
"'open'",
")",
"close_exists",
"=",
"doesMethodExist",
"(",
"'close'",
")",
"def",
"func_with_open_process_close",
"(",
"partition_id",
",",
"iterator",
")",
":",
"epoch_id",
"=",
"TaskContext",
".",
"get",
"(",
")",
".",
"getLocalProperty",
"(",
"'streaming.sql.batchId'",
")",
"if",
"epoch_id",
":",
"epoch_id",
"=",
"int",
"(",
"epoch_id",
")",
"else",
":",
"raise",
"Exception",
"(",
"\"Could not get batch id from TaskContext\"",
")",
"# Check if the data should be processed",
"should_process",
"=",
"True",
"if",
"open_exists",
":",
"should_process",
"=",
"f",
".",
"open",
"(",
"partition_id",
",",
"epoch_id",
")",
"error",
"=",
"None",
"try",
":",
"if",
"should_process",
":",
"for",
"x",
"in",
"iterator",
":",
"f",
".",
"process",
"(",
"x",
")",
"except",
"Exception",
"as",
"ex",
":",
"error",
"=",
"ex",
"finally",
":",
"if",
"close_exists",
":",
"f",
".",
"close",
"(",
"error",
")",
"if",
"error",
":",
"raise",
"error",
"return",
"iter",
"(",
"[",
"]",
")",
"func",
"=",
"func_with_open_process_close",
"serializer",
"=",
"AutoBatchedSerializer",
"(",
"PickleSerializer",
"(",
")",
")",
"wrapped_func",
"=",
"_wrap_function",
"(",
"self",
".",
"_spark",
".",
"_sc",
",",
"func",
",",
"serializer",
",",
"serializer",
")",
"jForeachWriter",
"=",
"self",
".",
"_spark",
".",
"_sc",
".",
"_jvm",
".",
"org",
".",
"apache",
".",
"spark",
".",
"sql",
".",
"execution",
".",
"python",
".",
"PythonForeachWriter",
"(",
"wrapped_func",
",",
"self",
".",
"_df",
".",
"_jdf",
".",
"schema",
"(",
")",
")",
"self",
".",
"_jwrite",
".",
"foreach",
"(",
"jForeachWriter",
")",
"return",
"self"
] |
Sets the output of the streaming query to be processed using the provided writer ``f``.
This is often used to write the output of a streaming query to arbitrary storage systems.
The processing logic can be specified in two ways.
#. A **function** that takes a row as input.
This is a simple way to express your processing logic. Note that this does
not allow you to deduplicate generated data when failures cause reprocessing of
some input data. That would require you to specify the processing logic in the next
way.
#. An **object** with a ``process`` method and optional ``open`` and ``close`` methods.
The object can have the following methods.
* ``open(partition_id, epoch_id)``: *Optional* method that initializes the processing
(for example, open a connection, start a transaction, etc). Additionally, you can
use the `partition_id` and `epoch_id` to deduplicate regenerated data
(discussed later).
* ``process(row)``: *Non-optional* method that processes each :class:`Row`.
* ``close(error)``: *Optional* method that finalizes and cleans up (for example,
close connection, commit transaction, etc.) after all rows have been processed.
The object will be used by Spark in the following way.
* A single copy of this object is responsible for all the data generated by a
single task in a query. In other words, one instance is responsible for
processing one partition of the data generated in a distributed manner.
* This object must be serializable because each task will get a fresh
serialized-deserialized copy of the provided object. Hence, it is strongly
recommended that any initialization for writing data (e.g. opening a
connection or starting a transaction) is done after the `open(...)`
method has been called, which signifies that the task is ready to generate data.
* The lifecycle of the methods is as follows.
For each partition with ``partition_id``:
... For each batch/epoch of streaming data with ``epoch_id``:
....... Method ``open(partitionId, epochId)`` is called.
....... If ``open(...)`` returns true, for each row in the partition and
batch/epoch, method ``process(row)`` is called.
....... Method ``close(errorOrNull)`` is called with error (if any) seen while
processing rows.
Important points to note:
* The `partitionId` and `epochId` can be used to deduplicate generated data when
failures cause reprocessing of some input data. This depends on the execution
mode of the query. If the streaming query is being executed in the micro-batch
mode, then every partition represented by a unique tuple (partition_id, epoch_id)
is guaranteed to have the same data. Hence, (partition_id, epoch_id) can be used
to deduplicate and/or transactionally commit data and achieve exactly-once
guarantees. However, if the streaming query is being executed in the continuous
mode, then this guarantee does not hold and therefore should not be used for
deduplication.
* The ``close()`` method (if it exists) will be called if the `open()` method exists and
returns successfully (irrespective of the return value), except if the Python process
crashes in the middle.
.. note:: Evolving.
>>> # Print every row using a function
>>> def print_row(row):
... print(row)
...
>>> writer = sdf.writeStream.foreach(print_row)
>>> # Print every row using an object with a process() method
>>> class RowPrinter:
... def open(self, partition_id, epoch_id):
... print("Opened %d, %d" % (partition_id, epoch_id))
... return True
... def process(self, row):
... print(row)
... def close(self, error):
... print("Closed with error: %s" % str(error))
...
>>> writer = sdf.writeStream.foreach(RowPrinter())
|
[
"Sets",
"the",
"output",
"of",
"the",
"streaming",
"query",
"to",
"be",
"processed",
"using",
"the",
"provided",
"writer",
"f",
".",
"This",
"is",
"often",
"used",
"to",
"write",
"the",
"output",
"of",
"a",
"streaming",
"query",
"to",
"arbitrary",
"storage",
"systems",
".",
"The",
"processing",
"logic",
"can",
"be",
"specified",
"in",
"two",
"ways",
"."
] |
618d6bff71073c8c93501ab7392c3cc579730f0b
|
https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/sql/streaming.py#L881-L1040
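Building on the lifecycle described above, the sketch below outlines a writer object that uses (partition_id, epoch_id) to skip epochs it has already committed in micro-batch mode; _already_committed and _commit are hypothetical placeholders for whatever external store the sink talks to.

class IdempotentRowWriter(object):
    def open(self, partition_id, epoch_id):
        self._key = (partition_id, epoch_id)
        self._buffer = []
        # Placeholder: consult the sink's commit log for this key.
        self._fresh = not self._already_committed(self._key)
        return self._fresh            # False -> Spark skips process() for this epoch

    def process(self, row):
        self._buffer.append(row)

    def close(self, error):
        # Called even when open() returned False, as long as open() returned normally.
        if error is None and self._fresh:
            self._commit(self._key, self._buffer)   # placeholder: transactional write

    # Hypothetical helpers backed by the external store.
    def _already_committed(self, key):
        return False

    def _commit(self, key, rows):
        pass

# writer = sdf.writeStream.foreach(IdempotentRowWriter())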
|
19,322
|
apache/spark
|
python/pyspark/cloudpickle.py
|
dumps
|
def dumps(obj, protocol=None):
"""Serialize obj as a string of bytes allocated in memory
protocol defaults to cloudpickle.DEFAULT_PROTOCOL which is an alias to
pickle.HIGHEST_PROTOCOL. This setting favors maximum communication speed
between processes running the same Python version.
Set protocol=pickle.DEFAULT_PROTOCOL instead if you need to ensure
compatibility with older versions of Python.
"""
file = StringIO()
try:
cp = CloudPickler(file, protocol=protocol)
cp.dump(obj)
return file.getvalue()
finally:
file.close()
|
python
|
def dumps(obj, protocol=None):
"""Serialize obj as a string of bytes allocated in memory
protocol defaults to cloudpickle.DEFAULT_PROTOCOL which is an alias to
pickle.HIGHEST_PROTOCOL. This setting favors maximum communication speed
between processes running the same Python version.
Set protocol=pickle.DEFAULT_PROTOCOL instead if you need to ensure
compatibility with older versions of Python.
"""
file = StringIO()
try:
cp = CloudPickler(file, protocol=protocol)
cp.dump(obj)
return file.getvalue()
finally:
file.close()
|
[
"def",
"dumps",
"(",
"obj",
",",
"protocol",
"=",
"None",
")",
":",
"file",
"=",
"StringIO",
"(",
")",
"try",
":",
"cp",
"=",
"CloudPickler",
"(",
"file",
",",
"protocol",
"=",
"protocol",
")",
"cp",
".",
"dump",
"(",
"obj",
")",
"return",
"file",
".",
"getvalue",
"(",
")",
"finally",
":",
"file",
".",
"close",
"(",
")"
] |
Serialize obj as a string of bytes allocated in memory
protocol defaults to cloudpickle.DEFAULT_PROTOCOL which is an alias to
pickle.HIGHEST_PROTOCOL. This setting favors maximum communication speed
between processes running the same Python version.
Set protocol=pickle.DEFAULT_PROTOCOL instead if you need to ensure
compatibility with older versions of Python.
|
[
"Serialize",
"obj",
"as",
"a",
"string",
"of",
"bytes",
"allocated",
"in",
"memory"
] |
618d6bff71073c8c93501ab7392c3cc579730f0b
|
https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/cloudpickle.py#L939-L955
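A quick round trip, assuming pyspark's bundled copy is importable as pyspark.cloudpickle; note that the payload produced by dumps can be read back with the standard pickle module.

import pickle
from pyspark import cloudpickle

def make_adder(n):
    def add(x):
        return x + n          # closes over n
    return add

payload = cloudpickle.dumps(make_adder(5))   # serializes the code object and closure cells
restored = pickle.loads(payload)
print(restored(2))                           # 7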
|
19,323
|
apache/spark
|
python/pyspark/cloudpickle.py
|
_fill_function
|
def _fill_function(*args):
"""Fills in the rest of function data into the skeleton function object
The skeleton itself is created by _make_skel_func().
"""
if len(args) == 2:
func = args[0]
state = args[1]
elif len(args) == 5:
# Backwards compat for cloudpickle v0.4.0, after which the `module`
# argument was introduced
func = args[0]
keys = ['globals', 'defaults', 'dict', 'closure_values']
state = dict(zip(keys, args[1:]))
elif len(args) == 6:
# Backwards compat for cloudpickle v0.4.1, after which the function
# state was passed as a dict to the _fill_function it-self.
func = args[0]
keys = ['globals', 'defaults', 'dict', 'module', 'closure_values']
state = dict(zip(keys, args[1:]))
else:
raise ValueError('Unexpected _fill_value arguments: %r' % (args,))
# - At pickling time, any dynamic global variable used by func is
# serialized by value (in state['globals']).
# - At unpickling time, func's __globals__ attribute is initialized by
# first retrieving an empty isolated namespace that will be shared
# with other functions pickled from the same original module
# by the same CloudPickler instance and then updated with the
# content of state['globals'] to populate the shared isolated
# namespace with all the global variables that are specifically
# referenced for this function.
func.__globals__.update(state['globals'])
func.__defaults__ = state['defaults']
func.__dict__ = state['dict']
if 'annotations' in state:
func.__annotations__ = state['annotations']
if 'doc' in state:
func.__doc__ = state['doc']
if 'name' in state:
func.__name__ = state['name']
if 'module' in state:
func.__module__ = state['module']
if 'qualname' in state:
func.__qualname__ = state['qualname']
cells = func.__closure__
if cells is not None:
for cell, value in zip(cells, state['closure_values']):
if value is not _empty_cell_value:
cell_set(cell, value)
return func
|
python
|
def _fill_function(*args):
"""Fills in the rest of function data into the skeleton function object
The skeleton itself is created by _make_skel_func().
"""
if len(args) == 2:
func = args[0]
state = args[1]
elif len(args) == 5:
# Backwards compat for cloudpickle v0.4.0, after which the `module`
# argument was introduced
func = args[0]
keys = ['globals', 'defaults', 'dict', 'closure_values']
state = dict(zip(keys, args[1:]))
elif len(args) == 6:
# Backwards compat for cloudpickle v0.4.1, after which the function
# state was passed as a dict to the _fill_function it-self.
func = args[0]
keys = ['globals', 'defaults', 'dict', 'module', 'closure_values']
state = dict(zip(keys, args[1:]))
else:
raise ValueError('Unexpected _fill_value arguments: %r' % (args,))
# - At pickling time, any dynamic global variable used by func is
# serialized by value (in state['globals']).
# - At unpickling time, func's __globals__ attribute is initialized by
# first retrieving an empty isolated namespace that will be shared
# with other functions pickled from the same original module
# by the same CloudPickler instance and then updated with the
# content of state['globals'] to populate the shared isolated
# namespace with all the global variables that are specifically
# referenced for this function.
func.__globals__.update(state['globals'])
func.__defaults__ = state['defaults']
func.__dict__ = state['dict']
if 'annotations' in state:
func.__annotations__ = state['annotations']
if 'doc' in state:
func.__doc__ = state['doc']
if 'name' in state:
func.__name__ = state['name']
if 'module' in state:
func.__module__ = state['module']
if 'qualname' in state:
func.__qualname__ = state['qualname']
cells = func.__closure__
if cells is not None:
for cell, value in zip(cells, state['closure_values']):
if value is not _empty_cell_value:
cell_set(cell, value)
return func
|
[
"def",
"_fill_function",
"(",
"*",
"args",
")",
":",
"if",
"len",
"(",
"args",
")",
"==",
"2",
":",
"func",
"=",
"args",
"[",
"0",
"]",
"state",
"=",
"args",
"[",
"1",
"]",
"elif",
"len",
"(",
"args",
")",
"==",
"5",
":",
"# Backwards compat for cloudpickle v0.4.0, after which the `module`",
"# argument was introduced",
"func",
"=",
"args",
"[",
"0",
"]",
"keys",
"=",
"[",
"'globals'",
",",
"'defaults'",
",",
"'dict'",
",",
"'closure_values'",
"]",
"state",
"=",
"dict",
"(",
"zip",
"(",
"keys",
",",
"args",
"[",
"1",
":",
"]",
")",
")",
"elif",
"len",
"(",
"args",
")",
"==",
"6",
":",
"# Backwards compat for cloudpickle v0.4.1, after which the function",
"# state was passed as a dict to the _fill_function it-self.",
"func",
"=",
"args",
"[",
"0",
"]",
"keys",
"=",
"[",
"'globals'",
",",
"'defaults'",
",",
"'dict'",
",",
"'module'",
",",
"'closure_values'",
"]",
"state",
"=",
"dict",
"(",
"zip",
"(",
"keys",
",",
"args",
"[",
"1",
":",
"]",
")",
")",
"else",
":",
"raise",
"ValueError",
"(",
"'Unexpected _fill_value arguments: %r'",
"%",
"(",
"args",
",",
")",
")",
"# - At pickling time, any dynamic global variable used by func is",
"# serialized by value (in state['globals']).",
"# - At unpickling time, func's __globals__ attribute is initialized by",
"# first retrieving an empty isolated namespace that will be shared",
"# with other functions pickled from the same original module",
"# by the same CloudPickler instance and then updated with the",
"# content of state['globals'] to populate the shared isolated",
"# namespace with all the global variables that are specifically",
"# referenced for this function.",
"func",
".",
"__globals__",
".",
"update",
"(",
"state",
"[",
"'globals'",
"]",
")",
"func",
".",
"__defaults__",
"=",
"state",
"[",
"'defaults'",
"]",
"func",
".",
"__dict__",
"=",
"state",
"[",
"'dict'",
"]",
"if",
"'annotations'",
"in",
"state",
":",
"func",
".",
"__annotations__",
"=",
"state",
"[",
"'annotations'",
"]",
"if",
"'doc'",
"in",
"state",
":",
"func",
".",
"__doc__",
"=",
"state",
"[",
"'doc'",
"]",
"if",
"'name'",
"in",
"state",
":",
"func",
".",
"__name__",
"=",
"state",
"[",
"'name'",
"]",
"if",
"'module'",
"in",
"state",
":",
"func",
".",
"__module__",
"=",
"state",
"[",
"'module'",
"]",
"if",
"'qualname'",
"in",
"state",
":",
"func",
".",
"__qualname__",
"=",
"state",
"[",
"'qualname'",
"]",
"cells",
"=",
"func",
".",
"__closure__",
"if",
"cells",
"is",
"not",
"None",
":",
"for",
"cell",
",",
"value",
"in",
"zip",
"(",
"cells",
",",
"state",
"[",
"'closure_values'",
"]",
")",
":",
"if",
"value",
"is",
"not",
"_empty_cell_value",
":",
"cell_set",
"(",
"cell",
",",
"value",
")",
"return",
"func"
] |
Fills in the rest of function data into the skeleton function object
The skeleton itself is created by _make_skel_func().
|
[
"Fills",
"in",
"the",
"rest",
"of",
"function",
"data",
"into",
"the",
"skeleton",
"function",
"object"
] |
618d6bff71073c8c93501ab7392c3cc579730f0b
|
https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/cloudpickle.py#L1060-L1113
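To make the skeleton/state split concrete, the standard-library sketch below (not cloudpickle's actual pickling protocol) builds a bare function from an existing code object and then fills in globals, defaults and attributes, which is roughly the division of labour between _make_skel_func() and _fill_function().

import types

def template(x, y=1):
    return x + y + z          # 'z' is looked up in __globals__ at call time

# Skeleton: the same code object, but an empty, isolated globals namespace.
skel = types.FunctionType(template.__code__, {}, "restored")

# "State" applied afterwards, mirroring state['globals'] / state['defaults'] / state['dict'].
skel.__globals__.update({"z": 10})
skel.__defaults__ = (5,)
skel.__dict__.update({"origin": "demo"})

print(skel(1))                # 16  (x=1, default y=5, global z=10)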
|
19,324
|
apache/spark
|
python/pyspark/cloudpickle.py
|
_is_dynamic
|
def _is_dynamic(module):
"""
Return True if the module is a special module that cannot be imported by its
name.
"""
# Quick check: modules that have a __file__ attribute are not dynamic modules.
if hasattr(module, '__file__'):
return False
if hasattr(module, '__spec__'):
return module.__spec__ is None
else:
# Backward compat for Python 2
import imp
try:
path = None
for part in module.__name__.split('.'):
if path is not None:
path = [path]
f, path, description = imp.find_module(part, path)
if f is not None:
f.close()
except ImportError:
return True
return False
|
python
|
def _is_dynamic(module):
"""
Return True if the module is a special module that cannot be imported by its
name.
"""
# Quick check: modules that have a __file__ attribute are not dynamic modules.
if hasattr(module, '__file__'):
return False
if hasattr(module, '__spec__'):
return module.__spec__ is None
else:
# Backward compat for Python 2
import imp
try:
path = None
for part in module.__name__.split('.'):
if path is not None:
path = [path]
f, path, description = imp.find_module(part, path)
if f is not None:
f.close()
except ImportError:
return True
return False
|
[
"def",
"_is_dynamic",
"(",
"module",
")",
":",
"# Quick check: module that have __file__ attribute are not dynamic modules.",
"if",
"hasattr",
"(",
"module",
",",
"'__file__'",
")",
":",
"return",
"False",
"if",
"hasattr",
"(",
"module",
",",
"'__spec__'",
")",
":",
"return",
"module",
".",
"__spec__",
"is",
"None",
"else",
":",
"# Backward compat for Python 2",
"import",
"imp",
"try",
":",
"path",
"=",
"None",
"for",
"part",
"in",
"module",
".",
"__name__",
".",
"split",
"(",
"'.'",
")",
":",
"if",
"path",
"is",
"not",
"None",
":",
"path",
"=",
"[",
"path",
"]",
"f",
",",
"path",
",",
"description",
"=",
"imp",
".",
"find_module",
"(",
"part",
",",
"path",
")",
"if",
"f",
"is",
"not",
"None",
":",
"f",
".",
"close",
"(",
")",
"except",
"ImportError",
":",
"return",
"True",
"return",
"False"
] |
Return True if the module is a special module that cannot be imported by its
name.
|
[
"Return",
"True",
"if",
"the",
"module",
"is",
"special",
"module",
"that",
"cannot",
"be",
"imported",
"by",
"its",
"name",
"."
] |
618d6bff71073c8c93501ab7392c3cc579730f0b
|
https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/cloudpickle.py#L1164-L1188
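For intuition, a hedged Python 3 example importing the private helper from this module and contrasting a regular importable module with one created on the fly (made_up_mod is, of course, made up):

import sys
import types

from pyspark.cloudpickle import _is_dynamic

dyn = types.ModuleType("made_up_mod")     # built in memory; it has no __file__
dyn.answer = 42
sys.modules["made_up_mod"] = dyn          # visible here, but not importable elsewhere

print(_is_dynamic(types))                 # False: a regular module found on disk
print(_is_dynamic(dyn))                   # True: its contents must be pickled by value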
|
19,325
|
apache/spark
|
python/pyspark/cloudpickle.py
|
CloudPickler.save_function
|
def save_function(self, obj, name=None):
""" Registered with the dispatch to handle all function types.
Determines what kind of function obj is (e.g. lambda, defined at
interactive prompt, etc) and handles the pickling appropriately.
"""
try:
should_special_case = obj in _BUILTIN_TYPE_CONSTRUCTORS
except TypeError:
# Methods of builtin types aren't hashable in python 2.
should_special_case = False
if should_special_case:
# We keep a special-cased cache of built-in type constructors at
# global scope, because these functions are structured very
# differently in different python versions and implementations (for
# example, they're instances of types.BuiltinFunctionType in
# CPython, but they're ordinary types.FunctionType instances in
# PyPy).
#
# If the function we've received is in that cache, we just
# serialize it as a lookup into the cache.
return self.save_reduce(_BUILTIN_TYPE_CONSTRUCTORS[obj], (), obj=obj)
write = self.write
if name is None:
name = obj.__name__
try:
# whichmodule() could fail, see
# https://bitbucket.org/gutworth/six/issues/63/importing-six-breaks-pickling
modname = pickle.whichmodule(obj, name)
except Exception:
modname = None
# print('which gives %s %s %s' % (modname, obj, name))
try:
themodule = sys.modules[modname]
except KeyError:
# eval'd items such as namedtuple give invalid items for their function __module__
modname = '__main__'
if modname == '__main__':
themodule = None
try:
lookedup_by_name = getattr(themodule, name, None)
except Exception:
lookedup_by_name = None
if themodule:
if lookedup_by_name is obj:
return self.save_global(obj, name)
# a builtin_function_or_method which comes in as an attribute of some
# object (e.g., itertools.chain.from_iterable) will end
# up with modname "__main__" and so end up here. But these functions
# have no __code__ attribute in CPython, so the handling for
# user-defined functions below will fail.
# So we pickle them here using save_reduce; have to do it differently
# for different python versions.
if not hasattr(obj, '__code__'):
if PY3: # pragma: no branch
rv = obj.__reduce_ex__(self.proto)
else:
if hasattr(obj, '__self__'):
rv = (getattr, (obj.__self__, name))
else:
raise pickle.PicklingError("Can't pickle %r" % obj)
return self.save_reduce(obj=obj, *rv)
# if func is lambda, def'ed at prompt, is in main, or is nested, then
# we'll pickle the actual function object rather than simply saving a
# reference (as is done in default pickler), via save_function_tuple.
if (islambda(obj)
or getattr(obj.__code__, 'co_filename', None) == '<stdin>'
or themodule is None):
self.save_function_tuple(obj)
return
else:
# func is nested
if lookedup_by_name is None or lookedup_by_name is not obj:
self.save_function_tuple(obj)
return
if obj.__dict__:
# essentially save_reduce, but workaround needed to avoid recursion
self.save(_restore_attr)
write(pickle.MARK + pickle.GLOBAL + modname + '\n' + name + '\n')
self.memoize(obj)
self.save(obj.__dict__)
write(pickle.TUPLE + pickle.REDUCE)
else:
write(pickle.GLOBAL + modname + '\n' + name + '\n')
self.memoize(obj)
|
python
|
def save_function(self, obj, name=None):
""" Registered with the dispatch to handle all function types.
Determines what kind of function obj is (e.g. lambda, defined at
interactive prompt, etc) and handles the pickling appropriately.
"""
try:
should_special_case = obj in _BUILTIN_TYPE_CONSTRUCTORS
except TypeError:
# Methods of builtin types aren't hashable in python 2.
should_special_case = False
if should_special_case:
# We keep a special-cased cache of built-in type constructors at
# global scope, because these functions are structured very
# differently in different python versions and implementations (for
# example, they're instances of types.BuiltinFunctionType in
# CPython, but they're ordinary types.FunctionType instances in
# PyPy).
#
# If the function we've received is in that cache, we just
# serialize it as a lookup into the cache.
return self.save_reduce(_BUILTIN_TYPE_CONSTRUCTORS[obj], (), obj=obj)
write = self.write
if name is None:
name = obj.__name__
try:
# whichmodule() could fail, see
# https://bitbucket.org/gutworth/six/issues/63/importing-six-breaks-pickling
modname = pickle.whichmodule(obj, name)
except Exception:
modname = None
# print('which gives %s %s %s' % (modname, obj, name))
try:
themodule = sys.modules[modname]
except KeyError:
# eval'd items such as namedtuple give invalid items for their function __module__
modname = '__main__'
if modname == '__main__':
themodule = None
try:
lookedup_by_name = getattr(themodule, name, None)
except Exception:
lookedup_by_name = None
if themodule:
if lookedup_by_name is obj:
return self.save_global(obj, name)
# a builtin_function_or_method which comes in as an attribute of some
# object (e.g., itertools.chain.from_iterable) will end
# up with modname "__main__" and so end up here. But these functions
# have no __code__ attribute in CPython, so the handling for
# user-defined functions below will fail.
# So we pickle them here using save_reduce; have to do it differently
# for different python versions.
if not hasattr(obj, '__code__'):
if PY3: # pragma: no branch
rv = obj.__reduce_ex__(self.proto)
else:
if hasattr(obj, '__self__'):
rv = (getattr, (obj.__self__, name))
else:
raise pickle.PicklingError("Can't pickle %r" % obj)
return self.save_reduce(obj=obj, *rv)
# if func is lambda, def'ed at prompt, is in main, or is nested, then
# we'll pickle the actual function object rather than simply saving a
# reference (as is done in default pickler), via save_function_tuple.
if (islambda(obj)
or getattr(obj.__code__, 'co_filename', None) == '<stdin>'
or themodule is None):
self.save_function_tuple(obj)
return
else:
# func is nested
if lookedup_by_name is None or lookedup_by_name is not obj:
self.save_function_tuple(obj)
return
if obj.__dict__:
# essentially save_reduce, but workaround needed to avoid recursion
self.save(_restore_attr)
write(pickle.MARK + pickle.GLOBAL + modname + '\n' + name + '\n')
self.memoize(obj)
self.save(obj.__dict__)
write(pickle.TUPLE + pickle.REDUCE)
else:
write(pickle.GLOBAL + modname + '\n' + name + '\n')
self.memoize(obj)
|
[
"def",
"save_function",
"(",
"self",
",",
"obj",
",",
"name",
"=",
"None",
")",
":",
"try",
":",
"should_special_case",
"=",
"obj",
"in",
"_BUILTIN_TYPE_CONSTRUCTORS",
"except",
"TypeError",
":",
"# Methods of builtin types aren't hashable in python 2.",
"should_special_case",
"=",
"False",
"if",
"should_special_case",
":",
"# We keep a special-cased cache of built-in type constructors at",
"# global scope, because these functions are structured very",
"# differently in different python versions and implementations (for",
"# example, they're instances of types.BuiltinFunctionType in",
"# CPython, but they're ordinary types.FunctionType instances in",
"# PyPy).",
"#",
"# If the function we've received is in that cache, we just",
"# serialize it as a lookup into the cache.",
"return",
"self",
".",
"save_reduce",
"(",
"_BUILTIN_TYPE_CONSTRUCTORS",
"[",
"obj",
"]",
",",
"(",
")",
",",
"obj",
"=",
"obj",
")",
"write",
"=",
"self",
".",
"write",
"if",
"name",
"is",
"None",
":",
"name",
"=",
"obj",
".",
"__name__",
"try",
":",
"# whichmodule() could fail, see",
"# https://bitbucket.org/gutworth/six/issues/63/importing-six-breaks-pickling",
"modname",
"=",
"pickle",
".",
"whichmodule",
"(",
"obj",
",",
"name",
")",
"except",
"Exception",
":",
"modname",
"=",
"None",
"# print('which gives %s %s %s' % (modname, obj, name))",
"try",
":",
"themodule",
"=",
"sys",
".",
"modules",
"[",
"modname",
"]",
"except",
"KeyError",
":",
"# eval'd items such as namedtuple give invalid items for their function __module__",
"modname",
"=",
"'__main__'",
"if",
"modname",
"==",
"'__main__'",
":",
"themodule",
"=",
"None",
"try",
":",
"lookedup_by_name",
"=",
"getattr",
"(",
"themodule",
",",
"name",
",",
"None",
")",
"except",
"Exception",
":",
"lookedup_by_name",
"=",
"None",
"if",
"themodule",
":",
"if",
"lookedup_by_name",
"is",
"obj",
":",
"return",
"self",
".",
"save_global",
"(",
"obj",
",",
"name",
")",
"# a builtin_function_or_method which comes in as an attribute of some",
"# object (e.g., itertools.chain.from_iterable) will end",
"# up with modname \"__main__\" and so end up here. But these functions",
"# have no __code__ attribute in CPython, so the handling for",
"# user-defined functions below will fail.",
"# So we pickle them here using save_reduce; have to do it differently",
"# for different python versions.",
"if",
"not",
"hasattr",
"(",
"obj",
",",
"'__code__'",
")",
":",
"if",
"PY3",
":",
"# pragma: no branch",
"rv",
"=",
"obj",
".",
"__reduce_ex__",
"(",
"self",
".",
"proto",
")",
"else",
":",
"if",
"hasattr",
"(",
"obj",
",",
"'__self__'",
")",
":",
"rv",
"=",
"(",
"getattr",
",",
"(",
"obj",
".",
"__self__",
",",
"name",
")",
")",
"else",
":",
"raise",
"pickle",
".",
"PicklingError",
"(",
"\"Can't pickle %r\"",
"%",
"obj",
")",
"return",
"self",
".",
"save_reduce",
"(",
"obj",
"=",
"obj",
",",
"*",
"rv",
")",
"# if func is lambda, def'ed at prompt, is in main, or is nested, then",
"# we'll pickle the actual function object rather than simply saving a",
"# reference (as is done in default pickler), via save_function_tuple.",
"if",
"(",
"islambda",
"(",
"obj",
")",
"or",
"getattr",
"(",
"obj",
".",
"__code__",
",",
"'co_filename'",
",",
"None",
")",
"==",
"'<stdin>'",
"or",
"themodule",
"is",
"None",
")",
":",
"self",
".",
"save_function_tuple",
"(",
"obj",
")",
"return",
"else",
":",
"# func is nested",
"if",
"lookedup_by_name",
"is",
"None",
"or",
"lookedup_by_name",
"is",
"not",
"obj",
":",
"self",
".",
"save_function_tuple",
"(",
"obj",
")",
"return",
"if",
"obj",
".",
"__dict__",
":",
"# essentially save_reduce, but workaround needed to avoid recursion",
"self",
".",
"save",
"(",
"_restore_attr",
")",
"write",
"(",
"pickle",
".",
"MARK",
"+",
"pickle",
".",
"GLOBAL",
"+",
"modname",
"+",
"'\\n'",
"+",
"name",
"+",
"'\\n'",
")",
"self",
".",
"memoize",
"(",
"obj",
")",
"self",
".",
"save",
"(",
"obj",
".",
"__dict__",
")",
"write",
"(",
"pickle",
".",
"TUPLE",
"+",
"pickle",
".",
"REDUCE",
")",
"else",
":",
"write",
"(",
"pickle",
".",
"GLOBAL",
"+",
"modname",
"+",
"'\\n'",
"+",
"name",
"+",
"'\\n'",
")",
"self",
".",
"memoize",
"(",
"obj",
")"
] |
Registered with the dispatch to handle all function types.
Determines what kind of function obj is (e.g. lambda, defined at
interactive prompt, etc) and handles the pickling appropriately.
|
[
"Registered",
"with",
"the",
"dispatch",
"to",
"handle",
"all",
"function",
"types",
"."
] |
618d6bff71073c8c93501ab7392c3cc579730f0b
|
https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/cloudpickle.py#L319-L412
|
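Illustrative usage sketch (editor's addition, not part of the dataset row above): the save_function logic in this entry is what lets PySpark ship lambdas and interactively defined functions to executors by value instead of by module reference. A minimal demonstration, assuming a local pyspark installation; no SparkContext or JVM is needed:

import pickle
from pyspark.serializers import CloudPickleSerializer

factor = 3
triple = lambda x: factor * x       # a lambda in __main__: save_function falls back to save_function_tuple

ser = CloudPickleSerializer()       # thin wrapper around the CloudPickler shown in this file
blob = ser.dumps(triple)            # pickled by value (the referenced global `factor` is captured as well)
restored = pickle.loads(blob)       # the stream is standard pickle and loads without cloudpickle itself
print(restored(7))                  # 21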
19,326
|
apache/spark
|
python/pyspark/cloudpickle.py
|
CloudPickler.save_inst
|
def save_inst(self, obj):
"""Inner logic to save instance. Based off pickle.save_inst"""
cls = obj.__class__
# Try the dispatch table (pickle module doesn't do it)
f = self.dispatch.get(cls)
if f:
f(self, obj) # Call unbound method with explicit self
return
memo = self.memo
write = self.write
save = self.save
if hasattr(obj, '__getinitargs__'):
args = obj.__getinitargs__()
len(args) # XXX Assert it's a sequence
pickle._keep_alive(args, memo)
else:
args = ()
write(pickle.MARK)
if self.bin:
save(cls)
for arg in args:
save(arg)
write(pickle.OBJ)
else:
for arg in args:
save(arg)
write(pickle.INST + cls.__module__ + '\n' + cls.__name__ + '\n')
self.memoize(obj)
try:
getstate = obj.__getstate__
except AttributeError:
stuff = obj.__dict__
else:
stuff = getstate()
pickle._keep_alive(stuff, memo)
save(stuff)
write(pickle.BUILD)
|
python
|
def save_inst(self, obj):
"""Inner logic to save instance. Based off pickle.save_inst"""
cls = obj.__class__
# Try the dispatch table (pickle module doesn't do it)
f = self.dispatch.get(cls)
if f:
f(self, obj) # Call unbound method with explicit self
return
memo = self.memo
write = self.write
save = self.save
if hasattr(obj, '__getinitargs__'):
args = obj.__getinitargs__()
len(args) # XXX Assert it's a sequence
pickle._keep_alive(args, memo)
else:
args = ()
write(pickle.MARK)
if self.bin:
save(cls)
for arg in args:
save(arg)
write(pickle.OBJ)
else:
for arg in args:
save(arg)
write(pickle.INST + cls.__module__ + '\n' + cls.__name__ + '\n')
self.memoize(obj)
try:
getstate = obj.__getstate__
except AttributeError:
stuff = obj.__dict__
else:
stuff = getstate()
pickle._keep_alive(stuff, memo)
save(stuff)
write(pickle.BUILD)
|
[
"def",
"save_inst",
"(",
"self",
",",
"obj",
")",
":",
"cls",
"=",
"obj",
".",
"__class__",
"# Try the dispatch table (pickle module doesn't do it)",
"f",
"=",
"self",
".",
"dispatch",
".",
"get",
"(",
"cls",
")",
"if",
"f",
":",
"f",
"(",
"self",
",",
"obj",
")",
"# Call unbound method with explicit self",
"return",
"memo",
"=",
"self",
".",
"memo",
"write",
"=",
"self",
".",
"write",
"save",
"=",
"self",
".",
"save",
"if",
"hasattr",
"(",
"obj",
",",
"'__getinitargs__'",
")",
":",
"args",
"=",
"obj",
".",
"__getinitargs__",
"(",
")",
"len",
"(",
"args",
")",
"# XXX Assert it's a sequence",
"pickle",
".",
"_keep_alive",
"(",
"args",
",",
"memo",
")",
"else",
":",
"args",
"=",
"(",
")",
"write",
"(",
"pickle",
".",
"MARK",
")",
"if",
"self",
".",
"bin",
":",
"save",
"(",
"cls",
")",
"for",
"arg",
"in",
"args",
":",
"save",
"(",
"arg",
")",
"write",
"(",
"pickle",
".",
"OBJ",
")",
"else",
":",
"for",
"arg",
"in",
"args",
":",
"save",
"(",
"arg",
")",
"write",
"(",
"pickle",
".",
"INST",
"+",
"cls",
".",
"__module__",
"+",
"'\\n'",
"+",
"cls",
".",
"__name__",
"+",
"'\\n'",
")",
"self",
".",
"memoize",
"(",
"obj",
")",
"try",
":",
"getstate",
"=",
"obj",
".",
"__getstate__",
"except",
"AttributeError",
":",
"stuff",
"=",
"obj",
".",
"__dict__",
"else",
":",
"stuff",
"=",
"getstate",
"(",
")",
"pickle",
".",
"_keep_alive",
"(",
"stuff",
",",
"memo",
")",
"save",
"(",
"stuff",
")",
"write",
"(",
"pickle",
".",
"BUILD",
")"
] |
Inner logic to save instance. Based off pickle.save_inst
|
[
"Inner",
"logic",
"to",
"save",
"instance",
".",
"Based",
"off",
"pickle",
".",
"save_inst"
] |
618d6bff71073c8c93501ab7392c3cc579730f0b
|
https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/cloudpickle.py#L725-L768
|
19,327
|
apache/spark
|
python/pyspark/ml/param/__init__.py
|
Param._copy_new_parent
|
def _copy_new_parent(self, parent):
"""Copy the current param to a new parent, must be a dummy param."""
if self.parent == "undefined":
param = copy.copy(self)
param.parent = parent.uid
return param
else:
raise ValueError("Cannot copy from non-dummy parent %s." % parent)
|
python
|
def _copy_new_parent(self, parent):
"""Copy the current param to a new parent, must be a dummy param."""
if self.parent == "undefined":
param = copy.copy(self)
param.parent = parent.uid
return param
else:
raise ValueError("Cannot copy from non-dummy parent %s." % parent)
|
[
"def",
"_copy_new_parent",
"(",
"self",
",",
"parent",
")",
":",
"if",
"self",
".",
"parent",
"==",
"\"undefined\"",
":",
"param",
"=",
"copy",
".",
"copy",
"(",
"self",
")",
"param",
".",
"parent",
"=",
"parent",
".",
"uid",
"return",
"param",
"else",
":",
"raise",
"ValueError",
"(",
"\"Cannot copy from non-dummy parent %s.\"",
"%",
"parent",
")"
] |
Copy the current param to a new parent, must be a dummy param.
|
[
"Copy",
"the",
"current",
"param",
"to",
"a",
"new",
"parent",
"must",
"be",
"a",
"dummy",
"param",
"."
] |
618d6bff71073c8c93501ab7392c3cc579730f0b
|
https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/ml/param/__init__.py#L52-L59
|
19,328
|
apache/spark
|
python/pyspark/ml/param/__init__.py
|
TypeConverters.toList
|
def toList(value):
"""
Convert a value to a list, if possible.
"""
if type(value) == list:
return value
elif type(value) in [np.ndarray, tuple, xrange, array.array]:
return list(value)
elif isinstance(value, Vector):
return list(value.toArray())
else:
raise TypeError("Could not convert %s to list" % value)
|
python
|
def toList(value):
"""
Convert a value to a list, if possible.
"""
if type(value) == list:
return value
elif type(value) in [np.ndarray, tuple, xrange, array.array]:
return list(value)
elif isinstance(value, Vector):
return list(value.toArray())
else:
raise TypeError("Could not convert %s to list" % value)
|
[
"def",
"toList",
"(",
"value",
")",
":",
"if",
"type",
"(",
"value",
")",
"==",
"list",
":",
"return",
"value",
"elif",
"type",
"(",
"value",
")",
"in",
"[",
"np",
".",
"ndarray",
",",
"tuple",
",",
"xrange",
",",
"array",
".",
"array",
"]",
":",
"return",
"list",
"(",
"value",
")",
"elif",
"isinstance",
"(",
"value",
",",
"Vector",
")",
":",
"return",
"list",
"(",
"value",
".",
"toArray",
"(",
")",
")",
"else",
":",
"raise",
"TypeError",
"(",
"\"Could not convert %s to list\"",
"%",
"value",
")"
] |
Convert a value to a list, if possible.
|
[
"Convert",
"a",
"value",
"to",
"a",
"list",
"if",
"possible",
"."
] |
618d6bff71073c8c93501ab7392c3cc579730f0b
|
https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/ml/param/__init__.py#L113-L124
|
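Illustrative usage sketch (editor's addition, not part of the dataset rows): TypeConverters.toList above is the base helper behind the typed list converters in the entries that follow. Assuming pyspark and numpy are installed, it can be exercised without a SparkContext:

import numpy as np
from pyspark.ml.param import TypeConverters

print(TypeConverters.toList((1, 2, 3)))             # tuple -> [1, 2, 3]
print(TypeConverters.toList(np.array([1.0, 2.0])))  # ndarray -> plain Python list
try:
    TypeConverters.toList(42)                       # not list-like
except TypeError as e:
    print(e)                                        # Could not convert 42 to list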
19,329
|
apache/spark
|
python/pyspark/ml/param/__init__.py
|
TypeConverters.toListFloat
|
def toListFloat(value):
"""
Convert a value to list of floats, if possible.
"""
if TypeConverters._can_convert_to_list(value):
value = TypeConverters.toList(value)
if all(map(lambda v: TypeConverters._is_numeric(v), value)):
return [float(v) for v in value]
raise TypeError("Could not convert %s to list of floats" % value)
|
python
|
def toListFloat(value):
"""
Convert a value to list of floats, if possible.
"""
if TypeConverters._can_convert_to_list(value):
value = TypeConverters.toList(value)
if all(map(lambda v: TypeConverters._is_numeric(v), value)):
return [float(v) for v in value]
raise TypeError("Could not convert %s to list of floats" % value)
|
[
"def",
"toListFloat",
"(",
"value",
")",
":",
"if",
"TypeConverters",
".",
"_can_convert_to_list",
"(",
"value",
")",
":",
"value",
"=",
"TypeConverters",
".",
"toList",
"(",
"value",
")",
"if",
"all",
"(",
"map",
"(",
"lambda",
"v",
":",
"TypeConverters",
".",
"_is_numeric",
"(",
"v",
")",
",",
"value",
")",
")",
":",
"return",
"[",
"float",
"(",
"v",
")",
"for",
"v",
"in",
"value",
"]",
"raise",
"TypeError",
"(",
"\"Could not convert %s to list of floats\"",
"%",
"value",
")"
] |
Convert a value to list of floats, if possible.
|
[
"Convert",
"a",
"value",
"to",
"list",
"of",
"floats",
"if",
"possible",
"."
] |
618d6bff71073c8c93501ab7392c3cc579730f0b
|
https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/ml/param/__init__.py#L127-L135
|
19,330
|
apache/spark
|
python/pyspark/ml/param/__init__.py
|
TypeConverters.toListInt
|
def toListInt(value):
"""
Convert a value to list of ints, if possible.
"""
if TypeConverters._can_convert_to_list(value):
value = TypeConverters.toList(value)
if all(map(lambda v: TypeConverters._is_integer(v), value)):
return [int(v) for v in value]
raise TypeError("Could not convert %s to list of ints" % value)
|
python
|
def toListInt(value):
"""
Convert a value to list of ints, if possible.
"""
if TypeConverters._can_convert_to_list(value):
value = TypeConverters.toList(value)
if all(map(lambda v: TypeConverters._is_integer(v), value)):
return [int(v) for v in value]
raise TypeError("Could not convert %s to list of ints" % value)
|
[
"def",
"toListInt",
"(",
"value",
")",
":",
"if",
"TypeConverters",
".",
"_can_convert_to_list",
"(",
"value",
")",
":",
"value",
"=",
"TypeConverters",
".",
"toList",
"(",
"value",
")",
"if",
"all",
"(",
"map",
"(",
"lambda",
"v",
":",
"TypeConverters",
".",
"_is_integer",
"(",
"v",
")",
",",
"value",
")",
")",
":",
"return",
"[",
"int",
"(",
"v",
")",
"for",
"v",
"in",
"value",
"]",
"raise",
"TypeError",
"(",
"\"Could not convert %s to list of ints\"",
"%",
"value",
")"
] |
Convert a value to list of ints, if possible.
|
[
"Convert",
"a",
"value",
"to",
"list",
"of",
"ints",
"if",
"possible",
"."
] |
618d6bff71073c8c93501ab7392c3cc579730f0b
|
https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/ml/param/__init__.py#L138-L146
|
19,331
|
apache/spark
|
python/pyspark/ml/param/__init__.py
|
TypeConverters.toListString
|
def toListString(value):
"""
Convert a value to list of strings, if possible.
"""
if TypeConverters._can_convert_to_list(value):
value = TypeConverters.toList(value)
if all(map(lambda v: TypeConverters._can_convert_to_string(v), value)):
return [TypeConverters.toString(v) for v in value]
raise TypeError("Could not convert %s to list of strings" % value)
|
python
|
def toListString(value):
"""
Convert a value to list of strings, if possible.
"""
if TypeConverters._can_convert_to_list(value):
value = TypeConverters.toList(value)
if all(map(lambda v: TypeConverters._can_convert_to_string(v), value)):
return [TypeConverters.toString(v) for v in value]
raise TypeError("Could not convert %s to list of strings" % value)
|
[
"def",
"toListString",
"(",
"value",
")",
":",
"if",
"TypeConverters",
".",
"_can_convert_to_list",
"(",
"value",
")",
":",
"value",
"=",
"TypeConverters",
".",
"toList",
"(",
"value",
")",
"if",
"all",
"(",
"map",
"(",
"lambda",
"v",
":",
"TypeConverters",
".",
"_can_convert_to_string",
"(",
"v",
")",
",",
"value",
")",
")",
":",
"return",
"[",
"TypeConverters",
".",
"toString",
"(",
"v",
")",
"for",
"v",
"in",
"value",
"]",
"raise",
"TypeError",
"(",
"\"Could not convert %s to list of strings\"",
"%",
"value",
")"
] |
Convert a value to list of strings, if possible.
|
[
"Convert",
"a",
"value",
"to",
"list",
"of",
"strings",
"if",
"possible",
"."
] |
618d6bff71073c8c93501ab7392c3cc579730f0b
|
https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/ml/param/__init__.py#L149-L157
|
19,332
|
apache/spark
|
python/pyspark/ml/param/__init__.py
|
TypeConverters.toVector
|
def toVector(value):
"""
Convert a value to a MLlib Vector, if possible.
"""
if isinstance(value, Vector):
return value
elif TypeConverters._can_convert_to_list(value):
value = TypeConverters.toList(value)
if all(map(lambda v: TypeConverters._is_numeric(v), value)):
return DenseVector(value)
raise TypeError("Could not convert %s to vector" % value)
|
python
|
def toVector(value):
"""
Convert a value to a MLlib Vector, if possible.
"""
if isinstance(value, Vector):
return value
elif TypeConverters._can_convert_to_list(value):
value = TypeConverters.toList(value)
if all(map(lambda v: TypeConverters._is_numeric(v), value)):
return DenseVector(value)
raise TypeError("Could not convert %s to vector" % value)
|
[
"def",
"toVector",
"(",
"value",
")",
":",
"if",
"isinstance",
"(",
"value",
",",
"Vector",
")",
":",
"return",
"value",
"elif",
"TypeConverters",
".",
"_can_convert_to_list",
"(",
"value",
")",
":",
"value",
"=",
"TypeConverters",
".",
"toList",
"(",
"value",
")",
"if",
"all",
"(",
"map",
"(",
"lambda",
"v",
":",
"TypeConverters",
".",
"_is_numeric",
"(",
"v",
")",
",",
"value",
")",
")",
":",
"return",
"DenseVector",
"(",
"value",
")",
"raise",
"TypeError",
"(",
"\"Could not convert %s to vector\"",
"%",
"value",
")"
] |
Convert a value to a MLlib Vector, if possible.
|
[
"Convert",
"a",
"value",
"to",
"a",
"MLlib",
"Vector",
"if",
"possible",
"."
] |
618d6bff71073c8c93501ab7392c3cc579730f0b
|
https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/ml/param/__init__.py#L160-L170
|
19,333
|
apache/spark
|
python/pyspark/ml/param/__init__.py
|
TypeConverters.toString
|
def toString(value):
"""
Convert a value to a string, if possible.
"""
if isinstance(value, basestring):
return value
elif type(value) in [np.string_, np.str_]:
return str(value)
elif type(value) == np.unicode_:
return unicode(value)
else:
raise TypeError("Could not convert %s to string type" % type(value))
|
python
|
def toString(value):
"""
Convert a value to a string, if possible.
"""
if isinstance(value, basestring):
return value
elif type(value) in [np.string_, np.str_]:
return str(value)
elif type(value) == np.unicode_:
return unicode(value)
else:
raise TypeError("Could not convert %s to string type" % type(value))
|
[
"def",
"toString",
"(",
"value",
")",
":",
"if",
"isinstance",
"(",
"value",
",",
"basestring",
")",
":",
"return",
"value",
"elif",
"type",
"(",
"value",
")",
"in",
"[",
"np",
".",
"string_",
",",
"np",
".",
"str_",
"]",
":",
"return",
"str",
"(",
"value",
")",
"elif",
"type",
"(",
"value",
")",
"==",
"np",
".",
"unicode_",
":",
"return",
"unicode",
"(",
"value",
")",
"else",
":",
"raise",
"TypeError",
"(",
"\"Could not convert %s to string type\"",
"%",
"type",
"(",
"value",
")",
")"
] |
Convert a value to a string, if possible.
|
[
"Convert",
"a",
"value",
"to",
"a",
"string",
"if",
"possible",
"."
] |
618d6bff71073c8c93501ab7392c3cc579730f0b
|
https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/ml/param/__init__.py#L202-L213
|
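Illustrative usage sketch (editor's addition, not part of the dataset rows): toVector and toString above normalize Param values before they are stored. A small demo, assuming pyspark and numpy are installed (no JVM required):

from pyspark.ml.param import TypeConverters
from pyspark.ml.linalg import Vectors

v = TypeConverters.toVector([0.5, 1.5, 2.5])              # numeric list -> DenseVector
print(v)                                                  # prints the dense vector values
print(TypeConverters.toVector(Vectors.dense(1.0, 2.0)))   # already a Vector: returned unchanged
print(TypeConverters.toString("regParam"))                # plain strings pass straight through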
19,334
|
apache/spark
|
python/pyspark/ml/param/__init__.py
|
Params._copy_params
|
def _copy_params(self):
"""
Copy all params defined on the class to current object.
"""
cls = type(self)
src_name_attrs = [(x, getattr(cls, x)) for x in dir(cls)]
src_params = list(filter(lambda nameAttr: isinstance(nameAttr[1], Param), src_name_attrs))
for name, param in src_params:
setattr(self, name, param._copy_new_parent(self))
|
python
|
def _copy_params(self):
"""
Copy all params defined on the class to current object.
"""
cls = type(self)
src_name_attrs = [(x, getattr(cls, x)) for x in dir(cls)]
src_params = list(filter(lambda nameAttr: isinstance(nameAttr[1], Param), src_name_attrs))
for name, param in src_params:
setattr(self, name, param._copy_new_parent(self))
|
[
"def",
"_copy_params",
"(",
"self",
")",
":",
"cls",
"=",
"type",
"(",
"self",
")",
"src_name_attrs",
"=",
"[",
"(",
"x",
",",
"getattr",
"(",
"cls",
",",
"x",
")",
")",
"for",
"x",
"in",
"dir",
"(",
"cls",
")",
"]",
"src_params",
"=",
"list",
"(",
"filter",
"(",
"lambda",
"nameAttr",
":",
"isinstance",
"(",
"nameAttr",
"[",
"1",
"]",
",",
"Param",
")",
",",
"src_name_attrs",
")",
")",
"for",
"name",
",",
"param",
"in",
"src_params",
":",
"setattr",
"(",
"self",
",",
"name",
",",
"param",
".",
"_copy_new_parent",
"(",
"self",
")",
")"
] |
Copy all params defined on the class to current object.
|
[
"Copy",
"all",
"params",
"defined",
"on",
"the",
"class",
"to",
"current",
"object",
"."
] |
618d6bff71073c8c93501ab7392c3cc579730f0b
|
https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/ml/param/__init__.py#L250-L258
|
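Illustrative usage sketch (editor's addition, not part of the dataset rows): _copy_new_parent and _copy_params above are what turn class-level "dummy" Param declarations into per-instance params. A minimal Params subclass (the class name HasThreshold is hypothetical), assuming a local pyspark installation; no JVM is needed:

from pyspark.ml.param import Param, Params, TypeConverters

class HasThreshold(Params):
    # a dummy param declared at class level; its parent is "undefined" until copied
    threshold = Param(Params._dummy(), "threshold", "decision threshold in [0, 1]",
                      typeConverter=TypeConverters.toFloat)

    def __init__(self):
        super(HasThreshold, self).__init__()   # Params.__init__ calls _copy_params()

a, b = HasThreshold(), HasThreshold()
print(a.threshold.parent == a.uid)     # True: each instance owns its own copy
print(a.threshold is b.threshold)      # False: copies are independent
print(HasThreshold.threshold.parent)   # undefined (the class-level dummy is untouched)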
19,335
|
apache/spark
|
python/pyspark/ml/param/__init__.py
|
Params.explainParam
|
def explainParam(self, param):
"""
Explains a single param and returns its name, doc, and optional
default value and user-supplied value in a string.
"""
param = self._resolveParam(param)
values = []
if self.isDefined(param):
if param in self._defaultParamMap:
values.append("default: %s" % self._defaultParamMap[param])
if param in self._paramMap:
values.append("current: %s" % self._paramMap[param])
else:
values.append("undefined")
valueStr = "(" + ", ".join(values) + ")"
return "%s: %s %s" % (param.name, param.doc, valueStr)
|
python
|
def explainParam(self, param):
"""
Explains a single param and returns its name, doc, and optional
default value and user-supplied value in a string.
"""
param = self._resolveParam(param)
values = []
if self.isDefined(param):
if param in self._defaultParamMap:
values.append("default: %s" % self._defaultParamMap[param])
if param in self._paramMap:
values.append("current: %s" % self._paramMap[param])
else:
values.append("undefined")
valueStr = "(" + ", ".join(values) + ")"
return "%s: %s %s" % (param.name, param.doc, valueStr)
|
[
"def",
"explainParam",
"(",
"self",
",",
"param",
")",
":",
"param",
"=",
"self",
".",
"_resolveParam",
"(",
"param",
")",
"values",
"=",
"[",
"]",
"if",
"self",
".",
"isDefined",
"(",
"param",
")",
":",
"if",
"param",
"in",
"self",
".",
"_defaultParamMap",
":",
"values",
".",
"append",
"(",
"\"default: %s\"",
"%",
"self",
".",
"_defaultParamMap",
"[",
"param",
"]",
")",
"if",
"param",
"in",
"self",
".",
"_paramMap",
":",
"values",
".",
"append",
"(",
"\"current: %s\"",
"%",
"self",
".",
"_paramMap",
"[",
"param",
"]",
")",
"else",
":",
"values",
".",
"append",
"(",
"\"undefined\"",
")",
"valueStr",
"=",
"\"(\"",
"+",
"\", \"",
".",
"join",
"(",
"values",
")",
"+",
"\")\"",
"return",
"\"%s: %s %s\"",
"%",
"(",
"param",
".",
"name",
",",
"param",
".",
"doc",
",",
"valueStr",
")"
] |
Explains a single param and returns its name, doc, and optional
default value and user-supplied value in a string.
|
[
"Explains",
"a",
"single",
"param",
"and",
"returns",
"its",
"name",
"doc",
"and",
"optional",
"default",
"value",
"and",
"user",
"-",
"supplied",
"value",
"in",
"a",
"string",
"."
] |
618d6bff71073c8c93501ab7392c3cc579730f0b
|
https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/ml/param/__init__.py#L273-L288
|
19,336
|
apache/spark
|
python/pyspark/ml/param/__init__.py
|
Params.getParam
|
def getParam(self, paramName):
"""
Gets a param by its name.
"""
param = getattr(self, paramName)
if isinstance(param, Param):
return param
else:
raise ValueError("Cannot find param with name %s." % paramName)
|
python
|
def getParam(self, paramName):
"""
Gets a param by its name.
"""
param = getattr(self, paramName)
if isinstance(param, Param):
return param
else:
raise ValueError("Cannot find param with name %s." % paramName)
|
[
"def",
"getParam",
"(",
"self",
",",
"paramName",
")",
":",
"param",
"=",
"getattr",
"(",
"self",
",",
"paramName",
")",
"if",
"isinstance",
"(",
"param",
",",
"Param",
")",
":",
"return",
"param",
"else",
":",
"raise",
"ValueError",
"(",
"\"Cannot find param with name %s.\"",
"%",
"paramName",
")"
] |
Gets a param by its name.
|
[
"Gets",
"a",
"param",
"by",
"its",
"name",
"."
] |
618d6bff71073c8c93501ab7392c3cc579730f0b
|
https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/ml/param/__init__.py#L297-L305
|
19,337
|
apache/spark
|
python/pyspark/ml/param/__init__.py
|
Params.isSet
|
def isSet(self, param):
"""
Checks whether a param is explicitly set by user.
"""
param = self._resolveParam(param)
return param in self._paramMap
|
python
|
def isSet(self, param):
"""
Checks whether a param is explicitly set by user.
"""
param = self._resolveParam(param)
return param in self._paramMap
|
[
"def",
"isSet",
"(",
"self",
",",
"param",
")",
":",
"param",
"=",
"self",
".",
"_resolveParam",
"(",
"param",
")",
"return",
"param",
"in",
"self",
".",
"_paramMap"
] |
Checks whether a param is explicitly set by user.
|
[
"Checks",
"whether",
"a",
"param",
"is",
"explicitly",
"set",
"by",
"user",
"."
] |
618d6bff71073c8c93501ab7392c3cc579730f0b
|
https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/ml/param/__init__.py#L307-L312
|
19,338
|
apache/spark
|
python/pyspark/ml/param/__init__.py
|
Params.hasDefault
|
def hasDefault(self, param):
"""
Checks whether a param has a default value.
"""
param = self._resolveParam(param)
return param in self._defaultParamMap
|
python
|
def hasDefault(self, param):
"""
Checks whether a param has a default value.
"""
param = self._resolveParam(param)
return param in self._defaultParamMap
|
[
"def",
"hasDefault",
"(",
"self",
",",
"param",
")",
":",
"param",
"=",
"self",
".",
"_resolveParam",
"(",
"param",
")",
"return",
"param",
"in",
"self",
".",
"_defaultParamMap"
] |
Checks whether a param has a default value.
|
[
"Checks",
"whether",
"a",
"param",
"has",
"a",
"default",
"value",
"."
] |
618d6bff71073c8c93501ab7392c3cc579730f0b
|
https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/ml/param/__init__.py#L314-L319
|
19,339
|
apache/spark
|
python/pyspark/ml/param/__init__.py
|
Params.getOrDefault
|
def getOrDefault(self, param):
"""
Gets the value of a param in the user-supplied param map or its
default value. Raises an error if neither is set.
"""
param = self._resolveParam(param)
if param in self._paramMap:
return self._paramMap[param]
else:
return self._defaultParamMap[param]
|
python
|
def getOrDefault(self, param):
"""
Gets the value of a param in the user-supplied param map or its
default value. Raises an error if neither is set.
"""
param = self._resolveParam(param)
if param in self._paramMap:
return self._paramMap[param]
else:
return self._defaultParamMap[param]
|
[
"def",
"getOrDefault",
"(",
"self",
",",
"param",
")",
":",
"param",
"=",
"self",
".",
"_resolveParam",
"(",
"param",
")",
"if",
"param",
"in",
"self",
".",
"_paramMap",
":",
"return",
"self",
".",
"_paramMap",
"[",
"param",
"]",
"else",
":",
"return",
"self",
".",
"_defaultParamMap",
"[",
"param",
"]"
] |
Gets the value of a param in the user-supplied param map or its
default value. Raises an error if neither is set.
|
[
"Gets",
"the",
"value",
"of",
"a",
"param",
"in",
"the",
"user",
"-",
"supplied",
"param",
"map",
"or",
"its",
"default",
"value",
".",
"Raises",
"an",
"error",
"if",
"neither",
"is",
"set",
"."
] |
618d6bff71073c8c93501ab7392c3cc579730f0b
|
https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/ml/param/__init__.py#L339-L348
|
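Illustrative usage sketch (editor's addition, not part of the dataset rows): explainParam, isSet, hasDefault and getOrDefault above are the public face of the two param maps. A quick demo with LogisticRegression, assuming a local Spark installation (a JVM is started for the session):

from pyspark.sql import SparkSession
from pyspark.ml.classification import LogisticRegression

spark = SparkSession.builder.master("local[1]").appName("params-demo").getOrCreate()

lr = LogisticRegression(regParam=0.1)
print(lr.hasDefault(lr.regParam))      # True  (regParam has a default of 0.0)
print(lr.isSet(lr.regParam))           # True  (0.1 was supplied explicitly)
print(lr.getOrDefault(lr.regParam))    # 0.1   (the user-supplied value wins over the default)
print(lr.explainParam("regParam"))     # e.g. regParam: ... (default: 0.0, current: 0.1)

spark.stop()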
19,340
|
apache/spark
|
python/pyspark/ml/param/__init__.py
|
Params.set
|
def set(self, param, value):
"""
Sets a parameter in the embedded param map.
"""
self._shouldOwn(param)
try:
value = param.typeConverter(value)
except ValueError as e:
raise ValueError('Invalid param value given for param "%s". %s' % (param.name, e))
self._paramMap[param] = value
|
python
|
def set(self, param, value):
"""
Sets a parameter in the embedded param map.
"""
self._shouldOwn(param)
try:
value = param.typeConverter(value)
except ValueError as e:
raise ValueError('Invalid param value given for param "%s". %s' % (param.name, e))
self._paramMap[param] = value
|
[
"def",
"set",
"(",
"self",
",",
"param",
",",
"value",
")",
":",
"self",
".",
"_shouldOwn",
"(",
"param",
")",
"try",
":",
"value",
"=",
"param",
".",
"typeConverter",
"(",
"value",
")",
"except",
"ValueError",
"as",
"e",
":",
"raise",
"ValueError",
"(",
"'Invalid param value given for param \"%s\". %s'",
"%",
"(",
"param",
".",
"name",
",",
"e",
")",
")",
"self",
".",
"_paramMap",
"[",
"param",
"]",
"=",
"value"
] |
Sets a parameter in the embedded param map.
|
[
"Sets",
"a",
"parameter",
"in",
"the",
"embedded",
"param",
"map",
"."
] |
618d6bff71073c8c93501ab7392c3cc579730f0b
|
https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/ml/param/__init__.py#L387-L396
|
19,341
|
apache/spark
|
python/pyspark/ml/param/__init__.py
|
Params._shouldOwn
|
def _shouldOwn(self, param):
"""
Validates that the input param belongs to this Params instance.
"""
if not (self.uid == param.parent and self.hasParam(param.name)):
raise ValueError("Param %r does not belong to %r." % (param, self))
|
python
|
def _shouldOwn(self, param):
"""
Validates that the input param belongs to this Params instance.
"""
if not (self.uid == param.parent and self.hasParam(param.name)):
raise ValueError("Param %r does not belong to %r." % (param, self))
|
[
"def",
"_shouldOwn",
"(",
"self",
",",
"param",
")",
":",
"if",
"not",
"(",
"self",
".",
"uid",
"==",
"param",
".",
"parent",
"and",
"self",
".",
"hasParam",
"(",
"param",
".",
"name",
")",
")",
":",
"raise",
"ValueError",
"(",
"\"Param %r does not belong to %r.\"",
"%",
"(",
"param",
",",
"self",
")",
")"
] |
Validates that the input param belongs to this Params instance.
|
[
"Validates",
"that",
"the",
"input",
"param",
"belongs",
"to",
"this",
"Params",
"instance",
"."
] |
618d6bff71073c8c93501ab7392c3cc579730f0b
|
https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/ml/param/__init__.py#L398-L403
|
19,342
|
apache/spark
|
python/pyspark/ml/param/__init__.py
|
Params._resolveParam
|
def _resolveParam(self, param):
"""
Resolves a param and validates the ownership.
:param param: param name or the param instance, which must
belong to this Params instance
:return: resolved param instance
"""
if isinstance(param, Param):
self._shouldOwn(param)
return param
elif isinstance(param, basestring):
return self.getParam(param)
else:
raise ValueError("Cannot resolve %r as a param." % param)
|
python
|
def _resolveParam(self, param):
"""
Resolves a param and validates the ownership.
:param param: param name or the param instance, which must
belong to this Params instance
:return: resolved param instance
"""
if isinstance(param, Param):
self._shouldOwn(param)
return param
elif isinstance(param, basestring):
return self.getParam(param)
else:
raise ValueError("Cannot resolve %r as a param." % param)
|
[
"def",
"_resolveParam",
"(",
"self",
",",
"param",
")",
":",
"if",
"isinstance",
"(",
"param",
",",
"Param",
")",
":",
"self",
".",
"_shouldOwn",
"(",
"param",
")",
"return",
"param",
"elif",
"isinstance",
"(",
"param",
",",
"basestring",
")",
":",
"return",
"self",
".",
"getParam",
"(",
"param",
")",
"else",
":",
"raise",
"ValueError",
"(",
"\"Cannot resolve %r as a param.\"",
"%",
"param",
")"
] |
Resolves a param and validates the ownership.
:param param: param name or the param instance, which must
belong to this Params instance
:return: resolved param instance
|
[
"Resolves",
"a",
"param",
"and",
"validates",
"the",
"ownership",
"."
] |
618d6bff71073c8c93501ab7392c3cc579730f0b
|
https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/ml/param/__init__.py#L405-L419
|
19,343
|
apache/spark
|
python/pyspark/ml/param/__init__.py
|
Params._set
|
def _set(self, **kwargs):
"""
Sets user-supplied params.
"""
for param, value in kwargs.items():
p = getattr(self, param)
if value is not None:
try:
value = p.typeConverter(value)
except TypeError as e:
raise TypeError('Invalid param value given for param "%s". %s' % (p.name, e))
self._paramMap[p] = value
return self
|
python
|
def _set(self, **kwargs):
"""
Sets user-supplied params.
"""
for param, value in kwargs.items():
p = getattr(self, param)
if value is not None:
try:
value = p.typeConverter(value)
except TypeError as e:
raise TypeError('Invalid param value given for param "%s". %s' % (p.name, e))
self._paramMap[p] = value
return self
|
[
"def",
"_set",
"(",
"self",
",",
"*",
"*",
"kwargs",
")",
":",
"for",
"param",
",",
"value",
"in",
"kwargs",
".",
"items",
"(",
")",
":",
"p",
"=",
"getattr",
"(",
"self",
",",
"param",
")",
"if",
"value",
"is",
"not",
"None",
":",
"try",
":",
"value",
"=",
"p",
".",
"typeConverter",
"(",
"value",
")",
"except",
"TypeError",
"as",
"e",
":",
"raise",
"TypeError",
"(",
"'Invalid param value given for param \"%s\". %s'",
"%",
"(",
"p",
".",
"name",
",",
"e",
")",
")",
"self",
".",
"_paramMap",
"[",
"p",
"]",
"=",
"value",
"return",
"self"
] |
Sets user-supplied params.
|
[
"Sets",
"user",
"-",
"supplied",
"params",
"."
] |
618d6bff71073c8c93501ab7392c3cc579730f0b
|
https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/ml/param/__init__.py#L431-L443
|
19,344
|
apache/spark
|
python/pyspark/ml/param/__init__.py
|
Params._setDefault
|
def _setDefault(self, **kwargs):
"""
Sets default params.
"""
for param, value in kwargs.items():
p = getattr(self, param)
if value is not None and not isinstance(value, JavaObject):
try:
value = p.typeConverter(value)
except TypeError as e:
raise TypeError('Invalid default param value given for param "%s". %s'
% (p.name, e))
self._defaultParamMap[p] = value
return self
|
python
|
def _setDefault(self, **kwargs):
"""
Sets default params.
"""
for param, value in kwargs.items():
p = getattr(self, param)
if value is not None and not isinstance(value, JavaObject):
try:
value = p.typeConverter(value)
except TypeError as e:
raise TypeError('Invalid default param value given for param "%s". %s'
% (p.name, e))
self._defaultParamMap[p] = value
return self
|
[
"def",
"_setDefault",
"(",
"self",
",",
"*",
"*",
"kwargs",
")",
":",
"for",
"param",
",",
"value",
"in",
"kwargs",
".",
"items",
"(",
")",
":",
"p",
"=",
"getattr",
"(",
"self",
",",
"param",
")",
"if",
"value",
"is",
"not",
"None",
"and",
"not",
"isinstance",
"(",
"value",
",",
"JavaObject",
")",
":",
"try",
":",
"value",
"=",
"p",
".",
"typeConverter",
"(",
"value",
")",
"except",
"TypeError",
"as",
"e",
":",
"raise",
"TypeError",
"(",
"'Invalid default param value given for param \"%s\". %s'",
"%",
"(",
"p",
".",
"name",
",",
"e",
")",
")",
"self",
".",
"_defaultParamMap",
"[",
"p",
"]",
"=",
"value",
"return",
"self"
] |
Sets default params.
|
[
"Sets",
"default",
"params",
"."
] |
618d6bff71073c8c93501ab7392c3cc579730f0b
|
https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/ml/param/__init__.py#L452-L465
|
19,345
|
apache/spark
|
python/pyspark/ml/param/__init__.py
|
Params._copyValues
|
def _copyValues(self, to, extra=None):
"""
Copies param values from this instance to another instance for
params shared by them.
:param to: the target instance
:param extra: extra params to be copied
:return: the target instance with param values copied
"""
paramMap = self._paramMap.copy()
if extra is not None:
paramMap.update(extra)
for param in self.params:
# copy default params
if param in self._defaultParamMap and to.hasParam(param.name):
to._defaultParamMap[to.getParam(param.name)] = self._defaultParamMap[param]
# copy explicitly set params
if param in paramMap and to.hasParam(param.name):
to._set(**{param.name: paramMap[param]})
return to
|
python
|
def _copyValues(self, to, extra=None):
"""
Copies param values from this instance to another instance for
params shared by them.
:param to: the target instance
:param extra: extra params to be copied
:return: the target instance with param values copied
"""
paramMap = self._paramMap.copy()
if extra is not None:
paramMap.update(extra)
for param in self.params:
# copy default params
if param in self._defaultParamMap and to.hasParam(param.name):
to._defaultParamMap[to.getParam(param.name)] = self._defaultParamMap[param]
# copy explicitly set params
if param in paramMap and to.hasParam(param.name):
to._set(**{param.name: paramMap[param]})
return to
|
[
"def",
"_copyValues",
"(",
"self",
",",
"to",
",",
"extra",
"=",
"None",
")",
":",
"paramMap",
"=",
"self",
".",
"_paramMap",
".",
"copy",
"(",
")",
"if",
"extra",
"is",
"not",
"None",
":",
"paramMap",
".",
"update",
"(",
"extra",
")",
"for",
"param",
"in",
"self",
".",
"params",
":",
"# copy default params",
"if",
"param",
"in",
"self",
".",
"_defaultParamMap",
"and",
"to",
".",
"hasParam",
"(",
"param",
".",
"name",
")",
":",
"to",
".",
"_defaultParamMap",
"[",
"to",
".",
"getParam",
"(",
"param",
".",
"name",
")",
"]",
"=",
"self",
".",
"_defaultParamMap",
"[",
"param",
"]",
"# copy explicitly set params",
"if",
"param",
"in",
"paramMap",
"and",
"to",
".",
"hasParam",
"(",
"param",
".",
"name",
")",
":",
"to",
".",
"_set",
"(",
"*",
"*",
"{",
"param",
".",
"name",
":",
"paramMap",
"[",
"param",
"]",
"}",
")",
"return",
"to"
] |
Copies param values from this instance to another instance for
params shared by them.
:param to: the target instance
:param extra: extra params to be copied
:return: the target instance with param values copied
|
[
"Copies",
"param",
"values",
"from",
"this",
"instance",
"to",
"another",
"instance",
"for",
"params",
"shared",
"by",
"them",
"."
] |
618d6bff71073c8c93501ab7392c3cc579730f0b
|
https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/ml/param/__init__.py#L467-L486
|
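Illustrative usage sketch (editor's addition, not part of the dataset rows): _set, _setDefault and _copyValues above are the plumbing behind Params.copy(); defaults and explicitly set values are carried over separately. A self-contained demo with a tiny hypothetical Params subclass (no JVM required):

from pyspark.ml.param import Param, Params, TypeConverters

class HasScale(Params):                      # hypothetical helper, mirrors the shared-param mixins
    scale = Param(Params._dummy(), "scale", "multiplicative scale factor",
                  typeConverter=TypeConverters.toFloat)

    def __init__(self):
        super(HasScale, self).__init__()
        self._setDefault(scale=1.0)          # stored in _defaultParamMap

src, dst = HasScale(), HasScale()
src._set(scale=2.5)                          # explicitly set value, stored in _paramMap
src._copyValues(dst)                         # copies defaults and explicit values for shared params
print(dst.getOrDefault(dst.scale))           # 2.5 (the explicit value was copied)
print(dst._defaultParamMap[dst.scale])       # 1.0 (the default was carried over too)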
19,346
|
apache/spark
|
python/pyspark/ml/common.py
|
_to_java_object_rdd
|
def _to_java_object_rdd(rdd):
""" Return an JavaRDD of Object by unpickling
    It will convert each Python object into a Java object by Pyrolite, whether the
    RDD is serialized in batch or not.
"""
rdd = rdd._reserialize(AutoBatchedSerializer(PickleSerializer()))
return rdd.ctx._jvm.org.apache.spark.ml.python.MLSerDe.pythonToJava(rdd._jrdd, True)
|
python
|
def _to_java_object_rdd(rdd):
""" Return an JavaRDD of Object by unpickling
    It will convert each Python object into a Java object by Pyrolite, whether the
    RDD is serialized in batch or not.
"""
rdd = rdd._reserialize(AutoBatchedSerializer(PickleSerializer()))
return rdd.ctx._jvm.org.apache.spark.ml.python.MLSerDe.pythonToJava(rdd._jrdd, True)
|
[
"def",
"_to_java_object_rdd",
"(",
"rdd",
")",
":",
"rdd",
"=",
"rdd",
".",
"_reserialize",
"(",
"AutoBatchedSerializer",
"(",
"PickleSerializer",
"(",
")",
")",
")",
"return",
"rdd",
".",
"ctx",
".",
"_jvm",
".",
"org",
".",
"apache",
".",
"spark",
".",
"ml",
".",
"python",
".",
"MLSerDe",
".",
"pythonToJava",
"(",
"rdd",
".",
"_jrdd",
",",
"True",
")"
] |
Return an JavaRDD of Object by unpickling
    It will convert each Python object into a Java object by Pyrolite, whether the
    RDD is serialized in batch or not.
|
[
"Return",
"an",
"JavaRDD",
"of",
"Object",
"by",
"unpickling"
] |
618d6bff71073c8c93501ab7392c3cc579730f0b
|
https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/ml/common.py#L60-L67
|
19,347
|
apache/spark
|
python/pyspark/broadcast.py
|
Broadcast.value
|
def value(self):
""" Return the broadcasted value
"""
if not hasattr(self, "_value") and self._path is not None:
# we only need to decrypt it here when encryption is enabled and
# if its on the driver, since executor decryption is handled already
if self._sc is not None and self._sc._encryption_enabled:
port, auth_secret = self._python_broadcast.setupDecryptionServer()
(decrypted_sock_file, _) = local_connect_and_auth(port, auth_secret)
self._python_broadcast.waitTillBroadcastDataSent()
return self.load(decrypted_sock_file)
else:
self._value = self.load_from_path(self._path)
return self._value
|
python
|
def value(self):
""" Return the broadcasted value
"""
if not hasattr(self, "_value") and self._path is not None:
# we only need to decrypt it here when encryption is enabled and
# if its on the driver, since executor decryption is handled already
if self._sc is not None and self._sc._encryption_enabled:
port, auth_secret = self._python_broadcast.setupDecryptionServer()
(decrypted_sock_file, _) = local_connect_and_auth(port, auth_secret)
self._python_broadcast.waitTillBroadcastDataSent()
return self.load(decrypted_sock_file)
else:
self._value = self.load_from_path(self._path)
return self._value
|
[
"def",
"value",
"(",
"self",
")",
":",
"if",
"not",
"hasattr",
"(",
"self",
",",
"\"_value\"",
")",
"and",
"self",
".",
"_path",
"is",
"not",
"None",
":",
"# we only need to decrypt it here when encryption is enabled and",
"# if its on the driver, since executor decryption is handled already",
"if",
"self",
".",
"_sc",
"is",
"not",
"None",
"and",
"self",
".",
"_sc",
".",
"_encryption_enabled",
":",
"port",
",",
"auth_secret",
"=",
"self",
".",
"_python_broadcast",
".",
"setupDecryptionServer",
"(",
")",
"(",
"decrypted_sock_file",
",",
"_",
")",
"=",
"local_connect_and_auth",
"(",
"port",
",",
"auth_secret",
")",
"self",
".",
"_python_broadcast",
".",
"waitTillBroadcastDataSent",
"(",
")",
"return",
"self",
".",
"load",
"(",
"decrypted_sock_file",
")",
"else",
":",
"self",
".",
"_value",
"=",
"self",
".",
"load_from_path",
"(",
"self",
".",
"_path",
")",
"return",
"self",
".",
"_value"
] |
Return the broadcasted value
|
[
"Return",
"the",
"broadcasted",
"value"
] |
618d6bff71073c8c93501ab7392c3cc579730f0b
|
https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/broadcast.py#L135-L148
|
19,348
|
apache/spark
|
python/pyspark/broadcast.py
|
Broadcast.unpersist
|
def unpersist(self, blocking=False):
"""
Delete cached copies of this broadcast on the executors. If the
broadcast is used after this is called, it will need to be
re-sent to each executor.
:param blocking: Whether to block until unpersisting has completed
"""
if self._jbroadcast is None:
raise Exception("Broadcast can only be unpersisted in driver")
self._jbroadcast.unpersist(blocking)
|
python
|
def unpersist(self, blocking=False):
"""
Delete cached copies of this broadcast on the executors. If the
broadcast is used after this is called, it will need to be
re-sent to each executor.
:param blocking: Whether to block until unpersisting has completed
"""
if self._jbroadcast is None:
raise Exception("Broadcast can only be unpersisted in driver")
self._jbroadcast.unpersist(blocking)
|
[
"def",
"unpersist",
"(",
"self",
",",
"blocking",
"=",
"False",
")",
":",
"if",
"self",
".",
"_jbroadcast",
"is",
"None",
":",
"raise",
"Exception",
"(",
"\"Broadcast can only be unpersisted in driver\"",
")",
"self",
".",
"_jbroadcast",
".",
"unpersist",
"(",
"blocking",
")"
] |
Delete cached copies of this broadcast on the executors. If the
broadcast is used after this is called, it will need to be
re-sent to each executor.
:param blocking: Whether to block until unpersisting has completed
|
[
"Delete",
"cached",
"copies",
"of",
"this",
"broadcast",
"on",
"the",
"executors",
".",
"If",
"the",
"broadcast",
"is",
"used",
"after",
"this",
"is",
"called",
"it",
"will",
"need",
"to",
"be",
"re",
"-",
"sent",
"to",
"each",
"executor",
"."
] |
618d6bff71073c8c93501ab7392c3cc579730f0b
|
https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/broadcast.py#L150-L160
|
19,349
|
apache/spark
|
python/pyspark/broadcast.py
|
Broadcast.destroy
|
def destroy(self, blocking=False):
"""
Destroy all data and metadata related to this broadcast variable.
Use this with caution; once a broadcast variable has been destroyed,
it cannot be used again.
.. versionchanged:: 3.0.0
Added optional argument `blocking` to specify whether to block until all
blocks are deleted.
"""
if self._jbroadcast is None:
raise Exception("Broadcast can only be destroyed in driver")
self._jbroadcast.destroy(blocking)
os.unlink(self._path)
|
python
|
def destroy(self, blocking=False):
"""
Destroy all data and metadata related to this broadcast variable.
Use this with caution; once a broadcast variable has been destroyed,
it cannot be used again.
.. versionchanged:: 3.0.0
Added optional argument `blocking` to specify whether to block until all
blocks are deleted.
"""
if self._jbroadcast is None:
raise Exception("Broadcast can only be destroyed in driver")
self._jbroadcast.destroy(blocking)
os.unlink(self._path)
|
[
"def",
"destroy",
"(",
"self",
",",
"blocking",
"=",
"False",
")",
":",
"if",
"self",
".",
"_jbroadcast",
"is",
"None",
":",
"raise",
"Exception",
"(",
"\"Broadcast can only be destroyed in driver\"",
")",
"self",
".",
"_jbroadcast",
".",
"destroy",
"(",
"blocking",
")",
"os",
".",
"unlink",
"(",
"self",
".",
"_path",
")"
] |
Destroy all data and metadata related to this broadcast variable.
Use this with caution; once a broadcast variable has been destroyed,
it cannot be used again.
.. versionchanged:: 3.0.0
Added optional argument `blocking` to specify whether to block until all
blocks are deleted.
|
[
"Destroy",
"all",
"data",
"and",
"metadata",
"related",
"to",
"this",
"broadcast",
"variable",
".",
"Use",
"this",
"with",
"caution",
";",
"once",
"a",
"broadcast",
"variable",
"has",
"been",
"destroyed",
"it",
"cannot",
"be",
"used",
"again",
"."
] |
618d6bff71073c8c93501ab7392c3cc579730f0b
|
https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/broadcast.py#L162-L175
|
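Illustrative usage sketch (editor's addition, not part of the dataset rows): value, unpersist and destroy above form the broadcast lifecycle. Assuming a local Spark installation:

from pyspark import SparkContext

sc = SparkContext("local[1]", "broadcast-demo")

lookup = sc.broadcast({"a": 1, "b": 2})              # ship a read-only dict to executors
total = sc.parallelize(["a", "b", "a"]).map(lambda k: lookup.value[k]).sum()
print(total)                                         # 4

lookup.unpersist()    # drop executor copies; the data is re-sent automatically if used again
lookup.destroy()      # release everything; the broadcast variable is unusable afterwards
sc.stop()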
19,350
|
apache/spark
|
python/pyspark/sql/udf.py
|
UserDefinedFunction._wrapped
|
def _wrapped(self):
"""
Wrap this udf with a function and attach docstring from func
"""
# It is possible for a callable instance without __name__ attribute or/and
# __module__ attribute to be wrapped here. For example, functools.partial. In this case,
# we should avoid wrapping the attributes from the wrapped function to the wrapper
# function. So, we take out these attribute names from the default names to set and
# then manually assign it after being wrapped.
assignments = tuple(
a for a in functools.WRAPPER_ASSIGNMENTS if a != '__name__' and a != '__module__')
@functools.wraps(self.func, assigned=assignments)
def wrapper(*args):
return self(*args)
wrapper.__name__ = self._name
wrapper.__module__ = (self.func.__module__ if hasattr(self.func, '__module__')
else self.func.__class__.__module__)
wrapper.func = self.func
wrapper.returnType = self.returnType
wrapper.evalType = self.evalType
wrapper.deterministic = self.deterministic
wrapper.asNondeterministic = functools.wraps(
self.asNondeterministic)(lambda: self.asNondeterministic()._wrapped())
return wrapper
|
python
|
def _wrapped(self):
"""
Wrap this udf with a function and attach docstring from func
"""
# It is possible for a callable instance without __name__ attribute or/and
# __module__ attribute to be wrapped here. For example, functools.partial. In this case,
# we should avoid wrapping the attributes from the wrapped function to the wrapper
# function. So, we take out these attribute names from the default names to set and
# then manually assign it after being wrapped.
assignments = tuple(
a for a in functools.WRAPPER_ASSIGNMENTS if a != '__name__' and a != '__module__')
@functools.wraps(self.func, assigned=assignments)
def wrapper(*args):
return self(*args)
wrapper.__name__ = self._name
wrapper.__module__ = (self.func.__module__ if hasattr(self.func, '__module__')
else self.func.__class__.__module__)
wrapper.func = self.func
wrapper.returnType = self.returnType
wrapper.evalType = self.evalType
wrapper.deterministic = self.deterministic
wrapper.asNondeterministic = functools.wraps(
self.asNondeterministic)(lambda: self.asNondeterministic()._wrapped())
return wrapper
|
[
"def",
"_wrapped",
"(",
"self",
")",
":",
"# It is possible for a callable instance without __name__ attribute or/and",
"# __module__ attribute to be wrapped here. For example, functools.partial. In this case,",
"# we should avoid wrapping the attributes from the wrapped function to the wrapper",
"# function. So, we take out these attribute names from the default names to set and",
"# then manually assign it after being wrapped.",
"assignments",
"=",
"tuple",
"(",
"a",
"for",
"a",
"in",
"functools",
".",
"WRAPPER_ASSIGNMENTS",
"if",
"a",
"!=",
"'__name__'",
"and",
"a",
"!=",
"'__module__'",
")",
"@",
"functools",
".",
"wraps",
"(",
"self",
".",
"func",
",",
"assigned",
"=",
"assignments",
")",
"def",
"wrapper",
"(",
"*",
"args",
")",
":",
"return",
"self",
"(",
"*",
"args",
")",
"wrapper",
".",
"__name__",
"=",
"self",
".",
"_name",
"wrapper",
".",
"__module__",
"=",
"(",
"self",
".",
"func",
".",
"__module__",
"if",
"hasattr",
"(",
"self",
".",
"func",
",",
"'__module__'",
")",
"else",
"self",
".",
"func",
".",
"__class__",
".",
"__module__",
")",
"wrapper",
".",
"func",
"=",
"self",
".",
"func",
"wrapper",
".",
"returnType",
"=",
"self",
".",
"returnType",
"wrapper",
".",
"evalType",
"=",
"self",
".",
"evalType",
"wrapper",
".",
"deterministic",
"=",
"self",
".",
"deterministic",
"wrapper",
".",
"asNondeterministic",
"=",
"functools",
".",
"wraps",
"(",
"self",
".",
"asNondeterministic",
")",
"(",
"lambda",
":",
"self",
".",
"asNondeterministic",
"(",
")",
".",
"_wrapped",
"(",
")",
")",
"return",
"wrapper"
] |
Wrap this udf with a function and attach docstring from func
|
[
"Wrap",
"this",
"udf",
"with",
"a",
"function",
"and",
"attach",
"docstring",
"from",
"func"
] |
618d6bff71073c8c93501ab7392c3cc579730f0b
|
https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/sql/udf.py#L177-L204
|
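Illustrative usage sketch (editor's addition, not part of the dataset rows): _wrapped above is why a Python UDF still looks like the original function — the docstring, name and the extra attributes set on the wrapper survive. Assuming a local Spark installation:

from pyspark.sql import SparkSession
from pyspark.sql.functions import udf
from pyspark.sql.types import IntegerType

spark = SparkSession.builder.master("local[1]").getOrCreate()

@udf(returnType=IntegerType())
def str_len(s):
    """Length of a string column (None-safe)."""
    return len(s) if s is not None else None

print(str_len.__doc__)         # docstring carried over by functools.wraps
print(str_len.returnType)      # IntegerType, attached by _wrapped
spark.createDataFrame([("abc",), ("de",)], ["s"]).select(str_len("s")).show()
spark.stop()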
19,351
|
apache/spark
|
python/pyspark/sql/udf.py
|
UDFRegistration.registerJavaFunction
|
def registerJavaFunction(self, name, javaClassName, returnType=None):
"""Register a Java user-defined function as a SQL function.
In addition to a name and the function itself, the return type can be optionally specified.
        When the return type is not specified, it will be inferred via reflection.
:param name: name of the user-defined function
:param javaClassName: fully qualified name of java class
:param returnType: the return type of the registered Java function. The value can be either
a :class:`pyspark.sql.types.DataType` object or a DDL-formatted type string.
>>> from pyspark.sql.types import IntegerType
>>> spark.udf.registerJavaFunction(
... "javaStringLength", "test.org.apache.spark.sql.JavaStringLength", IntegerType())
>>> spark.sql("SELECT javaStringLength('test')").collect()
[Row(UDF:javaStringLength(test)=4)]
>>> spark.udf.registerJavaFunction(
... "javaStringLength2", "test.org.apache.spark.sql.JavaStringLength")
>>> spark.sql("SELECT javaStringLength2('test')").collect()
[Row(UDF:javaStringLength2(test)=4)]
>>> spark.udf.registerJavaFunction(
... "javaStringLength3", "test.org.apache.spark.sql.JavaStringLength", "integer")
>>> spark.sql("SELECT javaStringLength3('test')").collect()
[Row(UDF:javaStringLength3(test)=4)]
"""
jdt = None
if returnType is not None:
if not isinstance(returnType, DataType):
returnType = _parse_datatype_string(returnType)
jdt = self.sparkSession._jsparkSession.parseDataType(returnType.json())
self.sparkSession._jsparkSession.udf().registerJava(name, javaClassName, jdt)
|
python
|
def registerJavaFunction(self, name, javaClassName, returnType=None):
"""Register a Java user-defined function as a SQL function.
In addition to a name and the function itself, the return type can be optionally specified.
        When the return type is not specified, it will be inferred via reflection.
:param name: name of the user-defined function
:param javaClassName: fully qualified name of java class
:param returnType: the return type of the registered Java function. The value can be either
a :class:`pyspark.sql.types.DataType` object or a DDL-formatted type string.
>>> from pyspark.sql.types import IntegerType
>>> spark.udf.registerJavaFunction(
... "javaStringLength", "test.org.apache.spark.sql.JavaStringLength", IntegerType())
>>> spark.sql("SELECT javaStringLength('test')").collect()
[Row(UDF:javaStringLength(test)=4)]
>>> spark.udf.registerJavaFunction(
... "javaStringLength2", "test.org.apache.spark.sql.JavaStringLength")
>>> spark.sql("SELECT javaStringLength2('test')").collect()
[Row(UDF:javaStringLength2(test)=4)]
>>> spark.udf.registerJavaFunction(
... "javaStringLength3", "test.org.apache.spark.sql.JavaStringLength", "integer")
>>> spark.sql("SELECT javaStringLength3('test')").collect()
[Row(UDF:javaStringLength3(test)=4)]
"""
jdt = None
if returnType is not None:
if not isinstance(returnType, DataType):
returnType = _parse_datatype_string(returnType)
jdt = self.sparkSession._jsparkSession.parseDataType(returnType.json())
self.sparkSession._jsparkSession.udf().registerJava(name, javaClassName, jdt)
|
[
"def",
"registerJavaFunction",
"(",
"self",
",",
"name",
",",
"javaClassName",
",",
"returnType",
"=",
"None",
")",
":",
"jdt",
"=",
"None",
"if",
"returnType",
"is",
"not",
"None",
":",
"if",
"not",
"isinstance",
"(",
"returnType",
",",
"DataType",
")",
":",
"returnType",
"=",
"_parse_datatype_string",
"(",
"returnType",
")",
"jdt",
"=",
"self",
".",
"sparkSession",
".",
"_jsparkSession",
".",
"parseDataType",
"(",
"returnType",
".",
"json",
"(",
")",
")",
"self",
".",
"sparkSession",
".",
"_jsparkSession",
".",
"udf",
"(",
")",
".",
"registerJava",
"(",
"name",
",",
"javaClassName",
",",
"jdt",
")"
] |
Register a Java user-defined function as a SQL function.
In addition to a name and the function itself, the return type can be optionally specified.
        When the return type is not specified, it will be inferred via reflection.
:param name: name of the user-defined function
:param javaClassName: fully qualified name of java class
:param returnType: the return type of the registered Java function. The value can be either
a :class:`pyspark.sql.types.DataType` object or a DDL-formatted type string.
>>> from pyspark.sql.types import IntegerType
>>> spark.udf.registerJavaFunction(
... "javaStringLength", "test.org.apache.spark.sql.JavaStringLength", IntegerType())
>>> spark.sql("SELECT javaStringLength('test')").collect()
[Row(UDF:javaStringLength(test)=4)]
>>> spark.udf.registerJavaFunction(
... "javaStringLength2", "test.org.apache.spark.sql.JavaStringLength")
>>> spark.sql("SELECT javaStringLength2('test')").collect()
[Row(UDF:javaStringLength2(test)=4)]
>>> spark.udf.registerJavaFunction(
... "javaStringLength3", "test.org.apache.spark.sql.JavaStringLength", "integer")
>>> spark.sql("SELECT javaStringLength3('test')").collect()
[Row(UDF:javaStringLength3(test)=4)]
|
[
"Register",
"a",
"Java",
"user",
"-",
"defined",
"function",
"as",
"a",
"SQL",
"function",
"."
] |
618d6bff71073c8c93501ab7392c3cc579730f0b
|
https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/sql/udf.py#L345-L378
|
19,352
|
apache/spark
|
python/pyspark/sql/udf.py
|
UDFRegistration.registerJavaUDAF
|
def registerJavaUDAF(self, name, javaClassName):
"""Register a Java user-defined aggregate function as a SQL function.
:param name: name of the user-defined aggregate function
:param javaClassName: fully qualified name of java class
>>> spark.udf.registerJavaUDAF("javaUDAF", "test.org.apache.spark.sql.MyDoubleAvg")
>>> df = spark.createDataFrame([(1, "a"),(2, "b"), (3, "a")],["id", "name"])
>>> df.createOrReplaceTempView("df")
>>> spark.sql("SELECT name, javaUDAF(id) as avg from df group by name").collect()
[Row(name=u'b', avg=102.0), Row(name=u'a', avg=102.0)]
"""
self.sparkSession._jsparkSession.udf().registerJavaUDAF(name, javaClassName)
|
python
|
def registerJavaUDAF(self, name, javaClassName):
"""Register a Java user-defined aggregate function as a SQL function.
:param name: name of the user-defined aggregate function
:param javaClassName: fully qualified name of java class
>>> spark.udf.registerJavaUDAF("javaUDAF", "test.org.apache.spark.sql.MyDoubleAvg")
>>> df = spark.createDataFrame([(1, "a"),(2, "b"), (3, "a")],["id", "name"])
>>> df.createOrReplaceTempView("df")
>>> spark.sql("SELECT name, javaUDAF(id) as avg from df group by name").collect()
[Row(name=u'b', avg=102.0), Row(name=u'a', avg=102.0)]
"""
self.sparkSession._jsparkSession.udf().registerJavaUDAF(name, javaClassName)
|
[
"def",
"registerJavaUDAF",
"(",
"self",
",",
"name",
",",
"javaClassName",
")",
":",
"self",
".",
"sparkSession",
".",
"_jsparkSession",
".",
"udf",
"(",
")",
".",
"registerJavaUDAF",
"(",
"name",
",",
"javaClassName",
")"
] |
Register a Java user-defined aggregate function as a SQL function.
:param name: name of the user-defined aggregate function
:param javaClassName: fully qualified name of java class
>>> spark.udf.registerJavaUDAF("javaUDAF", "test.org.apache.spark.sql.MyDoubleAvg")
>>> df = spark.createDataFrame([(1, "a"),(2, "b"), (3, "a")],["id", "name"])
>>> df.createOrReplaceTempView("df")
>>> spark.sql("SELECT name, javaUDAF(id) as avg from df group by name").collect()
[Row(name=u'b', avg=102.0), Row(name=u'a', avg=102.0)]
|
[
"Register",
"a",
"Java",
"user",
"-",
"defined",
"aggregate",
"function",
"as",
"a",
"SQL",
"function",
"."
] |
618d6bff71073c8c93501ab7392c3cc579730f0b
|
https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/sql/udf.py#L382-L395
|
19,353
|
apache/spark
|
python/pyspark/streaming/context.py
|
StreamingContext.getOrCreate
|
def getOrCreate(cls, checkpointPath, setupFunc):
"""
Either recreate a StreamingContext from checkpoint data or create a new StreamingContext.
If checkpoint data exists in the provided `checkpointPath`, then StreamingContext will be
recreated from the checkpoint data. If the data does not exist, then the provided setupFunc
will be used to create a new context.
@param checkpointPath: Checkpoint directory used in an earlier streaming program
@param setupFunc: Function to create a new context and setup DStreams
"""
cls._ensure_initialized()
gw = SparkContext._gateway
# Check whether valid checkpoint information exists in the given path
ssc_option = gw.jvm.StreamingContextPythonHelper().tryRecoverFromCheckpoint(checkpointPath)
if ssc_option.isEmpty():
ssc = setupFunc()
ssc.checkpoint(checkpointPath)
return ssc
jssc = gw.jvm.JavaStreamingContext(ssc_option.get())
# If there is already an active instance of Python SparkContext use it, or create a new one
if not SparkContext._active_spark_context:
jsc = jssc.sparkContext()
conf = SparkConf(_jconf=jsc.getConf())
SparkContext(conf=conf, gateway=gw, jsc=jsc)
sc = SparkContext._active_spark_context
# update ctx in serializer
cls._transformerSerializer.ctx = sc
return StreamingContext(sc, None, jssc)
|
python
|
def getOrCreate(cls, checkpointPath, setupFunc):
"""
Either recreate a StreamingContext from checkpoint data or create a new StreamingContext.
If checkpoint data exists in the provided `checkpointPath`, then StreamingContext will be
recreated from the checkpoint data. If the data does not exist, then the provided setupFunc
will be used to create a new context.
@param checkpointPath: Checkpoint directory used in an earlier streaming program
@param setupFunc: Function to create a new context and setup DStreams
"""
cls._ensure_initialized()
gw = SparkContext._gateway
# Check whether valid checkpoint information exists in the given path
ssc_option = gw.jvm.StreamingContextPythonHelper().tryRecoverFromCheckpoint(checkpointPath)
if ssc_option.isEmpty():
ssc = setupFunc()
ssc.checkpoint(checkpointPath)
return ssc
jssc = gw.jvm.JavaStreamingContext(ssc_option.get())
# If there is already an active instance of Python SparkContext use it, or create a new one
if not SparkContext._active_spark_context:
jsc = jssc.sparkContext()
conf = SparkConf(_jconf=jsc.getConf())
SparkContext(conf=conf, gateway=gw, jsc=jsc)
sc = SparkContext._active_spark_context
# update ctx in serializer
cls._transformerSerializer.ctx = sc
return StreamingContext(sc, None, jssc)
|
[
"def",
"getOrCreate",
"(",
"cls",
",",
"checkpointPath",
",",
"setupFunc",
")",
":",
"cls",
".",
"_ensure_initialized",
"(",
")",
"gw",
"=",
"SparkContext",
".",
"_gateway",
"# Check whether valid checkpoint information exists in the given path",
"ssc_option",
"=",
"gw",
".",
"jvm",
".",
"StreamingContextPythonHelper",
"(",
")",
".",
"tryRecoverFromCheckpoint",
"(",
"checkpointPath",
")",
"if",
"ssc_option",
".",
"isEmpty",
"(",
")",
":",
"ssc",
"=",
"setupFunc",
"(",
")",
"ssc",
".",
"checkpoint",
"(",
"checkpointPath",
")",
"return",
"ssc",
"jssc",
"=",
"gw",
".",
"jvm",
".",
"JavaStreamingContext",
"(",
"ssc_option",
".",
"get",
"(",
")",
")",
"# If there is already an active instance of Python SparkContext use it, or create a new one",
"if",
"not",
"SparkContext",
".",
"_active_spark_context",
":",
"jsc",
"=",
"jssc",
".",
"sparkContext",
"(",
")",
"conf",
"=",
"SparkConf",
"(",
"_jconf",
"=",
"jsc",
".",
"getConf",
"(",
")",
")",
"SparkContext",
"(",
"conf",
"=",
"conf",
",",
"gateway",
"=",
"gw",
",",
"jsc",
"=",
"jsc",
")",
"sc",
"=",
"SparkContext",
".",
"_active_spark_context",
"# update ctx in serializer",
"cls",
".",
"_transformerSerializer",
".",
"ctx",
"=",
"sc",
"return",
"StreamingContext",
"(",
"sc",
",",
"None",
",",
"jssc",
")"
] |
Either recreate a StreamingContext from checkpoint data or create a new StreamingContext.
If checkpoint data exists in the provided `checkpointPath`, then StreamingContext will be
recreated from the checkpoint data. If the data does not exist, then the provided setupFunc
will be used to create a new context.
@param checkpointPath: Checkpoint directory used in an earlier streaming program
@param setupFunc: Function to create a new context and setup DStreams
|
[
"Either",
"recreate",
"a",
"StreamingContext",
"from",
"checkpoint",
"data",
"or",
"create",
"a",
"new",
"StreamingContext",
".",
"If",
"checkpoint",
"data",
"exists",
"in",
"the",
"provided",
"checkpointPath",
"then",
"StreamingContext",
"will",
"be",
"recreated",
"from",
"the",
"checkpoint",
"data",
".",
"If",
"the",
"data",
"does",
"not",
"exist",
"then",
"the",
"provided",
"setupFunc",
"will",
"be",
"used",
"to",
"create",
"a",
"new",
"context",
"."
] |
618d6bff71073c8c93501ab7392c3cc579730f0b
|
https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/streaming/context.py#L88-L120
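A minimal driver sketch for the checkpoint-recovery pattern documented above; the checkpoint and input directories are hypothetical, and the word-count pipeline inside create_context is only illustrative.

from pyspark import SparkContext
from pyspark.streaming import StreamingContext

CHECKPOINT_DIR = "/tmp/demo-checkpoint"   # hypothetical path
INPUT_DIR = "/tmp/demo-input"             # hypothetical path

def create_context():
    # Runs only when no valid checkpoint exists in CHECKPOINT_DIR.
    sc = SparkContext("local[2]", "getOrCreate-demo")
    ssc = StreamingContext(sc, 5)         # 5-second batches
    ssc.checkpoint(CHECKPOINT_DIR)
    ssc.textFileStream(INPUT_DIR).count().pprint()
    return ssc

# Recover from the checkpoint if one exists, otherwise build a fresh context.
ssc = StreamingContext.getOrCreate(CHECKPOINT_DIR, create_context)
ssc.start()
ssc.awaitTermination(timeout=60)          # wait at most 60 seconds
ssc.stop(stopSparkContext=True, stopGraceFully=False)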
|
19,354
|
apache/spark
|
python/pyspark/streaming/context.py
|
StreamingContext.awaitTermination
|
def awaitTermination(self, timeout=None):
"""
Wait for the execution to stop.
@param timeout: time to wait in seconds
"""
if timeout is None:
self._jssc.awaitTermination()
else:
self._jssc.awaitTerminationOrTimeout(int(timeout * 1000))
|
python
|
def awaitTermination(self, timeout=None):
"""
Wait for the execution to stop.
@param timeout: time to wait in seconds
"""
if timeout is None:
self._jssc.awaitTermination()
else:
self._jssc.awaitTerminationOrTimeout(int(timeout * 1000))
|
[
"def",
"awaitTermination",
"(",
"self",
",",
"timeout",
"=",
"None",
")",
":",
"if",
"timeout",
"is",
"None",
":",
"self",
".",
"_jssc",
".",
"awaitTermination",
"(",
")",
"else",
":",
"self",
".",
"_jssc",
".",
"awaitTerminationOrTimeout",
"(",
"int",
"(",
"timeout",
"*",
"1000",
")",
")"
] |
Wait for the execution to stop.
@param timeout: time to wait in seconds
|
[
"Wait",
"for",
"the",
"execution",
"to",
"stop",
"."
] |
618d6bff71073c8c93501ab7392c3cc579730f0b
|
https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/streaming/context.py#L182-L191
|
19,355
|
apache/spark
|
python/pyspark/streaming/context.py
|
StreamingContext.stop
|
def stop(self, stopSparkContext=True, stopGraceFully=False):
"""
Stop the execution of the streams, with option of ensuring all
received data has been processed.
@param stopSparkContext: Stop the associated SparkContext or not
@param stopGraceFully: Stop gracefully by waiting for the processing
of all received data to be completed
"""
self._jssc.stop(stopSparkContext, stopGraceFully)
StreamingContext._activeContext = None
if stopSparkContext:
self._sc.stop()
|
python
|
def stop(self, stopSparkContext=True, stopGraceFully=False):
"""
Stop the execution of the streams, with option of ensuring all
received data has been processed.
@param stopSparkContext: Stop the associated SparkContext or not
@param stopGraceFully: Stop gracefully by waiting for the processing
of all received data to be completed
"""
self._jssc.stop(stopSparkContext, stopGraceFully)
StreamingContext._activeContext = None
if stopSparkContext:
self._sc.stop()
|
[
"def",
"stop",
"(",
"self",
",",
"stopSparkContext",
"=",
"True",
",",
"stopGraceFully",
"=",
"False",
")",
":",
"self",
".",
"_jssc",
".",
"stop",
"(",
"stopSparkContext",
",",
"stopGraceFully",
")",
"StreamingContext",
".",
"_activeContext",
"=",
"None",
"if",
"stopSparkContext",
":",
"self",
".",
"_sc",
".",
"stop",
"(",
")"
] |
Stop the execution of the streams, with option of ensuring all
received data has been processed.
@param stopSparkContext: Stop the associated SparkContext or not
@param stopGraceFully: Stop gracefully by waiting for the processing
of all received data to be completed
|
[
"Stop",
"the",
"execution",
"of",
"the",
"streams",
"with",
"option",
"of",
"ensuring",
"all",
"received",
"data",
"has",
"been",
"processed",
"."
] |
618d6bff71073c8c93501ab7392c3cc579730f0b
|
https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/streaming/context.py#L203-L215
|
19,356
|
apache/spark
|
python/pyspark/streaming/context.py
|
StreamingContext.textFileStream
|
def textFileStream(self, directory):
"""
Create an input stream that monitors a Hadoop-compatible file system
for new files and reads them as text files. Files must be written to the
monitored directory by "moving" them from another location within the same
file system. File names starting with . are ignored.
The text files must be encoded as UTF-8.
"""
return DStream(self._jssc.textFileStream(directory), self, UTF8Deserializer())
|
python
|
def textFileStream(self, directory):
"""
Create an input stream that monitors a Hadoop-compatible file system
for new files and reads them as text files. Files must be written to the
monitored directory by "moving" them from another location within the same
file system. File names starting with . are ignored.
The text files must be encoded as UTF-8.
"""
return DStream(self._jssc.textFileStream(directory), self, UTF8Deserializer())
|
[
"def",
"textFileStream",
"(",
"self",
",",
"directory",
")",
":",
"return",
"DStream",
"(",
"self",
".",
"_jssc",
".",
"textFileStream",
"(",
"directory",
")",
",",
"self",
",",
"UTF8Deserializer",
"(",
")",
")"
] |
Create an input stream that monitors a Hadoop-compatible file system
for new files and reads them as text files. Files must be written to the
monitored directory by "moving" them from another location within the same
file system. File names starting with . are ignored.
The text files must be encoded as UTF-8.
|
[
"Create",
"an",
"input",
"stream",
"that",
"monitors",
"a",
"Hadoop",
"-",
"compatible",
"file",
"system",
"for",
"new",
"files",
"and",
"reads",
"them",
"as",
"text",
"files",
".",
"Files",
"must",
"be",
"wrriten",
"to",
"the",
"monitored",
"directory",
"by",
"moving",
"them",
"from",
"another",
"location",
"within",
"the",
"same",
"file",
"system",
".",
"File",
"names",
"starting",
"with",
".",
"are",
"ignored",
".",
"The",
"text",
"files",
"must",
"be",
"encoded",
"as",
"UTF",
"-",
"8",
"."
] |
618d6bff71073c8c93501ab7392c3cc579730f0b
|
https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/streaming/context.py#L255-L263
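A short usage sketch for the method above; the monitored directory is hypothetical, and new files must arrive there by an atomic move as the docstring requires.

from pyspark import SparkContext
from pyspark.streaming import StreamingContext

sc = SparkContext("local[2]", "textFileStream-demo")
ssc = StreamingContext(sc, 10)                      # 10-second batches

# Hypothetical directory; each new file is read line by line as UTF-8 text.
lines = ssc.textFileStream("/tmp/watched-dir")
counts = (lines.flatMap(lambda line: line.split())
               .map(lambda word: (word, 1))
               .reduceByKey(lambda a, b: a + b))
counts.pprint()

ssc.start()
ssc.awaitTermination(timeout=60)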
|
19,357
|
apache/spark
|
python/pyspark/streaming/context.py
|
StreamingContext.binaryRecordsStream
|
def binaryRecordsStream(self, directory, recordLength):
"""
Create an input stream that monitors a Hadoop-compatible file system
for new files and reads them as flat binary files with records of
fixed length. Files must be written to the monitored directory by "moving"
them from another location within the same file system.
File names starting with . are ignored.
@param directory: Directory to load data from
@param recordLength: Length of each record in bytes
"""
return DStream(self._jssc.binaryRecordsStream(directory, recordLength), self,
NoOpSerializer())
|
python
|
def binaryRecordsStream(self, directory, recordLength):
"""
Create an input stream that monitors a Hadoop-compatible file system
for new files and reads them as flat binary files with records of
fixed length. Files must be written to the monitored directory by "moving"
them from another location within the same file system.
File names starting with . are ignored.
@param directory: Directory to load data from
@param recordLength: Length of each record in bytes
"""
return DStream(self._jssc.binaryRecordsStream(directory, recordLength), self,
NoOpSerializer())
|
[
"def",
"binaryRecordsStream",
"(",
"self",
",",
"directory",
",",
"recordLength",
")",
":",
"return",
"DStream",
"(",
"self",
".",
"_jssc",
".",
"binaryRecordsStream",
"(",
"directory",
",",
"recordLength",
")",
",",
"self",
",",
"NoOpSerializer",
"(",
")",
")"
] |
Create an input stream that monitors a Hadoop-compatible file system
for new files and reads them as flat binary files with records of
fixed length. Files must be written to the monitored directory by "moving"
them from another location within the same file system.
File names starting with . are ignored.
@param directory: Directory to load data from
@param recordLength: Length of each record in bytes
|
[
"Create",
"an",
"input",
"stream",
"that",
"monitors",
"a",
"Hadoop",
"-",
"compatible",
"file",
"system",
"for",
"new",
"files",
"and",
"reads",
"them",
"as",
"flat",
"binary",
"files",
"with",
"records",
"of",
"fixed",
"length",
".",
"Files",
"must",
"be",
"written",
"to",
"the",
"monitored",
"directory",
"by",
"moving",
"them",
"from",
"another",
"location",
"within",
"the",
"same",
"file",
"system",
".",
"File",
"names",
"starting",
"with",
".",
"are",
"ignored",
"."
] |
618d6bff71073c8c93501ab7392c3cc579730f0b
|
https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/streaming/context.py#L265-L277
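A usage sketch assuming a hypothetical fixed-length record format of two little-endian 32-bit integers per record; each element of the resulting DStream is the raw bytes of one record.

import struct

from pyspark import SparkContext
from pyspark.streaming import StreamingContext

RECORD_LENGTH = 8    # hypothetical format: two packed little-endian int32 values

sc = SparkContext("local[2]", "binaryRecordsStream-demo")
ssc = StreamingContext(sc, 10)

records = ssc.binaryRecordsStream("/tmp/binary-dir", RECORD_LENGTH)  # hypothetical dir
records.map(lambda raw: struct.unpack("<ii", raw)).pprint()

ssc.start()
ssc.awaitTermination(timeout=60)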
|
19,358
|
apache/spark
|
python/pyspark/streaming/context.py
|
StreamingContext.queueStream
|
def queueStream(self, rdds, oneAtATime=True, default=None):
"""
Create an input stream from a queue of RDDs or list. In each batch,
it will process either one or all of the RDDs returned by the queue.
.. note:: Changes to the queue after the stream is created will not be recognized.
@param rdds: Queue of RDDs
@param oneAtATime: pick one rdd each time or pick all of them once.
@param default: The default rdd if no more in rdds
"""
if default and not isinstance(default, RDD):
default = self._sc.parallelize(default)
if not rdds and default:
rdds = [rdds]
if rdds and not isinstance(rdds[0], RDD):
rdds = [self._sc.parallelize(input) for input in rdds]
self._check_serializers(rdds)
queue = self._jvm.PythonDStream.toRDDQueue([r._jrdd for r in rdds])
if default:
default = default._reserialize(rdds[0]._jrdd_deserializer)
jdstream = self._jssc.queueStream(queue, oneAtATime, default._jrdd)
else:
jdstream = self._jssc.queueStream(queue, oneAtATime)
return DStream(jdstream, self, rdds[0]._jrdd_deserializer)
|
python
|
def queueStream(self, rdds, oneAtATime=True, default=None):
"""
Create an input stream from a queue of RDDs or list. In each batch,
it will process either one or all of the RDDs returned by the queue.
.. note:: Changes to the queue after the stream is created will not be recognized.
@param rdds: Queue of RDDs
@param oneAtATime: pick one rdd each time or pick all of them once.
@param default: The default rdd if no more in rdds
"""
if default and not isinstance(default, RDD):
default = self._sc.parallelize(default)
if not rdds and default:
rdds = [rdds]
if rdds and not isinstance(rdds[0], RDD):
rdds = [self._sc.parallelize(input) for input in rdds]
self._check_serializers(rdds)
queue = self._jvm.PythonDStream.toRDDQueue([r._jrdd for r in rdds])
if default:
default = default._reserialize(rdds[0]._jrdd_deserializer)
jdstream = self._jssc.queueStream(queue, oneAtATime, default._jrdd)
else:
jdstream = self._jssc.queueStream(queue, oneAtATime)
return DStream(jdstream, self, rdds[0]._jrdd_deserializer)
|
[
"def",
"queueStream",
"(",
"self",
",",
"rdds",
",",
"oneAtATime",
"=",
"True",
",",
"default",
"=",
"None",
")",
":",
"if",
"default",
"and",
"not",
"isinstance",
"(",
"default",
",",
"RDD",
")",
":",
"default",
"=",
"self",
".",
"_sc",
".",
"parallelize",
"(",
"default",
")",
"if",
"not",
"rdds",
"and",
"default",
":",
"rdds",
"=",
"[",
"rdds",
"]",
"if",
"rdds",
"and",
"not",
"isinstance",
"(",
"rdds",
"[",
"0",
"]",
",",
"RDD",
")",
":",
"rdds",
"=",
"[",
"self",
".",
"_sc",
".",
"parallelize",
"(",
"input",
")",
"for",
"input",
"in",
"rdds",
"]",
"self",
".",
"_check_serializers",
"(",
"rdds",
")",
"queue",
"=",
"self",
".",
"_jvm",
".",
"PythonDStream",
".",
"toRDDQueue",
"(",
"[",
"r",
".",
"_jrdd",
"for",
"r",
"in",
"rdds",
"]",
")",
"if",
"default",
":",
"default",
"=",
"default",
".",
"_reserialize",
"(",
"rdds",
"[",
"0",
"]",
".",
"_jrdd_deserializer",
")",
"jdstream",
"=",
"self",
".",
"_jssc",
".",
"queueStream",
"(",
"queue",
",",
"oneAtATime",
",",
"default",
".",
"_jrdd",
")",
"else",
":",
"jdstream",
"=",
"self",
".",
"_jssc",
".",
"queueStream",
"(",
"queue",
",",
"oneAtATime",
")",
"return",
"DStream",
"(",
"jdstream",
",",
"self",
",",
"rdds",
"[",
"0",
"]",
".",
"_jrdd_deserializer",
")"
] |
Create an input stream from a queue of RDDs or list. In each batch,
it will process either one or all of the RDDs returned by the queue.
.. note:: Changes to the queue after the stream is created will not be recognized.
@param rdds: Queue of RDDs
@param oneAtATime: pick one rdd each time or pick all of them once.
@param default: The default rdd if no more in rdds
|
[
"Create",
"an",
"input",
"stream",
"from",
"a",
"queue",
"of",
"RDDs",
"or",
"list",
".",
"In",
"each",
"batch",
"it",
"will",
"process",
"either",
"one",
"or",
"all",
"of",
"the",
"RDDs",
"returned",
"by",
"the",
"queue",
"."
] |
618d6bff71073c8c93501ab7392c3cc579730f0b
|
https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/streaming/context.py#L286-L313
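A self-contained sketch of the queue-based testing pattern this method is commonly used for; the numeric RDDs are illustrative.

from pyspark import SparkContext
from pyspark.streaming import StreamingContext

sc = SparkContext("local[2]", "queueStream-demo")
ssc = StreamingContext(sc, 1)

# Three pre-built RDDs; with oneAtATime=True each batch consumes one of them.
rdd_queue = [sc.parallelize(range(i * 10, (i + 1) * 10)) for i in range(3)]
stream = ssc.queueStream(rdd_queue, oneAtATime=True,
                         default=sc.parallelize([0]))   # emitted once the queue drains
stream.reduce(lambda a, b: a + b).pprint()

ssc.start()
ssc.awaitTermination(timeout=10)
ssc.stop(stopSparkContext=True, stopGraceFully=False)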
|
19,359
|
apache/spark
|
python/pyspark/streaming/context.py
|
StreamingContext.transform
|
def transform(self, dstreams, transformFunc):
"""
Create a new DStream in which each RDD is generated by applying
a function on RDDs of the DStreams. The order of the JavaRDDs in
the transform function parameter will be the same as the order
of corresponding DStreams in the list.
"""
jdstreams = [d._jdstream for d in dstreams]
# change the final serializer to sc.serializer
func = TransformFunction(self._sc,
lambda t, *rdds: transformFunc(rdds),
*[d._jrdd_deserializer for d in dstreams])
jfunc = self._jvm.TransformFunction(func)
jdstream = self._jssc.transform(jdstreams, jfunc)
return DStream(jdstream, self, self._sc.serializer)
|
python
|
def transform(self, dstreams, transformFunc):
"""
Create a new DStream in which each RDD is generated by applying
a function on RDDs of the DStreams. The order of the JavaRDDs in
the transform function parameter will be the same as the order
of corresponding DStreams in the list.
"""
jdstreams = [d._jdstream for d in dstreams]
# change the final serializer to sc.serializer
func = TransformFunction(self._sc,
lambda t, *rdds: transformFunc(rdds),
*[d._jrdd_deserializer for d in dstreams])
jfunc = self._jvm.TransformFunction(func)
jdstream = self._jssc.transform(jdstreams, jfunc)
return DStream(jdstream, self, self._sc.serializer)
|
[
"def",
"transform",
"(",
"self",
",",
"dstreams",
",",
"transformFunc",
")",
":",
"jdstreams",
"=",
"[",
"d",
".",
"_jdstream",
"for",
"d",
"in",
"dstreams",
"]",
"# change the final serializer to sc.serializer",
"func",
"=",
"TransformFunction",
"(",
"self",
".",
"_sc",
",",
"lambda",
"t",
",",
"*",
"rdds",
":",
"transformFunc",
"(",
"rdds",
")",
",",
"*",
"[",
"d",
".",
"_jrdd_deserializer",
"for",
"d",
"in",
"dstreams",
"]",
")",
"jfunc",
"=",
"self",
".",
"_jvm",
".",
"TransformFunction",
"(",
"func",
")",
"jdstream",
"=",
"self",
".",
"_jssc",
".",
"transform",
"(",
"jdstreams",
",",
"jfunc",
")",
"return",
"DStream",
"(",
"jdstream",
",",
"self",
",",
"self",
".",
"_sc",
".",
"serializer",
")"
] |
Create a new DStream in which each RDD is generated by applying
a function on RDDs of the DStreams. The order of the JavaRDDs in
the transform function parameter will be the same as the order
of corresponding DStreams in the list.
|
[
"Create",
"a",
"new",
"DStream",
"in",
"which",
"each",
"RDD",
"is",
"generated",
"by",
"applying",
"a",
"function",
"on",
"RDDs",
"of",
"the",
"DStreams",
".",
"The",
"order",
"of",
"the",
"JavaRDDs",
"in",
"the",
"transform",
"function",
"parameter",
"will",
"be",
"the",
"same",
"as",
"the",
"order",
"of",
"corresponding",
"DStreams",
"in",
"the",
"list",
"."
] |
618d6bff71073c8c93501ab7392c3cc579730f0b
|
https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/streaming/context.py#L315-L329
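A sketch joining two test streams with the multi-DStream transform above; queueStream is used only to have something deterministic to join, and the key-value data is made up.

from pyspark import SparkContext
from pyspark.streaming import StreamingContext

sc = SparkContext("local[2]", "transform-demo")
ssc = StreamingContext(sc, 1)

prices = ssc.queueStream([sc.parallelize([("a", 1), ("b", 2)])])
names = ssc.queueStream([sc.parallelize([("a", "apple"), ("b", "banana")])])

# The function receives one RDD per input DStream, in the same order as the list.
joined = ssc.transform([prices, names],
                       lambda rdds: rdds[0].join(rdds[1]))
joined.pprint()     # e.g. ('a', (1, 'apple')), ('b', (2, 'banana'))

ssc.start()
ssc.awaitTermination(timeout=10)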
|
19,360
|
apache/spark
|
python/pyspark/streaming/context.py
|
StreamingContext.union
|
def union(self, *dstreams):
"""
Create a unified DStream from multiple DStreams of the same
type and same slide duration.
"""
if not dstreams:
raise ValueError("should have at least one DStream to union")
if len(dstreams) == 1:
return dstreams[0]
if len(set(s._jrdd_deserializer for s in dstreams)) > 1:
raise ValueError("All DStreams should have same serializer")
if len(set(s._slideDuration for s in dstreams)) > 1:
raise ValueError("All DStreams should have same slide duration")
cls = SparkContext._jvm.org.apache.spark.streaming.api.java.JavaDStream
jdstreams = SparkContext._gateway.new_array(cls, len(dstreams))
for i in range(0, len(dstreams)):
jdstreams[i] = dstreams[i]._jdstream
return DStream(self._jssc.union(jdstreams), self, dstreams[0]._jrdd_deserializer)
|
python
|
def union(self, *dstreams):
"""
Create a unified DStream from multiple DStreams of the same
type and same slide duration.
"""
if not dstreams:
raise ValueError("should have at least one DStream to union")
if len(dstreams) == 1:
return dstreams[0]
if len(set(s._jrdd_deserializer for s in dstreams)) > 1:
raise ValueError("All DStreams should have same serializer")
if len(set(s._slideDuration for s in dstreams)) > 1:
raise ValueError("All DStreams should have same slide duration")
cls = SparkContext._jvm.org.apache.spark.streaming.api.java.JavaDStream
jdstreams = SparkContext._gateway.new_array(cls, len(dstreams))
for i in range(0, len(dstreams)):
jdstreams[i] = dstreams[i]._jdstream
return DStream(self._jssc.union(jdstreams), self, dstreams[0]._jrdd_deserializer)
|
[
"def",
"union",
"(",
"self",
",",
"*",
"dstreams",
")",
":",
"if",
"not",
"dstreams",
":",
"raise",
"ValueError",
"(",
"\"should have at least one DStream to union\"",
")",
"if",
"len",
"(",
"dstreams",
")",
"==",
"1",
":",
"return",
"dstreams",
"[",
"0",
"]",
"if",
"len",
"(",
"set",
"(",
"s",
".",
"_jrdd_deserializer",
"for",
"s",
"in",
"dstreams",
")",
")",
">",
"1",
":",
"raise",
"ValueError",
"(",
"\"All DStreams should have same serializer\"",
")",
"if",
"len",
"(",
"set",
"(",
"s",
".",
"_slideDuration",
"for",
"s",
"in",
"dstreams",
")",
")",
">",
"1",
":",
"raise",
"ValueError",
"(",
"\"All DStreams should have same slide duration\"",
")",
"cls",
"=",
"SparkContext",
".",
"_jvm",
".",
"org",
".",
"apache",
".",
"spark",
".",
"streaming",
".",
"api",
".",
"java",
".",
"JavaDStream",
"jdstreams",
"=",
"SparkContext",
".",
"_gateway",
".",
"new_array",
"(",
"cls",
",",
"len",
"(",
"dstreams",
")",
")",
"for",
"i",
"in",
"range",
"(",
"0",
",",
"len",
"(",
"dstreams",
")",
")",
":",
"jdstreams",
"[",
"i",
"]",
"=",
"dstreams",
"[",
"i",
"]",
".",
"_jdstream",
"return",
"DStream",
"(",
"self",
".",
"_jssc",
".",
"union",
"(",
"jdstreams",
")",
",",
"self",
",",
"dstreams",
"[",
"0",
"]",
".",
"_jrdd_deserializer",
")"
] |
Create a unified DStream from multiple DStreams of the same
type and same slide duration.
|
[
"Create",
"a",
"unified",
"DStream",
"from",
"multiple",
"DStreams",
"of",
"the",
"same",
"type",
"and",
"same",
"slide",
"duration",
"."
] |
618d6bff71073c8c93501ab7392c3cc579730f0b
|
https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/streaming/context.py#L331-L348
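A short sketch merging two deterministic test streams with the method above; both inputs come from queueStream, so they share a serializer and slide duration and satisfy the checks in the code.

from pyspark import SparkContext
from pyspark.streaming import StreamingContext

sc = SparkContext("local[2]", "union-demo")
ssc = StreamingContext(sc, 1)

evens = ssc.queueStream([sc.parallelize([0, 2, 4])])
odds = ssc.queueStream([sc.parallelize([1, 3, 5])])

ssc.union(evens, odds).pprint()

ssc.start()
ssc.awaitTermination(timeout=10)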
|
19,361
|
huggingface/pytorch-pretrained-BERT
|
pytorch_pretrained_bert/modeling_gpt2.py
|
GPT2Config.from_json_file
|
def from_json_file(cls, json_file):
"""Constructs a `GPT2Config` from a json file of parameters."""
with open(json_file, "r", encoding="utf-8") as reader:
text = reader.read()
return cls.from_dict(json.loads(text))
|
python
|
def from_json_file(cls, json_file):
"""Constructs a `GPT2Config` from a json file of parameters."""
with open(json_file, "r", encoding="utf-8") as reader:
text = reader.read()
return cls.from_dict(json.loads(text))
|
[
"def",
"from_json_file",
"(",
"cls",
",",
"json_file",
")",
":",
"with",
"open",
"(",
"json_file",
",",
"\"r\"",
",",
"encoding",
"=",
"\"utf-8\"",
")",
"as",
"reader",
":",
"text",
"=",
"reader",
".",
"read",
"(",
")",
"return",
"cls",
".",
"from_dict",
"(",
"json",
".",
"loads",
"(",
"text",
")",
")"
] |
Constructs a `GPT2Config` from a json file of parameters.
|
[
"Constructs",
"a",
"GPT2Config",
"from",
"a",
"json",
"file",
"of",
"parameters",
"."
] |
b832d5bb8a6dfc5965015b828e577677eace601e
|
https://github.com/huggingface/pytorch-pretrained-BERT/blob/b832d5bb8a6dfc5965015b828e577677eace601e/pytorch_pretrained_bert/modeling_gpt2.py#L162-L166
|
19,362
|
huggingface/pytorch-pretrained-BERT
|
pytorch_pretrained_bert/modeling_gpt2.py
|
GPT2Config.to_json_file
|
def to_json_file(self, json_file_path):
""" Save this instance to a json file."""
with open(json_file_path, "w", encoding='utf-8') as writer:
writer.write(self.to_json_string())
|
python
|
def to_json_file(self, json_file_path):
""" Save this instance to a json file."""
with open(json_file_path, "w", encoding='utf-8') as writer:
writer.write(self.to_json_string())
|
[
"def",
"to_json_file",
"(",
"self",
",",
"json_file_path",
")",
":",
"with",
"open",
"(",
"json_file_path",
",",
"\"w\"",
",",
"encoding",
"=",
"'utf-8'",
")",
"as",
"writer",
":",
"writer",
".",
"write",
"(",
"self",
".",
"to_json_string",
"(",
")",
")"
] |
Save this instance to a json file.
|
[
"Save",
"this",
"instance",
"to",
"a",
"json",
"file",
"."
] |
b832d5bb8a6dfc5965015b828e577677eace601e
|
https://github.com/huggingface/pytorch-pretrained-BERT/blob/b832d5bb8a6dfc5965015b828e577677eace601e/pytorch_pretrained_bert/modeling_gpt2.py#L180-L183
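A round-trip sketch for to_json_file together with the from_json_file classmethod shown earlier; the output filename is hypothetical.

from pytorch_pretrained_bert.modeling_gpt2 import GPT2Config

# Build a config with the default GPT-2 hyper-parameters, persist it, reload it.
config = GPT2Config()
config.to_json_file("gpt2_config.json")            # hypothetical output path

restored = GPT2Config.from_json_file("gpt2_config.json")
assert restored.to_json_string() == config.to_json_string()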
|
19,363
|
huggingface/pytorch-pretrained-BERT
|
examples/extract_features.py
|
convert_examples_to_features
|
def convert_examples_to_features(examples, seq_length, tokenizer):
"""Loads a data file into a list of `InputFeature`s."""
features = []
for (ex_index, example) in enumerate(examples):
tokens_a = tokenizer.tokenize(example.text_a)
tokens_b = None
if example.text_b:
tokens_b = tokenizer.tokenize(example.text_b)
if tokens_b:
# Modifies `tokens_a` and `tokens_b` in place so that the total
# length is less than the specified length.
# Account for [CLS], [SEP], [SEP] with "- 3"
_truncate_seq_pair(tokens_a, tokens_b, seq_length - 3)
else:
# Account for [CLS] and [SEP] with "- 2"
if len(tokens_a) > seq_length - 2:
tokens_a = tokens_a[0:(seq_length - 2)]
# The convention in BERT is:
# (a) For sequence pairs:
# tokens: [CLS] is this jack ##son ##ville ? [SEP] no it is not . [SEP]
# type_ids: 0 0 0 0 0 0 0 0 1 1 1 1 1 1
# (b) For single sequences:
# tokens: [CLS] the dog is hairy . [SEP]
# type_ids: 0 0 0 0 0 0 0
#
# Where "type_ids" are used to indicate whether this is the first
# sequence or the second sequence. The embedding vectors for `type=0` and
# `type=1` were learned during pre-training and are added to the wordpiece
# embedding vector (and position vector). This is not *strictly* necessary
# since the [SEP] token unambiguously separates the sequences, but it makes
# it easier for the model to learn the concept of sequences.
#
# For classification tasks, the first vector (corresponding to [CLS]) is
# used as the "sentence vector". Note that this only makes sense because
# the entire model is fine-tuned.
tokens = []
input_type_ids = []
tokens.append("[CLS]")
input_type_ids.append(0)
for token in tokens_a:
tokens.append(token)
input_type_ids.append(0)
tokens.append("[SEP]")
input_type_ids.append(0)
if tokens_b:
for token in tokens_b:
tokens.append(token)
input_type_ids.append(1)
tokens.append("[SEP]")
input_type_ids.append(1)
input_ids = tokenizer.convert_tokens_to_ids(tokens)
# The mask has 1 for real tokens and 0 for padding tokens. Only real
# tokens are attended to.
input_mask = [1] * len(input_ids)
# Zero-pad up to the sequence length.
while len(input_ids) < seq_length:
input_ids.append(0)
input_mask.append(0)
input_type_ids.append(0)
assert len(input_ids) == seq_length
assert len(input_mask) == seq_length
assert len(input_type_ids) == seq_length
if ex_index < 5:
logger.info("*** Example ***")
logger.info("unique_id: %s" % (example.unique_id))
logger.info("tokens: %s" % " ".join([str(x) for x in tokens]))
logger.info("input_ids: %s" % " ".join([str(x) for x in input_ids]))
logger.info("input_mask: %s" % " ".join([str(x) for x in input_mask]))
logger.info(
"input_type_ids: %s" % " ".join([str(x) for x in input_type_ids]))
features.append(
InputFeatures(
unique_id=example.unique_id,
tokens=tokens,
input_ids=input_ids,
input_mask=input_mask,
input_type_ids=input_type_ids))
return features
|
python
|
def convert_examples_to_features(examples, seq_length, tokenizer):
"""Loads a data file into a list of `InputFeature`s."""
features = []
for (ex_index, example) in enumerate(examples):
tokens_a = tokenizer.tokenize(example.text_a)
tokens_b = None
if example.text_b:
tokens_b = tokenizer.tokenize(example.text_b)
if tokens_b:
# Modifies `tokens_a` and `tokens_b` in place so that the total
# length is less than the specified length.
# Account for [CLS], [SEP], [SEP] with "- 3"
_truncate_seq_pair(tokens_a, tokens_b, seq_length - 3)
else:
# Account for [CLS] and [SEP] with "- 2"
if len(tokens_a) > seq_length - 2:
tokens_a = tokens_a[0:(seq_length - 2)]
# The convention in BERT is:
# (a) For sequence pairs:
# tokens: [CLS] is this jack ##son ##ville ? [SEP] no it is not . [SEP]
# type_ids: 0 0 0 0 0 0 0 0 1 1 1 1 1 1
# (b) For single sequences:
# tokens: [CLS] the dog is hairy . [SEP]
# type_ids: 0 0 0 0 0 0 0
#
# Where "type_ids" are used to indicate whether this is the first
# sequence or the second sequence. The embedding vectors for `type=0` and
# `type=1` were learned during pre-training and are added to the wordpiece
# embedding vector (and position vector). This is not *strictly* necessary
# since the [SEP] token unambiguously separates the sequences, but it makes
# it easier for the model to learn the concept of sequences.
#
# For classification tasks, the first vector (corresponding to [CLS]) is
# used as the "sentence vector". Note that this only makes sense because
# the entire model is fine-tuned.
tokens = []
input_type_ids = []
tokens.append("[CLS]")
input_type_ids.append(0)
for token in tokens_a:
tokens.append(token)
input_type_ids.append(0)
tokens.append("[SEP]")
input_type_ids.append(0)
if tokens_b:
for token in tokens_b:
tokens.append(token)
input_type_ids.append(1)
tokens.append("[SEP]")
input_type_ids.append(1)
input_ids = tokenizer.convert_tokens_to_ids(tokens)
# The mask has 1 for real tokens and 0 for padding tokens. Only real
# tokens are attended to.
input_mask = [1] * len(input_ids)
# Zero-pad up to the sequence length.
while len(input_ids) < seq_length:
input_ids.append(0)
input_mask.append(0)
input_type_ids.append(0)
assert len(input_ids) == seq_length
assert len(input_mask) == seq_length
assert len(input_type_ids) == seq_length
if ex_index < 5:
logger.info("*** Example ***")
logger.info("unique_id: %s" % (example.unique_id))
logger.info("tokens: %s" % " ".join([str(x) for x in tokens]))
logger.info("input_ids: %s" % " ".join([str(x) for x in input_ids]))
logger.info("input_mask: %s" % " ".join([str(x) for x in input_mask]))
logger.info(
"input_type_ids: %s" % " ".join([str(x) for x in input_type_ids]))
features.append(
InputFeatures(
unique_id=example.unique_id,
tokens=tokens,
input_ids=input_ids,
input_mask=input_mask,
input_type_ids=input_type_ids))
return features
|
[
"def",
"convert_examples_to_features",
"(",
"examples",
",",
"seq_length",
",",
"tokenizer",
")",
":",
"features",
"=",
"[",
"]",
"for",
"(",
"ex_index",
",",
"example",
")",
"in",
"enumerate",
"(",
"examples",
")",
":",
"tokens_a",
"=",
"tokenizer",
".",
"tokenize",
"(",
"example",
".",
"text_a",
")",
"tokens_b",
"=",
"None",
"if",
"example",
".",
"text_b",
":",
"tokens_b",
"=",
"tokenizer",
".",
"tokenize",
"(",
"example",
".",
"text_b",
")",
"if",
"tokens_b",
":",
"# Modifies `tokens_a` and `tokens_b` in place so that the total",
"# length is less than the specified length.",
"# Account for [CLS], [SEP], [SEP] with \"- 3\"",
"_truncate_seq_pair",
"(",
"tokens_a",
",",
"tokens_b",
",",
"seq_length",
"-",
"3",
")",
"else",
":",
"# Account for [CLS] and [SEP] with \"- 2\"",
"if",
"len",
"(",
"tokens_a",
")",
">",
"seq_length",
"-",
"2",
":",
"tokens_a",
"=",
"tokens_a",
"[",
"0",
":",
"(",
"seq_length",
"-",
"2",
")",
"]",
"# The convention in BERT is:",
"# (a) For sequence pairs:",
"# tokens: [CLS] is this jack ##son ##ville ? [SEP] no it is not . [SEP]",
"# type_ids: 0 0 0 0 0 0 0 0 1 1 1 1 1 1",
"# (b) For single sequences:",
"# tokens: [CLS] the dog is hairy . [SEP]",
"# type_ids: 0 0 0 0 0 0 0",
"#",
"# Where \"type_ids\" are used to indicate whether this is the first",
"# sequence or the second sequence. The embedding vectors for `type=0` and",
"# `type=1` were learned during pre-training and are added to the wordpiece",
"# embedding vector (and position vector). This is not *strictly* necessary",
"# since the [SEP] token unambigiously separates the sequences, but it makes",
"# it easier for the model to learn the concept of sequences.",
"#",
"# For classification tasks, the first vector (corresponding to [CLS]) is",
"# used as as the \"sentence vector\". Note that this only makes sense because",
"# the entire model is fine-tuned.",
"tokens",
"=",
"[",
"]",
"input_type_ids",
"=",
"[",
"]",
"tokens",
".",
"append",
"(",
"\"[CLS]\"",
")",
"input_type_ids",
".",
"append",
"(",
"0",
")",
"for",
"token",
"in",
"tokens_a",
":",
"tokens",
".",
"append",
"(",
"token",
")",
"input_type_ids",
".",
"append",
"(",
"0",
")",
"tokens",
".",
"append",
"(",
"\"[SEP]\"",
")",
"input_type_ids",
".",
"append",
"(",
"0",
")",
"if",
"tokens_b",
":",
"for",
"token",
"in",
"tokens_b",
":",
"tokens",
".",
"append",
"(",
"token",
")",
"input_type_ids",
".",
"append",
"(",
"1",
")",
"tokens",
".",
"append",
"(",
"\"[SEP]\"",
")",
"input_type_ids",
".",
"append",
"(",
"1",
")",
"input_ids",
"=",
"tokenizer",
".",
"convert_tokens_to_ids",
"(",
"tokens",
")",
"# The mask has 1 for real tokens and 0 for padding tokens. Only real",
"# tokens are attended to.",
"input_mask",
"=",
"[",
"1",
"]",
"*",
"len",
"(",
"input_ids",
")",
"# Zero-pad up to the sequence length.",
"while",
"len",
"(",
"input_ids",
")",
"<",
"seq_length",
":",
"input_ids",
".",
"append",
"(",
"0",
")",
"input_mask",
".",
"append",
"(",
"0",
")",
"input_type_ids",
".",
"append",
"(",
"0",
")",
"assert",
"len",
"(",
"input_ids",
")",
"==",
"seq_length",
"assert",
"len",
"(",
"input_mask",
")",
"==",
"seq_length",
"assert",
"len",
"(",
"input_type_ids",
")",
"==",
"seq_length",
"if",
"ex_index",
"<",
"5",
":",
"logger",
".",
"info",
"(",
"\"*** Example ***\"",
")",
"logger",
".",
"info",
"(",
"\"unique_id: %s\"",
"%",
"(",
"example",
".",
"unique_id",
")",
")",
"logger",
".",
"info",
"(",
"\"tokens: %s\"",
"%",
"\" \"",
".",
"join",
"(",
"[",
"str",
"(",
"x",
")",
"for",
"x",
"in",
"tokens",
"]",
")",
")",
"logger",
".",
"info",
"(",
"\"input_ids: %s\"",
"%",
"\" \"",
".",
"join",
"(",
"[",
"str",
"(",
"x",
")",
"for",
"x",
"in",
"input_ids",
"]",
")",
")",
"logger",
".",
"info",
"(",
"\"input_mask: %s\"",
"%",
"\" \"",
".",
"join",
"(",
"[",
"str",
"(",
"x",
")",
"for",
"x",
"in",
"input_mask",
"]",
")",
")",
"logger",
".",
"info",
"(",
"\"input_type_ids: %s\"",
"%",
"\" \"",
".",
"join",
"(",
"[",
"str",
"(",
"x",
")",
"for",
"x",
"in",
"input_type_ids",
"]",
")",
")",
"features",
".",
"append",
"(",
"InputFeatures",
"(",
"unique_id",
"=",
"example",
".",
"unique_id",
",",
"tokens",
"=",
"tokens",
",",
"input_ids",
"=",
"input_ids",
",",
"input_mask",
"=",
"input_mask",
",",
"input_type_ids",
"=",
"input_type_ids",
")",
")",
"return",
"features"
] |
Loads a data file into a list of `InputFeature`s.
|
[
"Loads",
"a",
"data",
"file",
"into",
"a",
"list",
"of",
"InputFeature",
"s",
"."
] |
b832d5bb8a6dfc5965015b828e577677eace601e
|
https://github.com/huggingface/pytorch-pretrained-BERT/blob/b832d5bb8a6dfc5965015b828e577677eace601e/examples/extract_features.py#L59-L147
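A usage sketch that assumes it runs inside examples/extract_features.py, where InputExample and this function are defined; the sentence pair and the printed token prefix are illustrative.

from pytorch_pretrained_bert.tokenization import BertTokenizer

tokenizer = BertTokenizer.from_pretrained("bert-base-uncased", do_lower_case=True)
examples = [InputExample(unique_id=0,
                         text_a="The cat sat on the mat.",
                         text_b="A feline rested on a rug.")]

features = convert_examples_to_features(examples, seq_length=32, tokenizer=tokenizer)
print(features[0].tokens[:5])        # ['[CLS]', 'the', 'cat', ...]
print(len(features[0].input_ids))    # 32, zero-padded up to seq_length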
|
19,364
|
huggingface/pytorch-pretrained-BERT
|
examples/extract_features.py
|
read_examples
|
def read_examples(input_file):
"""Read a list of `InputExample`s from an input file."""
examples = []
unique_id = 0
with open(input_file, "r", encoding='utf-8') as reader:
while True:
line = reader.readline()
if not line:
break
line = line.strip()
text_a = None
text_b = None
m = re.match(r"^(.*) \|\|\| (.*)$", line)
if m is None:
text_a = line
else:
text_a = m.group(1)
text_b = m.group(2)
examples.append(
InputExample(unique_id=unique_id, text_a=text_a, text_b=text_b))
unique_id += 1
return examples
|
python
|
def read_examples(input_file):
"""Read a list of `InputExample`s from an input file."""
examples = []
unique_id = 0
with open(input_file, "r", encoding='utf-8') as reader:
while True:
line = reader.readline()
if not line:
break
line = line.strip()
text_a = None
text_b = None
m = re.match(r"^(.*) \|\|\| (.*)$", line)
if m is None:
text_a = line
else:
text_a = m.group(1)
text_b = m.group(2)
examples.append(
InputExample(unique_id=unique_id, text_a=text_a, text_b=text_b))
unique_id += 1
return examples
|
[
"def",
"read_examples",
"(",
"input_file",
")",
":",
"examples",
"=",
"[",
"]",
"unique_id",
"=",
"0",
"with",
"open",
"(",
"input_file",
",",
"\"r\"",
",",
"encoding",
"=",
"'utf-8'",
")",
"as",
"reader",
":",
"while",
"True",
":",
"line",
"=",
"reader",
".",
"readline",
"(",
")",
"if",
"not",
"line",
":",
"break",
"line",
"=",
"line",
".",
"strip",
"(",
")",
"text_a",
"=",
"None",
"text_b",
"=",
"None",
"m",
"=",
"re",
".",
"match",
"(",
"r\"^(.*) \\|\\|\\| (.*)$\"",
",",
"line",
")",
"if",
"m",
"is",
"None",
":",
"text_a",
"=",
"line",
"else",
":",
"text_a",
"=",
"m",
".",
"group",
"(",
"1",
")",
"text_b",
"=",
"m",
".",
"group",
"(",
"2",
")",
"examples",
".",
"append",
"(",
"InputExample",
"(",
"unique_id",
"=",
"unique_id",
",",
"text_a",
"=",
"text_a",
",",
"text_b",
"=",
"text_b",
")",
")",
"unique_id",
"+=",
"1",
"return",
"examples"
] |
Read a list of `InputExample`s from an input file.
|
[
"Read",
"a",
"list",
"of",
"InputExample",
"s",
"from",
"an",
"input",
"file",
"."
] |
b832d5bb8a6dfc5965015b828e577677eace601e
|
https://github.com/huggingface/pytorch-pretrained-BERT/blob/b832d5bb8a6dfc5965015b828e577677eace601e/examples/extract_features.py#L167-L188
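A sketch that assumes read_examples is in scope (it lives in the same script); the two-line input file is written on the fly and its contents are illustrative.

import tempfile

# One example per line; "text_a ||| text_b" supplies a sentence pair.
with tempfile.NamedTemporaryFile("w", suffix=".txt", delete=False,
                                 encoding="utf-8") as f:
    f.write("Who wrote Hamlet ? ||| Shakespeare wrote Hamlet .\n")
    f.write("A single sentence example .\n")
    path = f.name

examples = read_examples(path)
print(len(examples))          # 2
print(examples[0].text_b)     # 'Shakespeare wrote Hamlet .'
print(examples[1].text_b)     # None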
|
19,365
|
huggingface/pytorch-pretrained-BERT
|
examples/run_squad.py
|
read_squad_examples
|
def read_squad_examples(input_file, is_training, version_2_with_negative):
"""Read a SQuAD json file into a list of SquadExample."""
with open(input_file, "r", encoding='utf-8') as reader:
input_data = json.load(reader)["data"]
def is_whitespace(c):
if c == " " or c == "\t" or c == "\r" or c == "\n" or ord(c) == 0x202F:
return True
return False
examples = []
for entry in input_data:
for paragraph in entry["paragraphs"]:
paragraph_text = paragraph["context"]
doc_tokens = []
char_to_word_offset = []
prev_is_whitespace = True
for c in paragraph_text:
if is_whitespace(c):
prev_is_whitespace = True
else:
if prev_is_whitespace:
doc_tokens.append(c)
else:
doc_tokens[-1] += c
prev_is_whitespace = False
char_to_word_offset.append(len(doc_tokens) - 1)
for qa in paragraph["qas"]:
qas_id = qa["id"]
question_text = qa["question"]
start_position = None
end_position = None
orig_answer_text = None
is_impossible = False
if is_training:
if version_2_with_negative:
is_impossible = qa["is_impossible"]
if (len(qa["answers"]) != 1) and (not is_impossible):
raise ValueError(
"For training, each question should have exactly 1 answer.")
if not is_impossible:
answer = qa["answers"][0]
orig_answer_text = answer["text"]
answer_offset = answer["answer_start"]
answer_length = len(orig_answer_text)
start_position = char_to_word_offset[answer_offset]
end_position = char_to_word_offset[answer_offset + answer_length - 1]
# Only add answers where the text can be exactly recovered from the
# document. If this CAN'T happen it's likely due to weird Unicode
# stuff so we will just skip the example.
#
# Note that this means for training mode, every example is NOT
# guaranteed to be preserved.
actual_text = " ".join(doc_tokens[start_position:(end_position + 1)])
cleaned_answer_text = " ".join(
whitespace_tokenize(orig_answer_text))
if actual_text.find(cleaned_answer_text) == -1:
logger.warning("Could not find answer: '%s' vs. '%s'",
actual_text, cleaned_answer_text)
continue
else:
start_position = -1
end_position = -1
orig_answer_text = ""
example = SquadExample(
qas_id=qas_id,
question_text=question_text,
doc_tokens=doc_tokens,
orig_answer_text=orig_answer_text,
start_position=start_position,
end_position=end_position,
is_impossible=is_impossible)
examples.append(example)
return examples
|
python
|
def read_squad_examples(input_file, is_training, version_2_with_negative):
"""Read a SQuAD json file into a list of SquadExample."""
with open(input_file, "r", encoding='utf-8') as reader:
input_data = json.load(reader)["data"]
def is_whitespace(c):
if c == " " or c == "\t" or c == "\r" or c == "\n" or ord(c) == 0x202F:
return True
return False
examples = []
for entry in input_data:
for paragraph in entry["paragraphs"]:
paragraph_text = paragraph["context"]
doc_tokens = []
char_to_word_offset = []
prev_is_whitespace = True
for c in paragraph_text:
if is_whitespace(c):
prev_is_whitespace = True
else:
if prev_is_whitespace:
doc_tokens.append(c)
else:
doc_tokens[-1] += c
prev_is_whitespace = False
char_to_word_offset.append(len(doc_tokens) - 1)
for qa in paragraph["qas"]:
qas_id = qa["id"]
question_text = qa["question"]
start_position = None
end_position = None
orig_answer_text = None
is_impossible = False
if is_training:
if version_2_with_negative:
is_impossible = qa["is_impossible"]
if (len(qa["answers"]) != 1) and (not is_impossible):
raise ValueError(
"For training, each question should have exactly 1 answer.")
if not is_impossible:
answer = qa["answers"][0]
orig_answer_text = answer["text"]
answer_offset = answer["answer_start"]
answer_length = len(orig_answer_text)
start_position = char_to_word_offset[answer_offset]
end_position = char_to_word_offset[answer_offset + answer_length - 1]
# Only add answers where the text can be exactly recovered from the
# document. If this CAN'T happen it's likely due to weird Unicode
# stuff so we will just skip the example.
#
# Note that this means for training mode, every example is NOT
# guaranteed to be preserved.
actual_text = " ".join(doc_tokens[start_position:(end_position + 1)])
cleaned_answer_text = " ".join(
whitespace_tokenize(orig_answer_text))
if actual_text.find(cleaned_answer_text) == -1:
logger.warning("Could not find answer: '%s' vs. '%s'",
actual_text, cleaned_answer_text)
continue
else:
start_position = -1
end_position = -1
orig_answer_text = ""
example = SquadExample(
qas_id=qas_id,
question_text=question_text,
doc_tokens=doc_tokens,
orig_answer_text=orig_answer_text,
start_position=start_position,
end_position=end_position,
is_impossible=is_impossible)
examples.append(example)
return examples
|
[
"def",
"read_squad_examples",
"(",
"input_file",
",",
"is_training",
",",
"version_2_with_negative",
")",
":",
"with",
"open",
"(",
"input_file",
",",
"\"r\"",
",",
"encoding",
"=",
"'utf-8'",
")",
"as",
"reader",
":",
"input_data",
"=",
"json",
".",
"load",
"(",
"reader",
")",
"[",
"\"data\"",
"]",
"def",
"is_whitespace",
"(",
"c",
")",
":",
"if",
"c",
"==",
"\" \"",
"or",
"c",
"==",
"\"\\t\"",
"or",
"c",
"==",
"\"\\r\"",
"or",
"c",
"==",
"\"\\n\"",
"or",
"ord",
"(",
"c",
")",
"==",
"0x202F",
":",
"return",
"True",
"return",
"False",
"examples",
"=",
"[",
"]",
"for",
"entry",
"in",
"input_data",
":",
"for",
"paragraph",
"in",
"entry",
"[",
"\"paragraphs\"",
"]",
":",
"paragraph_text",
"=",
"paragraph",
"[",
"\"context\"",
"]",
"doc_tokens",
"=",
"[",
"]",
"char_to_word_offset",
"=",
"[",
"]",
"prev_is_whitespace",
"=",
"True",
"for",
"c",
"in",
"paragraph_text",
":",
"if",
"is_whitespace",
"(",
"c",
")",
":",
"prev_is_whitespace",
"=",
"True",
"else",
":",
"if",
"prev_is_whitespace",
":",
"doc_tokens",
".",
"append",
"(",
"c",
")",
"else",
":",
"doc_tokens",
"[",
"-",
"1",
"]",
"+=",
"c",
"prev_is_whitespace",
"=",
"False",
"char_to_word_offset",
".",
"append",
"(",
"len",
"(",
"doc_tokens",
")",
"-",
"1",
")",
"for",
"qa",
"in",
"paragraph",
"[",
"\"qas\"",
"]",
":",
"qas_id",
"=",
"qa",
"[",
"\"id\"",
"]",
"question_text",
"=",
"qa",
"[",
"\"question\"",
"]",
"start_position",
"=",
"None",
"end_position",
"=",
"None",
"orig_answer_text",
"=",
"None",
"is_impossible",
"=",
"False",
"if",
"is_training",
":",
"if",
"version_2_with_negative",
":",
"is_impossible",
"=",
"qa",
"[",
"\"is_impossible\"",
"]",
"if",
"(",
"len",
"(",
"qa",
"[",
"\"answers\"",
"]",
")",
"!=",
"1",
")",
"and",
"(",
"not",
"is_impossible",
")",
":",
"raise",
"ValueError",
"(",
"\"For training, each question should have exactly 1 answer.\"",
")",
"if",
"not",
"is_impossible",
":",
"answer",
"=",
"qa",
"[",
"\"answers\"",
"]",
"[",
"0",
"]",
"orig_answer_text",
"=",
"answer",
"[",
"\"text\"",
"]",
"answer_offset",
"=",
"answer",
"[",
"\"answer_start\"",
"]",
"answer_length",
"=",
"len",
"(",
"orig_answer_text",
")",
"start_position",
"=",
"char_to_word_offset",
"[",
"answer_offset",
"]",
"end_position",
"=",
"char_to_word_offset",
"[",
"answer_offset",
"+",
"answer_length",
"-",
"1",
"]",
"# Only add answers where the text can be exactly recovered from the",
"# document. If this CAN'T happen it's likely due to weird Unicode",
"# stuff so we will just skip the example.",
"#",
"# Note that this means for training mode, every example is NOT",
"# guaranteed to be preserved.",
"actual_text",
"=",
"\" \"",
".",
"join",
"(",
"doc_tokens",
"[",
"start_position",
":",
"(",
"end_position",
"+",
"1",
")",
"]",
")",
"cleaned_answer_text",
"=",
"\" \"",
".",
"join",
"(",
"whitespace_tokenize",
"(",
"orig_answer_text",
")",
")",
"if",
"actual_text",
".",
"find",
"(",
"cleaned_answer_text",
")",
"==",
"-",
"1",
":",
"logger",
".",
"warning",
"(",
"\"Could not find answer: '%s' vs. '%s'\"",
",",
"actual_text",
",",
"cleaned_answer_text",
")",
"continue",
"else",
":",
"start_position",
"=",
"-",
"1",
"end_position",
"=",
"-",
"1",
"orig_answer_text",
"=",
"\"\"",
"example",
"=",
"SquadExample",
"(",
"qas_id",
"=",
"qas_id",
",",
"question_text",
"=",
"question_text",
",",
"doc_tokens",
"=",
"doc_tokens",
",",
"orig_answer_text",
"=",
"orig_answer_text",
",",
"start_position",
"=",
"start_position",
",",
"end_position",
"=",
"end_position",
",",
"is_impossible",
"=",
"is_impossible",
")",
"examples",
".",
"append",
"(",
"example",
")",
"return",
"examples"
] |
Read a SQuAD json file into a list of SquadExample.
|
[
"Read",
"a",
"SQuAD",
"json",
"file",
"into",
"a",
"list",
"of",
"SquadExample",
"."
] |
b832d5bb8a6dfc5965015b828e577677eace601e
|
https://github.com/huggingface/pytorch-pretrained-BERT/blob/b832d5bb8a6dfc5965015b828e577677eace601e/examples/run_squad.py#L122-L197
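A sketch exercising the parser above on a tiny SQuAD v1.1-style file written on the fly; it assumes the function (and the whitespace_tokenize helper it calls) is in scope, i.e. it runs inside examples/run_squad.py.

import json
import tempfile

squad_like = {"data": [{"paragraphs": [{
    "context": "Normandy is a region in France.",
    "qas": [{"id": "q1",
             "question": "Where is Normandy?",
             "answers": [{"text": "France", "answer_start": 24}]}]}]}]}

with tempfile.NamedTemporaryFile("w", suffix=".json", delete=False,
                                 encoding="utf-8") as f:
    json.dump(squad_like, f)
    path = f.name

examples = read_squad_examples(path, is_training=True,
                               version_2_with_negative=False)
print(examples[0].doc_tokens)          # ['Normandy', 'is', 'a', 'region', 'in', 'France.']
print(examples[0].start_position,
      examples[0].end_position)        # 5 5, i.e. the whitespace token 'France.'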
|
19,366
|
huggingface/pytorch-pretrained-BERT
|
examples/run_squad.py
|
_improve_answer_span
|
def _improve_answer_span(doc_tokens, input_start, input_end, tokenizer,
orig_answer_text):
"""Returns tokenized answer spans that better match the annotated answer."""
# The SQuAD annotations are character based. We first project them to
# whitespace-tokenized words. But then after WordPiece tokenization, we can
# often find a "better match". For example:
#
# Question: What year was John Smith born?
# Context: The leader was John Smith (1895-1943).
# Answer: 1895
#
# The original whitespace-tokenized answer will be "(1895-1943).". However
# after tokenization, our tokens will be "( 1895 - 1943 ) .". So we can match
# the exact answer, 1895.
#
# However, this is not always possible. Consider the following:
#
# Question: What country is the top exporter of electronics?
# Context: The Japanese electronics industry is the largest in the world.
# Answer: Japan
#
# In this case, the annotator chose "Japan" as a character sub-span of
# the word "Japanese". Since our WordPiece tokenizer does not split
# "Japanese", we just use "Japanese" as the annotation. This is fairly rare
# in SQuAD, but does happen.
tok_answer_text = " ".join(tokenizer.tokenize(orig_answer_text))
for new_start in range(input_start, input_end + 1):
for new_end in range(input_end, new_start - 1, -1):
text_span = " ".join(doc_tokens[new_start:(new_end + 1)])
if text_span == tok_answer_text:
return (new_start, new_end)
return (input_start, input_end)
|
python
|
def _improve_answer_span(doc_tokens, input_start, input_end, tokenizer,
orig_answer_text):
"""Returns tokenized answer spans that better match the annotated answer."""
# The SQuAD annotations are character based. We first project them to
# whitespace-tokenized words. But then after WordPiece tokenization, we can
# often find a "better match". For example:
#
# Question: What year was John Smith born?
# Context: The leader was John Smith (1895-1943).
# Answer: 1895
#
# The original whitespace-tokenized answer will be "(1895-1943).". However
# after tokenization, our tokens will be "( 1895 - 1943 ) .". So we can match
# the exact answer, 1895.
#
# However, this is not always possible. Consider the following:
#
# Question: What country is the top exporter of electronics?
# Context: The Japanese electronics industry is the largest in the world.
# Answer: Japan
#
# In this case, the annotator chose "Japan" as a character sub-span of
# the word "Japanese". Since our WordPiece tokenizer does not split
# "Japanese", we just use "Japanese" as the annotation. This is fairly rare
# in SQuAD, but does happen.
tok_answer_text = " ".join(tokenizer.tokenize(orig_answer_text))
for new_start in range(input_start, input_end + 1):
for new_end in range(input_end, new_start - 1, -1):
text_span = " ".join(doc_tokens[new_start:(new_end + 1)])
if text_span == tok_answer_text:
return (new_start, new_end)
return (input_start, input_end)
|
[
"def",
"_improve_answer_span",
"(",
"doc_tokens",
",",
"input_start",
",",
"input_end",
",",
"tokenizer",
",",
"orig_answer_text",
")",
":",
"# The SQuAD annotations are character based. We first project them to",
"# whitespace-tokenized words. But then after WordPiece tokenization, we can",
"# often find a \"better match\". For example:",
"#",
"# Question: What year was John Smith born?",
"# Context: The leader was John Smith (1895-1943).",
"# Answer: 1895",
"#",
"# The original whitespace-tokenized answer will be \"(1895-1943).\". However",
"# after tokenization, our tokens will be \"( 1895 - 1943 ) .\". So we can match",
"# the exact answer, 1895.",
"#",
"# However, this is not always possible. Consider the following:",
"#",
"# Question: What country is the top exporter of electornics?",
"# Context: The Japanese electronics industry is the lagest in the world.",
"# Answer: Japan",
"#",
"# In this case, the annotator chose \"Japan\" as a character sub-span of",
"# the word \"Japanese\". Since our WordPiece tokenizer does not split",
"# \"Japanese\", we just use \"Japanese\" as the annotation. This is fairly rare",
"# in SQuAD, but does happen.",
"tok_answer_text",
"=",
"\" \"",
".",
"join",
"(",
"tokenizer",
".",
"tokenize",
"(",
"orig_answer_text",
")",
")",
"for",
"new_start",
"in",
"range",
"(",
"input_start",
",",
"input_end",
"+",
"1",
")",
":",
"for",
"new_end",
"in",
"range",
"(",
"input_end",
",",
"new_start",
"-",
"1",
",",
"-",
"1",
")",
":",
"text_span",
"=",
"\" \"",
".",
"join",
"(",
"doc_tokens",
"[",
"new_start",
":",
"(",
"new_end",
"+",
"1",
")",
"]",
")",
"if",
"text_span",
"==",
"tok_answer_text",
":",
"return",
"(",
"new_start",
",",
"new_end",
")",
"return",
"(",
"input_start",
",",
"input_end",
")"
] |
Returns tokenized answer spans that better match the annotated answer.
|
[
"Returns",
"tokenized",
"answer",
"spans",
"that",
"better",
"match",
"the",
"annotated",
"answer",
"."
] |
b832d5bb8a6dfc5965015b828e577677eace601e
|
https://github.com/huggingface/pytorch-pretrained-BERT/blob/b832d5bb8a6dfc5965015b828e577677eace601e/examples/run_squad.py#L363-L397
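A sketch reproducing the function's own (1895-1943) example with a hypothetical stand-in tokenizer; a real run would pass the BertTokenizer used elsewhere in the script, and it assumes _improve_answer_span is in scope.

class MockTokenizer(object):
    """Hypothetical stand-in that splits punctuation the way WordPiece would here."""
    def tokenize(self, text):
        for ch in "().-":
            text = text.replace(ch, " " + ch + " ")
        return text.lower().split()

# Sub-token sequence of the document after tokenization.
doc_tokens = ["the", "leader", "was", "john", "smith",
              "(", "1895", "-", "1943", ")", "."]
# The whitespace-level answer "(1895-1943)." covers sub-tokens 5..10, but the
# annotated answer text is just "1895".
start, end = _improve_answer_span(doc_tokens, 5, 10, MockTokenizer(), "1895")
print(start, end)     # 6 6, tightened to the single token '1895'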
|
19,367
|
huggingface/pytorch-pretrained-BERT
|
examples/run_squad.py
|
_check_is_max_context
|
def _check_is_max_context(doc_spans, cur_span_index, position):
"""Check if this is the 'max context' doc span for the token."""
# Because of the sliding window approach taken to scoring documents, a single
# token can appear in multiple documents. E.g.
# Doc: the man went to the store and bought a gallon of milk
# Span A: the man went to the
# Span B: to the store and bought
# Span C: and bought a gallon of
# ...
#
# Now the word 'bought' will have two scores from spans B and C. We only
# want to consider the score with "maximum context", which we define as
# the *minimum* of its left and right context (the *sum* of left and
# right context will always be the same, of course).
#
# In the example the maximum context for 'bought' would be span C since
# it has 1 left context and 3 right context, while span B has 4 left context
# and 0 right context.
best_score = None
best_span_index = None
for (span_index, doc_span) in enumerate(doc_spans):
end = doc_span.start + doc_span.length - 1
if position < doc_span.start:
continue
if position > end:
continue
num_left_context = position - doc_span.start
num_right_context = end - position
score = min(num_left_context, num_right_context) + 0.01 * doc_span.length
if best_score is None or score > best_score:
best_score = score
best_span_index = span_index
return cur_span_index == best_span_index
|
python
|
def _check_is_max_context(doc_spans, cur_span_index, position):
"""Check if this is the 'max context' doc span for the token."""
# Because of the sliding window approach taken to scoring documents, a single
# token can appear in multiple documents. E.g.
# Doc: the man went to the store and bought a gallon of milk
# Span A: the man went to the
# Span B: to the store and bought
# Span C: and bought a gallon of
# ...
#
# Now the word 'bought' will have two scores from spans B and C. We only
# want to consider the score with "maximum context", which we define as
# the *minimum* of its left and right context (the *sum* of left and
# right context will always be the same, of course).
#
# In the example the maximum context for 'bought' would be span C since
# it has 1 left context and 3 right context, while span B has 4 left context
# and 0 right context.
best_score = None
best_span_index = None
for (span_index, doc_span) in enumerate(doc_spans):
end = doc_span.start + doc_span.length - 1
if position < doc_span.start:
continue
if position > end:
continue
num_left_context = position - doc_span.start
num_right_context = end - position
score = min(num_left_context, num_right_context) + 0.01 * doc_span.length
if best_score is None or score > best_score:
best_score = score
best_span_index = span_index
return cur_span_index == best_span_index
|
[
"def",
"_check_is_max_context",
"(",
"doc_spans",
",",
"cur_span_index",
",",
"position",
")",
":",
"# Because of the sliding window approach taken to scoring documents, a single",
"# token can appear in multiple documents. E.g.",
"# Doc: the man went to the store and bought a gallon of milk",
"# Span A: the man went to the",
"# Span B: to the store and bought",
"# Span C: and bought a gallon of",
"# ...",
"#",
"# Now the word 'bought' will have two scores from spans B and C. We only",
"# want to consider the score with \"maximum context\", which we define as",
"# the *minimum* of its left and right context (the *sum* of left and",
"# right context will always be the same, of course).",
"#",
"# In the example the maximum context for 'bought' would be span C since",
"# it has 1 left context and 3 right context, while span B has 4 left context",
"# and 0 right context.",
"best_score",
"=",
"None",
"best_span_index",
"=",
"None",
"for",
"(",
"span_index",
",",
"doc_span",
")",
"in",
"enumerate",
"(",
"doc_spans",
")",
":",
"end",
"=",
"doc_span",
".",
"start",
"+",
"doc_span",
".",
"length",
"-",
"1",
"if",
"position",
"<",
"doc_span",
".",
"start",
":",
"continue",
"if",
"position",
">",
"end",
":",
"continue",
"num_left_context",
"=",
"position",
"-",
"doc_span",
".",
"start",
"num_right_context",
"=",
"end",
"-",
"position",
"score",
"=",
"min",
"(",
"num_left_context",
",",
"num_right_context",
")",
"+",
"0.01",
"*",
"doc_span",
".",
"length",
"if",
"best_score",
"is",
"None",
"or",
"score",
">",
"best_score",
":",
"best_score",
"=",
"score",
"best_span_index",
"=",
"span_index",
"return",
"cur_span_index",
"==",
"best_span_index"
] |
Check if this is the 'max context' doc span for the token.
|
[
"Check",
"if",
"this",
"is",
"the",
"max",
"context",
"doc",
"span",
"for",
"the",
"token",
"."
] |
b832d5bb8a6dfc5965015b828e577677eace601e
|
https://github.com/huggingface/pytorch-pretrained-BERT/blob/b832d5bb8a6dfc5965015b828e577677eace601e/examples/run_squad.py#L400-L434
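A sketch with two hand-built sliding-window spans; DocSpan mirrors the start/length namedtuple the script builds its doc spans with, and _check_is_max_context is assumed to be in scope.

import collections

DocSpan = collections.namedtuple("DocSpan", ["start", "length"])

# Two overlapping windows over a 10-token document.
doc_spans = [DocSpan(start=0, length=6), DocSpan(start=4, length=6)]

# Token 5 appears in both windows. In the first it has 5 tokens of left context
# and 0 of right; in the second it has 1 left and 4 right, so the second wins.
print(_check_is_max_context(doc_spans, cur_span_index=0, position=5))   # False
print(_check_is_max_context(doc_spans, cur_span_index=1, position=5))   # True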
|
19,368
|
huggingface/pytorch-pretrained-BERT
|
examples/run_squad.py
|
_get_best_indexes
|
def _get_best_indexes(logits, n_best_size):
"""Get the n-best logits from a list."""
index_and_score = sorted(enumerate(logits), key=lambda x: x[1], reverse=True)
best_indexes = []
for i in range(len(index_and_score)):
if i >= n_best_size:
break
best_indexes.append(index_and_score[i][0])
return best_indexes
|
python
|
def _get_best_indexes(logits, n_best_size):
"""Get the n-best logits from a list."""
index_and_score = sorted(enumerate(logits), key=lambda x: x[1], reverse=True)
best_indexes = []
for i in range(len(index_and_score)):
if i >= n_best_size:
break
best_indexes.append(index_and_score[i][0])
return best_indexes
|
[
"def",
"_get_best_indexes",
"(",
"logits",
",",
"n_best_size",
")",
":",
"index_and_score",
"=",
"sorted",
"(",
"enumerate",
"(",
"logits",
")",
",",
"key",
"=",
"lambda",
"x",
":",
"x",
"[",
"1",
"]",
",",
"reverse",
"=",
"True",
")",
"best_indexes",
"=",
"[",
"]",
"for",
"i",
"in",
"range",
"(",
"len",
"(",
"index_and_score",
")",
")",
":",
"if",
"i",
">=",
"n_best_size",
":",
"break",
"best_indexes",
".",
"append",
"(",
"index_and_score",
"[",
"i",
"]",
"[",
"0",
"]",
")",
"return",
"best_indexes"
] |
Get the n-best logits from a list.
|
[
"Get",
"the",
"n",
"-",
"best",
"logits",
"from",
"a",
"list",
"."
] |
b832d5bb8a6dfc5965015b828e577677eace601e
|
https://github.com/huggingface/pytorch-pretrained-BERT/blob/b832d5bb8a6dfc5965015b828e577677eace601e/examples/run_squad.py#L729-L738
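A quick standard-library check of the n-best selection in this record:

def get_best_indexes(logits, n_best_size):
    # Indices of the n_best_size largest logits, highest score first.
    index_and_score = sorted(enumerate(logits), key=lambda x: x[1], reverse=True)
    return [idx for idx, _ in index_and_score[:n_best_size]]

print(get_best_indexes([0.1, 2.3, -1.0, 0.9], 2))  # [1, 3]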
|
19,369
|
huggingface/pytorch-pretrained-BERT
|
examples/run_squad.py
|
_compute_softmax
|
def _compute_softmax(scores):
"""Compute softmax probability over raw logits."""
if not scores:
return []
max_score = None
for score in scores:
if max_score is None or score > max_score:
max_score = score
exp_scores = []
total_sum = 0.0
for score in scores:
x = math.exp(score - max_score)
exp_scores.append(x)
total_sum += x
probs = []
for score in exp_scores:
probs.append(score / total_sum)
return probs
|
python
|
def _compute_softmax(scores):
"""Compute softmax probability over raw logits."""
if not scores:
return []
max_score = None
for score in scores:
if max_score is None or score > max_score:
max_score = score
exp_scores = []
total_sum = 0.0
for score in scores:
x = math.exp(score - max_score)
exp_scores.append(x)
total_sum += x
probs = []
for score in exp_scores:
probs.append(score / total_sum)
return probs
|
[
"def",
"_compute_softmax",
"(",
"scores",
")",
":",
"if",
"not",
"scores",
":",
"return",
"[",
"]",
"max_score",
"=",
"None",
"for",
"score",
"in",
"scores",
":",
"if",
"max_score",
"is",
"None",
"or",
"score",
">",
"max_score",
":",
"max_score",
"=",
"score",
"exp_scores",
"=",
"[",
"]",
"total_sum",
"=",
"0.0",
"for",
"score",
"in",
"scores",
":",
"x",
"=",
"math",
".",
"exp",
"(",
"score",
"-",
"max_score",
")",
"exp_scores",
".",
"append",
"(",
"x",
")",
"total_sum",
"+=",
"x",
"probs",
"=",
"[",
"]",
"for",
"score",
"in",
"exp_scores",
":",
"probs",
".",
"append",
"(",
"score",
"/",
"total_sum",
")",
"return",
"probs"
] |
Compute softmax probability over raw logits.
|
[
"Compute",
"softmax",
"probability",
"over",
"raw",
"logits",
"."
] |
b832d5bb8a6dfc5965015b828e577677eace601e
|
https://github.com/huggingface/pytorch-pretrained-BERT/blob/b832d5bb8a6dfc5965015b828e577677eace601e/examples/run_squad.py#L741-L761
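The record above is a numerically stable softmax (the maximum logit is subtracted before exponentiating, so large logits cannot overflow). A compact standard-library sketch of the same computation:

import math

def compute_softmax(scores):
    if not scores:
        return []
    max_score = max(scores)
    exp_scores = [math.exp(s - max_score) for s in scores]
    total = sum(exp_scores)
    return [e / total for e in exp_scores]

probs = compute_softmax([1.0, 2.0, 3.0])
print([round(p, 3) for p in probs])   # [0.09, 0.245, 0.665]
print(abs(sum(probs) - 1.0) < 1e-12)  # True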
|
19,370
|
huggingface/pytorch-pretrained-BERT
|
examples/run_classifier.py
|
DataProcessor._read_tsv
|
def _read_tsv(cls, input_file, quotechar=None):
"""Reads a tab separated value file."""
with open(input_file, "r", encoding="utf-8") as f:
reader = csv.reader(f, delimiter="\t", quotechar=quotechar)
lines = []
for line in reader:
if sys.version_info[0] == 2:
line = list(unicode(cell, 'utf-8') for cell in line)
lines.append(line)
return lines
|
python
|
def _read_tsv(cls, input_file, quotechar=None):
"""Reads a tab separated value file."""
with open(input_file, "r", encoding="utf-8") as f:
reader = csv.reader(f, delimiter="\t", quotechar=quotechar)
lines = []
for line in reader:
if sys.version_info[0] == 2:
line = list(unicode(cell, 'utf-8') for cell in line)
lines.append(line)
return lines
|
[
"def",
"_read_tsv",
"(",
"cls",
",",
"input_file",
",",
"quotechar",
"=",
"None",
")",
":",
"with",
"open",
"(",
"input_file",
",",
"\"r\"",
",",
"encoding",
"=",
"\"utf-8\"",
")",
"as",
"f",
":",
"reader",
"=",
"csv",
".",
"reader",
"(",
"f",
",",
"delimiter",
"=",
"\"\\t\"",
",",
"quotechar",
"=",
"quotechar",
")",
"lines",
"=",
"[",
"]",
"for",
"line",
"in",
"reader",
":",
"if",
"sys",
".",
"version_info",
"[",
"0",
"]",
"==",
"2",
":",
"line",
"=",
"list",
"(",
"unicode",
"(",
"cell",
",",
"'utf-8'",
")",
"for",
"cell",
"in",
"line",
")",
"lines",
".",
"append",
"(",
"line",
")",
"return",
"lines"
] |
Reads a tab separated value file.
|
[
"Reads",
"a",
"tab",
"separated",
"value",
"file",
"."
] |
b832d5bb8a6dfc5965015b828e577677eace601e
|
https://github.com/huggingface/pytorch-pretrained-BERT/blob/b832d5bb8a6dfc5965015b828e577677eace601e/examples/run_classifier.py#L93-L102
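A small round-trip sketch of the TSV reading shown in this record, assuming Python 3 (the sys.version_info branch only matters on Python 2); an in-memory buffer stands in for the input file:

import csv
import io

tsv_text = "label\tsentence\n1\tthe man went to the store\n"
reader = csv.reader(io.StringIO(tsv_text), delimiter="\t", quotechar=None)
print(list(reader))  # [['label', 'sentence'], ['1', 'the man went to the store']]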
|
19,371
|
huggingface/pytorch-pretrained-BERT
|
examples/run_classifier.py
|
MrpcProcessor._create_examples
|
def _create_examples(self, lines, set_type):
"""Creates examples for the training and dev sets."""
examples = []
for (i, line) in enumerate(lines):
if i == 0:
continue
guid = "%s-%s" % (set_type, i)
text_a = line[3]
text_b = line[4]
label = line[0]
examples.append(
InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label))
return examples
|
python
|
def _create_examples(self, lines, set_type):
"""Creates examples for the training and dev sets."""
examples = []
for (i, line) in enumerate(lines):
if i == 0:
continue
guid = "%s-%s" % (set_type, i)
text_a = line[3]
text_b = line[4]
label = line[0]
examples.append(
InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label))
return examples
|
[
"def",
"_create_examples",
"(",
"self",
",",
"lines",
",",
"set_type",
")",
":",
"examples",
"=",
"[",
"]",
"for",
"(",
"i",
",",
"line",
")",
"in",
"enumerate",
"(",
"lines",
")",
":",
"if",
"i",
"==",
"0",
":",
"continue",
"guid",
"=",
"\"%s-%s\"",
"%",
"(",
"set_type",
",",
"i",
")",
"text_a",
"=",
"line",
"[",
"3",
"]",
"text_b",
"=",
"line",
"[",
"4",
"]",
"label",
"=",
"line",
"[",
"0",
"]",
"examples",
".",
"append",
"(",
"InputExample",
"(",
"guid",
"=",
"guid",
",",
"text_a",
"=",
"text_a",
",",
"text_b",
"=",
"text_b",
",",
"label",
"=",
"label",
")",
")",
"return",
"examples"
] |
Creates examples for the training and dev sets.
|
[
"Creates",
"examples",
"for",
"the",
"training",
"and",
"dev",
"sets",
"."
] |
b832d5bb8a6dfc5965015b828e577677eace601e
|
https://github.com/huggingface/pytorch-pretrained-BERT/blob/b832d5bb8a6dfc5965015b828e577677eace601e/examples/run_classifier.py#L123-L135
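A hedged sketch of the MRPC row handling in this record; `InputExample` is defined elsewhere in run_classifier.py, so a minimal stand-in dataclass is used here purely for illustration (label in column 0, the two sentences in columns 3 and 4, header row skipped):

from dataclasses import dataclass

@dataclass
class InputExample:  # stand-in for the class defined in run_classifier.py
    guid: str
    text_a: str
    text_b: str
    label: str

def create_examples(lines, set_type):
    examples = []
    for i, line in enumerate(lines):
        if i == 0:  # skip the MRPC header row
            continue
        examples.append(InputExample(guid="%s-%s" % (set_type, i),
                                     text_a=line[3], text_b=line[4], label=line[0]))
    return examples

rows = [
    ["Quality", "#1 ID", "#2 ID", "#1 String", "#2 String"],
    ["1", "702876", "702977", "Amrozi accused his brother.", "Referring to him as a witness, Amrozi accused his brother."],
]
print(create_examples(rows, "train")[0].guid)  # train-1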
|
19,372
|
huggingface/pytorch-pretrained-BERT
|
pytorch_pretrained_bert/modeling_openai.py
|
OpenAIGPTConfig.from_dict
|
def from_dict(cls, json_object):
"""Constructs a `OpenAIGPTConfig` from a Python dictionary of parameters."""
config = OpenAIGPTConfig(vocab_size_or_config_json_file=-1)
for key, value in json_object.items():
config.__dict__[key] = value
return config
|
python
|
def from_dict(cls, json_object):
"""Constructs a `OpenAIGPTConfig` from a Python dictionary of parameters."""
config = OpenAIGPTConfig(vocab_size_or_config_json_file=-1)
for key, value in json_object.items():
config.__dict__[key] = value
return config
|
[
"def",
"from_dict",
"(",
"cls",
",",
"json_object",
")",
":",
"config",
"=",
"OpenAIGPTConfig",
"(",
"vocab_size_or_config_json_file",
"=",
"-",
"1",
")",
"for",
"key",
",",
"value",
"in",
"json_object",
".",
"items",
"(",
")",
":",
"config",
".",
"__dict__",
"[",
"key",
"]",
"=",
"value",
"return",
"config"
] |
Constructs a `OpenAIGPTConfig` from a Python dictionary of parameters.
|
[
"Constructs",
"a",
"OpenAIGPTConfig",
"from",
"a",
"Python",
"dictionary",
"of",
"parameters",
"."
] |
b832d5bb8a6dfc5965015b828e577677eace601e
|
https://github.com/huggingface/pytorch-pretrained-BERT/blob/b832d5bb8a6dfc5965015b828e577677eace601e/pytorch_pretrained_bert/modeling_openai.py#L200-L205
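This record shows a common config-from-dict pattern: construct with a sentinel vocab size, then copy every key into `__dict__`. A generic sketch of the same pattern with a plain class (not the library's actual `OpenAIGPTConfig`):

class TinyConfig:
    def __init__(self):
        # defaults; any key in the dict below overrides these
        self.vocab_size = -1
        self.n_embd = 768

    @classmethod
    def from_dict(cls, json_object):
        config = cls()
        for key, value in json_object.items():
            config.__dict__[key] = value
        return config

cfg = TinyConfig.from_dict({"vocab_size": 40478, "n_layer": 12})
print(cfg.vocab_size, cfg.n_embd, cfg.n_layer)  # 40478 768 12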
|
19,373
|
huggingface/pytorch-pretrained-BERT
|
pytorch_pretrained_bert/modeling_openai.py
|
OpenAIGPTModel.set_num_special_tokens
|
def set_num_special_tokens(self, num_special_tokens):
" Update input embeddings with new embedding matrice if needed "
if self.config.n_special == num_special_tokens:
return
# Update config
self.config.n_special = num_special_tokens
# Build new embeddings and initialize all new embeddings (in particular the special tokens)
old_embed = self.tokens_embed
self.tokens_embed = nn.Embedding(self.config.total_tokens_embeddings, self.config.n_embd)
self.tokens_embed.to(old_embed.weight.device)
self.init_weights(self.tokens_embed)
# Copy word embeddings from the previous weights
self.tokens_embed.weight.data[:self.config.vocab_size, :] = old_embed.weight.data[:self.config.vocab_size, :]
|
python
|
def set_num_special_tokens(self, num_special_tokens):
" Update input embeddings with new embedding matrice if needed "
if self.config.n_special == num_special_tokens:
return
# Update config
self.config.n_special = num_special_tokens
# Build new embeddings and initialize all new embeddings (in particular the special tokens)
old_embed = self.tokens_embed
self.tokens_embed = nn.Embedding(self.config.total_tokens_embeddings, self.config.n_embd)
self.tokens_embed.to(old_embed.weight.device)
self.init_weights(self.tokens_embed)
# Copy word embeddings from the previous weights
self.tokens_embed.weight.data[:self.config.vocab_size, :] = old_embed.weight.data[:self.config.vocab_size, :]
|
[
"def",
"set_num_special_tokens",
"(",
"self",
",",
"num_special_tokens",
")",
":",
"if",
"self",
".",
"config",
".",
"n_special",
"==",
"num_special_tokens",
":",
"return",
"# Update config",
"self",
".",
"config",
".",
"n_special",
"=",
"num_special_tokens",
"# Build new embeddings and initialize all new embeddings (in particular the special tokens)",
"old_embed",
"=",
"self",
".",
"tokens_embed",
"self",
".",
"tokens_embed",
"=",
"nn",
".",
"Embedding",
"(",
"self",
".",
"config",
".",
"total_tokens_embeddings",
",",
"self",
".",
"config",
".",
"n_embd",
")",
"self",
".",
"tokens_embed",
".",
"to",
"(",
"old_embed",
".",
"weight",
".",
"device",
")",
"self",
".",
"init_weights",
"(",
"self",
".",
"tokens_embed",
")",
"# Copy word embeddings from the previous weights",
"self",
".",
"tokens_embed",
".",
"weight",
".",
"data",
"[",
":",
"self",
".",
"config",
".",
"vocab_size",
",",
":",
"]",
"=",
"old_embed",
".",
"weight",
".",
"data",
"[",
":",
"self",
".",
"config",
".",
"vocab_size",
",",
":",
"]"
] |
Update input embeddings with new embedding matrice if needed
|
[
"Update",
"input",
"embeddings",
"with",
"new",
"embedding",
"matrice",
"if",
"needed"
] |
b832d5bb8a6dfc5965015b828e577677eace601e
|
https://github.com/huggingface/pytorch-pretrained-BERT/blob/b832d5bb8a6dfc5965015b828e577677eace601e/pytorch_pretrained_bert/modeling_openai.py#L605-L617
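The heart of this record is resizing an embedding table while keeping the trained rows. A standalone PyTorch sketch of that copy step, assuming torch is installed and without the OpenAI GPT classes:

import torch
import torch.nn as nn

vocab_size, n_special, n_embd = 10, 3, 4
old_embed = nn.Embedding(vocab_size, n_embd)

# Fresh table with room for the extra special tokens ...
new_embed = nn.Embedding(vocab_size + n_special, n_embd)
new_embed.to(old_embed.weight.device)
# ... then copy the trained word vectors back in, as the record does.
new_embed.weight.data[:vocab_size, :] = old_embed.weight.data[:vocab_size, :]

print(new_embed.weight.shape)                                        # torch.Size([13, 4])
print(torch.equal(new_embed.weight[:vocab_size], old_embed.weight))  # True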
|
19,374
|
huggingface/pytorch-pretrained-BERT
|
pytorch_pretrained_bert/modeling_openai.py
|
OpenAIGPTLMHeadModel.set_num_special_tokens
|
def set_num_special_tokens(self, num_special_tokens):
""" Update input and output embeddings with new embedding matrice
Make sure we are sharing the embeddings
"""
self.transformer.set_num_special_tokens(num_special_tokens)
self.lm_head.set_embeddings_weights(self.transformer.tokens_embed.weight)
|
python
|
def set_num_special_tokens(self, num_special_tokens):
""" Update input and output embeddings with new embedding matrice
Make sure we are sharing the embeddings
"""
self.transformer.set_num_special_tokens(num_special_tokens)
self.lm_head.set_embeddings_weights(self.transformer.tokens_embed.weight)
|
[
"def",
"set_num_special_tokens",
"(",
"self",
",",
"num_special_tokens",
")",
":",
"self",
".",
"transformer",
".",
"set_num_special_tokens",
"(",
"num_special_tokens",
")",
"self",
".",
"lm_head",
".",
"set_embeddings_weights",
"(",
"self",
".",
"transformer",
".",
"tokens_embed",
".",
"weight",
")"
] |
Update input and output embeddings with new embedding matrice
Make sure we are sharing the embeddings
|
[
"Update",
"input",
"and",
"output",
"embeddings",
"with",
"new",
"embedding",
"matrice",
"Make",
"sure",
"we",
"are",
"sharing",
"the",
"embeddings"
] |
b832d5bb8a6dfc5965015b828e577677eace601e
|
https://github.com/huggingface/pytorch-pretrained-BERT/blob/b832d5bb8a6dfc5965015b828e577677eace601e/pytorch_pretrained_bert/modeling_openai.py#L710-L715
|
19,375
|
huggingface/pytorch-pretrained-BERT
|
pytorch_pretrained_bert/tokenization.py
|
BertTokenizer.convert_tokens_to_ids
|
def convert_tokens_to_ids(self, tokens):
"""Converts a sequence of tokens into ids using the vocab."""
ids = []
for token in tokens:
ids.append(self.vocab[token])
if len(ids) > self.max_len:
logger.warning(
"Token indices sequence length is longer than the specified maximum "
" sequence length for this BERT model ({} > {}). Running this"
" sequence through BERT will result in indexing errors".format(len(ids), self.max_len)
)
return ids
|
python
|
def convert_tokens_to_ids(self, tokens):
"""Converts a sequence of tokens into ids using the vocab."""
ids = []
for token in tokens:
ids.append(self.vocab[token])
if len(ids) > self.max_len:
logger.warning(
"Token indices sequence length is longer than the specified maximum "
" sequence length for this BERT model ({} > {}). Running this"
" sequence through BERT will result in indexing errors".format(len(ids), self.max_len)
)
return ids
|
[
"def",
"convert_tokens_to_ids",
"(",
"self",
",",
"tokens",
")",
":",
"ids",
"=",
"[",
"]",
"for",
"token",
"in",
"tokens",
":",
"ids",
".",
"append",
"(",
"self",
".",
"vocab",
"[",
"token",
"]",
")",
"if",
"len",
"(",
"ids",
")",
">",
"self",
".",
"max_len",
":",
"logger",
".",
"warning",
"(",
"\"Token indices sequence length is longer than the specified maximum \"",
"\" sequence length for this BERT model ({} > {}). Running this\"",
"\" sequence through BERT will result in indexing errors\"",
".",
"format",
"(",
"len",
"(",
"ids",
")",
",",
"self",
".",
"max_len",
")",
")",
"return",
"ids"
] |
Converts a sequence of tokens into ids using the vocab.
|
[
"Converts",
"a",
"sequence",
"of",
"tokens",
"into",
"ids",
"using",
"the",
"vocab",
"."
] |
b832d5bb8a6dfc5965015b828e577677eace601e
|
https://github.com/huggingface/pytorch-pretrained-BERT/blob/b832d5bb8a6dfc5965015b828e577677eace601e/pytorch_pretrained_bert/tokenization.py#L117-L128
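A toy round trip through the vocab lookups in this record and the next one (`convert_ids_to_tokens`), using an in-memory dict instead of a real BERT vocab file; the real method additionally warns when the sequence exceeds `max_len`:

vocab = {"[CLS]": 0, "hello": 1, "world": 2, "[SEP]": 3}
ids_to_tokens = {v: k for k, v in vocab.items()}

def convert_tokens_to_ids(tokens):
    return [vocab[t] for t in tokens]

def convert_ids_to_tokens(ids):
    return [ids_to_tokens[i] for i in ids]

ids = convert_tokens_to_ids(["[CLS]", "hello", "world", "[SEP]"])
print(ids)                         # [0, 1, 2, 3]
print(convert_ids_to_tokens(ids))  # ['[CLS]', 'hello', 'world', '[SEP]']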
|
19,376
|
huggingface/pytorch-pretrained-BERT
|
pytorch_pretrained_bert/tokenization.py
|
BertTokenizer.convert_ids_to_tokens
|
def convert_ids_to_tokens(self, ids):
"""Converts a sequence of ids in wordpiece tokens using the vocab."""
tokens = []
for i in ids:
tokens.append(self.ids_to_tokens[i])
return tokens
|
python
|
def convert_ids_to_tokens(self, ids):
"""Converts a sequence of ids in wordpiece tokens using the vocab."""
tokens = []
for i in ids:
tokens.append(self.ids_to_tokens[i])
return tokens
|
[
"def",
"convert_ids_to_tokens",
"(",
"self",
",",
"ids",
")",
":",
"tokens",
"=",
"[",
"]",
"for",
"i",
"in",
"ids",
":",
"tokens",
".",
"append",
"(",
"self",
".",
"ids_to_tokens",
"[",
"i",
"]",
")",
"return",
"tokens"
] |
Converts a sequence of ids in wordpiece tokens using the vocab.
|
[
"Converts",
"a",
"sequence",
"of",
"ids",
"in",
"wordpiece",
"tokens",
"using",
"the",
"vocab",
"."
] |
b832d5bb8a6dfc5965015b828e577677eace601e
|
https://github.com/huggingface/pytorch-pretrained-BERT/blob/b832d5bb8a6dfc5965015b828e577677eace601e/pytorch_pretrained_bert/tokenization.py#L130-L135
|
19,377
|
huggingface/pytorch-pretrained-BERT
|
pytorch_pretrained_bert/tokenization.py
|
BertTokenizer.save_vocabulary
|
def save_vocabulary(self, vocab_path):
"""Save the tokenizer vocabulary to a directory or file."""
index = 0
if os.path.isdir(vocab_path):
vocab_file = os.path.join(vocab_path, VOCAB_NAME)
with open(vocab_file, "w", encoding="utf-8") as writer:
for token, token_index in sorted(self.vocab.items(), key=lambda kv: kv[1]):
if index != token_index:
logger.warning("Saving vocabulary to {}: vocabulary indices are not consecutive."
" Please check that the vocabulary is not corrupted!".format(vocab_file))
index = token_index
writer.write(token + u'\n')
index += 1
return vocab_file
|
python
|
def save_vocabulary(self, vocab_path):
"""Save the tokenizer vocabulary to a directory or file."""
index = 0
if os.path.isdir(vocab_path):
vocab_file = os.path.join(vocab_path, VOCAB_NAME)
with open(vocab_file, "w", encoding="utf-8") as writer:
for token, token_index in sorted(self.vocab.items(), key=lambda kv: kv[1]):
if index != token_index:
logger.warning("Saving vocabulary to {}: vocabulary indices are not consecutive."
" Please check that the vocabulary is not corrupted!".format(vocab_file))
index = token_index
writer.write(token + u'\n')
index += 1
return vocab_file
|
[
"def",
"save_vocabulary",
"(",
"self",
",",
"vocab_path",
")",
":",
"index",
"=",
"0",
"if",
"os",
".",
"path",
".",
"isdir",
"(",
"vocab_path",
")",
":",
"vocab_file",
"=",
"os",
".",
"path",
".",
"join",
"(",
"vocab_path",
",",
"VOCAB_NAME",
")",
"with",
"open",
"(",
"vocab_file",
",",
"\"w\"",
",",
"encoding",
"=",
"\"utf-8\"",
")",
"as",
"writer",
":",
"for",
"token",
",",
"token_index",
"in",
"sorted",
"(",
"self",
".",
"vocab",
".",
"items",
"(",
")",
",",
"key",
"=",
"lambda",
"kv",
":",
"kv",
"[",
"1",
"]",
")",
":",
"if",
"index",
"!=",
"token_index",
":",
"logger",
".",
"warning",
"(",
"\"Saving vocabulary to {}: vocabulary indices are not consecutive.\"",
"\" Please check that the vocabulary is not corrupted!\"",
".",
"format",
"(",
"vocab_file",
")",
")",
"index",
"=",
"token_index",
"writer",
".",
"write",
"(",
"token",
"+",
"u'\\n'",
")",
"index",
"+=",
"1",
"return",
"vocab_file"
] |
Save the tokenizer vocabulary to a directory or file.
|
[
"Save",
"the",
"tokenizer",
"vocabulary",
"to",
"a",
"directory",
"or",
"file",
"."
] |
b832d5bb8a6dfc5965015b828e577677eace601e
|
https://github.com/huggingface/pytorch-pretrained-BERT/blob/b832d5bb8a6dfc5965015b828e577677eace601e/pytorch_pretrained_bert/tokenization.py#L137-L150
|
19,378
|
huggingface/pytorch-pretrained-BERT
|
pytorch_pretrained_bert/tokenization.py
|
BertTokenizer.from_pretrained
|
def from_pretrained(cls, pretrained_model_name_or_path, cache_dir=None, *inputs, **kwargs):
"""
Instantiate a PreTrainedBertModel from a pre-trained model file.
Download and cache the pre-trained model file if needed.
"""
if pretrained_model_name_or_path in PRETRAINED_VOCAB_ARCHIVE_MAP:
vocab_file = PRETRAINED_VOCAB_ARCHIVE_MAP[pretrained_model_name_or_path]
if '-cased' in pretrained_model_name_or_path and kwargs.get('do_lower_case', True):
logger.warning("The pre-trained model you are loading is a cased model but you have not set "
"`do_lower_case` to False. We are setting `do_lower_case=False` for you but "
"you may want to check this behavior.")
kwargs['do_lower_case'] = False
elif '-cased' not in pretrained_model_name_or_path and not kwargs.get('do_lower_case', True):
logger.warning("The pre-trained model you are loading is an uncased model but you have set "
"`do_lower_case` to False. We are setting `do_lower_case=True` for you "
"but you may want to check this behavior.")
kwargs['do_lower_case'] = True
else:
vocab_file = pretrained_model_name_or_path
if os.path.isdir(vocab_file):
vocab_file = os.path.join(vocab_file, VOCAB_NAME)
# redirect to the cache, if necessary
try:
resolved_vocab_file = cached_path(vocab_file, cache_dir=cache_dir)
except EnvironmentError:
logger.error(
"Model name '{}' was not found in model name list ({}). "
"We assumed '{}' was a path or url but couldn't find any file "
"associated to this path or url.".format(
pretrained_model_name_or_path,
', '.join(PRETRAINED_VOCAB_ARCHIVE_MAP.keys()),
vocab_file))
return None
if resolved_vocab_file == vocab_file:
logger.info("loading vocabulary file {}".format(vocab_file))
else:
logger.info("loading vocabulary file {} from cache at {}".format(
vocab_file, resolved_vocab_file))
if pretrained_model_name_or_path in PRETRAINED_VOCAB_POSITIONAL_EMBEDDINGS_SIZE_MAP:
# if we're using a pretrained model, ensure the tokenizer wont index sequences longer
# than the number of positional embeddings
max_len = PRETRAINED_VOCAB_POSITIONAL_EMBEDDINGS_SIZE_MAP[pretrained_model_name_or_path]
kwargs['max_len'] = min(kwargs.get('max_len', int(1e12)), max_len)
# Instantiate tokenizer.
tokenizer = cls(resolved_vocab_file, *inputs, **kwargs)
return tokenizer
|
python
|
def from_pretrained(cls, pretrained_model_name_or_path, cache_dir=None, *inputs, **kwargs):
"""
Instantiate a PreTrainedBertModel from a pre-trained model file.
Download and cache the pre-trained model file if needed.
"""
if pretrained_model_name_or_path in PRETRAINED_VOCAB_ARCHIVE_MAP:
vocab_file = PRETRAINED_VOCAB_ARCHIVE_MAP[pretrained_model_name_or_path]
if '-cased' in pretrained_model_name_or_path and kwargs.get('do_lower_case', True):
logger.warning("The pre-trained model you are loading is a cased model but you have not set "
"`do_lower_case` to False. We are setting `do_lower_case=False` for you but "
"you may want to check this behavior.")
kwargs['do_lower_case'] = False
elif '-cased' not in pretrained_model_name_or_path and not kwargs.get('do_lower_case', True):
logger.warning("The pre-trained model you are loading is an uncased model but you have set "
"`do_lower_case` to False. We are setting `do_lower_case=True` for you "
"but you may want to check this behavior.")
kwargs['do_lower_case'] = True
else:
vocab_file = pretrained_model_name_or_path
if os.path.isdir(vocab_file):
vocab_file = os.path.join(vocab_file, VOCAB_NAME)
# redirect to the cache, if necessary
try:
resolved_vocab_file = cached_path(vocab_file, cache_dir=cache_dir)
except EnvironmentError:
logger.error(
"Model name '{}' was not found in model name list ({}). "
"We assumed '{}' was a path or url but couldn't find any file "
"associated to this path or url.".format(
pretrained_model_name_or_path,
', '.join(PRETRAINED_VOCAB_ARCHIVE_MAP.keys()),
vocab_file))
return None
if resolved_vocab_file == vocab_file:
logger.info("loading vocabulary file {}".format(vocab_file))
else:
logger.info("loading vocabulary file {} from cache at {}".format(
vocab_file, resolved_vocab_file))
if pretrained_model_name_or_path in PRETRAINED_VOCAB_POSITIONAL_EMBEDDINGS_SIZE_MAP:
# if we're using a pretrained model, ensure the tokenizer wont index sequences longer
# than the number of positional embeddings
max_len = PRETRAINED_VOCAB_POSITIONAL_EMBEDDINGS_SIZE_MAP[pretrained_model_name_or_path]
kwargs['max_len'] = min(kwargs.get('max_len', int(1e12)), max_len)
# Instantiate tokenizer.
tokenizer = cls(resolved_vocab_file, *inputs, **kwargs)
return tokenizer
|
[
"def",
"from_pretrained",
"(",
"cls",
",",
"pretrained_model_name_or_path",
",",
"cache_dir",
"=",
"None",
",",
"*",
"inputs",
",",
"*",
"*",
"kwargs",
")",
":",
"if",
"pretrained_model_name_or_path",
"in",
"PRETRAINED_VOCAB_ARCHIVE_MAP",
":",
"vocab_file",
"=",
"PRETRAINED_VOCAB_ARCHIVE_MAP",
"[",
"pretrained_model_name_or_path",
"]",
"if",
"'-cased'",
"in",
"pretrained_model_name_or_path",
"and",
"kwargs",
".",
"get",
"(",
"'do_lower_case'",
",",
"True",
")",
":",
"logger",
".",
"warning",
"(",
"\"The pre-trained model you are loading is a cased model but you have not set \"",
"\"`do_lower_case` to False. We are setting `do_lower_case=False` for you but \"",
"\"you may want to check this behavior.\"",
")",
"kwargs",
"[",
"'do_lower_case'",
"]",
"=",
"False",
"elif",
"'-cased'",
"not",
"in",
"pretrained_model_name_or_path",
"and",
"not",
"kwargs",
".",
"get",
"(",
"'do_lower_case'",
",",
"True",
")",
":",
"logger",
".",
"warning",
"(",
"\"The pre-trained model you are loading is an uncased model but you have set \"",
"\"`do_lower_case` to False. We are setting `do_lower_case=True` for you \"",
"\"but you may want to check this behavior.\"",
")",
"kwargs",
"[",
"'do_lower_case'",
"]",
"=",
"True",
"else",
":",
"vocab_file",
"=",
"pretrained_model_name_or_path",
"if",
"os",
".",
"path",
".",
"isdir",
"(",
"vocab_file",
")",
":",
"vocab_file",
"=",
"os",
".",
"path",
".",
"join",
"(",
"vocab_file",
",",
"VOCAB_NAME",
")",
"# redirect to the cache, if necessary",
"try",
":",
"resolved_vocab_file",
"=",
"cached_path",
"(",
"vocab_file",
",",
"cache_dir",
"=",
"cache_dir",
")",
"except",
"EnvironmentError",
":",
"logger",
".",
"error",
"(",
"\"Model name '{}' was not found in model name list ({}). \"",
"\"We assumed '{}' was a path or url but couldn't find any file \"",
"\"associated to this path or url.\"",
".",
"format",
"(",
"pretrained_model_name_or_path",
",",
"', '",
".",
"join",
"(",
"PRETRAINED_VOCAB_ARCHIVE_MAP",
".",
"keys",
"(",
")",
")",
",",
"vocab_file",
")",
")",
"return",
"None",
"if",
"resolved_vocab_file",
"==",
"vocab_file",
":",
"logger",
".",
"info",
"(",
"\"loading vocabulary file {}\"",
".",
"format",
"(",
"vocab_file",
")",
")",
"else",
":",
"logger",
".",
"info",
"(",
"\"loading vocabulary file {} from cache at {}\"",
".",
"format",
"(",
"vocab_file",
",",
"resolved_vocab_file",
")",
")",
"if",
"pretrained_model_name_or_path",
"in",
"PRETRAINED_VOCAB_POSITIONAL_EMBEDDINGS_SIZE_MAP",
":",
"# if we're using a pretrained model, ensure the tokenizer wont index sequences longer",
"# than the number of positional embeddings",
"max_len",
"=",
"PRETRAINED_VOCAB_POSITIONAL_EMBEDDINGS_SIZE_MAP",
"[",
"pretrained_model_name_or_path",
"]",
"kwargs",
"[",
"'max_len'",
"]",
"=",
"min",
"(",
"kwargs",
".",
"get",
"(",
"'max_len'",
",",
"int",
"(",
"1e12",
")",
")",
",",
"max_len",
")",
"# Instantiate tokenizer.",
"tokenizer",
"=",
"cls",
"(",
"resolved_vocab_file",
",",
"*",
"inputs",
",",
"*",
"*",
"kwargs",
")",
"return",
"tokenizer"
] |
Instantiate a PreTrainedBertModel from a pre-trained model file.
Download and cache the pre-trained model file if needed.
|
[
"Instantiate",
"a",
"PreTrainedBertModel",
"from",
"a",
"pre",
"-",
"trained",
"model",
"file",
".",
"Download",
"and",
"cache",
"the",
"pre",
"-",
"trained",
"model",
"file",
"if",
"needed",
"."
] |
b832d5bb8a6dfc5965015b828e577677eace601e
|
https://github.com/huggingface/pytorch-pretrained-BERT/blob/b832d5bb8a6dfc5965015b828e577677eace601e/pytorch_pretrained_bert/tokenization.py#L153-L198
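Typical use of the classmethod in this record, assuming the pytorch_pretrained_bert package is installed; the first call downloads and caches the vocabulary for the shortcut name:

from pytorch_pretrained_bert import BertTokenizer

tokenizer = BertTokenizer.from_pretrained("bert-base-uncased", do_lower_case=True)
tokens = tokenizer.tokenize("Hello, world!")
ids = tokenizer.convert_tokens_to_ids(tokens)
print(tokens)  # wordpiece tokens, lower-cased
print(ids)     # their vocabulary indices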
|
19,379
|
huggingface/pytorch-pretrained-BERT
|
pytorch_pretrained_bert/tokenization.py
|
BasicTokenizer._run_strip_accents
|
def _run_strip_accents(self, text):
"""Strips accents from a piece of text."""
text = unicodedata.normalize("NFD", text)
output = []
for char in text:
cat = unicodedata.category(char)
if cat == "Mn":
continue
output.append(char)
return "".join(output)
|
python
|
def _run_strip_accents(self, text):
"""Strips accents from a piece of text."""
text = unicodedata.normalize("NFD", text)
output = []
for char in text:
cat = unicodedata.category(char)
if cat == "Mn":
continue
output.append(char)
return "".join(output)
|
[
"def",
"_run_strip_accents",
"(",
"self",
",",
"text",
")",
":",
"text",
"=",
"unicodedata",
".",
"normalize",
"(",
"\"NFD\"",
",",
"text",
")",
"output",
"=",
"[",
"]",
"for",
"char",
"in",
"text",
":",
"cat",
"=",
"unicodedata",
".",
"category",
"(",
"char",
")",
"if",
"cat",
"==",
"\"Mn\"",
":",
"continue",
"output",
".",
"append",
"(",
"char",
")",
"return",
"\"\"",
".",
"join",
"(",
"output",
")"
] |
Strips accents from a piece of text.
|
[
"Strips",
"accents",
"from",
"a",
"piece",
"of",
"text",
"."
] |
b832d5bb8a6dfc5965015b828e577677eace601e
|
https://github.com/huggingface/pytorch-pretrained-BERT/blob/b832d5bb8a6dfc5965015b828e577677eace601e/pytorch_pretrained_bert/tokenization.py#L236-L245
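A standalone standard-library version of the accent stripping in this record:

import unicodedata

def strip_accents(text):
    # NFD splits accented characters into base character + combining mark ("Mn"),
    # and the combining marks are then dropped.
    text = unicodedata.normalize("NFD", text)
    return "".join(ch for ch in text if unicodedata.category(ch) != "Mn")

print(strip_accents("déjà vu"))   # deja vu
print(strip_accents("Ångström"))  # Angstrom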
|
19,380
|
huggingface/pytorch-pretrained-BERT
|
pytorch_pretrained_bert/tokenization.py
|
BasicTokenizer._tokenize_chinese_chars
|
def _tokenize_chinese_chars(self, text):
"""Adds whitespace around any CJK character."""
output = []
for char in text:
cp = ord(char)
if self._is_chinese_char(cp):
output.append(" ")
output.append(char)
output.append(" ")
else:
output.append(char)
return "".join(output)
|
python
|
def _tokenize_chinese_chars(self, text):
"""Adds whitespace around any CJK character."""
output = []
for char in text:
cp = ord(char)
if self._is_chinese_char(cp):
output.append(" ")
output.append(char)
output.append(" ")
else:
output.append(char)
return "".join(output)
|
[
"def",
"_tokenize_chinese_chars",
"(",
"self",
",",
"text",
")",
":",
"output",
"=",
"[",
"]",
"for",
"char",
"in",
"text",
":",
"cp",
"=",
"ord",
"(",
"char",
")",
"if",
"self",
".",
"_is_chinese_char",
"(",
"cp",
")",
":",
"output",
".",
"append",
"(",
"\" \"",
")",
"output",
".",
"append",
"(",
"char",
")",
"output",
".",
"append",
"(",
"\" \"",
")",
"else",
":",
"output",
".",
"append",
"(",
"char",
")",
"return",
"\"\"",
".",
"join",
"(",
"output",
")"
] |
Adds whitespace around any CJK character.
|
[
"Adds",
"whitespace",
"around",
"any",
"CJK",
"character",
"."
] |
b832d5bb8a6dfc5965015b828e577677eace601e
|
https://github.com/huggingface/pytorch-pretrained-BERT/blob/b832d5bb8a6dfc5965015b828e577677eace601e/pytorch_pretrained_bert/tokenization.py#L269-L280
|
19,381
|
huggingface/pytorch-pretrained-BERT
|
pytorch_pretrained_bert/tokenization.py
|
BasicTokenizer._is_chinese_char
|
def _is_chinese_char(self, cp):
"""Checks whether CP is the codepoint of a CJK character."""
# This defines a "chinese character" as anything in the CJK Unicode block:
# https://en.wikipedia.org/wiki/CJK_Unified_Ideographs_(Unicode_block)
#
# Note that the CJK Unicode block is NOT all Japanese and Korean characters,
# despite its name. The modern Korean Hangul alphabet is a different block,
# as is Japanese Hiragana and Katakana. Those alphabets are used to write
# space-separated words, so they are not treated specially and handled
# like the all of the other languages.
if ((cp >= 0x4E00 and cp <= 0x9FFF) or #
(cp >= 0x3400 and cp <= 0x4DBF) or #
(cp >= 0x20000 and cp <= 0x2A6DF) or #
(cp >= 0x2A700 and cp <= 0x2B73F) or #
(cp >= 0x2B740 and cp <= 0x2B81F) or #
(cp >= 0x2B820 and cp <= 0x2CEAF) or
(cp >= 0xF900 and cp <= 0xFAFF) or #
(cp >= 0x2F800 and cp <= 0x2FA1F)): #
return True
return False
|
python
|
def _is_chinese_char(self, cp):
"""Checks whether CP is the codepoint of a CJK character."""
# This defines a "chinese character" as anything in the CJK Unicode block:
# https://en.wikipedia.org/wiki/CJK_Unified_Ideographs_(Unicode_block)
#
# Note that the CJK Unicode block is NOT all Japanese and Korean characters,
# despite its name. The modern Korean Hangul alphabet is a different block,
# as is Japanese Hiragana and Katakana. Those alphabets are used to write
# space-separated words, so they are not treated specially and handled
# like the all of the other languages.
if ((cp >= 0x4E00 and cp <= 0x9FFF) or #
(cp >= 0x3400 and cp <= 0x4DBF) or #
(cp >= 0x20000 and cp <= 0x2A6DF) or #
(cp >= 0x2A700 and cp <= 0x2B73F) or #
(cp >= 0x2B740 and cp <= 0x2B81F) or #
(cp >= 0x2B820 and cp <= 0x2CEAF) or
(cp >= 0xF900 and cp <= 0xFAFF) or #
(cp >= 0x2F800 and cp <= 0x2FA1F)): #
return True
return False
|
[
"def",
"_is_chinese_char",
"(",
"self",
",",
"cp",
")",
":",
"# This defines a \"chinese character\" as anything in the CJK Unicode block:",
"# https://en.wikipedia.org/wiki/CJK_Unified_Ideographs_(Unicode_block)",
"#",
"# Note that the CJK Unicode block is NOT all Japanese and Korean characters,",
"# despite its name. The modern Korean Hangul alphabet is a different block,",
"# as is Japanese Hiragana and Katakana. Those alphabets are used to write",
"# space-separated words, so they are not treated specially and handled",
"# like the all of the other languages.",
"if",
"(",
"(",
"cp",
">=",
"0x4E00",
"and",
"cp",
"<=",
"0x9FFF",
")",
"or",
"#",
"(",
"cp",
">=",
"0x3400",
"and",
"cp",
"<=",
"0x4DBF",
")",
"or",
"#",
"(",
"cp",
">=",
"0x20000",
"and",
"cp",
"<=",
"0x2A6DF",
")",
"or",
"#",
"(",
"cp",
">=",
"0x2A700",
"and",
"cp",
"<=",
"0x2B73F",
")",
"or",
"#",
"(",
"cp",
">=",
"0x2B740",
"and",
"cp",
"<=",
"0x2B81F",
")",
"or",
"#",
"(",
"cp",
">=",
"0x2B820",
"and",
"cp",
"<=",
"0x2CEAF",
")",
"or",
"(",
"cp",
">=",
"0xF900",
"and",
"cp",
"<=",
"0xFAFF",
")",
"or",
"#",
"(",
"cp",
">=",
"0x2F800",
"and",
"cp",
"<=",
"0x2FA1F",
")",
")",
":",
"#",
"return",
"True",
"return",
"False"
] |
Checks whether CP is the codepoint of a CJK character.
|
[
"Checks",
"whether",
"CP",
"is",
"the",
"codepoint",
"of",
"a",
"CJK",
"character",
"."
] |
b832d5bb8a6dfc5965015b828e577677eace601e
|
https://github.com/huggingface/pytorch-pretrained-BERT/blob/b832d5bb8a6dfc5965015b828e577677eace601e/pytorch_pretrained_bert/tokenization.py#L282-L302
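A compact standard-library check of the same CJK code-point ranges (copied from this record), together with the whitespace padding from `_tokenize_chinese_chars` two records above:

CJK_RANGES = [
    (0x4E00, 0x9FFF), (0x3400, 0x4DBF), (0x20000, 0x2A6DF), (0x2A700, 0x2B73F),
    (0x2B740, 0x2B81F), (0x2B820, 0x2CEAF), (0xF900, 0xFAFF), (0x2F800, 0x2FA1F),
]

def is_chinese_char(cp):
    return any(lo <= cp <= hi for lo, hi in CJK_RANGES)

def tokenize_chinese_chars(text):
    # Pad every CJK character with spaces so plain whitespace tokenization
    # later splits them into single-character tokens.
    return "".join(" %s " % ch if is_chinese_char(ord(ch)) else ch for ch in text)

print(is_chinese_char(ord("中")), is_chinese_char(ord("a")))  # True False
print(tokenize_chinese_chars("BERT是模型"))                    # 'BERT 是  模  型 '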
|
19,382
|
huggingface/pytorch-pretrained-BERT
|
examples/lm_finetuning/simple_lm_finetuning.py
|
BERTDataset.get_next_line
|
def get_next_line(self):
""" Gets next line of random_file and starts over when reaching end of file"""
try:
line = next(self.random_file).strip()
#keep track of which document we are currently looking at to later avoid having the same doc as t1
if line == "":
self.current_random_doc = self.current_random_doc + 1
line = next(self.random_file).strip()
except StopIteration:
self.random_file.close()
self.random_file = open(self.corpus_path, "r", encoding=self.encoding)
line = next(self.random_file).strip()
return line
|
python
|
def get_next_line(self):
""" Gets next line of random_file and starts over when reaching end of file"""
try:
line = next(self.random_file).strip()
#keep track of which document we are currently looking at to later avoid having the same doc as t1
if line == "":
self.current_random_doc = self.current_random_doc + 1
line = next(self.random_file).strip()
except StopIteration:
self.random_file.close()
self.random_file = open(self.corpus_path, "r", encoding=self.encoding)
line = next(self.random_file).strip()
return line
|
[
"def",
"get_next_line",
"(",
"self",
")",
":",
"try",
":",
"line",
"=",
"next",
"(",
"self",
".",
"random_file",
")",
".",
"strip",
"(",
")",
"#keep track of which document we are currently looking at to later avoid having the same doc as t1",
"if",
"line",
"==",
"\"\"",
":",
"self",
".",
"current_random_doc",
"=",
"self",
".",
"current_random_doc",
"+",
"1",
"line",
"=",
"next",
"(",
"self",
".",
"random_file",
")",
".",
"strip",
"(",
")",
"except",
"StopIteration",
":",
"self",
".",
"random_file",
".",
"close",
"(",
")",
"self",
".",
"random_file",
"=",
"open",
"(",
"self",
".",
"corpus_path",
",",
"\"r\"",
",",
"encoding",
"=",
"self",
".",
"encoding",
")",
"line",
"=",
"next",
"(",
"self",
".",
"random_file",
")",
".",
"strip",
"(",
")",
"return",
"line"
] |
Gets next line of random_file and starts over when reaching end of file
|
[
"Gets",
"next",
"line",
"of",
"random_file",
"and",
"starts",
"over",
"when",
"reaching",
"end",
"of",
"file"
] |
b832d5bb8a6dfc5965015b828e577677eace601e
|
https://github.com/huggingface/pytorch-pretrained-BERT/blob/b832d5bb8a6dfc5965015b828e577677eace601e/examples/lm_finetuning/simple_lm_finetuning.py#L219-L231
|
19,383
|
huggingface/pytorch-pretrained-BERT
|
examples/lm_finetuning/pregenerate_training_data.py
|
create_masked_lm_predictions
|
def create_masked_lm_predictions(tokens, masked_lm_prob, max_predictions_per_seq, vocab_list):
"""Creates the predictions for the masked LM objective. This is mostly copied from the Google BERT repo, but
with several refactors to clean it up and remove a lot of unnecessary variables."""
cand_indices = []
for (i, token) in enumerate(tokens):
if token == "[CLS]" or token == "[SEP]":
continue
cand_indices.append(i)
num_to_mask = min(max_predictions_per_seq,
max(1, int(round(len(tokens) * masked_lm_prob))))
shuffle(cand_indices)
mask_indices = sorted(sample(cand_indices, num_to_mask))
masked_token_labels = []
for index in mask_indices:
# 80% of the time, replace with [MASK]
if random() < 0.8:
masked_token = "[MASK]"
else:
# 10% of the time, keep original
if random() < 0.5:
masked_token = tokens[index]
# 10% of the time, replace with random word
else:
masked_token = choice(vocab_list)
masked_token_labels.append(tokens[index])
# Once we've saved the true label for that token, we can overwrite it with the masked version
tokens[index] = masked_token
return tokens, mask_indices, masked_token_labels
|
python
|
def create_masked_lm_predictions(tokens, masked_lm_prob, max_predictions_per_seq, vocab_list):
"""Creates the predictions for the masked LM objective. This is mostly copied from the Google BERT repo, but
with several refactors to clean it up and remove a lot of unnecessary variables."""
cand_indices = []
for (i, token) in enumerate(tokens):
if token == "[CLS]" or token == "[SEP]":
continue
cand_indices.append(i)
num_to_mask = min(max_predictions_per_seq,
max(1, int(round(len(tokens) * masked_lm_prob))))
shuffle(cand_indices)
mask_indices = sorted(sample(cand_indices, num_to_mask))
masked_token_labels = []
for index in mask_indices:
# 80% of the time, replace with [MASK]
if random() < 0.8:
masked_token = "[MASK]"
else:
# 10% of the time, keep original
if random() < 0.5:
masked_token = tokens[index]
# 10% of the time, replace with random word
else:
masked_token = choice(vocab_list)
masked_token_labels.append(tokens[index])
# Once we've saved the true label for that token, we can overwrite it with the masked version
tokens[index] = masked_token
return tokens, mask_indices, masked_token_labels
|
[
"def",
"create_masked_lm_predictions",
"(",
"tokens",
",",
"masked_lm_prob",
",",
"max_predictions_per_seq",
",",
"vocab_list",
")",
":",
"cand_indices",
"=",
"[",
"]",
"for",
"(",
"i",
",",
"token",
")",
"in",
"enumerate",
"(",
"tokens",
")",
":",
"if",
"token",
"==",
"\"[CLS]\"",
"or",
"token",
"==",
"\"[SEP]\"",
":",
"continue",
"cand_indices",
".",
"append",
"(",
"i",
")",
"num_to_mask",
"=",
"min",
"(",
"max_predictions_per_seq",
",",
"max",
"(",
"1",
",",
"int",
"(",
"round",
"(",
"len",
"(",
"tokens",
")",
"*",
"masked_lm_prob",
")",
")",
")",
")",
"shuffle",
"(",
"cand_indices",
")",
"mask_indices",
"=",
"sorted",
"(",
"sample",
"(",
"cand_indices",
",",
"num_to_mask",
")",
")",
"masked_token_labels",
"=",
"[",
"]",
"for",
"index",
"in",
"mask_indices",
":",
"# 80% of the time, replace with [MASK]",
"if",
"random",
"(",
")",
"<",
"0.8",
":",
"masked_token",
"=",
"\"[MASK]\"",
"else",
":",
"# 10% of the time, keep original",
"if",
"random",
"(",
")",
"<",
"0.5",
":",
"masked_token",
"=",
"tokens",
"[",
"index",
"]",
"# 10% of the time, replace with random word",
"else",
":",
"masked_token",
"=",
"choice",
"(",
"vocab_list",
")",
"masked_token_labels",
".",
"append",
"(",
"tokens",
"[",
"index",
"]",
")",
"# Once we've saved the true label for that token, we can overwrite it with the masked version",
"tokens",
"[",
"index",
"]",
"=",
"masked_token",
"return",
"tokens",
",",
"mask_indices",
",",
"masked_token_labels"
] |
Creates the predictions for the masked LM objective. This is mostly copied from the Google BERT repo, but
with several refactors to clean it up and remove a lot of unnecessary variables.
|
[
"Creates",
"the",
"predictions",
"for",
"the",
"masked",
"LM",
"objective",
".",
"This",
"is",
"mostly",
"copied",
"from",
"the",
"Google",
"BERT",
"repo",
"but",
"with",
"several",
"refactors",
"to",
"clean",
"it",
"up",
"and",
"remove",
"a",
"lot",
"of",
"unnecessary",
"variables",
"."
] |
b832d5bb8a6dfc5965015b828e577677eace601e
|
https://github.com/huggingface/pytorch-pretrained-BERT/blob/b832d5bb8a6dfc5965015b828e577677eace601e/examples/lm_finetuning/pregenerate_training_data.py#L102-L131
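Assuming `create_masked_lm_predictions` from this record is in scope (the original script imports shuffle, sample, random and choice from the random module), a toy call looks like this; the positions chosen are random, so the comments only describe the shape of the result:

from random import seed

seed(0)  # make the masking choices repeatable for a given Python version
tokens = ["[CLS]", "the", "man", "went", "to", "the", "store", "[SEP]"]
vocab_list = ["the", "man", "went", "to", "store", "bought", "milk"]

masked_tokens, mask_positions, labels = create_masked_lm_predictions(
    list(tokens), masked_lm_prob=0.15, max_predictions_per_seq=2, vocab_list=vocab_list)

print(mask_positions)  # exactly one position: 15% of 8 tokens rounds to 1, [CLS]/[SEP] excluded
print(labels)          # the original token at that position
print(masked_tokens)   # the sequence with that position usually replaced by [MASK]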
|
19,384
|
huggingface/pytorch-pretrained-BERT
|
pytorch_pretrained_bert/modeling_transfo_xl.py
|
build_tf_to_pytorch_map
|
def build_tf_to_pytorch_map(model, config):
""" A map of modules from TF to PyTorch.
This time I use a map to keep the PyTorch model as identical to the original PyTorch model as possible.
"""
tf_to_pt_map = {}
if hasattr(model, 'transformer'):
# We are loading in a TransfoXLLMHeadModel => we will load also the Adaptive Softmax
tf_to_pt_map.update({
"transformer/adaptive_softmax/cutoff_0/cluster_W": model.crit.cluster_weight,
"transformer/adaptive_softmax/cutoff_0/cluster_b": model.crit.cluster_bias})
for i, (out_l, proj_l, tie_proj) in enumerate(zip(
model.crit.out_layers,
model.crit.out_projs,
config.tie_projs)):
layer_str = "transformer/adaptive_softmax/cutoff_%d/" % i
if config.tie_weight:
tf_to_pt_map.update({
layer_str + 'b': out_l.bias})
else:
raise NotImplementedError
# I don't think this is implemented in the TF code
tf_to_pt_map.update({
layer_str + 'lookup_table': out_l.weight,
layer_str + 'b': out_l.bias})
if not tie_proj:
tf_to_pt_map.update({
layer_str + 'proj': proj_l
})
# Now load the rest of the transformer
model = model.transformer
# Embeddings
for i, (embed_l, proj_l) in enumerate(zip(model.word_emb.emb_layers, model.word_emb.emb_projs)):
layer_str = "transformer/adaptive_embed/cutoff_%d/" % i
tf_to_pt_map.update({
layer_str + 'lookup_table': embed_l.weight,
layer_str + 'proj_W': proj_l
})
# Transformer blocks
for i, b in enumerate(model.layers):
layer_str = "transformer/layer_%d/" % i
tf_to_pt_map.update({
layer_str + "rel_attn/LayerNorm/gamma": b.dec_attn.layer_norm.weight,
layer_str + "rel_attn/LayerNorm/beta": b.dec_attn.layer_norm.bias,
layer_str + "rel_attn/o/kernel": b.dec_attn.o_net.weight,
layer_str + "rel_attn/qkv/kernel": b.dec_attn.qkv_net.weight,
layer_str + "rel_attn/r/kernel": b.dec_attn.r_net.weight,
layer_str + "ff/LayerNorm/gamma": b.pos_ff.layer_norm.weight,
layer_str + "ff/LayerNorm/beta": b.pos_ff.layer_norm.bias,
layer_str + "ff/layer_1/kernel": b.pos_ff.CoreNet[0].weight,
layer_str + "ff/layer_1/bias": b.pos_ff.CoreNet[0].bias,
layer_str + "ff/layer_2/kernel": b.pos_ff.CoreNet[3].weight,
layer_str + "ff/layer_2/bias": b.pos_ff.CoreNet[3].bias,
})
# Relative positioning biases
if config.untie_r:
r_r_list = []
r_w_list = []
for b in model.layers:
r_r_list.append(b.dec_attn.r_r_bias)
r_w_list.append(b.dec_attn.r_w_bias)
else:
r_r_list = [model.r_r_bias]
r_w_list = [model.r_w_bias]
tf_to_pt_map.update({
'transformer/r_r_bias': r_r_list,
'transformer/r_w_bias': r_w_list})
return tf_to_pt_map
|
python
|
def build_tf_to_pytorch_map(model, config):
""" A map of modules from TF to PyTorch.
This time I use a map to keep the PyTorch model as identical to the original PyTorch model as possible.
"""
tf_to_pt_map = {}
if hasattr(model, 'transformer'):
# We are loading in a TransfoXLLMHeadModel => we will load also the Adaptive Softmax
tf_to_pt_map.update({
"transformer/adaptive_softmax/cutoff_0/cluster_W": model.crit.cluster_weight,
"transformer/adaptive_softmax/cutoff_0/cluster_b": model.crit.cluster_bias})
for i, (out_l, proj_l, tie_proj) in enumerate(zip(
model.crit.out_layers,
model.crit.out_projs,
config.tie_projs)):
layer_str = "transformer/adaptive_softmax/cutoff_%d/" % i
if config.tie_weight:
tf_to_pt_map.update({
layer_str + 'b': out_l.bias})
else:
raise NotImplementedError
# I don't think this is implemented in the TF code
tf_to_pt_map.update({
layer_str + 'lookup_table': out_l.weight,
layer_str + 'b': out_l.bias})
if not tie_proj:
tf_to_pt_map.update({
layer_str + 'proj': proj_l
})
# Now load the rest of the transformer
model = model.transformer
# Embeddings
for i, (embed_l, proj_l) in enumerate(zip(model.word_emb.emb_layers, model.word_emb.emb_projs)):
layer_str = "transformer/adaptive_embed/cutoff_%d/" % i
tf_to_pt_map.update({
layer_str + 'lookup_table': embed_l.weight,
layer_str + 'proj_W': proj_l
})
# Transformer blocks
for i, b in enumerate(model.layers):
layer_str = "transformer/layer_%d/" % i
tf_to_pt_map.update({
layer_str + "rel_attn/LayerNorm/gamma": b.dec_attn.layer_norm.weight,
layer_str + "rel_attn/LayerNorm/beta": b.dec_attn.layer_norm.bias,
layer_str + "rel_attn/o/kernel": b.dec_attn.o_net.weight,
layer_str + "rel_attn/qkv/kernel": b.dec_attn.qkv_net.weight,
layer_str + "rel_attn/r/kernel": b.dec_attn.r_net.weight,
layer_str + "ff/LayerNorm/gamma": b.pos_ff.layer_norm.weight,
layer_str + "ff/LayerNorm/beta": b.pos_ff.layer_norm.bias,
layer_str + "ff/layer_1/kernel": b.pos_ff.CoreNet[0].weight,
layer_str + "ff/layer_1/bias": b.pos_ff.CoreNet[0].bias,
layer_str + "ff/layer_2/kernel": b.pos_ff.CoreNet[3].weight,
layer_str + "ff/layer_2/bias": b.pos_ff.CoreNet[3].bias,
})
# Relative positioning biases
if config.untie_r:
r_r_list = []
r_w_list = []
for b in model.layers:
r_r_list.append(b.dec_attn.r_r_bias)
r_w_list.append(b.dec_attn.r_w_bias)
else:
r_r_list = [model.r_r_bias]
r_w_list = [model.r_w_bias]
tf_to_pt_map.update({
'transformer/r_r_bias': r_r_list,
'transformer/r_w_bias': r_w_list})
return tf_to_pt_map
|
[
"def",
"build_tf_to_pytorch_map",
"(",
"model",
",",
"config",
")",
":",
"tf_to_pt_map",
"=",
"{",
"}",
"if",
"hasattr",
"(",
"model",
",",
"'transformer'",
")",
":",
"# We are loading in a TransfoXLLMHeadModel => we will load also the Adaptive Softmax",
"tf_to_pt_map",
".",
"update",
"(",
"{",
"\"transformer/adaptive_softmax/cutoff_0/cluster_W\"",
":",
"model",
".",
"crit",
".",
"cluster_weight",
",",
"\"transformer/adaptive_softmax/cutoff_0/cluster_b\"",
":",
"model",
".",
"crit",
".",
"cluster_bias",
"}",
")",
"for",
"i",
",",
"(",
"out_l",
",",
"proj_l",
",",
"tie_proj",
")",
"in",
"enumerate",
"(",
"zip",
"(",
"model",
".",
"crit",
".",
"out_layers",
",",
"model",
".",
"crit",
".",
"out_projs",
",",
"config",
".",
"tie_projs",
")",
")",
":",
"layer_str",
"=",
"\"transformer/adaptive_softmax/cutoff_%d/\"",
"%",
"i",
"if",
"config",
".",
"tie_weight",
":",
"tf_to_pt_map",
".",
"update",
"(",
"{",
"layer_str",
"+",
"'b'",
":",
"out_l",
".",
"bias",
"}",
")",
"else",
":",
"raise",
"NotImplementedError",
"# I don't think this is implemented in the TF code",
"tf_to_pt_map",
".",
"update",
"(",
"{",
"layer_str",
"+",
"'lookup_table'",
":",
"out_l",
".",
"weight",
",",
"layer_str",
"+",
"'b'",
":",
"out_l",
".",
"bias",
"}",
")",
"if",
"not",
"tie_proj",
":",
"tf_to_pt_map",
".",
"update",
"(",
"{",
"layer_str",
"+",
"'proj'",
":",
"proj_l",
"}",
")",
"# Now load the rest of the transformer",
"model",
"=",
"model",
".",
"transformer",
"# Embeddings",
"for",
"i",
",",
"(",
"embed_l",
",",
"proj_l",
")",
"in",
"enumerate",
"(",
"zip",
"(",
"model",
".",
"word_emb",
".",
"emb_layers",
",",
"model",
".",
"word_emb",
".",
"emb_projs",
")",
")",
":",
"layer_str",
"=",
"\"transformer/adaptive_embed/cutoff_%d/\"",
"%",
"i",
"tf_to_pt_map",
".",
"update",
"(",
"{",
"layer_str",
"+",
"'lookup_table'",
":",
"embed_l",
".",
"weight",
",",
"layer_str",
"+",
"'proj_W'",
":",
"proj_l",
"}",
")",
"# Transformer blocks",
"for",
"i",
",",
"b",
"in",
"enumerate",
"(",
"model",
".",
"layers",
")",
":",
"layer_str",
"=",
"\"transformer/layer_%d/\"",
"%",
"i",
"tf_to_pt_map",
".",
"update",
"(",
"{",
"layer_str",
"+",
"\"rel_attn/LayerNorm/gamma\"",
":",
"b",
".",
"dec_attn",
".",
"layer_norm",
".",
"weight",
",",
"layer_str",
"+",
"\"rel_attn/LayerNorm/beta\"",
":",
"b",
".",
"dec_attn",
".",
"layer_norm",
".",
"bias",
",",
"layer_str",
"+",
"\"rel_attn/o/kernel\"",
":",
"b",
".",
"dec_attn",
".",
"o_net",
".",
"weight",
",",
"layer_str",
"+",
"\"rel_attn/qkv/kernel\"",
":",
"b",
".",
"dec_attn",
".",
"qkv_net",
".",
"weight",
",",
"layer_str",
"+",
"\"rel_attn/r/kernel\"",
":",
"b",
".",
"dec_attn",
".",
"r_net",
".",
"weight",
",",
"layer_str",
"+",
"\"ff/LayerNorm/gamma\"",
":",
"b",
".",
"pos_ff",
".",
"layer_norm",
".",
"weight",
",",
"layer_str",
"+",
"\"ff/LayerNorm/beta\"",
":",
"b",
".",
"pos_ff",
".",
"layer_norm",
".",
"bias",
",",
"layer_str",
"+",
"\"ff/layer_1/kernel\"",
":",
"b",
".",
"pos_ff",
".",
"CoreNet",
"[",
"0",
"]",
".",
"weight",
",",
"layer_str",
"+",
"\"ff/layer_1/bias\"",
":",
"b",
".",
"pos_ff",
".",
"CoreNet",
"[",
"0",
"]",
".",
"bias",
",",
"layer_str",
"+",
"\"ff/layer_2/kernel\"",
":",
"b",
".",
"pos_ff",
".",
"CoreNet",
"[",
"3",
"]",
".",
"weight",
",",
"layer_str",
"+",
"\"ff/layer_2/bias\"",
":",
"b",
".",
"pos_ff",
".",
"CoreNet",
"[",
"3",
"]",
".",
"bias",
",",
"}",
")",
"# Relative positioning biases",
"if",
"config",
".",
"untie_r",
":",
"r_r_list",
"=",
"[",
"]",
"r_w_list",
"=",
"[",
"]",
"for",
"b",
"in",
"model",
".",
"layers",
":",
"r_r_list",
".",
"append",
"(",
"b",
".",
"dec_attn",
".",
"r_r_bias",
")",
"r_w_list",
".",
"append",
"(",
"b",
".",
"dec_attn",
".",
"r_w_bias",
")",
"else",
":",
"r_r_list",
"=",
"[",
"model",
".",
"r_r_bias",
"]",
"r_w_list",
"=",
"[",
"model",
".",
"r_w_bias",
"]",
"tf_to_pt_map",
".",
"update",
"(",
"{",
"'transformer/r_r_bias'",
":",
"r_r_list",
",",
"'transformer/r_w_bias'",
":",
"r_w_list",
"}",
")",
"return",
"tf_to_pt_map"
] |
A map of modules from TF to PyTorch.
This time I use a map to keep the PyTorch model as identical to the original PyTorch model as possible.
|
[
"A",
"map",
"of",
"modules",
"from",
"TF",
"to",
"PyTorch",
".",
"This",
"time",
"I",
"use",
"a",
"map",
"to",
"keep",
"the",
"PyTorch",
"model",
"as",
"identical",
"to",
"the",
"original",
"PyTorch",
"model",
"as",
"possible",
"."
] |
b832d5bb8a6dfc5965015b828e577677eace601e
|
https://github.com/huggingface/pytorch-pretrained-BERT/blob/b832d5bb8a6dfc5965015b828e577677eace601e/pytorch_pretrained_bert/modeling_transfo_xl.py#L56-L126
|
19,385
|
pandas-dev/pandas
|
pandas/tseries/frequencies.py
|
to_offset
|
def to_offset(freq):
"""
Return DateOffset object from string or tuple representation
or datetime.timedelta object
Parameters
----------
freq : str, tuple, datetime.timedelta, DateOffset or None
Returns
-------
DateOffset
None if freq is None.
Raises
------
ValueError
If freq is an invalid frequency
See Also
--------
DateOffset
Examples
--------
>>> to_offset('5min')
<5 * Minutes>
>>> to_offset('1D1H')
<25 * Hours>
>>> to_offset(('W', 2))
<2 * Weeks: weekday=6>
>>> to_offset((2, 'B'))
<2 * BusinessDays>
>>> to_offset(datetime.timedelta(days=1))
<Day>
>>> to_offset(Hour())
<Hour>
"""
if freq is None:
return None
if isinstance(freq, DateOffset):
return freq
if isinstance(freq, tuple):
name = freq[0]
stride = freq[1]
if isinstance(stride, str):
name, stride = stride, name
name, _ = libfreqs._base_and_stride(name)
delta = get_offset(name) * stride
elif isinstance(freq, timedelta):
delta = None
freq = Timedelta(freq)
try:
for name in freq.components._fields:
offset = _name_to_offset_map[name]
stride = getattr(freq.components, name)
if stride != 0:
offset = stride * offset
if delta is None:
delta = offset
else:
delta = delta + offset
except Exception:
raise ValueError(libfreqs.INVALID_FREQ_ERR_MSG.format(freq))
else:
delta = None
stride_sign = None
try:
splitted = re.split(libfreqs.opattern, freq)
if splitted[-1] != '' and not splitted[-1].isspace():
# the last element must be blank
raise ValueError('last element must be blank')
for sep, stride, name in zip(splitted[0::4], splitted[1::4],
splitted[2::4]):
if sep != '' and not sep.isspace():
raise ValueError('separator must be spaces')
prefix = libfreqs._lite_rule_alias.get(name) or name
if stride_sign is None:
stride_sign = -1 if stride.startswith('-') else 1
if not stride:
stride = 1
if prefix in Resolution._reso_str_bump_map.keys():
stride, name = Resolution.get_stride_from_decimal(
float(stride), prefix
)
stride = int(stride)
offset = get_offset(name)
offset = offset * int(np.fabs(stride) * stride_sign)
if delta is None:
delta = offset
else:
delta = delta + offset
except Exception:
raise ValueError(libfreqs.INVALID_FREQ_ERR_MSG.format(freq))
if delta is None:
raise ValueError(libfreqs.INVALID_FREQ_ERR_MSG.format(freq))
return delta
|
python
|
def to_offset(freq):
"""
Return DateOffset object from string or tuple representation
or datetime.timedelta object
Parameters
----------
freq : str, tuple, datetime.timedelta, DateOffset or None
Returns
-------
DateOffset
None if freq is None.
Raises
------
ValueError
If freq is an invalid frequency
See Also
--------
DateOffset
Examples
--------
>>> to_offset('5min')
<5 * Minutes>
>>> to_offset('1D1H')
<25 * Hours>
>>> to_offset(('W', 2))
<2 * Weeks: weekday=6>
>>> to_offset((2, 'B'))
<2 * BusinessDays>
>>> to_offset(datetime.timedelta(days=1))
<Day>
>>> to_offset(Hour())
<Hour>
"""
if freq is None:
return None
if isinstance(freq, DateOffset):
return freq
if isinstance(freq, tuple):
name = freq[0]
stride = freq[1]
if isinstance(stride, str):
name, stride = stride, name
name, _ = libfreqs._base_and_stride(name)
delta = get_offset(name) * stride
elif isinstance(freq, timedelta):
delta = None
freq = Timedelta(freq)
try:
for name in freq.components._fields:
offset = _name_to_offset_map[name]
stride = getattr(freq.components, name)
if stride != 0:
offset = stride * offset
if delta is None:
delta = offset
else:
delta = delta + offset
except Exception:
raise ValueError(libfreqs.INVALID_FREQ_ERR_MSG.format(freq))
else:
delta = None
stride_sign = None
try:
splitted = re.split(libfreqs.opattern, freq)
if splitted[-1] != '' and not splitted[-1].isspace():
# the last element must be blank
raise ValueError('last element must be blank')
for sep, stride, name in zip(splitted[0::4], splitted[1::4],
splitted[2::4]):
if sep != '' and not sep.isspace():
raise ValueError('separator must be spaces')
prefix = libfreqs._lite_rule_alias.get(name) or name
if stride_sign is None:
stride_sign = -1 if stride.startswith('-') else 1
if not stride:
stride = 1
if prefix in Resolution._reso_str_bump_map.keys():
stride, name = Resolution.get_stride_from_decimal(
float(stride), prefix
)
stride = int(stride)
offset = get_offset(name)
offset = offset * int(np.fabs(stride) * stride_sign)
if delta is None:
delta = offset
else:
delta = delta + offset
except Exception:
raise ValueError(libfreqs.INVALID_FREQ_ERR_MSG.format(freq))
if delta is None:
raise ValueError(libfreqs.INVALID_FREQ_ERR_MSG.format(freq))
return delta
|
[
"def",
"to_offset",
"(",
"freq",
")",
":",
"if",
"freq",
"is",
"None",
":",
"return",
"None",
"if",
"isinstance",
"(",
"freq",
",",
"DateOffset",
")",
":",
"return",
"freq",
"if",
"isinstance",
"(",
"freq",
",",
"tuple",
")",
":",
"name",
"=",
"freq",
"[",
"0",
"]",
"stride",
"=",
"freq",
"[",
"1",
"]",
"if",
"isinstance",
"(",
"stride",
",",
"str",
")",
":",
"name",
",",
"stride",
"=",
"stride",
",",
"name",
"name",
",",
"_",
"=",
"libfreqs",
".",
"_base_and_stride",
"(",
"name",
")",
"delta",
"=",
"get_offset",
"(",
"name",
")",
"*",
"stride",
"elif",
"isinstance",
"(",
"freq",
",",
"timedelta",
")",
":",
"delta",
"=",
"None",
"freq",
"=",
"Timedelta",
"(",
"freq",
")",
"try",
":",
"for",
"name",
"in",
"freq",
".",
"components",
".",
"_fields",
":",
"offset",
"=",
"_name_to_offset_map",
"[",
"name",
"]",
"stride",
"=",
"getattr",
"(",
"freq",
".",
"components",
",",
"name",
")",
"if",
"stride",
"!=",
"0",
":",
"offset",
"=",
"stride",
"*",
"offset",
"if",
"delta",
"is",
"None",
":",
"delta",
"=",
"offset",
"else",
":",
"delta",
"=",
"delta",
"+",
"offset",
"except",
"Exception",
":",
"raise",
"ValueError",
"(",
"libfreqs",
".",
"INVALID_FREQ_ERR_MSG",
".",
"format",
"(",
"freq",
")",
")",
"else",
":",
"delta",
"=",
"None",
"stride_sign",
"=",
"None",
"try",
":",
"splitted",
"=",
"re",
".",
"split",
"(",
"libfreqs",
".",
"opattern",
",",
"freq",
")",
"if",
"splitted",
"[",
"-",
"1",
"]",
"!=",
"''",
"and",
"not",
"splitted",
"[",
"-",
"1",
"]",
".",
"isspace",
"(",
")",
":",
"# the last element must be blank",
"raise",
"ValueError",
"(",
"'last element must be blank'",
")",
"for",
"sep",
",",
"stride",
",",
"name",
"in",
"zip",
"(",
"splitted",
"[",
"0",
":",
":",
"4",
"]",
",",
"splitted",
"[",
"1",
":",
":",
"4",
"]",
",",
"splitted",
"[",
"2",
":",
":",
"4",
"]",
")",
":",
"if",
"sep",
"!=",
"''",
"and",
"not",
"sep",
".",
"isspace",
"(",
")",
":",
"raise",
"ValueError",
"(",
"'separator must be spaces'",
")",
"prefix",
"=",
"libfreqs",
".",
"_lite_rule_alias",
".",
"get",
"(",
"name",
")",
"or",
"name",
"if",
"stride_sign",
"is",
"None",
":",
"stride_sign",
"=",
"-",
"1",
"if",
"stride",
".",
"startswith",
"(",
"'-'",
")",
"else",
"1",
"if",
"not",
"stride",
":",
"stride",
"=",
"1",
"if",
"prefix",
"in",
"Resolution",
".",
"_reso_str_bump_map",
".",
"keys",
"(",
")",
":",
"stride",
",",
"name",
"=",
"Resolution",
".",
"get_stride_from_decimal",
"(",
"float",
"(",
"stride",
")",
",",
"prefix",
")",
"stride",
"=",
"int",
"(",
"stride",
")",
"offset",
"=",
"get_offset",
"(",
"name",
")",
"offset",
"=",
"offset",
"*",
"int",
"(",
"np",
".",
"fabs",
"(",
"stride",
")",
"*",
"stride_sign",
")",
"if",
"delta",
"is",
"None",
":",
"delta",
"=",
"offset",
"else",
":",
"delta",
"=",
"delta",
"+",
"offset",
"except",
"Exception",
":",
"raise",
"ValueError",
"(",
"libfreqs",
".",
"INVALID_FREQ_ERR_MSG",
".",
"format",
"(",
"freq",
")",
")",
"if",
"delta",
"is",
"None",
":",
"raise",
"ValueError",
"(",
"libfreqs",
".",
"INVALID_FREQ_ERR_MSG",
".",
"format",
"(",
"freq",
")",
")",
"return",
"delta"
] |
Return DateOffset object from string or tuple representation
or datetime.timedelta object
Parameters
----------
freq : str, tuple, datetime.timedelta, DateOffset or None
Returns
-------
DateOffset
None if freq is None.
Raises
------
ValueError
If freq is an invalid frequency
See Also
--------
DateOffset
Examples
--------
>>> to_offset('5min')
<5 * Minutes>
>>> to_offset('1D1H')
<25 * Hours>
>>> to_offset(('W', 2))
<2 * Weeks: weekday=6>
>>> to_offset((2, 'B'))
<2 * BusinessDays>
>>> to_offset(datetime.timedelta(days=1))
<Day>
>>> to_offset(Hour())
<Hour>
|
[
"Return",
"DateOffset",
"object",
"from",
"string",
"or",
"tuple",
"representation",
"or",
"datetime",
".",
"timedelta",
"object"
] |
9feb3ad92cc0397a04b665803a49299ee7aa1037
|
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/tseries/frequencies.py#L57-L164
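Usage mirroring the docstring examples in this record, assuming the pandas version shown (import path pandas.tseries.frequencies.to_offset):

from datetime import timedelta
from pandas.tseries.frequencies import to_offset

print(to_offset("5min"))             # <5 * Minutes>
print(to_offset("1D1H"))             # <25 * Hours>
print(to_offset(timedelta(days=1)))  # <Day>
print(to_offset(None))               # None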
|
19,386
|
pandas-dev/pandas
|
pandas/tseries/frequencies.py
|
get_offset
|
def get_offset(name):
"""
Return DateOffset object associated with rule name
Examples
--------
get_offset('EOM') --> BMonthEnd(1)
"""
if name not in libfreqs._dont_uppercase:
name = name.upper()
name = libfreqs._lite_rule_alias.get(name, name)
name = libfreqs._lite_rule_alias.get(name.lower(), name)
else:
name = libfreqs._lite_rule_alias.get(name, name)
if name not in _offset_map:
try:
split = name.split('-')
klass = prefix_mapping[split[0]]
# handles case where there's no suffix (and will TypeError if too
# many '-')
offset = klass._from_name(*split[1:])
except (ValueError, TypeError, KeyError):
# bad prefix or suffix
raise ValueError(libfreqs.INVALID_FREQ_ERR_MSG.format(name))
# cache
_offset_map[name] = offset
return _offset_map[name]
|
python
|
def get_offset(name):
"""
Return DateOffset object associated with rule name
Examples
--------
get_offset('EOM') --> BMonthEnd(1)
"""
if name not in libfreqs._dont_uppercase:
name = name.upper()
name = libfreqs._lite_rule_alias.get(name, name)
name = libfreqs._lite_rule_alias.get(name.lower(), name)
else:
name = libfreqs._lite_rule_alias.get(name, name)
if name not in _offset_map:
try:
split = name.split('-')
klass = prefix_mapping[split[0]]
# handles case where there's no suffix (and will TypeError if too
# many '-')
offset = klass._from_name(*split[1:])
except (ValueError, TypeError, KeyError):
# bad prefix or suffix
raise ValueError(libfreqs.INVALID_FREQ_ERR_MSG.format(name))
# cache
_offset_map[name] = offset
return _offset_map[name]
|
[
"def",
"get_offset",
"(",
"name",
")",
":",
"if",
"name",
"not",
"in",
"libfreqs",
".",
"_dont_uppercase",
":",
"name",
"=",
"name",
".",
"upper",
"(",
")",
"name",
"=",
"libfreqs",
".",
"_lite_rule_alias",
".",
"get",
"(",
"name",
",",
"name",
")",
"name",
"=",
"libfreqs",
".",
"_lite_rule_alias",
".",
"get",
"(",
"name",
".",
"lower",
"(",
")",
",",
"name",
")",
"else",
":",
"name",
"=",
"libfreqs",
".",
"_lite_rule_alias",
".",
"get",
"(",
"name",
",",
"name",
")",
"if",
"name",
"not",
"in",
"_offset_map",
":",
"try",
":",
"split",
"=",
"name",
".",
"split",
"(",
"'-'",
")",
"klass",
"=",
"prefix_mapping",
"[",
"split",
"[",
"0",
"]",
"]",
"# handles case where there's no suffix (and will TypeError if too",
"# many '-')",
"offset",
"=",
"klass",
".",
"_from_name",
"(",
"*",
"split",
"[",
"1",
":",
"]",
")",
"except",
"(",
"ValueError",
",",
"TypeError",
",",
"KeyError",
")",
":",
"# bad prefix or suffix",
"raise",
"ValueError",
"(",
"libfreqs",
".",
"INVALID_FREQ_ERR_MSG",
".",
"format",
"(",
"name",
")",
")",
"# cache",
"_offset_map",
"[",
"name",
"]",
"=",
"offset",
"return",
"_offset_map",
"[",
"name",
"]"
] |
Return DateOffset object associated with rule name
Examples
--------
get_offset('EOM') --> BMonthEnd(1)
|
[
"Return",
"DateOffset",
"object",
"associated",
"with",
"rule",
"name"
] |
9feb3ad92cc0397a04b665803a49299ee7aa1037
|
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/tseries/frequencies.py#L167-L195
|
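A short, hedged sketch of the `get_offset` rule-name lookup documented above: names are normalized, split on '-' into prefix and suffix, resolved through prefix_mapping, and cached in _offset_map. It assumes the same import path (pandas.tseries.frequencies) at the pinned commit; the rule names used here ('B', 'W-SUN') are standard aliases, not taken from the row.

# Sketch: rule-name lookup and the uniform invalid-frequency error.
from pandas.tseries.frequencies import get_offset

print(get_offset('B'))      # BusinessDay offset
print(get_offset('W-SUN'))  # Week offset; the '-SUN' suffix is passed to _from_name

# A bad prefix or suffix is reported via INVALID_FREQ_ERR_MSG:
try:
    get_offset('QUUX-1')
except ValueError as err:
    print(err)  # e.g. "Invalid frequency: QUUX-1"
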
19,387
|
pandas-dev/pandas
|
pandas/tseries/frequencies.py
|
infer_freq
|
def infer_freq(index, warn=True):
"""
Infer the most likely frequency given the input index. If the frequency is
uncertain, a warning will be printed.
Parameters
----------
index : DatetimeIndex or TimedeltaIndex
if passed a Series will use the values of the series (NOT THE INDEX)
warn : boolean, default True
Returns
-------
str or None
None if no discernible frequency
TypeError if the index is not datetime-like
ValueError if there are less than three values.
"""
import pandas as pd
if isinstance(index, ABCSeries):
values = index._values
if not (is_datetime64_dtype(values) or
is_timedelta64_dtype(values) or
values.dtype == object):
raise TypeError("cannot infer freq from a non-convertible dtype "
"on a Series of {dtype}".format(dtype=index.dtype))
index = values
if is_period_arraylike(index):
raise TypeError("PeriodIndex given. Check the `freq` attribute "
"instead of using infer_freq.")
elif is_timedelta64_dtype(index):
# Allow TimedeltaIndex and TimedeltaArray
inferer = _TimedeltaFrequencyInferer(index, warn=warn)
return inferer.get_freq()
if isinstance(index, pd.Index) and not isinstance(index, pd.DatetimeIndex):
if isinstance(index, (pd.Int64Index, pd.Float64Index)):
raise TypeError("cannot infer freq from a non-convertible index "
"type {type}".format(type=type(index)))
index = index.values
if not isinstance(index, pd.DatetimeIndex):
try:
index = pd.DatetimeIndex(index)
except AmbiguousTimeError:
index = pd.DatetimeIndex(index.asi8)
inferer = _FrequencyInferer(index, warn=warn)
return inferer.get_freq()
|
python
|
def infer_freq(index, warn=True):
"""
Infer the most likely frequency given the input index. If the frequency is
uncertain, a warning will be printed.
Parameters
----------
index : DatetimeIndex or TimedeltaIndex
if passed a Series will use the values of the series (NOT THE INDEX)
warn : boolean, default True
Returns
-------
str or None
None if no discernible frequency
TypeError if the index is not datetime-like
ValueError if there are less than three values.
"""
import pandas as pd
if isinstance(index, ABCSeries):
values = index._values
if not (is_datetime64_dtype(values) or
is_timedelta64_dtype(values) or
values.dtype == object):
raise TypeError("cannot infer freq from a non-convertible dtype "
"on a Series of {dtype}".format(dtype=index.dtype))
index = values
if is_period_arraylike(index):
raise TypeError("PeriodIndex given. Check the `freq` attribute "
"instead of using infer_freq.")
elif is_timedelta64_dtype(index):
# Allow TimedeltaIndex and TimedeltaArray
inferer = _TimedeltaFrequencyInferer(index, warn=warn)
return inferer.get_freq()
if isinstance(index, pd.Index) and not isinstance(index, pd.DatetimeIndex):
if isinstance(index, (pd.Int64Index, pd.Float64Index)):
raise TypeError("cannot infer freq from a non-convertible index "
"type {type}".format(type=type(index)))
index = index.values
if not isinstance(index, pd.DatetimeIndex):
try:
index = pd.DatetimeIndex(index)
except AmbiguousTimeError:
index = pd.DatetimeIndex(index.asi8)
inferer = _FrequencyInferer(index, warn=warn)
return inferer.get_freq()
|
[
"def",
"infer_freq",
"(",
"index",
",",
"warn",
"=",
"True",
")",
":",
"import",
"pandas",
"as",
"pd",
"if",
"isinstance",
"(",
"index",
",",
"ABCSeries",
")",
":",
"values",
"=",
"index",
".",
"_values",
"if",
"not",
"(",
"is_datetime64_dtype",
"(",
"values",
")",
"or",
"is_timedelta64_dtype",
"(",
"values",
")",
"or",
"values",
".",
"dtype",
"==",
"object",
")",
":",
"raise",
"TypeError",
"(",
"\"cannot infer freq from a non-convertible dtype \"",
"\"on a Series of {dtype}\"",
".",
"format",
"(",
"dtype",
"=",
"index",
".",
"dtype",
")",
")",
"index",
"=",
"values",
"if",
"is_period_arraylike",
"(",
"index",
")",
":",
"raise",
"TypeError",
"(",
"\"PeriodIndex given. Check the `freq` attribute \"",
"\"instead of using infer_freq.\"",
")",
"elif",
"is_timedelta64_dtype",
"(",
"index",
")",
":",
"# Allow TimedeltaIndex and TimedeltaArray",
"inferer",
"=",
"_TimedeltaFrequencyInferer",
"(",
"index",
",",
"warn",
"=",
"warn",
")",
"return",
"inferer",
".",
"get_freq",
"(",
")",
"if",
"isinstance",
"(",
"index",
",",
"pd",
".",
"Index",
")",
"and",
"not",
"isinstance",
"(",
"index",
",",
"pd",
".",
"DatetimeIndex",
")",
":",
"if",
"isinstance",
"(",
"index",
",",
"(",
"pd",
".",
"Int64Index",
",",
"pd",
".",
"Float64Index",
")",
")",
":",
"raise",
"TypeError",
"(",
"\"cannot infer freq from a non-convertible index \"",
"\"type {type}\"",
".",
"format",
"(",
"type",
"=",
"type",
"(",
"index",
")",
")",
")",
"index",
"=",
"index",
".",
"values",
"if",
"not",
"isinstance",
"(",
"index",
",",
"pd",
".",
"DatetimeIndex",
")",
":",
"try",
":",
"index",
"=",
"pd",
".",
"DatetimeIndex",
"(",
"index",
")",
"except",
"AmbiguousTimeError",
":",
"index",
"=",
"pd",
".",
"DatetimeIndex",
"(",
"index",
".",
"asi8",
")",
"inferer",
"=",
"_FrequencyInferer",
"(",
"index",
",",
"warn",
"=",
"warn",
")",
"return",
"inferer",
".",
"get_freq",
"(",
")"
] |
Infer the most likely frequency given the input index. If the frequency is
uncertain, a warning will be printed.
Parameters
----------
index : DatetimeIndex or TimedeltaIndex
if passed a Series will use the values of the series (NOT THE INDEX)
warn : boolean, default True
Returns
-------
str or None
None if no discernible frequency
TypeError if the index is not datetime-like
ValueError if there are less than three values.
|
[
"Infer",
"the",
"most",
"likely",
"frequency",
"given",
"the",
"input",
"index",
".",
"If",
"the",
"frequency",
"is",
"uncertain",
"a",
"warning",
"will",
"be",
"printed",
"."
] |
9feb3ad92cc0397a04b665803a49299ee7aa1037
|
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/tseries/frequencies.py#L202-L252
|
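Usage sketch for the `infer_freq` row above, using only the public pandas API described in its docstring: a regular DatetimeIndex, a TimedeltaIndex, and the PeriodIndex rejection path.

# Sketch: inferring a frequency string from datetime-like indexes.
import pandas as pd

dti = pd.date_range('2019-01-01', periods=10, freq='D')
print(pd.infer_freq(dti))   # 'D'

tdi = pd.timedelta_range(0, periods=5, freq='H')
print(pd.infer_freq(tdi))   # 'H'

# A PeriodIndex is rejected; its freq is already an attribute.
try:
    pd.infer_freq(pd.period_range('2019', periods=4, freq='M'))
except TypeError as err:
    print(err)
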
19,388
|
pandas-dev/pandas
|
pandas/tseries/frequencies.py
|
_FrequencyInferer.get_freq
|
def get_freq(self):
"""
Find the appropriate frequency string to describe the inferred
frequency of self.values
Returns
-------
str or None
"""
if not self.is_monotonic or not self.index._is_unique:
return None
delta = self.deltas[0]
if _is_multiple(delta, _ONE_DAY):
return self._infer_daily_rule()
# Business hourly, maybe. 17: one day / 65: one weekend
if self.hour_deltas in ([1, 17], [1, 65], [1, 17, 65]):
return 'BH'
# Possibly intraday frequency. Here we use the
# original .asi8 values as the modified values
# will not work around DST transitions. See #8772
elif not self.is_unique_asi8:
return None
delta = self.deltas_asi8[0]
if _is_multiple(delta, _ONE_HOUR):
# Hours
return _maybe_add_count('H', delta / _ONE_HOUR)
elif _is_multiple(delta, _ONE_MINUTE):
# Minutes
return _maybe_add_count('T', delta / _ONE_MINUTE)
elif _is_multiple(delta, _ONE_SECOND):
# Seconds
return _maybe_add_count('S', delta / _ONE_SECOND)
elif _is_multiple(delta, _ONE_MILLI):
# Milliseconds
return _maybe_add_count('L', delta / _ONE_MILLI)
elif _is_multiple(delta, _ONE_MICRO):
# Microseconds
return _maybe_add_count('U', delta / _ONE_MICRO)
else:
# Nanoseconds
return _maybe_add_count('N', delta)
|
python
|
def get_freq(self):
"""
Find the appropriate frequency string to describe the inferred
frequency of self.values
Returns
-------
str or None
"""
if not self.is_monotonic or not self.index._is_unique:
return None
delta = self.deltas[0]
if _is_multiple(delta, _ONE_DAY):
return self._infer_daily_rule()
# Business hourly, maybe. 17: one day / 65: one weekend
if self.hour_deltas in ([1, 17], [1, 65], [1, 17, 65]):
return 'BH'
# Possibly intraday frequency. Here we use the
# original .asi8 values as the modified values
# will not work around DST transitions. See #8772
elif not self.is_unique_asi8:
return None
delta = self.deltas_asi8[0]
if _is_multiple(delta, _ONE_HOUR):
# Hours
return _maybe_add_count('H', delta / _ONE_HOUR)
elif _is_multiple(delta, _ONE_MINUTE):
# Minutes
return _maybe_add_count('T', delta / _ONE_MINUTE)
elif _is_multiple(delta, _ONE_SECOND):
# Seconds
return _maybe_add_count('S', delta / _ONE_SECOND)
elif _is_multiple(delta, _ONE_MILLI):
# Milliseconds
return _maybe_add_count('L', delta / _ONE_MILLI)
elif _is_multiple(delta, _ONE_MICRO):
# Microseconds
return _maybe_add_count('U', delta / _ONE_MICRO)
else:
# Nanoseconds
return _maybe_add_count('N', delta)
|
[
"def",
"get_freq",
"(",
"self",
")",
":",
"if",
"not",
"self",
".",
"is_monotonic",
"or",
"not",
"self",
".",
"index",
".",
"_is_unique",
":",
"return",
"None",
"delta",
"=",
"self",
".",
"deltas",
"[",
"0",
"]",
"if",
"_is_multiple",
"(",
"delta",
",",
"_ONE_DAY",
")",
":",
"return",
"self",
".",
"_infer_daily_rule",
"(",
")",
"# Business hourly, maybe. 17: one day / 65: one weekend",
"if",
"self",
".",
"hour_deltas",
"in",
"(",
"[",
"1",
",",
"17",
"]",
",",
"[",
"1",
",",
"65",
"]",
",",
"[",
"1",
",",
"17",
",",
"65",
"]",
")",
":",
"return",
"'BH'",
"# Possibly intraday frequency. Here we use the",
"# original .asi8 values as the modified values",
"# will not work around DST transitions. See #8772",
"elif",
"not",
"self",
".",
"is_unique_asi8",
":",
"return",
"None",
"delta",
"=",
"self",
".",
"deltas_asi8",
"[",
"0",
"]",
"if",
"_is_multiple",
"(",
"delta",
",",
"_ONE_HOUR",
")",
":",
"# Hours",
"return",
"_maybe_add_count",
"(",
"'H'",
",",
"delta",
"/",
"_ONE_HOUR",
")",
"elif",
"_is_multiple",
"(",
"delta",
",",
"_ONE_MINUTE",
")",
":",
"# Minutes",
"return",
"_maybe_add_count",
"(",
"'T'",
",",
"delta",
"/",
"_ONE_MINUTE",
")",
"elif",
"_is_multiple",
"(",
"delta",
",",
"_ONE_SECOND",
")",
":",
"# Seconds",
"return",
"_maybe_add_count",
"(",
"'S'",
",",
"delta",
"/",
"_ONE_SECOND",
")",
"elif",
"_is_multiple",
"(",
"delta",
",",
"_ONE_MILLI",
")",
":",
"# Milliseconds",
"return",
"_maybe_add_count",
"(",
"'L'",
",",
"delta",
"/",
"_ONE_MILLI",
")",
"elif",
"_is_multiple",
"(",
"delta",
",",
"_ONE_MICRO",
")",
":",
"# Microseconds",
"return",
"_maybe_add_count",
"(",
"'U'",
",",
"delta",
"/",
"_ONE_MICRO",
")",
"else",
":",
"# Nanoseconds",
"return",
"_maybe_add_count",
"(",
"'N'",
",",
"delta",
")"
] |
Find the appropriate frequency string to describe the inferred
frequency of self.values
Returns
-------
str or None
|
[
"Find",
"the",
"appropriate",
"frequency",
"string",
"to",
"describe",
"the",
"inferred",
"frequency",
"of",
"self",
".",
"values"
] |
9feb3ad92cc0397a04b665803a49299ee7aa1037
|
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/tseries/frequencies.py#L294-L337
|
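The intraday branch of `_FrequencyInferer.get_freq` above walks a fixed ladder of nanosecond multiples (hour, minute, second, milli, micro, nano). The following is an illustration only, not the pandas internals: plain constants assumed to mirror _ONE_HOUR, _ONE_MINUTE, etc., and a small function reproducing the cascade of divisibility checks.

# Illustration: the largest-unit-first cascade used by the intraday branch.
ONE_MICRO = 1000
ONE_MILLI = ONE_MICRO * 1000
ONE_SECOND = ONE_MILLI * 1000
ONE_MINUTE = 60 * ONE_SECOND
ONE_HOUR = 60 * ONE_MINUTE

def describe_intraday(delta_ns):
    """Map a nanosecond delta to (freq code, count), largest unit first."""
    for code, unit in [('H', ONE_HOUR), ('T', ONE_MINUTE), ('S', ONE_SECOND),
                       ('L', ONE_MILLI), ('U', ONE_MICRO)]:
        if delta_ns % unit == 0:
            return code, delta_ns // unit
    return 'N', delta_ns  # fall through to nanoseconds

print(describe_intraday(2 * ONE_HOUR))     # ('H', 2)
print(describe_intraday(500 * ONE_MILLI))  # ('L', 500)
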
19,389
|
pandas-dev/pandas
|
pandas/compat/pickle_compat.py
|
load
|
def load(fh, encoding=None, is_verbose=False):
"""load a pickle, with a provided encoding
if compat is True:
fake the old class hierarchy
if it works, then return the new type objects
Parameters
----------
fh : a filelike object
encoding : an optional encoding
is_verbose : show exception output
"""
try:
fh.seek(0)
if encoding is not None:
up = Unpickler(fh, encoding=encoding)
else:
up = Unpickler(fh)
up.is_verbose = is_verbose
return up.load()
except (ValueError, TypeError):
raise
|
python
|
def load(fh, encoding=None, is_verbose=False):
"""load a pickle, with a provided encoding
if compat is True:
fake the old class hierarchy
if it works, then return the new type objects
Parameters
----------
fh : a filelike object
encoding : an optional encoding
is_verbose : show exception output
"""
try:
fh.seek(0)
if encoding is not None:
up = Unpickler(fh, encoding=encoding)
else:
up = Unpickler(fh)
up.is_verbose = is_verbose
return up.load()
except (ValueError, TypeError):
raise
|
[
"def",
"load",
"(",
"fh",
",",
"encoding",
"=",
"None",
",",
"is_verbose",
"=",
"False",
")",
":",
"try",
":",
"fh",
".",
"seek",
"(",
"0",
")",
"if",
"encoding",
"is",
"not",
"None",
":",
"up",
"=",
"Unpickler",
"(",
"fh",
",",
"encoding",
"=",
"encoding",
")",
"else",
":",
"up",
"=",
"Unpickler",
"(",
"fh",
")",
"up",
".",
"is_verbose",
"=",
"is_verbose",
"return",
"up",
".",
"load",
"(",
")",
"except",
"(",
"ValueError",
",",
"TypeError",
")",
":",
"raise"
] |
load a pickle, with a provided encoding
if compat is True:
fake the old class hierarchy
if it works, then return the new type objects
Parameters
----------
fh : a filelike object
encoding : an optional encoding
is_verbose : show exception output
|
[
"load",
"a",
"pickle",
"with",
"a",
"provided",
"encoding"
] |
9feb3ad92cc0397a04b665803a49299ee7aa1037
|
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/compat/pickle_compat.py#L189-L213
|
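A hedged sketch for the compat `load` row above. It assumes the internal module path from the row (pandas/compat/pickle_compat.py) is importable as pandas.compat.pickle_compat, and simply round-trips a DataFrame through an in-memory buffer; in everyday use the public entry point is pandas.read_pickle, which is assumed here (not shown in the row) to fall back to this loader for old pickles.

# Sketch: calling the compat loader on an in-memory pickle.
import io
import pickle

import pandas as pd
from pandas.compat import pickle_compat

df = pd.DataFrame({'a': [1, 2, 3]})
buf = io.BytesIO(pickle.dumps(df))

restored = pickle_compat.load(buf, encoding=None, is_verbose=False)
print(restored.equals(df))  # True
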
19,390
|
pandas-dev/pandas
|
pandas/core/indexes/base.py
|
ensure_index_from_sequences
|
def ensure_index_from_sequences(sequences, names=None):
"""
Construct an index from sequences of data.
A single sequence returns an Index. Many sequences returns a
MultiIndex.
Parameters
----------
sequences : sequence of sequences
names : sequence of str
Returns
-------
index : Index or MultiIndex
Examples
--------
>>> ensure_index_from_sequences([[1, 2, 3]], names=['name'])
Int64Index([1, 2, 3], dtype='int64', name='name')
>>> ensure_index_from_sequences([['a', 'a'], ['a', 'b']],
names=['L1', 'L2'])
MultiIndex(levels=[['a'], ['a', 'b']],
codes=[[0, 0], [0, 1]],
names=['L1', 'L2'])
See Also
--------
ensure_index
"""
from .multi import MultiIndex
if len(sequences) == 1:
if names is not None:
names = names[0]
return Index(sequences[0], name=names)
else:
return MultiIndex.from_arrays(sequences, names=names)
|
python
|
def ensure_index_from_sequences(sequences, names=None):
"""
Construct an index from sequences of data.
A single sequence returns an Index. Many sequences returns a
MultiIndex.
Parameters
----------
sequences : sequence of sequences
names : sequence of str
Returns
-------
index : Index or MultiIndex
Examples
--------
>>> ensure_index_from_sequences([[1, 2, 3]], names=['name'])
Int64Index([1, 2, 3], dtype='int64', name='name')
>>> ensure_index_from_sequences([['a', 'a'], ['a', 'b']],
names=['L1', 'L2'])
MultiIndex(levels=[['a'], ['a', 'b']],
codes=[[0, 0], [0, 1]],
names=['L1', 'L2'])
See Also
--------
ensure_index
"""
from .multi import MultiIndex
if len(sequences) == 1:
if names is not None:
names = names[0]
return Index(sequences[0], name=names)
else:
return MultiIndex.from_arrays(sequences, names=names)
|
[
"def",
"ensure_index_from_sequences",
"(",
"sequences",
",",
"names",
"=",
"None",
")",
":",
"from",
".",
"multi",
"import",
"MultiIndex",
"if",
"len",
"(",
"sequences",
")",
"==",
"1",
":",
"if",
"names",
"is",
"not",
"None",
":",
"names",
"=",
"names",
"[",
"0",
"]",
"return",
"Index",
"(",
"sequences",
"[",
"0",
"]",
",",
"name",
"=",
"names",
")",
"else",
":",
"return",
"MultiIndex",
".",
"from_arrays",
"(",
"sequences",
",",
"names",
"=",
"names",
")"
] |
Construct an index from sequences of data.
A single sequence returns an Index. Many sequences returns a
MultiIndex.
Parameters
----------
sequences : sequence of sequences
names : sequence of str
Returns
-------
index : Index or MultiIndex
Examples
--------
>>> ensure_index_from_sequences([[1, 2, 3]], names=['name'])
Int64Index([1, 2, 3], dtype='int64', name='name')
>>> ensure_index_from_sequences([['a', 'a'], ['a', 'b']],
names=['L1', 'L2'])
MultiIndex(levels=[['a'], ['a', 'b']],
codes=[[0, 0], [0, 1]],
names=['L1', 'L2'])
See Also
--------
ensure_index
|
[
"Construct",
"an",
"index",
"from",
"sequences",
"of",
"data",
"."
] |
9feb3ad92cc0397a04b665803a49299ee7aa1037
|
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/indexes/base.py#L5277-L5315
|
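A brief sketch of the single-sequence vs. multi-sequence behaviour documented in the `ensure_index_from_sequences` row above. It assumes the helper, which is internal, is importable from pandas.core.indexes.base (the row's path); the expected results follow the docstring examples.

# Sketch: one sequence -> Index, several sequences -> MultiIndex.
from pandas.core.indexes.base import ensure_index_from_sequences

idx = ensure_index_from_sequences([[1, 2, 3]], names=['name'])
print(idx)              # Int64Index([1, 2, 3], dtype='int64', name='name')

mi = ensure_index_from_sequences([['a', 'a'], ['a', 'b']], names=['L1', 'L2'])
print(type(mi).__name__)  # MultiIndex
print(list(mi.names))     # ['L1', 'L2']
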
19,391
|
pandas-dev/pandas
|
pandas/core/indexes/base.py
|
ensure_index
|
def ensure_index(index_like, copy=False):
"""
Ensure that we have an index from some index-like object.
Parameters
----------
index : sequence
An Index or other sequence
copy : bool
Returns
-------
index : Index or MultiIndex
Examples
--------
>>> ensure_index(['a', 'b'])
Index(['a', 'b'], dtype='object')
>>> ensure_index([('a', 'a'), ('b', 'c')])
Index([('a', 'a'), ('b', 'c')], dtype='object')
>>> ensure_index([['a', 'a'], ['b', 'c']])
MultiIndex(levels=[['a'], ['b', 'c']],
codes=[[0, 0], [0, 1]])
See Also
--------
ensure_index_from_sequences
"""
if isinstance(index_like, Index):
if copy:
index_like = index_like.copy()
return index_like
if hasattr(index_like, 'name'):
return Index(index_like, name=index_like.name, copy=copy)
if is_iterator(index_like):
index_like = list(index_like)
# must check for exactly list here because of strict type
# check in clean_index_list
if isinstance(index_like, list):
if type(index_like) != list:
index_like = list(index_like)
converted, all_arrays = lib.clean_index_list(index_like)
if len(converted) > 0 and all_arrays:
from .multi import MultiIndex
return MultiIndex.from_arrays(converted)
else:
index_like = converted
else:
# clean_index_list does the equivalent of copying
# so only need to do this if not list instance
if copy:
from copy import copy
index_like = copy(index_like)
return Index(index_like)
|
python
|
def ensure_index(index_like, copy=False):
"""
Ensure that we have an index from some index-like object.
Parameters
----------
index : sequence
An Index or other sequence
copy : bool
Returns
-------
index : Index or MultiIndex
Examples
--------
>>> ensure_index(['a', 'b'])
Index(['a', 'b'], dtype='object')
>>> ensure_index([('a', 'a'), ('b', 'c')])
Index([('a', 'a'), ('b', 'c')], dtype='object')
>>> ensure_index([['a', 'a'], ['b', 'c']])
MultiIndex(levels=[['a'], ['b', 'c']],
codes=[[0, 0], [0, 1]])
See Also
--------
ensure_index_from_sequences
"""
if isinstance(index_like, Index):
if copy:
index_like = index_like.copy()
return index_like
if hasattr(index_like, 'name'):
return Index(index_like, name=index_like.name, copy=copy)
if is_iterator(index_like):
index_like = list(index_like)
# must check for exactly list here because of strict type
# check in clean_index_list
if isinstance(index_like, list):
if type(index_like) != list:
index_like = list(index_like)
converted, all_arrays = lib.clean_index_list(index_like)
if len(converted) > 0 and all_arrays:
from .multi import MultiIndex
return MultiIndex.from_arrays(converted)
else:
index_like = converted
else:
# clean_index_list does the equivalent of copying
# so only need to do this if not list instance
if copy:
from copy import copy
index_like = copy(index_like)
return Index(index_like)
|
[
"def",
"ensure_index",
"(",
"index_like",
",",
"copy",
"=",
"False",
")",
":",
"if",
"isinstance",
"(",
"index_like",
",",
"Index",
")",
":",
"if",
"copy",
":",
"index_like",
"=",
"index_like",
".",
"copy",
"(",
")",
"return",
"index_like",
"if",
"hasattr",
"(",
"index_like",
",",
"'name'",
")",
":",
"return",
"Index",
"(",
"index_like",
",",
"name",
"=",
"index_like",
".",
"name",
",",
"copy",
"=",
"copy",
")",
"if",
"is_iterator",
"(",
"index_like",
")",
":",
"index_like",
"=",
"list",
"(",
"index_like",
")",
"# must check for exactly list here because of strict type",
"# check in clean_index_list",
"if",
"isinstance",
"(",
"index_like",
",",
"list",
")",
":",
"if",
"type",
"(",
"index_like",
")",
"!=",
"list",
":",
"index_like",
"=",
"list",
"(",
"index_like",
")",
"converted",
",",
"all_arrays",
"=",
"lib",
".",
"clean_index_list",
"(",
"index_like",
")",
"if",
"len",
"(",
"converted",
")",
">",
"0",
"and",
"all_arrays",
":",
"from",
".",
"multi",
"import",
"MultiIndex",
"return",
"MultiIndex",
".",
"from_arrays",
"(",
"converted",
")",
"else",
":",
"index_like",
"=",
"converted",
"else",
":",
"# clean_index_list does the equivalent of copying",
"# so only need to do this if not list instance",
"if",
"copy",
":",
"from",
"copy",
"import",
"copy",
"index_like",
"=",
"copy",
"(",
"index_like",
")",
"return",
"Index",
"(",
"index_like",
")"
] |
Ensure that we have an index from some index-like object.
Parameters
----------
index : sequence
An Index or other sequence
copy : bool
Returns
-------
index : Index or MultiIndex
Examples
--------
>>> ensure_index(['a', 'b'])
Index(['a', 'b'], dtype='object')
>>> ensure_index([('a', 'a'), ('b', 'c')])
Index([('a', 'a'), ('b', 'c')], dtype='object')
>>> ensure_index([['a', 'a'], ['b', 'c']])
MultiIndex(levels=[['a'], ['b', 'c']],
codes=[[0, 0], [0, 1]])
See Also
--------
ensure_index_from_sequences
|
[
"Ensure",
"that",
"we",
"have",
"an",
"index",
"from",
"some",
"index",
"-",
"like",
"object",
"."
] |
9feb3ad92cc0397a04b665803a49299ee7aa1037
|
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/indexes/base.py#L5318-L5378
|
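A hedged sketch of the three input shapes handled by `ensure_index` above (flat list, list of tuples, list of lists), plus the no-copy fast path for an existing Index. It assumes the internal import path pandas.core.indexes.base from the row; expected results follow the docstring examples.

# Sketch: coercing index-like objects, following the docstring examples above.
import pandas as pd
from pandas.core.indexes.base import ensure_index

print(ensure_index(['a', 'b']))                       # Index(['a', 'b'], dtype='object')
print(ensure_index([('a', 'a'), ('b', 'c')]))         # Index of tuples
print(type(ensure_index([['a', 'a'], ['b', 'c']])).__name__)  # MultiIndex

existing = pd.Index([1, 2, 3])
print(ensure_index(existing) is existing)             # True, no copy unless copy=True
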
19,392
|
pandas-dev/pandas
|
pandas/core/indexes/base.py
|
Index._simple_new
|
def _simple_new(cls, values, name=None, dtype=None, **kwargs):
"""
We require that we have a dtype compat for the values. If we are passed
a non-dtype compat, then coerce using the constructor.
Must be careful not to recurse.
"""
if not hasattr(values, 'dtype'):
if (values is None or not len(values)) and dtype is not None:
values = np.empty(0, dtype=dtype)
else:
values = np.array(values, copy=False)
if is_object_dtype(values):
values = cls(values, name=name, dtype=dtype,
**kwargs)._ndarray_values
if isinstance(values, (ABCSeries, ABCIndexClass)):
# Index._data must always be an ndarray.
# This is no-copy for when _values is an ndarray,
# which should be always at this point.
values = np.asarray(values._values)
result = object.__new__(cls)
result._data = values
# _index_data is a (temporary?) fix to ensure that the direct data
# manipulation we do in `_libs/reduction.pyx` continues to work.
# We need access to the actual ndarray, since we're messing with
# data buffers and strides. We don't re-use `_ndarray_values`, since
# we actually set this value too.
result._index_data = values
result.name = name
for k, v in kwargs.items():
setattr(result, k, v)
return result._reset_identity()
|
python
|
def _simple_new(cls, values, name=None, dtype=None, **kwargs):
"""
We require that we have a dtype compat for the values. If we are passed
a non-dtype compat, then coerce using the constructor.
Must be careful not to recurse.
"""
if not hasattr(values, 'dtype'):
if (values is None or not len(values)) and dtype is not None:
values = np.empty(0, dtype=dtype)
else:
values = np.array(values, copy=False)
if is_object_dtype(values):
values = cls(values, name=name, dtype=dtype,
**kwargs)._ndarray_values
if isinstance(values, (ABCSeries, ABCIndexClass)):
# Index._data must always be an ndarray.
# This is no-copy for when _values is an ndarray,
# which should be always at this point.
values = np.asarray(values._values)
result = object.__new__(cls)
result._data = values
# _index_data is a (temporary?) fix to ensure that the direct data
# manipulation we do in `_libs/reduction.pyx` continues to work.
# We need access to the actual ndarray, since we're messing with
# data buffers and strides. We don't re-use `_ndarray_values`, since
# we actually set this value too.
result._index_data = values
result.name = name
for k, v in kwargs.items():
setattr(result, k, v)
return result._reset_identity()
|
[
"def",
"_simple_new",
"(",
"cls",
",",
"values",
",",
"name",
"=",
"None",
",",
"dtype",
"=",
"None",
",",
"*",
"*",
"kwargs",
")",
":",
"if",
"not",
"hasattr",
"(",
"values",
",",
"'dtype'",
")",
":",
"if",
"(",
"values",
"is",
"None",
"or",
"not",
"len",
"(",
"values",
")",
")",
"and",
"dtype",
"is",
"not",
"None",
":",
"values",
"=",
"np",
".",
"empty",
"(",
"0",
",",
"dtype",
"=",
"dtype",
")",
"else",
":",
"values",
"=",
"np",
".",
"array",
"(",
"values",
",",
"copy",
"=",
"False",
")",
"if",
"is_object_dtype",
"(",
"values",
")",
":",
"values",
"=",
"cls",
"(",
"values",
",",
"name",
"=",
"name",
",",
"dtype",
"=",
"dtype",
",",
"*",
"*",
"kwargs",
")",
".",
"_ndarray_values",
"if",
"isinstance",
"(",
"values",
",",
"(",
"ABCSeries",
",",
"ABCIndexClass",
")",
")",
":",
"# Index._data must always be an ndarray.",
"# This is no-copy for when _values is an ndarray,",
"# which should be always at this point.",
"values",
"=",
"np",
".",
"asarray",
"(",
"values",
".",
"_values",
")",
"result",
"=",
"object",
".",
"__new__",
"(",
"cls",
")",
"result",
".",
"_data",
"=",
"values",
"# _index_data is a (temporary?) fix to ensure that the direct data",
"# manipulation we do in `_libs/reduction.pyx` continues to work.",
"# We need access to the actual ndarray, since we're messing with",
"# data buffers and strides. We don't re-use `_ndarray_values`, since",
"# we actually set this value too.",
"result",
".",
"_index_data",
"=",
"values",
"result",
".",
"name",
"=",
"name",
"for",
"k",
",",
"v",
"in",
"kwargs",
".",
"items",
"(",
")",
":",
"setattr",
"(",
"result",
",",
"k",
",",
"v",
")",
"return",
"result",
".",
"_reset_identity",
"(",
")"
] |
We require that we have a dtype compat for the values. If we are passed
a non-dtype compat, then coerce using the constructor.
Must be careful not to recurse.
|
[
"We",
"require",
"that",
"we",
"have",
"a",
"dtype",
"compat",
"for",
"the",
"values",
".",
"If",
"we",
"are",
"passed",
"a",
"non",
"-",
"dtype",
"compat",
"then",
"coerce",
"using",
"the",
"constructor",
"."
] |
9feb3ad92cc0397a04b665803a49299ee7aa1037
|
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/indexes/base.py#L506-L539
|
19,393
|
pandas-dev/pandas
|
pandas/core/indexes/base.py
|
Index._shallow_copy_with_infer
|
def _shallow_copy_with_infer(self, values, **kwargs):
"""
Create a new Index inferring the class with passed value, don't copy
the data, use the same object attributes with passed in attributes
taking precedence.
*this is an internal non-public method*
Parameters
----------
values : the values to create the new Index, optional
kwargs : updates the default attributes for this Index
"""
attributes = self._get_attributes_dict()
attributes.update(kwargs)
attributes['copy'] = False
if not len(values) and 'dtype' not in kwargs:
attributes['dtype'] = self.dtype
if self._infer_as_myclass:
try:
return self._constructor(values, **attributes)
except (TypeError, ValueError):
pass
return Index(values, **attributes)
|
python
|
def _shallow_copy_with_infer(self, values, **kwargs):
"""
Create a new Index inferring the class with passed value, don't copy
the data, use the same object attributes with passed in attributes
taking precedence.
*this is an internal non-public method*
Parameters
----------
values : the values to create the new Index, optional
kwargs : updates the default attributes for this Index
"""
attributes = self._get_attributes_dict()
attributes.update(kwargs)
attributes['copy'] = False
if not len(values) and 'dtype' not in kwargs:
attributes['dtype'] = self.dtype
if self._infer_as_myclass:
try:
return self._constructor(values, **attributes)
except (TypeError, ValueError):
pass
return Index(values, **attributes)
|
[
"def",
"_shallow_copy_with_infer",
"(",
"self",
",",
"values",
",",
"*",
"*",
"kwargs",
")",
":",
"attributes",
"=",
"self",
".",
"_get_attributes_dict",
"(",
")",
"attributes",
".",
"update",
"(",
"kwargs",
")",
"attributes",
"[",
"'copy'",
"]",
"=",
"False",
"if",
"not",
"len",
"(",
"values",
")",
"and",
"'dtype'",
"not",
"in",
"kwargs",
":",
"attributes",
"[",
"'dtype'",
"]",
"=",
"self",
".",
"dtype",
"if",
"self",
".",
"_infer_as_myclass",
":",
"try",
":",
"return",
"self",
".",
"_constructor",
"(",
"values",
",",
"*",
"*",
"attributes",
")",
"except",
"(",
"TypeError",
",",
"ValueError",
")",
":",
"pass",
"return",
"Index",
"(",
"values",
",",
"*",
"*",
"attributes",
")"
] |
Create a new Index inferring the class with passed value, don't copy
the data, use the same object attributes with passed in attributes
taking precedence.
*this is an internal non-public method*
Parameters
----------
values : the values to create the new Index, optional
kwargs : updates the default attributes for this Index
|
[
"Create",
"a",
"new",
"Index",
"inferring",
"the",
"class",
"with",
"passed",
"value",
"don",
"t",
"copy",
"the",
"data",
"use",
"the",
"same",
"object",
"attributes",
"with",
"passed",
"in",
"attributes",
"taking",
"precedence",
"."
] |
9feb3ad92cc0397a04b665803a49299ee7aa1037
|
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/indexes/base.py#L585-L608
|
19,394
|
pandas-dev/pandas
|
pandas/core/indexes/base.py
|
Index.is_
|
def is_(self, other):
"""
More flexible, faster check like ``is`` but that works through views.
Note: this is *not* the same as ``Index.identical()``, which checks
that metadata is also the same.
Parameters
----------
other : object
other object to compare against.
Returns
-------
True if both have same underlying data, False otherwise : bool
"""
# use something other than None to be clearer
return self._id is getattr(
other, '_id', Ellipsis) and self._id is not None
|
python
|
def is_(self, other):
"""
More flexible, faster check like ``is`` but that works through views.
Note: this is *not* the same as ``Index.identical()``, which checks
that metadata is also the same.
Parameters
----------
other : object
other object to compare against.
Returns
-------
True if both have same underlying data, False otherwise : bool
"""
# use something other than None to be clearer
return self._id is getattr(
other, '_id', Ellipsis) and self._id is not None
|
[
"def",
"is_",
"(",
"self",
",",
"other",
")",
":",
"# use something other than None to be clearer",
"return",
"self",
".",
"_id",
"is",
"getattr",
"(",
"other",
",",
"'_id'",
",",
"Ellipsis",
")",
"and",
"self",
".",
"_id",
"is",
"not",
"None"
] |
More flexible, faster check like ``is`` but that works through views.
Note: this is *not* the same as ``Index.identical()``, which checks
that metadata is also the same.
Parameters
----------
other : object
other object to compare against.
Returns
-------
True if both have same underlying data, False otherwise : bool
|
[
"More",
"flexible",
"faster",
"check",
"like",
"is",
"but",
"that",
"works",
"through",
"views",
"."
] |
9feb3ad92cc0397a04b665803a49299ee7aa1037
|
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/indexes/base.py#L614-L632
|
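Usage sketch for the `Index.is_` row above, contrasting it with `Index.identical()` as the docstring suggests: `is_` compares the shared identity token through views, while `identical()` compares values and metadata.

# Sketch: is_ works through views; a copy gets a fresh identity.
import pandas as pd

idx = pd.Index([1, 2, 3], name='x')
view = idx.view()
copied = idx.copy()

print(idx.is_(view))          # True,  same underlying data through the view
print(idx.is_(copied))        # False, the copy has a fresh _id
print(idx.identical(copied))  # True,  identical() compares values and metadata instead
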
19,395
|
pandas-dev/pandas
|
pandas/core/indexes/base.py
|
Index._assert_take_fillable
|
def _assert_take_fillable(self, values, indices, allow_fill=True,
fill_value=None, na_value=np.nan):
"""
Internal method to handle NA filling of take.
"""
indices = ensure_platform_int(indices)
# only fill if we are passing a non-None fill_value
if allow_fill and fill_value is not None:
if (indices < -1).any():
msg = ('When allow_fill=True and fill_value is not None, '
'all indices must be >= -1')
raise ValueError(msg)
taken = algos.take(values,
indices,
allow_fill=allow_fill,
fill_value=na_value)
else:
taken = values.take(indices)
return taken
|
python
|
def _assert_take_fillable(self, values, indices, allow_fill=True,
fill_value=None, na_value=np.nan):
"""
Internal method to handle NA filling of take.
"""
indices = ensure_platform_int(indices)
# only fill if we are passing a non-None fill_value
if allow_fill and fill_value is not None:
if (indices < -1).any():
msg = ('When allow_fill=True and fill_value is not None, '
'all indices must be >= -1')
raise ValueError(msg)
taken = algos.take(values,
indices,
allow_fill=allow_fill,
fill_value=na_value)
else:
taken = values.take(indices)
return taken
|
[
"def",
"_assert_take_fillable",
"(",
"self",
",",
"values",
",",
"indices",
",",
"allow_fill",
"=",
"True",
",",
"fill_value",
"=",
"None",
",",
"na_value",
"=",
"np",
".",
"nan",
")",
":",
"indices",
"=",
"ensure_platform_int",
"(",
"indices",
")",
"# only fill if we are passing a non-None fill_value",
"if",
"allow_fill",
"and",
"fill_value",
"is",
"not",
"None",
":",
"if",
"(",
"indices",
"<",
"-",
"1",
")",
".",
"any",
"(",
")",
":",
"msg",
"=",
"(",
"'When allow_fill=True and fill_value is not None, '",
"'all indices must be >= -1'",
")",
"raise",
"ValueError",
"(",
"msg",
")",
"taken",
"=",
"algos",
".",
"take",
"(",
"values",
",",
"indices",
",",
"allow_fill",
"=",
"allow_fill",
",",
"fill_value",
"=",
"na_value",
")",
"else",
":",
"taken",
"=",
"values",
".",
"take",
"(",
"indices",
")",
"return",
"taken"
] |
Internal method to handle NA filling of take.
|
[
"Internal",
"method",
"to",
"handle",
"NA",
"filling",
"of",
"take",
"."
] |
9feb3ad92cc0397a04b665803a49299ee7aa1037
|
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/indexes/base.py#L803-L822
|
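The `_assert_take_fillable` helper above backs the public `Index.take` path with `allow_fill`/`fill_value`. A hedged sketch through that public surface (the Float64Index example values are assumptions, not from the row):

# Sketch: -1 marks missing slots when allow_fill=True and fill_value is not None.
import numpy as np
import pandas as pd

idx = pd.Float64Index([10.0, 20.0, 30.0])
print(idx.take([0, -1], allow_fill=True, fill_value=np.nan))
# Float64Index([10.0, nan], dtype='float64')

# Indices below -1 are rejected with the ValueError message shown above.
try:
    idx.take([0, -2], allow_fill=True, fill_value=np.nan)
except ValueError as err:
    print(err)
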
19,396
|
pandas-dev/pandas
|
pandas/core/indexes/base.py
|
Index._format_data
|
def _format_data(self, name=None):
"""
Return the formatted data as a unicode string.
"""
# do we want to justify (only do so for non-objects)
is_justify = not (self.inferred_type in ('string', 'unicode') or
(self.inferred_type == 'categorical' and
is_object_dtype(self.categories)))
return format_object_summary(self, self._formatter_func,
is_justify=is_justify, name=name)
|
python
|
def _format_data(self, name=None):
"""
Return the formatted data as a unicode string.
"""
# do we want to justify (only do so for non-objects)
is_justify = not (self.inferred_type in ('string', 'unicode') or
(self.inferred_type == 'categorical' and
is_object_dtype(self.categories)))
return format_object_summary(self, self._formatter_func,
is_justify=is_justify, name=name)
|
[
"def",
"_format_data",
"(",
"self",
",",
"name",
"=",
"None",
")",
":",
"# do we want to justify (only do so for non-objects)",
"is_justify",
"=",
"not",
"(",
"self",
".",
"inferred_type",
"in",
"(",
"'string'",
",",
"'unicode'",
")",
"or",
"(",
"self",
".",
"inferred_type",
"==",
"'categorical'",
"and",
"is_object_dtype",
"(",
"self",
".",
"categories",
")",
")",
")",
"return",
"format_object_summary",
"(",
"self",
",",
"self",
".",
"_formatter_func",
",",
"is_justify",
"=",
"is_justify",
",",
"name",
"=",
"name",
")"
] |
Return the formatted data as a unicode string.
|
[
"Return",
"the",
"formatted",
"data",
"as",
"a",
"unicode",
"string",
"."
] |
9feb3ad92cc0397a04b665803a49299ee7aa1037
|
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/indexes/base.py#L958-L969
|
19,397
|
pandas-dev/pandas
|
pandas/core/indexes/base.py
|
Index.format
|
def format(self, name=False, formatter=None, **kwargs):
"""
Render a string representation of the Index.
"""
header = []
if name:
header.append(pprint_thing(self.name,
escape_chars=('\t', '\r', '\n')) if
self.name is not None else '')
if formatter is not None:
return header + list(self.map(formatter))
return self._format_with_header(header, **kwargs)
|
python
|
def format(self, name=False, formatter=None, **kwargs):
"""
Render a string representation of the Index.
"""
header = []
if name:
header.append(pprint_thing(self.name,
escape_chars=('\t', '\r', '\n')) if
self.name is not None else '')
if formatter is not None:
return header + list(self.map(formatter))
return self._format_with_header(header, **kwargs)
|
[
"def",
"format",
"(",
"self",
",",
"name",
"=",
"False",
",",
"formatter",
"=",
"None",
",",
"*",
"*",
"kwargs",
")",
":",
"header",
"=",
"[",
"]",
"if",
"name",
":",
"header",
".",
"append",
"(",
"pprint_thing",
"(",
"self",
".",
"name",
",",
"escape_chars",
"=",
"(",
"'\\t'",
",",
"'\\r'",
",",
"'\\n'",
")",
")",
"if",
"self",
".",
"name",
"is",
"not",
"None",
"else",
"''",
")",
"if",
"formatter",
"is",
"not",
"None",
":",
"return",
"header",
"+",
"list",
"(",
"self",
".",
"map",
"(",
"formatter",
")",
")",
"return",
"self",
".",
"_format_with_header",
"(",
"header",
",",
"*",
"*",
"kwargs",
")"
] |
Render a string representation of the Index.
|
[
"Render",
"a",
"string",
"representation",
"of",
"the",
"Index",
"."
] |
9feb3ad92cc0397a04b665803a49299ee7aa1037
|
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/indexes/base.py#L981-L994
|
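Usage sketch for the `Index.format` row above: the optional header row added when name=True, and the formatter shortcut. This reflects the API at the pinned commit (newer pandas releases deprecate Index.format); the sample index is an assumption.

# Sketch: rendering an Index as a list of strings.
import pandas as pd

idx = pd.Index([1, 2, 3], name='nums')
print(idx.format(name=True))                              # ['nums', '1', '2', '3']
print(idx.format(formatter=lambda x: 'v{}'.format(x)))    # ['v1', 'v2', 'v3']
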
19,398
|
pandas-dev/pandas
|
pandas/core/indexes/base.py
|
Index.to_native_types
|
def to_native_types(self, slicer=None, **kwargs):
"""
Format specified values of `self` and return them.
Parameters
----------
slicer : int, array-like
An indexer into `self` that specifies which values
are used in the formatting process.
kwargs : dict
Options for specifying how the values should be formatted.
These options include the following:
1) na_rep : str
The value that serves as a placeholder for NULL values
2) quoting : bool or None
Whether or not there are quoted values in `self`
3) date_format : str
The format used to represent date-like values
"""
values = self
if slicer is not None:
values = values[slicer]
return values._format_native_types(**kwargs)
|
python
|
def to_native_types(self, slicer=None, **kwargs):
"""
Format specified values of `self` and return them.
Parameters
----------
slicer : int, array-like
An indexer into `self` that specifies which values
are used in the formatting process.
kwargs : dict
Options for specifying how the values should be formatted.
These options include the following:
1) na_rep : str
The value that serves as a placeholder for NULL values
2) quoting : bool or None
Whether or not there are quoted values in `self`
3) date_format : str
The format used to represent date-like values
"""
values = self
if slicer is not None:
values = values[slicer]
return values._format_native_types(**kwargs)
|
[
"def",
"to_native_types",
"(",
"self",
",",
"slicer",
"=",
"None",
",",
"*",
"*",
"kwargs",
")",
":",
"values",
"=",
"self",
"if",
"slicer",
"is",
"not",
"None",
":",
"values",
"=",
"values",
"[",
"slicer",
"]",
"return",
"values",
".",
"_format_native_types",
"(",
"*",
"*",
"kwargs",
")"
] |
Format specified values of `self` and return them.
Parameters
----------
slicer : int, array-like
An indexer into `self` that specifies which values
are used in the formatting process.
kwargs : dict
Options for specifying how the values should be formatted.
These options include the following:
1) na_rep : str
The value that serves as a placeholder for NULL values
2) quoting : bool or None
Whether or not there are quoted values in `self`
3) date_format : str
The format used to represent date-like values
|
[
"Format",
"specified",
"values",
"of",
"self",
"and",
"return",
"them",
"."
] |
9feb3ad92cc0397a04b665803a49299ee7aa1037
|
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/indexes/base.py#L1022-L1046
|
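A hedged sketch of `Index.to_native_types` as documented above, exercising the array-like slicer and the na_rep option (API as of the pinned commit; the sample values are assumptions, and the exact string formatting of floats may differ).

# Sketch: formatting index values to native-type strings.
import numpy as np
import pandas as pd

idx = pd.Index([1.5, np.nan, 3.0])
print(idx.to_native_types())                # array of strings; NaN uses na_rep ('' by default)
print(idx.to_native_types(na_rep='NULL'))   # NaN rendered as 'NULL'
print(idx.to_native_types(slicer=[0, 2]))   # only positions 0 and 2 are formatted
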
19,399
|
pandas-dev/pandas
|
pandas/core/indexes/base.py
|
Index._format_native_types
|
def _format_native_types(self, na_rep='', quoting=None, **kwargs):
"""
Actually format specific types of the index.
"""
mask = isna(self)
if not self.is_object() and not quoting:
values = np.asarray(self).astype(str)
else:
values = np.array(self, dtype=object, copy=True)
values[mask] = na_rep
return values
|
python
|
def _format_native_types(self, na_rep='', quoting=None, **kwargs):
"""
Actually format specific types of the index.
"""
mask = isna(self)
if not self.is_object() and not quoting:
values = np.asarray(self).astype(str)
else:
values = np.array(self, dtype=object, copy=True)
values[mask] = na_rep
return values
|
[
"def",
"_format_native_types",
"(",
"self",
",",
"na_rep",
"=",
"''",
",",
"quoting",
"=",
"None",
",",
"*",
"*",
"kwargs",
")",
":",
"mask",
"=",
"isna",
"(",
"self",
")",
"if",
"not",
"self",
".",
"is_object",
"(",
")",
"and",
"not",
"quoting",
":",
"values",
"=",
"np",
".",
"asarray",
"(",
"self",
")",
".",
"astype",
"(",
"str",
")",
"else",
":",
"values",
"=",
"np",
".",
"array",
"(",
"self",
",",
"dtype",
"=",
"object",
",",
"copy",
"=",
"True",
")",
"values",
"[",
"mask",
"]",
"=",
"na_rep",
"return",
"values"
] |
Actually format specific types of the index.
|
[
"Actually",
"format",
"specific",
"types",
"of",
"the",
"index",
"."
] |
9feb3ad92cc0397a04b665803a49299ee7aa1037
|
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/indexes/base.py#L1048-L1059
|