Dataset columns and value ranges:

Column            Type           Range
id                int32          0 to 252k
repo              stringlengths  7 to 55
path              stringlengths  4 to 127
func_name         stringlengths  1 to 88
original_string   stringlengths  75 to 19.8k
language          stringclasses  1 value
code              stringlengths  75 to 19.8k
code_tokens       list
docstring         stringlengths  3 to 17.3k
docstring_tokens  list
sha               stringlengths  40 to 40
url               stringlengths  87 to 242
19,100
apache/spark
python/pyspark/streaming/dstream.py
DStream.transformWith
def transformWith(self, func, other, keepSerializer=False): """ Return a new DStream in which each RDD is generated by applying a function on each RDD of this DStream and 'other' DStream. `func` can have two arguments of (`rdd_a`, `rdd_b`) or have three arguments of (`time`, `rdd_a`, `rdd_b`) """ if func.__code__.co_argcount == 2: oldfunc = func func = lambda t, a, b: oldfunc(a, b) assert func.__code__.co_argcount == 3, "func should take two or three arguments" jfunc = TransformFunction(self._sc, func, self._jrdd_deserializer, other._jrdd_deserializer) dstream = self._sc._jvm.PythonTransformed2DStream(self._jdstream.dstream(), other._jdstream.dstream(), jfunc) jrdd_serializer = self._jrdd_deserializer if keepSerializer else self._sc.serializer return DStream(dstream.asJavaDStream(), self._ssc, jrdd_serializer)
python
def transformWith(self, func, other, keepSerializer=False):
    """
    Return a new DStream in which each RDD is generated by applying a function
    on each RDD of this DStream and 'other' DStream.

    `func` can have two arguments of (`rdd_a`, `rdd_b`) or have three
    arguments of (`time`, `rdd_a`, `rdd_b`)
    """
    if func.__code__.co_argcount == 2:
        oldfunc = func
        func = lambda t, a, b: oldfunc(a, b)
    assert func.__code__.co_argcount == 3, "func should take two or three arguments"
    jfunc = TransformFunction(self._sc, func, self._jrdd_deserializer, other._jrdd_deserializer)
    dstream = self._sc._jvm.PythonTransformed2DStream(self._jdstream.dstream(),
                                                      other._jdstream.dstream(), jfunc)
    jrdd_serializer = self._jrdd_deserializer if keepSerializer else self._sc.serializer
    return DStream(dstream.asJavaDStream(), self._ssc, jrdd_serializer)
[ "def", "transformWith", "(", "self", ",", "func", ",", "other", ",", "keepSerializer", "=", "False", ")", ":", "if", "func", ".", "__code__", ".", "co_argcount", "==", "2", ":", "oldfunc", "=", "func", "func", "=", "lambda", "t", ",", "a", ",", "b", ":", "oldfunc", "(", "a", ",", "b", ")", "assert", "func", ".", "__code__", ".", "co_argcount", "==", "3", ",", "\"func should take two or three arguments\"", "jfunc", "=", "TransformFunction", "(", "self", ".", "_sc", ",", "func", ",", "self", ".", "_jrdd_deserializer", ",", "other", ".", "_jrdd_deserializer", ")", "dstream", "=", "self", ".", "_sc", ".", "_jvm", ".", "PythonTransformed2DStream", "(", "self", ".", "_jdstream", ".", "dstream", "(", ")", ",", "other", ".", "_jdstream", ".", "dstream", "(", ")", ",", "jfunc", ")", "jrdd_serializer", "=", "self", ".", "_jrdd_deserializer", "if", "keepSerializer", "else", "self", ".", "_sc", ".", "serializer", "return", "DStream", "(", "dstream", ".", "asJavaDStream", "(", ")", ",", "self", ".", "_ssc", ",", "jrdd_serializer", ")" ]
Return a new DStream in which each RDD is generated by applying a function on each RDD of this DStream and 'other' DStream. `func` can have two arguments of (`rdd_a`, `rdd_b`) or have three arguments of (`time`, `rdd_a`, `rdd_b`)
[ "Return", "a", "new", "DStream", "in", "which", "each", "RDD", "is", "generated", "by", "applying", "a", "function", "on", "each", "RDD", "of", "this", "DStream", "and", "other", "DStream", "." ]
618d6bff71073c8c93501ab7392c3cc579730f0b
https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/streaming/dstream.py#L301-L317
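The record above describes DStream.transformWith, which applies a binary (or time-aware ternary) function across paired RDDs of two streams. Below is a minimal, hypothetical usage sketch, not part of the dataset record; it assumes a local PySpark installation that still ships the legacy pyspark.streaming module, and the queue-based inputs and join logic are invented for illustration.

from pyspark import SparkContext
from pyspark.streaming import StreamingContext

sc = SparkContext("local[2]", "transformWith-sketch")
ssc = StreamingContext(sc, 1)  # 1-second batches

# Two toy input streams fed from in-memory RDD queues.
left = ssc.queueStream([sc.parallelize([("a", 1), ("b", 2)])])
right = ssc.queueStream([sc.parallelize([("a", 10), ("b", 20)])])

# `func` may take (rdd_a, rdd_b) or (time, rdd_a, rdd_b); the two-argument
# form is wrapped internally into the three-argument form.
joined = left.transformWith(lambda a, b: a.join(b), right)
joined.pprint()

ssc.start()
ssc.awaitTermination(5)  # stop after a few seconds for the demo
ssc.stop(stopSparkContext=True, stopGraceFully=False)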
19,101
apache/spark
python/pyspark/streaming/dstream.py
DStream.union
def union(self, other): """ Return a new DStream by unifying data of another DStream with this DStream. @param other: Another DStream having the same interval (i.e., slideDuration) as this DStream. """ if self._slideDuration != other._slideDuration: raise ValueError("the two DStream should have same slide duration") return self.transformWith(lambda a, b: a.union(b), other, True)
python
def union(self, other):
    """
    Return a new DStream by unifying data of another DStream with this DStream.

    @param other: Another DStream having the same interval (i.e., slideDuration)
                  as this DStream.
    """
    if self._slideDuration != other._slideDuration:
        raise ValueError("the two DStream should have same slide duration")
    return self.transformWith(lambda a, b: a.union(b), other, True)
[ "def", "union", "(", "self", ",", "other", ")", ":", "if", "self", ".", "_slideDuration", "!=", "other", ".", "_slideDuration", ":", "raise", "ValueError", "(", "\"the two DStream should have same slide duration\"", ")", "return", "self", ".", "transformWith", "(", "lambda", "a", ",", "b", ":", "a", ".", "union", "(", "b", ")", ",", "other", ",", "True", ")" ]
Return a new DStream by unifying data of another DStream with this DStream. @param other: Another DStream having the same interval (i.e., slideDuration) as this DStream.
[ "Return", "a", "new", "DStream", "by", "unifying", "data", "of", "another", "DStream", "with", "this", "DStream", "." ]
618d6bff71073c8c93501ab7392c3cc579730f0b
https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/streaming/dstream.py#L332-L341
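DStream.union simply delegates to transformWith with an RDD-level union after checking that the slide durations match. A hypothetical sketch under the same local-PySpark assumption as the previous example; the data is made up:

from pyspark import SparkContext
from pyspark.streaming import StreamingContext

sc = SparkContext("local[2]", "union-sketch")
ssc = StreamingContext(sc, 1)

evens = ssc.queueStream([sc.parallelize([0, 2, 4])])
odds = ssc.queueStream([sc.parallelize([1, 3, 5])])

# Both streams share the 1-second slide duration, so union() is allowed.
merged = evens.union(odds)
merged.pprint()  # the first batch prints 0..5 in some order

ssc.start()
ssc.awaitTermination(5)
ssc.stop(stopSparkContext=True, stopGraceFully=False)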
19,102
apache/spark
python/pyspark/streaming/dstream.py
DStream.cogroup
def cogroup(self, other, numPartitions=None): """ Return a new DStream by applying 'cogroup' between RDDs of this DStream and `other` DStream. Hash partitioning is used to generate the RDDs with `numPartitions` partitions. """ if numPartitions is None: numPartitions = self._sc.defaultParallelism return self.transformWith(lambda a, b: a.cogroup(b, numPartitions), other)
python
def cogroup(self, other, numPartitions=None):
    """
    Return a new DStream by applying 'cogroup' between RDDs of this
    DStream and `other` DStream.

    Hash partitioning is used to generate the RDDs with `numPartitions` partitions.
    """
    if numPartitions is None:
        numPartitions = self._sc.defaultParallelism
    return self.transformWith(lambda a, b: a.cogroup(b, numPartitions), other)
[ "def", "cogroup", "(", "self", ",", "other", ",", "numPartitions", "=", "None", ")", ":", "if", "numPartitions", "is", "None", ":", "numPartitions", "=", "self", ".", "_sc", ".", "defaultParallelism", "return", "self", ".", "transformWith", "(", "lambda", "a", ",", "b", ":", "a", ".", "cogroup", "(", "b", ",", "numPartitions", ")", ",", "other", ")" ]
Return a new DStream by applying 'cogroup' between RDDs of this DStream and `other` DStream. Hash partitioning is used to generate the RDDs with `numPartitions` partitions.
[ "Return", "a", "new", "DStream", "by", "applying", "cogroup", "between", "RDDs", "of", "this", "DStream", "and", "other", "DStream", "." ]
618d6bff71073c8c93501ab7392c3cc579730f0b
https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/streaming/dstream.py#L343-L352
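cogroup pairs the values for each key across the two streams' RDDs, defaulting numPartitions to sc.defaultParallelism. A made-up usage sketch under the same assumptions; the keys and values are illustrative only:

from pyspark import SparkContext
from pyspark.streaming import StreamingContext

sc = SparkContext("local[2]", "cogroup-sketch")
ssc = StreamingContext(sc, 1)

clicks = ssc.queueStream([sc.parallelize([("user1", "home"), ("user2", "cart")])])
buys = ssc.queueStream([sc.parallelize([("user1", "book")])])

# For each key, cogroup yields a pair of iterables (clicks, buys); materialize
# them as lists so pprint shows something readable.
grouped = clicks.cogroup(buys).mapValues(lambda ab: (list(ab[0]), list(ab[1])))
grouped.pprint()

ssc.start()
ssc.awaitTermination(5)
ssc.stop(stopSparkContext=True, stopGraceFully=False)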
19,103
apache/spark
python/pyspark/streaming/dstream.py
DStream._jtime
def _jtime(self, timestamp): """ Convert datetime or unix_timestamp into Time """ if isinstance(timestamp, datetime): timestamp = time.mktime(timestamp.timetuple()) return self._sc._jvm.Time(long(timestamp * 1000))
python
def _jtime(self, timestamp):
    """
    Convert datetime or unix_timestamp into Time
    """
    if isinstance(timestamp, datetime):
        timestamp = time.mktime(timestamp.timetuple())
    return self._sc._jvm.Time(long(timestamp * 1000))
[ "def", "_jtime", "(", "self", ",", "timestamp", ")", ":", "if", "isinstance", "(", "timestamp", ",", "datetime", ")", ":", "timestamp", "=", "time", ".", "mktime", "(", "timestamp", ".", "timetuple", "(", ")", ")", "return", "self", ".", "_sc", ".", "_jvm", ".", "Time", "(", "long", "(", "timestamp", "*", "1000", ")", ")" ]
Convert datetime or unix_timestamp into Time
[ "Convert", "datetime", "or", "unix_timestamp", "into", "Time" ]
618d6bff71073c8c93501ab7392c3cc579730f0b
https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/streaming/dstream.py#L402-L407
19,104
apache/spark
python/pyspark/streaming/dstream.py
DStream.window
def window(self, windowDuration, slideDuration=None): """ Return a new DStream in which each RDD contains all the elements in seen in a sliding window of time over this DStream. @param windowDuration: width of the window; must be a multiple of this DStream's batching interval @param slideDuration: sliding interval of the window (i.e., the interval after which the new DStream will generate RDDs); must be a multiple of this DStream's batching interval """ self._validate_window_param(windowDuration, slideDuration) d = self._ssc._jduration(windowDuration) if slideDuration is None: return DStream(self._jdstream.window(d), self._ssc, self._jrdd_deserializer) s = self._ssc._jduration(slideDuration) return DStream(self._jdstream.window(d, s), self._ssc, self._jrdd_deserializer)
python
def window(self, windowDuration, slideDuration=None):
    """
    Return a new DStream in which each RDD contains all the elements in seen in a
    sliding window of time over this DStream.

    @param windowDuration: width of the window; must be a multiple of this
                           DStream's batching interval
    @param slideDuration:  sliding interval of the window (i.e., the interval after which
                           the new DStream will generate RDDs); must be a multiple of this
                           DStream's batching interval
    """
    self._validate_window_param(windowDuration, slideDuration)
    d = self._ssc._jduration(windowDuration)
    if slideDuration is None:
        return DStream(self._jdstream.window(d), self._ssc, self._jrdd_deserializer)
    s = self._ssc._jduration(slideDuration)
    return DStream(self._jdstream.window(d, s), self._ssc, self._jrdd_deserializer)
[ "def", "window", "(", "self", ",", "windowDuration", ",", "slideDuration", "=", "None", ")", ":", "self", ".", "_validate_window_param", "(", "windowDuration", ",", "slideDuration", ")", "d", "=", "self", ".", "_ssc", ".", "_jduration", "(", "windowDuration", ")", "if", "slideDuration", "is", "None", ":", "return", "DStream", "(", "self", ".", "_jdstream", ".", "window", "(", "d", ")", ",", "self", ".", "_ssc", ",", "self", ".", "_jrdd_deserializer", ")", "s", "=", "self", ".", "_ssc", ".", "_jduration", "(", "slideDuration", ")", "return", "DStream", "(", "self", ".", "_jdstream", ".", "window", "(", "d", ",", "s", ")", ",", "self", ".", "_ssc", ",", "self", ".", "_jrdd_deserializer", ")" ]
Return a new DStream in which each RDD contains all the elements in seen in a sliding window of time over this DStream. @param windowDuration: width of the window; must be a multiple of this DStream's batching interval @param slideDuration: sliding interval of the window (i.e., the interval after which the new DStream will generate RDDs); must be a multiple of this DStream's batching interval
[ "Return", "a", "new", "DStream", "in", "which", "each", "RDD", "contains", "all", "the", "elements", "in", "seen", "in", "a", "sliding", "window", "of", "time", "over", "this", "DStream", "." ]
618d6bff71073c8c93501ab7392c3cc579730f0b
https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/streaming/dstream.py#L427-L443
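window groups several consecutive batches into a single RDD; both durations must be multiples of the batch interval. A small hypothetical sketch, again assuming a local PySpark with the legacy streaming module; the input batches are invented:

from pyspark import SparkContext
from pyspark.streaming import StreamingContext

sc = SparkContext("local[2]", "window-sketch")
ssc = StreamingContext(sc, 1)  # 1-second batches

# Three one-element batches; each windowed RDD covers the last 3 seconds
# and a new windowed RDD is produced every 2 seconds.
nums = ssc.queueStream([sc.parallelize([i]) for i in range(3)])
windowed = nums.window(windowDuration=3, slideDuration=2)
windowed.pprint()

ssc.start()
ssc.awaitTermination(8)
ssc.stop(stopSparkContext=True, stopGraceFully=False)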
19,105
apache/spark
python/pyspark/streaming/dstream.py
DStream.reduceByWindow
def reduceByWindow(self, reduceFunc, invReduceFunc, windowDuration, slideDuration): """ Return a new DStream in which each RDD has a single element generated by reducing all elements in a sliding window over this DStream. if `invReduceFunc` is not None, the reduction is done incrementally using the old window's reduced value : 1. reduce the new values that entered the window (e.g., adding new counts) 2. "inverse reduce" the old values that left the window (e.g., subtracting old counts) This is more efficient than `invReduceFunc` is None. @param reduceFunc: associative and commutative reduce function @param invReduceFunc: inverse reduce function of `reduceFunc`; such that for all y, and invertible x: `invReduceFunc(reduceFunc(x, y), x) = y` @param windowDuration: width of the window; must be a multiple of this DStream's batching interval @param slideDuration: sliding interval of the window (i.e., the interval after which the new DStream will generate RDDs); must be a multiple of this DStream's batching interval """ keyed = self.map(lambda x: (1, x)) reduced = keyed.reduceByKeyAndWindow(reduceFunc, invReduceFunc, windowDuration, slideDuration, 1) return reduced.map(lambda kv: kv[1])
python
def reduceByWindow(self, reduceFunc, invReduceFunc, windowDuration, slideDuration):
    """
    Return a new DStream in which each RDD has a single element generated by reducing all
    elements in a sliding window over this DStream.

    if `invReduceFunc` is not None, the reduction is done incrementally
    using the old window's reduced value :

    1. reduce the new values that entered the window (e.g., adding new counts)

    2. "inverse reduce" the old values that left the window (e.g., subtracting old counts)
    This is more efficient than `invReduceFunc` is None.

    @param reduceFunc:     associative and commutative reduce function
    @param invReduceFunc:  inverse reduce function of `reduceFunc`; such that for all y,
                           and invertible x:
                           `invReduceFunc(reduceFunc(x, y), x) = y`
    @param windowDuration: width of the window; must be a multiple of this DStream's
                           batching interval
    @param slideDuration:  sliding interval of the window (i.e., the interval after which
                           the new DStream will generate RDDs); must be a multiple of this
                           DStream's batching interval
    """
    keyed = self.map(lambda x: (1, x))
    reduced = keyed.reduceByKeyAndWindow(reduceFunc, invReduceFunc,
                                         windowDuration, slideDuration, 1)
    return reduced.map(lambda kv: kv[1])
[ "def", "reduceByWindow", "(", "self", ",", "reduceFunc", ",", "invReduceFunc", ",", "windowDuration", ",", "slideDuration", ")", ":", "keyed", "=", "self", ".", "map", "(", "lambda", "x", ":", "(", "1", ",", "x", ")", ")", "reduced", "=", "keyed", ".", "reduceByKeyAndWindow", "(", "reduceFunc", ",", "invReduceFunc", ",", "windowDuration", ",", "slideDuration", ",", "1", ")", "return", "reduced", ".", "map", "(", "lambda", "kv", ":", "kv", "[", "1", "]", ")" ]
Return a new DStream in which each RDD has a single element generated by reducing all elements in a sliding window over this DStream. if `invReduceFunc` is not None, the reduction is done incrementally using the old window's reduced value : 1. reduce the new values that entered the window (e.g., adding new counts) 2. "inverse reduce" the old values that left the window (e.g., subtracting old counts) This is more efficient than `invReduceFunc` is None. @param reduceFunc: associative and commutative reduce function @param invReduceFunc: inverse reduce function of `reduceFunc`; such that for all y, and invertible x: `invReduceFunc(reduceFunc(x, y), x) = y` @param windowDuration: width of the window; must be a multiple of this DStream's batching interval @param slideDuration: sliding interval of the window (i.e., the interval after which the new DStream will generate RDDs); must be a multiple of this DStream's batching interval
[ "Return", "a", "new", "DStream", "in", "which", "each", "RDD", "has", "a", "single", "element", "generated", "by", "reducing", "all", "elements", "in", "a", "sliding", "window", "over", "this", "DStream", "." ]
618d6bff71073c8c93501ab7392c3cc579730f0b
https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/streaming/dstream.py#L445-L471
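reduceByWindow keys every element by the constant 1 and reuses reduceByKeyAndWindow, so the incremental path (with invReduceFunc) needs checkpointing. A hypothetical running-sum sketch; the checkpoint directory is an arbitrary local path chosen for the example:

import operator
from pyspark import SparkContext
from pyspark.streaming import StreamingContext

sc = SparkContext("local[2]", "reduceByWindow-sketch")
ssc = StreamingContext(sc, 1)
ssc.checkpoint("/tmp/reduce-by-window-ckpt")  # required for the inverse-reduce path

nums = ssc.queueStream([sc.parallelize([i]) for i in range(5)])

# Sum over a 3-second window sliding every second; operator.sub "subtracts"
# the batches that fall out of the window instead of recomputing from scratch.
sums = nums.reduceByWindow(operator.add, operator.sub, windowDuration=3, slideDuration=1)
sums.pprint()

ssc.start()
ssc.awaitTermination(8)
ssc.stop(stopSparkContext=True, stopGraceFully=False)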
19,106
apache/spark
python/pyspark/streaming/dstream.py
DStream.countByValueAndWindow
def countByValueAndWindow(self, windowDuration, slideDuration, numPartitions=None): """ Return a new DStream in which each RDD contains the count of distinct elements in RDDs in a sliding window over this DStream. @param windowDuration: width of the window; must be a multiple of this DStream's batching interval @param slideDuration: sliding interval of the window (i.e., the interval after which the new DStream will generate RDDs); must be a multiple of this DStream's batching interval @param numPartitions: number of partitions of each RDD in the new DStream. """ keyed = self.map(lambda x: (x, 1)) counted = keyed.reduceByKeyAndWindow(operator.add, operator.sub, windowDuration, slideDuration, numPartitions) return counted.filter(lambda kv: kv[1] > 0)
python
def countByValueAndWindow(self, windowDuration, slideDuration, numPartitions=None):
    """
    Return a new DStream in which each RDD contains the count of distinct elements in
    RDDs in a sliding window over this DStream.

    @param windowDuration: width of the window; must be a multiple of this DStream's
                           batching interval
    @param slideDuration:  sliding interval of the window (i.e., the interval after which
                           the new DStream will generate RDDs); must be a multiple of this
                           DStream's batching interval
    @param numPartitions:  number of partitions of each RDD in the new DStream.
    """
    keyed = self.map(lambda x: (x, 1))
    counted = keyed.reduceByKeyAndWindow(operator.add, operator.sub,
                                         windowDuration, slideDuration, numPartitions)
    return counted.filter(lambda kv: kv[1] > 0)
[ "def", "countByValueAndWindow", "(", "self", ",", "windowDuration", ",", "slideDuration", ",", "numPartitions", "=", "None", ")", ":", "keyed", "=", "self", ".", "map", "(", "lambda", "x", ":", "(", "x", ",", "1", ")", ")", "counted", "=", "keyed", ".", "reduceByKeyAndWindow", "(", "operator", ".", "add", ",", "operator", ".", "sub", ",", "windowDuration", ",", "slideDuration", ",", "numPartitions", ")", "return", "counted", ".", "filter", "(", "lambda", "kv", ":", "kv", "[", "1", "]", ">", "0", ")" ]
Return a new DStream in which each RDD contains the count of distinct elements in RDDs in a sliding window over this DStream. @param windowDuration: width of the window; must be a multiple of this DStream's batching interval @param slideDuration: sliding interval of the window (i.e., the interval after which the new DStream will generate RDDs); must be a multiple of this DStream's batching interval @param numPartitions: number of partitions of each RDD in the new DStream.
[ "Return", "a", "new", "DStream", "in", "which", "each", "RDD", "contains", "the", "count", "of", "distinct", "elements", "in", "RDDs", "in", "a", "sliding", "window", "over", "this", "DStream", "." ]
618d6bff71073c8c93501ab7392c3cc579730f0b
https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/streaming/dstream.py#L485-L500
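countByValueAndWindow follows the same pattern with operator.add/operator.sub and then filters out counts that have dropped to zero. A sketch under the same assumptions; the word batches and checkpoint path are invented:

from pyspark import SparkContext
from pyspark.streaming import StreamingContext

sc = SparkContext("local[2]", "countByValueAndWindow-sketch")
ssc = StreamingContext(sc, 1)
ssc.checkpoint("/tmp/count-by-value-ckpt")  # incremental counting needs a checkpoint dir

words = ssc.queueStream([sc.parallelize(batch) for batch in
                         (["spark", "spark"], ["streaming"], ["spark"])])

# (value, count) pairs over the last 3 seconds, recomputed every second.
counts = words.countByValueAndWindow(windowDuration=3, slideDuration=1)
counts.pprint()

ssc.start()
ssc.awaitTermination(8)
ssc.stop(stopSparkContext=True, stopGraceFully=False)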
19,107
apache/spark
python/pyspark/streaming/dstream.py
DStream.reduceByKeyAndWindow
def reduceByKeyAndWindow(self, func, invFunc, windowDuration, slideDuration=None, numPartitions=None, filterFunc=None): """ Return a new DStream by applying incremental `reduceByKey` over a sliding window. The reduced value of over a new window is calculated using the old window's reduce value : 1. reduce the new values that entered the window (e.g., adding new counts) 2. "inverse reduce" the old values that left the window (e.g., subtracting old counts) `invFunc` can be None, then it will reduce all the RDDs in window, could be slower than having `invFunc`. @param func: associative and commutative reduce function @param invFunc: inverse function of `reduceFunc` @param windowDuration: width of the window; must be a multiple of this DStream's batching interval @param slideDuration: sliding interval of the window (i.e., the interval after which the new DStream will generate RDDs); must be a multiple of this DStream's batching interval @param numPartitions: number of partitions of each RDD in the new DStream. @param filterFunc: function to filter expired key-value pairs; only pairs that satisfy the function are retained set this to null if you do not want to filter """ self._validate_window_param(windowDuration, slideDuration) if numPartitions is None: numPartitions = self._sc.defaultParallelism reduced = self.reduceByKey(func, numPartitions) if invFunc: def reduceFunc(t, a, b): b = b.reduceByKey(func, numPartitions) r = a.union(b).reduceByKey(func, numPartitions) if a else b if filterFunc: r = r.filter(filterFunc) return r def invReduceFunc(t, a, b): b = b.reduceByKey(func, numPartitions) joined = a.leftOuterJoin(b, numPartitions) return joined.mapValues(lambda kv: invFunc(kv[0], kv[1]) if kv[1] is not None else kv[0]) jreduceFunc = TransformFunction(self._sc, reduceFunc, reduced._jrdd_deserializer) jinvReduceFunc = TransformFunction(self._sc, invReduceFunc, reduced._jrdd_deserializer) if slideDuration is None: slideDuration = self._slideDuration dstream = self._sc._jvm.PythonReducedWindowedDStream( reduced._jdstream.dstream(), jreduceFunc, jinvReduceFunc, self._ssc._jduration(windowDuration), self._ssc._jduration(slideDuration)) return DStream(dstream.asJavaDStream(), self._ssc, self._sc.serializer) else: return reduced.window(windowDuration, slideDuration).reduceByKey(func, numPartitions)
python
def reduceByKeyAndWindow(self, func, invFunc, windowDuration, slideDuration=None,
                         numPartitions=None, filterFunc=None):
    """
    Return a new DStream by applying incremental `reduceByKey` over a sliding window.

    The reduced value of over a new window is calculated using the old window's reduce value :
     1. reduce the new values that entered the window (e.g., adding new counts)
     2. "inverse reduce" the old values that left the window (e.g., subtracting old counts)

    `invFunc` can be None, then it will reduce all the RDDs in window, could be slower
    than having `invFunc`.

    @param func:           associative and commutative reduce function
    @param invFunc:        inverse function of `reduceFunc`
    @param windowDuration: width of the window; must be a multiple of this DStream's
                           batching interval
    @param slideDuration:  sliding interval of the window (i.e., the interval after which
                           the new DStream will generate RDDs); must be a multiple of this
                           DStream's batching interval
    @param numPartitions:  number of partitions of each RDD in the new DStream.
    @param filterFunc:     function to filter expired key-value pairs;
                           only pairs that satisfy the function are retained
                           set this to null if you do not want to filter
    """
    self._validate_window_param(windowDuration, slideDuration)
    if numPartitions is None:
        numPartitions = self._sc.defaultParallelism
    reduced = self.reduceByKey(func, numPartitions)

    if invFunc:
        def reduceFunc(t, a, b):
            b = b.reduceByKey(func, numPartitions)
            r = a.union(b).reduceByKey(func, numPartitions) if a else b
            if filterFunc:
                r = r.filter(filterFunc)
            return r

        def invReduceFunc(t, a, b):
            b = b.reduceByKey(func, numPartitions)
            joined = a.leftOuterJoin(b, numPartitions)
            return joined.mapValues(lambda kv: invFunc(kv[0], kv[1])
                                    if kv[1] is not None else kv[0])

        jreduceFunc = TransformFunction(self._sc, reduceFunc, reduced._jrdd_deserializer)
        jinvReduceFunc = TransformFunction(self._sc, invReduceFunc, reduced._jrdd_deserializer)
        if slideDuration is None:
            slideDuration = self._slideDuration
        dstream = self._sc._jvm.PythonReducedWindowedDStream(
            reduced._jdstream.dstream(),
            jreduceFunc, jinvReduceFunc,
            self._ssc._jduration(windowDuration),
            self._ssc._jduration(slideDuration))
        return DStream(dstream.asJavaDStream(), self._ssc, self._sc.serializer)
    else:
        return reduced.window(windowDuration, slideDuration).reduceByKey(func, numPartitions)
[ "def", "reduceByKeyAndWindow", "(", "self", ",", "func", ",", "invFunc", ",", "windowDuration", ",", "slideDuration", "=", "None", ",", "numPartitions", "=", "None", ",", "filterFunc", "=", "None", ")", ":", "self", ".", "_validate_window_param", "(", "windowDuration", ",", "slideDuration", ")", "if", "numPartitions", "is", "None", ":", "numPartitions", "=", "self", ".", "_sc", ".", "defaultParallelism", "reduced", "=", "self", ".", "reduceByKey", "(", "func", ",", "numPartitions", ")", "if", "invFunc", ":", "def", "reduceFunc", "(", "t", ",", "a", ",", "b", ")", ":", "b", "=", "b", ".", "reduceByKey", "(", "func", ",", "numPartitions", ")", "r", "=", "a", ".", "union", "(", "b", ")", ".", "reduceByKey", "(", "func", ",", "numPartitions", ")", "if", "a", "else", "b", "if", "filterFunc", ":", "r", "=", "r", ".", "filter", "(", "filterFunc", ")", "return", "r", "def", "invReduceFunc", "(", "t", ",", "a", ",", "b", ")", ":", "b", "=", "b", ".", "reduceByKey", "(", "func", ",", "numPartitions", ")", "joined", "=", "a", ".", "leftOuterJoin", "(", "b", ",", "numPartitions", ")", "return", "joined", ".", "mapValues", "(", "lambda", "kv", ":", "invFunc", "(", "kv", "[", "0", "]", ",", "kv", "[", "1", "]", ")", "if", "kv", "[", "1", "]", "is", "not", "None", "else", "kv", "[", "0", "]", ")", "jreduceFunc", "=", "TransformFunction", "(", "self", ".", "_sc", ",", "reduceFunc", ",", "reduced", ".", "_jrdd_deserializer", ")", "jinvReduceFunc", "=", "TransformFunction", "(", "self", ".", "_sc", ",", "invReduceFunc", ",", "reduced", ".", "_jrdd_deserializer", ")", "if", "slideDuration", "is", "None", ":", "slideDuration", "=", "self", ".", "_slideDuration", "dstream", "=", "self", ".", "_sc", ".", "_jvm", ".", "PythonReducedWindowedDStream", "(", "reduced", ".", "_jdstream", ".", "dstream", "(", ")", ",", "jreduceFunc", ",", "jinvReduceFunc", ",", "self", ".", "_ssc", ".", "_jduration", "(", "windowDuration", ")", ",", "self", ".", "_ssc", ".", "_jduration", "(", "slideDuration", ")", ")", "return", "DStream", "(", "dstream", ".", "asJavaDStream", "(", ")", ",", "self", ".", "_ssc", ",", "self", ".", "_sc", ".", "serializer", ")", "else", ":", "return", "reduced", ".", "window", "(", "windowDuration", ",", "slideDuration", ")", ".", "reduceByKey", "(", "func", ",", "numPartitions", ")" ]
Return a new DStream by applying incremental `reduceByKey` over a sliding window. The reduced value of over a new window is calculated using the old window's reduce value : 1. reduce the new values that entered the window (e.g., adding new counts) 2. "inverse reduce" the old values that left the window (e.g., subtracting old counts) `invFunc` can be None, then it will reduce all the RDDs in window, could be slower than having `invFunc`. @param func: associative and commutative reduce function @param invFunc: inverse function of `reduceFunc` @param windowDuration: width of the window; must be a multiple of this DStream's batching interval @param slideDuration: sliding interval of the window (i.e., the interval after which the new DStream will generate RDDs); must be a multiple of this DStream's batching interval @param numPartitions: number of partitions of each RDD in the new DStream. @param filterFunc: function to filter expired key-value pairs; only pairs that satisfy the function are retained set this to null if you do not want to filter
[ "Return", "a", "new", "DStream", "by", "applying", "incremental", "reduceByKey", "over", "a", "sliding", "window", "." ]
618d6bff71073c8c93501ab7392c3cc579730f0b
https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/streaming/dstream.py#L519-L574
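For the keyed variant above, the incremental branch (when invFunc is given) builds a PythonReducedWindowedDStream on the JVM side, and an optional filterFunc can drop keys whose windowed value is no longer interesting. A hedged windowed word-count sketch; the input phrases, durations, and checkpoint path are all invented:

from pyspark import SparkContext
from pyspark.streaming import StreamingContext

sc = SparkContext("local[2]", "reduceByKeyAndWindow-sketch")
ssc = StreamingContext(sc, 1)
ssc.checkpoint("/tmp/reduce-by-key-window-ckpt")

lines = ssc.queueStream([sc.parallelize(b) for b in
                         (["to be"], ["or not"], ["to be"])])
pairs = lines.flatMap(lambda line: line.split()).map(lambda w: (w, 1))

# Incremental windowed word count: add new batches, subtract expired ones,
# and drop keys whose count has fallen to zero.
counts = pairs.reduceByKeyAndWindow(
    lambda a, b: a + b,          # func
    lambda a, b: a - b,          # invFunc
    windowDuration=4,
    slideDuration=2,
    filterFunc=lambda kv: kv[1] > 0)
counts.pprint()

ssc.start()
ssc.awaitTermination(10)
ssc.stop(stopSparkContext=True, stopGraceFully=False)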
19,108
apache/spark
python/pyspark/streaming/dstream.py
DStream.updateStateByKey
def updateStateByKey(self, updateFunc, numPartitions=None, initialRDD=None): """ Return a new "state" DStream where the state for each key is updated by applying the given function on the previous state of the key and the new values of the key. @param updateFunc: State update function. If this function returns None, then corresponding state key-value pair will be eliminated. """ if numPartitions is None: numPartitions = self._sc.defaultParallelism if initialRDD and not isinstance(initialRDD, RDD): initialRDD = self._sc.parallelize(initialRDD) def reduceFunc(t, a, b): if a is None: g = b.groupByKey(numPartitions).mapValues(lambda vs: (list(vs), None)) else: g = a.cogroup(b.partitionBy(numPartitions), numPartitions) g = g.mapValues(lambda ab: (list(ab[1]), list(ab[0])[0] if len(ab[0]) else None)) state = g.mapValues(lambda vs_s: updateFunc(vs_s[0], vs_s[1])) return state.filter(lambda k_v: k_v[1] is not None) jreduceFunc = TransformFunction(self._sc, reduceFunc, self._sc.serializer, self._jrdd_deserializer) if initialRDD: initialRDD = initialRDD._reserialize(self._jrdd_deserializer) dstream = self._sc._jvm.PythonStateDStream(self._jdstream.dstream(), jreduceFunc, initialRDD._jrdd) else: dstream = self._sc._jvm.PythonStateDStream(self._jdstream.dstream(), jreduceFunc) return DStream(dstream.asJavaDStream(), self._ssc, self._sc.serializer)
python
def updateStateByKey(self, updateFunc, numPartitions=None, initialRDD=None):
    """
    Return a new "state" DStream where the state for each key is updated by applying
    the given function on the previous state of the key and the new values of the key.

    @param updateFunc: State update function. If this function returns None, then
                       corresponding state key-value pair will be eliminated.
    """
    if numPartitions is None:
        numPartitions = self._sc.defaultParallelism

    if initialRDD and not isinstance(initialRDD, RDD):
        initialRDD = self._sc.parallelize(initialRDD)

    def reduceFunc(t, a, b):
        if a is None:
            g = b.groupByKey(numPartitions).mapValues(lambda vs: (list(vs), None))
        else:
            g = a.cogroup(b.partitionBy(numPartitions), numPartitions)
            g = g.mapValues(lambda ab: (list(ab[1]), list(ab[0])[0] if len(ab[0]) else None))
        state = g.mapValues(lambda vs_s: updateFunc(vs_s[0], vs_s[1]))
        return state.filter(lambda k_v: k_v[1] is not None)

    jreduceFunc = TransformFunction(self._sc, reduceFunc,
                                    self._sc.serializer, self._jrdd_deserializer)
    if initialRDD:
        initialRDD = initialRDD._reserialize(self._jrdd_deserializer)
        dstream = self._sc._jvm.PythonStateDStream(self._jdstream.dstream(), jreduceFunc,
                                                   initialRDD._jrdd)
    else:
        dstream = self._sc._jvm.PythonStateDStream(self._jdstream.dstream(), jreduceFunc)

    return DStream(dstream.asJavaDStream(), self._ssc, self._sc.serializer)
[ "def", "updateStateByKey", "(", "self", ",", "updateFunc", ",", "numPartitions", "=", "None", ",", "initialRDD", "=", "None", ")", ":", "if", "numPartitions", "is", "None", ":", "numPartitions", "=", "self", ".", "_sc", ".", "defaultParallelism", "if", "initialRDD", "and", "not", "isinstance", "(", "initialRDD", ",", "RDD", ")", ":", "initialRDD", "=", "self", ".", "_sc", ".", "parallelize", "(", "initialRDD", ")", "def", "reduceFunc", "(", "t", ",", "a", ",", "b", ")", ":", "if", "a", "is", "None", ":", "g", "=", "b", ".", "groupByKey", "(", "numPartitions", ")", ".", "mapValues", "(", "lambda", "vs", ":", "(", "list", "(", "vs", ")", ",", "None", ")", ")", "else", ":", "g", "=", "a", ".", "cogroup", "(", "b", ".", "partitionBy", "(", "numPartitions", ")", ",", "numPartitions", ")", "g", "=", "g", ".", "mapValues", "(", "lambda", "ab", ":", "(", "list", "(", "ab", "[", "1", "]", ")", ",", "list", "(", "ab", "[", "0", "]", ")", "[", "0", "]", "if", "len", "(", "ab", "[", "0", "]", ")", "else", "None", ")", ")", "state", "=", "g", ".", "mapValues", "(", "lambda", "vs_s", ":", "updateFunc", "(", "vs_s", "[", "0", "]", ",", "vs_s", "[", "1", "]", ")", ")", "return", "state", ".", "filter", "(", "lambda", "k_v", ":", "k_v", "[", "1", "]", "is", "not", "None", ")", "jreduceFunc", "=", "TransformFunction", "(", "self", ".", "_sc", ",", "reduceFunc", ",", "self", ".", "_sc", ".", "serializer", ",", "self", ".", "_jrdd_deserializer", ")", "if", "initialRDD", ":", "initialRDD", "=", "initialRDD", ".", "_reserialize", "(", "self", ".", "_jrdd_deserializer", ")", "dstream", "=", "self", ".", "_sc", ".", "_jvm", ".", "PythonStateDStream", "(", "self", ".", "_jdstream", ".", "dstream", "(", ")", ",", "jreduceFunc", ",", "initialRDD", ".", "_jrdd", ")", "else", ":", "dstream", "=", "self", ".", "_sc", ".", "_jvm", ".", "PythonStateDStream", "(", "self", ".", "_jdstream", ".", "dstream", "(", ")", ",", "jreduceFunc", ")", "return", "DStream", "(", "dstream", ".", "asJavaDStream", "(", ")", ",", "self", ".", "_ssc", ",", "self", ".", "_sc", ".", "serializer", ")" ]
Return a new "state" DStream where the state for each key is updated by applying the given function on the previous state of the key and the new values of the key. @param updateFunc: State update function. If this function returns None, then corresponding state key-value pair will be eliminated.
[ "Return", "a", "new", "state", "DStream", "where", "the", "state", "for", "each", "key", "is", "updated", "by", "applying", "the", "given", "function", "on", "the", "previous", "state", "of", "the", "key", "and", "the", "new", "values", "of", "the", "key", "." ]
618d6bff71073c8c93501ab7392c3cc579730f0b
https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/streaming/dstream.py#L576-L608
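updateStateByKey cogroups each new batch with the previous state RDD and applies the user function to (new_values, old_state). A minimal running-total sketch; the event data and checkpoint path are assumptions made for the example:

from pyspark import SparkContext
from pyspark.streaming import StreamingContext

sc = SparkContext("local[2]", "updateStateByKey-sketch")
ssc = StreamingContext(sc, 1)
ssc.checkpoint("/tmp/update-state-ckpt")  # state tracking requires checkpointing

events = ssc.queueStream([sc.parallelize(b) for b in
                          ([("user1", 1)], [("user1", 2), ("user2", 5)])])

def running_total(new_values, last_state):
    # new_values: values seen for the key in this batch;
    # last_state: previous total, or None the first time the key appears.
    return sum(new_values) + (last_state or 0)

totals = events.updateStateByKey(running_total)
totals.pprint()

ssc.start()
ssc.awaitTermination(6)
ssc.stop(stopSparkContext=True, stopGraceFully=False)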
19,109
apache/spark
python/pyspark/traceback_utils.py
first_spark_call
def first_spark_call(): """ Return a CallSite representing the first Spark call in the current call stack. """ tb = traceback.extract_stack() if len(tb) == 0: return None file, line, module, what = tb[len(tb) - 1] sparkpath = os.path.dirname(file) first_spark_frame = len(tb) - 1 for i in range(0, len(tb)): file, line, fun, what = tb[i] if file.startswith(sparkpath): first_spark_frame = i break if first_spark_frame == 0: file, line, fun, what = tb[0] return CallSite(function=fun, file=file, linenum=line) sfile, sline, sfun, swhat = tb[first_spark_frame] ufile, uline, ufun, uwhat = tb[first_spark_frame - 1] return CallSite(function=sfun, file=ufile, linenum=uline)
python
def first_spark_call():
    """
    Return a CallSite representing the first Spark call in the current call stack.
    """
    tb = traceback.extract_stack()
    if len(tb) == 0:
        return None
    file, line, module, what = tb[len(tb) - 1]
    sparkpath = os.path.dirname(file)
    first_spark_frame = len(tb) - 1
    for i in range(0, len(tb)):
        file, line, fun, what = tb[i]
        if file.startswith(sparkpath):
            first_spark_frame = i
            break
    if first_spark_frame == 0:
        file, line, fun, what = tb[0]
        return CallSite(function=fun, file=file, linenum=line)
    sfile, sline, sfun, swhat = tb[first_spark_frame]
    ufile, uline, ufun, uwhat = tb[first_spark_frame - 1]
    return CallSite(function=sfun, file=ufile, linenum=uline)
[ "def", "first_spark_call", "(", ")", ":", "tb", "=", "traceback", ".", "extract_stack", "(", ")", "if", "len", "(", "tb", ")", "==", "0", ":", "return", "None", "file", ",", "line", ",", "module", ",", "what", "=", "tb", "[", "len", "(", "tb", ")", "-", "1", "]", "sparkpath", "=", "os", ".", "path", ".", "dirname", "(", "file", ")", "first_spark_frame", "=", "len", "(", "tb", ")", "-", "1", "for", "i", "in", "range", "(", "0", ",", "len", "(", "tb", ")", ")", ":", "file", ",", "line", ",", "fun", ",", "what", "=", "tb", "[", "i", "]", "if", "file", ".", "startswith", "(", "sparkpath", ")", ":", "first_spark_frame", "=", "i", "break", "if", "first_spark_frame", "==", "0", ":", "file", ",", "line", ",", "fun", ",", "what", "=", "tb", "[", "0", "]", "return", "CallSite", "(", "function", "=", "fun", ",", "file", "=", "file", ",", "linenum", "=", "line", ")", "sfile", ",", "sline", ",", "sfun", ",", "swhat", "=", "tb", "[", "first_spark_frame", "]", "ufile", ",", "uline", ",", "ufun", ",", "uwhat", "=", "tb", "[", "first_spark_frame", "-", "1", "]", "return", "CallSite", "(", "function", "=", "sfun", ",", "file", "=", "ufile", ",", "linenum", "=", "uline", ")" ]
Return a CallSite representing the first Spark call in the current call stack.
[ "Return", "a", "CallSite", "representing", "the", "first", "Spark", "call", "in", "the", "current", "call", "stack", "." ]
618d6bff71073c8c93501ab7392c3cc579730f0b
https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/traceback_utils.py#L26-L46
19,110
apache/spark
examples/src/main/python/mllib/logistic_regression.py
parsePoint
def parsePoint(line): """ Parse a line of text into an MLlib LabeledPoint object. """ values = [float(s) for s in line.split(' ')] if values[0] == -1: # Convert -1 labels to 0 for MLlib values[0] = 0 return LabeledPoint(values[0], values[1:])
python
def parsePoint(line):
    """
    Parse a line of text into an MLlib LabeledPoint object.
    """
    values = [float(s) for s in line.split(' ')]
    if values[0] == -1:   # Convert -1 labels to 0 for MLlib
        values[0] = 0
    return LabeledPoint(values[0], values[1:])
[ "def", "parsePoint", "(", "line", ")", ":", "values", "=", "[", "float", "(", "s", ")", "for", "s", "in", "line", ".", "split", "(", "' '", ")", "]", "if", "values", "[", "0", "]", "==", "-", "1", ":", "# Convert -1 labels to 0 for MLlib", "values", "[", "0", "]", "=", "0", "return", "LabeledPoint", "(", "values", "[", "0", "]", ",", "values", "[", "1", ":", "]", ")" ]
Parse a line of text into an MLlib LabeledPoint object.
[ "Parse", "a", "line", "of", "text", "into", "an", "MLlib", "LabeledPoint", "object", "." ]
618d6bff71073c8c93501ab7392c3cc579730f0b
https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/examples/src/main/python/mllib/logistic_regression.py#L32-L39
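parsePoint converts a space-separated line of label and features into an MLlib LabeledPoint, remapping -1 labels to 0. Below is a small hedged illustration of the same parsing logic run locally, with no SparkContext needed; the sample line is invented:

from pyspark.mllib.regression import LabeledPoint

def parse_point(line):
    # Same logic as the example program: label first, features after,
    # with -1 labels normalised to 0 for MLlib's binary classifiers.
    values = [float(s) for s in line.split(' ')]
    if values[0] == -1:
        values[0] = 0
    return LabeledPoint(values[0], values[1:])

# Invented sample line: label -1 with three features.
point = parse_point("-1 0.5 1.25 3.0")
print(point.label)      # 0.0
print(point.features)   # dense vector of the three feature values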
19,111
apache/spark
python/pyspark/mllib/evaluation.py
MulticlassMetrics.fMeasure
def fMeasure(self, label, beta=None): """ Returns f-measure. """ if beta is None: return self.call("fMeasure", label) else: return self.call("fMeasure", label, beta)
python
def fMeasure(self, label, beta=None):
    """
    Returns f-measure.
    """
    if beta is None:
        return self.call("fMeasure", label)
    else:
        return self.call("fMeasure", label, beta)
[ "def", "fMeasure", "(", "self", ",", "label", ",", "beta", "=", "None", ")", ":", "if", "beta", "is", "None", ":", "return", "self", ".", "call", "(", "\"fMeasure\"", ",", "label", ")", "else", ":", "return", "self", ".", "call", "(", "\"fMeasure\"", ",", "label", ",", "beta", ")" ]
Returns f-measure.
[ "Returns", "f", "-", "measure", "." ]
618d6bff71073c8c93501ab7392c3cc579730f0b
https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/mllib/evaluation.py#L297-L304
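fMeasure dispatches to the JVM MulticlassMetrics implementation with or without the beta parameter. A hypothetical end-to-end sketch; the (prediction, label) pairs are made up for the example:

from pyspark import SparkContext
from pyspark.mllib.evaluation import MulticlassMetrics

sc = SparkContext("local[2]", "fMeasure-sketch")

# (prediction, label) pairs for a toy two-class problem.
prediction_and_labels = sc.parallelize(
    [(0.0, 0.0), (1.0, 1.0), (1.0, 0.0), (0.0, 1.0), (1.0, 1.0)])

metrics = MulticlassMetrics(prediction_and_labels)
print(metrics.fMeasure(1.0))        # F1 score for label 1.0 (beta omitted)
print(metrics.fMeasure(1.0, 2.0))   # F-measure for label 1.0 with beta = 2
sc.stop()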
19,112
apache/spark
python/pyspark/sql/dataframe.py
_to_corrected_pandas_type
def _to_corrected_pandas_type(dt): """ When converting Spark SQL records to Pandas DataFrame, the inferred data type may be wrong. This method gets the corrected data type for Pandas if that type may be inferred uncorrectly. """ import numpy as np if type(dt) == ByteType: return np.int8 elif type(dt) == ShortType: return np.int16 elif type(dt) == IntegerType: return np.int32 elif type(dt) == FloatType: return np.float32 else: return None
python
def _to_corrected_pandas_type(dt):
    """
    When converting Spark SQL records to Pandas DataFrame, the inferred data type may be
    wrong. This method gets the corrected data type for Pandas if that type may be
    inferred uncorrectly.
    """
    import numpy as np
    if type(dt) == ByteType:
        return np.int8
    elif type(dt) == ShortType:
        return np.int16
    elif type(dt) == IntegerType:
        return np.int32
    elif type(dt) == FloatType:
        return np.float32
    else:
        return None
[ "def", "_to_corrected_pandas_type", "(", "dt", ")", ":", "import", "numpy", "as", "np", "if", "type", "(", "dt", ")", "==", "ByteType", ":", "return", "np", ".", "int8", "elif", "type", "(", "dt", ")", "==", "ShortType", ":", "return", "np", ".", "int16", "elif", "type", "(", "dt", ")", "==", "IntegerType", ":", "return", "np", ".", "int32", "elif", "type", "(", "dt", ")", "==", "FloatType", ":", "return", "np", ".", "float32", "else", ":", "return", "None" ]
When converting Spark SQL records to Pandas DataFrame, the inferred data type may be wrong. This method gets the corrected data type for Pandas if that type may be inferred uncorrectly.
[ "When", "converting", "Spark", "SQL", "records", "to", "Pandas", "DataFrame", "the", "inferred", "data", "type", "may", "be", "wrong", ".", "This", "method", "gets", "the", "corrected", "data", "type", "for", "Pandas", "if", "that", "type", "may", "be", "inferred", "uncorrectly", "." ]
618d6bff71073c8c93501ab7392c3cc579730f0b
https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/sql/dataframe.py#L2239-L2254
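The helper above narrows NumPy dtypes for Pandas conversion (Byte to int8, Short to int16, Integer to int32, Float to float32, anything else to None). A tiny hedged check of that mapping, calling the private helper directly; being private, it is not a public API and may change between Spark versions:

# Illustration only: exercises the private helper from pyspark.sql.dataframe.
from pyspark.sql.dataframe import _to_corrected_pandas_type
from pyspark.sql.types import ByteType, ShortType, IntegerType, FloatType, DoubleType

print(_to_corrected_pandas_type(ByteType()))     # numpy.int8
print(_to_corrected_pandas_type(ShortType()))    # numpy.int16
print(_to_corrected_pandas_type(IntegerType()))  # numpy.int32
print(_to_corrected_pandas_type(FloatType()))    # numpy.float32
print(_to_corrected_pandas_type(DoubleType()))   # None (no correction applied)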
19,113
apache/spark
python/pyspark/sql/dataframe.py
DataFrame.show
def show(self, n=20, truncate=True, vertical=False): """Prints the first ``n`` rows to the console. :param n: Number of rows to show. :param truncate: If set to True, truncate strings longer than 20 chars by default. If set to a number greater than one, truncates long strings to length ``truncate`` and align cells right. :param vertical: If set to True, print output rows vertically (one line per column value). >>> df DataFrame[age: int, name: string] >>> df.show() +---+-----+ |age| name| +---+-----+ | 2|Alice| | 5| Bob| +---+-----+ >>> df.show(truncate=3) +---+----+ |age|name| +---+----+ | 2| Ali| | 5| Bob| +---+----+ >>> df.show(vertical=True) -RECORD 0----- age | 2 name | Alice -RECORD 1----- age | 5 name | Bob """ if isinstance(truncate, bool) and truncate: print(self._jdf.showString(n, 20, vertical)) else: print(self._jdf.showString(n, int(truncate), vertical))
python
def show(self, n=20, truncate=True, vertical=False):
    """Prints the first ``n`` rows to the console.

    :param n: Number of rows to show.
    :param truncate: If set to True, truncate strings longer than 20 chars by default.
        If set to a number greater than one, truncates long strings to length ``truncate``
        and align cells right.
    :param vertical: If set to True, print output rows vertically (one line
        per column value).

    >>> df
    DataFrame[age: int, name: string]
    >>> df.show()
    +---+-----+
    |age| name|
    +---+-----+
    |  2|Alice|
    |  5|  Bob|
    +---+-----+
    >>> df.show(truncate=3)
    +---+----+
    |age|name|
    +---+----+
    |  2| Ali|
    |  5| Bob|
    +---+----+
    >>> df.show(vertical=True)
    -RECORD 0-----
     age  | 2
     name | Alice
    -RECORD 1-----
     age  | 5
     name | Bob
    """
    if isinstance(truncate, bool) and truncate:
        print(self._jdf.showString(n, 20, vertical))
    else:
        print(self._jdf.showString(n, int(truncate), vertical))
[ "def", "show", "(", "self", ",", "n", "=", "20", ",", "truncate", "=", "True", ",", "vertical", "=", "False", ")", ":", "if", "isinstance", "(", "truncate", ",", "bool", ")", "and", "truncate", ":", "print", "(", "self", ".", "_jdf", ".", "showString", "(", "n", ",", "20", ",", "vertical", ")", ")", "else", ":", "print", "(", "self", ".", "_jdf", ".", "showString", "(", "n", ",", "int", "(", "truncate", ")", ",", "vertical", ")", ")" ]
Prints the first ``n`` rows to the console. :param n: Number of rows to show. :param truncate: If set to True, truncate strings longer than 20 chars by default. If set to a number greater than one, truncates long strings to length ``truncate`` and align cells right. :param vertical: If set to True, print output rows vertically (one line per column value). >>> df DataFrame[age: int, name: string] >>> df.show() +---+-----+ |age| name| +---+-----+ | 2|Alice| | 5| Bob| +---+-----+ >>> df.show(truncate=3) +---+----+ |age|name| +---+----+ | 2| Ali| | 5| Bob| +---+----+ >>> df.show(vertical=True) -RECORD 0----- age | 2 name | Alice -RECORD 1----- age | 5 name | Bob
[ "Prints", "the", "first", "n", "rows", "to", "the", "console", "." ]
618d6bff71073c8c93501ab7392c3cc579730f0b
https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/sql/dataframe.py#L324-L361
19,114
apache/spark
python/pyspark/sql/dataframe.py
DataFrame._repr_html_
def _repr_html_(self): """Returns a dataframe with html code when you enabled eager evaluation by 'spark.sql.repl.eagerEval.enabled', this only called by REPL you are using support eager evaluation with HTML. """ import cgi if not self._support_repr_html: self._support_repr_html = True if self.sql_ctx._conf.isReplEagerEvalEnabled(): max_num_rows = max(self.sql_ctx._conf.replEagerEvalMaxNumRows(), 0) sock_info = self._jdf.getRowsToPython( max_num_rows, self.sql_ctx._conf.replEagerEvalTruncate()) rows = list(_load_from_socket(sock_info, BatchedSerializer(PickleSerializer()))) head = rows[0] row_data = rows[1:] has_more_data = len(row_data) > max_num_rows row_data = row_data[:max_num_rows] html = "<table border='1'>\n" # generate table head html += "<tr><th>%s</th></tr>\n" % "</th><th>".join(map(lambda x: cgi.escape(x), head)) # generate table rows for row in row_data: html += "<tr><td>%s</td></tr>\n" % "</td><td>".join( map(lambda x: cgi.escape(x), row)) html += "</table>\n" if has_more_data: html += "only showing top %d %s\n" % ( max_num_rows, "row" if max_num_rows == 1 else "rows") return html else: return None
python
def _repr_html_(self):
    """Returns a dataframe with html code when you enabled eager evaluation
    by 'spark.sql.repl.eagerEval.enabled', this only called by REPL you are
    using support eager evaluation with HTML.
    """
    import cgi
    if not self._support_repr_html:
        self._support_repr_html = True
    if self.sql_ctx._conf.isReplEagerEvalEnabled():
        max_num_rows = max(self.sql_ctx._conf.replEagerEvalMaxNumRows(), 0)
        sock_info = self._jdf.getRowsToPython(
            max_num_rows, self.sql_ctx._conf.replEagerEvalTruncate())
        rows = list(_load_from_socket(sock_info, BatchedSerializer(PickleSerializer())))
        head = rows[0]
        row_data = rows[1:]
        has_more_data = len(row_data) > max_num_rows
        row_data = row_data[:max_num_rows]

        html = "<table border='1'>\n"
        # generate table head
        html += "<tr><th>%s</th></tr>\n" % "</th><th>".join(map(lambda x: cgi.escape(x), head))
        # generate table rows
        for row in row_data:
            html += "<tr><td>%s</td></tr>\n" % "</td><td>".join(
                map(lambda x: cgi.escape(x), row))
        html += "</table>\n"
        if has_more_data:
            html += "only showing top %d %s\n" % (
                max_num_rows, "row" if max_num_rows == 1 else "rows")
        return html
    else:
        return None
[ "def", "_repr_html_", "(", "self", ")", ":", "import", "cgi", "if", "not", "self", ".", "_support_repr_html", ":", "self", ".", "_support_repr_html", "=", "True", "if", "self", ".", "sql_ctx", ".", "_conf", ".", "isReplEagerEvalEnabled", "(", ")", ":", "max_num_rows", "=", "max", "(", "self", ".", "sql_ctx", ".", "_conf", ".", "replEagerEvalMaxNumRows", "(", ")", ",", "0", ")", "sock_info", "=", "self", ".", "_jdf", ".", "getRowsToPython", "(", "max_num_rows", ",", "self", ".", "sql_ctx", ".", "_conf", ".", "replEagerEvalTruncate", "(", ")", ")", "rows", "=", "list", "(", "_load_from_socket", "(", "sock_info", ",", "BatchedSerializer", "(", "PickleSerializer", "(", ")", ")", ")", ")", "head", "=", "rows", "[", "0", "]", "row_data", "=", "rows", "[", "1", ":", "]", "has_more_data", "=", "len", "(", "row_data", ")", ">", "max_num_rows", "row_data", "=", "row_data", "[", ":", "max_num_rows", "]", "html", "=", "\"<table border='1'>\\n\"", "# generate table head", "html", "+=", "\"<tr><th>%s</th></tr>\\n\"", "%", "\"</th><th>\"", ".", "join", "(", "map", "(", "lambda", "x", ":", "cgi", ".", "escape", "(", "x", ")", ",", "head", ")", ")", "# generate table rows", "for", "row", "in", "row_data", ":", "html", "+=", "\"<tr><td>%s</td></tr>\\n\"", "%", "\"</td><td>\"", ".", "join", "(", "map", "(", "lambda", "x", ":", "cgi", ".", "escape", "(", "x", ")", ",", "row", ")", ")", "html", "+=", "\"</table>\\n\"", "if", "has_more_data", ":", "html", "+=", "\"only showing top %d %s\\n\"", "%", "(", "max_num_rows", ",", "\"row\"", "if", "max_num_rows", "==", "1", "else", "\"rows\"", ")", "return", "html", "else", ":", "return", "None" ]
Returns a dataframe with html code when you enabled eager evaluation by 'spark.sql.repl.eagerEval.enabled', this only called by REPL you are using support eager evaluation with HTML.
[ "Returns", "a", "dataframe", "with", "html", "code", "when", "you", "enabled", "eager", "evaluation", "by", "spark", ".", "sql", ".", "repl", ".", "eagerEval", ".", "enabled", "this", "only", "called", "by", "REPL", "you", "are", "using", "support", "eager", "evaluation", "with", "HTML", "." ]
618d6bff71073c8c93501ab7392c3cc579730f0b
https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/sql/dataframe.py#L372-L403
19,115
apache/spark
python/pyspark/sql/dataframe.py
DataFrame.localCheckpoint
def localCheckpoint(self, eager=True): """Returns a locally checkpointed version of this Dataset. Checkpointing can be used to truncate the logical plan of this DataFrame, which is especially useful in iterative algorithms where the plan may grow exponentially. Local checkpoints are stored in the executors using the caching subsystem and therefore they are not reliable. :param eager: Whether to checkpoint this DataFrame immediately .. note:: Experimental """ jdf = self._jdf.localCheckpoint(eager) return DataFrame(jdf, self.sql_ctx)
python
def localCheckpoint(self, eager=True):
    """Returns a locally checkpointed version of this Dataset. Checkpointing can be used to
    truncate the logical plan of this DataFrame, which is especially useful in iterative
    algorithms where the plan may grow exponentially. Local checkpoints are stored in the
    executors using the caching subsystem and therefore they are not reliable.

    :param eager: Whether to checkpoint this DataFrame immediately

    .. note:: Experimental
    """
    jdf = self._jdf.localCheckpoint(eager)
    return DataFrame(jdf, self.sql_ctx)
[ "def", "localCheckpoint", "(", "self", ",", "eager", "=", "True", ")", ":", "jdf", "=", "self", ".", "_jdf", ".", "localCheckpoint", "(", "eager", ")", "return", "DataFrame", "(", "jdf", ",", "self", ".", "sql_ctx", ")" ]
Returns a locally checkpointed version of this Dataset. Checkpointing can be used to truncate the logical plan of this DataFrame, which is especially useful in iterative algorithms where the plan may grow exponentially. Local checkpoints are stored in the executors using the caching subsystem and therefore they are not reliable. :param eager: Whether to checkpoint this DataFrame immediately .. note:: Experimental
[ "Returns", "a", "locally", "checkpointed", "version", "of", "this", "Dataset", ".", "Checkpointing", "can", "be", "used", "to", "truncate", "the", "logical", "plan", "of", "this", "DataFrame", "which", "is", "especially", "useful", "in", "iterative", "algorithms", "where", "the", "plan", "may", "grow", "exponentially", ".", "Local", "checkpoints", "are", "stored", "in", "the", "executors", "using", "the", "caching", "subsystem", "and", "therefore", "they", "are", "not", "reliable", "." ]
618d6bff71073c8c93501ab7392c3cc579730f0b
https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/sql/dataframe.py#L420-L431
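localCheckpoint truncates the logical plan using executor-local storage; because the checkpoint is not reliable, it trades fault tolerance for speed. A hedged usage sketch, assuming a local SparkSession; the iterative loop is contrived to show a plan that keeps growing:

from pyspark.sql import SparkSession

spark = (SparkSession.builder
         .master("local[2]")
         .appName("localCheckpoint-sketch")
         .getOrCreate())

df = spark.range(1000)
# Simulate an iterative job whose logical plan would otherwise keep growing.
for i in range(5):
    df = df.withColumn("step_%d" % i, df["id"] + i)

# Cut the lineage; eager=True (the default) materializes the checkpoint now.
df = df.localCheckpoint(eager=True)
print(df.count())
spark.stop()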
19,116
apache/spark
python/pyspark/sql/dataframe.py
DataFrame.hint
def hint(self, name, *parameters): """Specifies some hint on the current DataFrame. :param name: A name of the hint. :param parameters: Optional parameters. :return: :class:`DataFrame` >>> df.join(df2.hint("broadcast"), "name").show() +----+---+------+ |name|age|height| +----+---+------+ | Bob| 5| 85| +----+---+------+ """ if len(parameters) == 1 and isinstance(parameters[0], list): parameters = parameters[0] if not isinstance(name, str): raise TypeError("name should be provided as str, got {0}".format(type(name))) allowed_types = (basestring, list, float, int) for p in parameters: if not isinstance(p, allowed_types): raise TypeError( "all parameters should be in {0}, got {1} of type {2}".format( allowed_types, p, type(p))) jdf = self._jdf.hint(name, self._jseq(parameters)) return DataFrame(jdf, self.sql_ctx)
python
def hint(self, name, *parameters):
    """Specifies some hint on the current DataFrame.

    :param name: A name of the hint.
    :param parameters: Optional parameters.
    :return: :class:`DataFrame`

    >>> df.join(df2.hint("broadcast"), "name").show()
    +----+---+------+
    |name|age|height|
    +----+---+------+
    | Bob|  5|    85|
    +----+---+------+
    """
    if len(parameters) == 1 and isinstance(parameters[0], list):
        parameters = parameters[0]

    if not isinstance(name, str):
        raise TypeError("name should be provided as str, got {0}".format(type(name)))

    allowed_types = (basestring, list, float, int)
    for p in parameters:
        if not isinstance(p, allowed_types):
            raise TypeError(
                "all parameters should be in {0}, got {1} of type {2}".format(
                    allowed_types, p, type(p)))

    jdf = self._jdf.hint(name, self._jseq(parameters))
    return DataFrame(jdf, self.sql_ctx)
[ "def", "hint", "(", "self", ",", "name", ",", "*", "parameters", ")", ":", "if", "len", "(", "parameters", ")", "==", "1", "and", "isinstance", "(", "parameters", "[", "0", "]", ",", "list", ")", ":", "parameters", "=", "parameters", "[", "0", "]", "if", "not", "isinstance", "(", "name", ",", "str", ")", ":", "raise", "TypeError", "(", "\"name should be provided as str, got {0}\"", ".", "format", "(", "type", "(", "name", ")", ")", ")", "allowed_types", "=", "(", "basestring", ",", "list", ",", "float", ",", "int", ")", "for", "p", "in", "parameters", ":", "if", "not", "isinstance", "(", "p", ",", "allowed_types", ")", ":", "raise", "TypeError", "(", "\"all parameters should be in {0}, got {1} of type {2}\"", ".", "format", "(", "allowed_types", ",", "p", ",", "type", "(", "p", ")", ")", ")", "jdf", "=", "self", ".", "_jdf", ".", "hint", "(", "name", ",", "self", ".", "_jseq", "(", "parameters", ")", ")", "return", "DataFrame", "(", "jdf", ",", "self", ".", "sql_ctx", ")" ]
Specifies some hint on the current DataFrame. :param name: A name of the hint. :param parameters: Optional parameters. :return: :class:`DataFrame` >>> df.join(df2.hint("broadcast"), "name").show() +----+---+------+ |name|age|height| +----+---+------+ | Bob| 5| 85| +----+---+------+
[ "Specifies", "some", "hint", "on", "the", "current", "DataFrame", "." ]
618d6bff71073c8c93501ab7392c3cc579730f0b
https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/sql/dataframe.py#L468-L496
19,117
apache/spark
python/pyspark/sql/dataframe.py
DataFrame.limit
def limit(self, num): """Limits the result count to the number specified. >>> df.limit(1).collect() [Row(age=2, name=u'Alice')] >>> df.limit(0).collect() [] """ jdf = self._jdf.limit(num) return DataFrame(jdf, self.sql_ctx)
python
def limit(self, num):
    """Limits the result count to the number specified.

    >>> df.limit(1).collect()
    [Row(age=2, name=u'Alice')]
    >>> df.limit(0).collect()
    []
    """
    jdf = self._jdf.limit(num)
    return DataFrame(jdf, self.sql_ctx)
[ "def", "limit", "(", "self", ",", "num", ")", ":", "jdf", "=", "self", ".", "_jdf", ".", "limit", "(", "num", ")", "return", "DataFrame", "(", "jdf", ",", "self", ".", "sql_ctx", ")" ]
Limits the result count to the number specified. >>> df.limit(1).collect() [Row(age=2, name=u'Alice')] >>> df.limit(0).collect() []
[ "Limits", "the", "result", "count", "to", "the", "number", "specified", "." ]
618d6bff71073c8c93501ab7392c3cc579730f0b
https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/sql/dataframe.py#L535-L544
19,118
apache/spark
python/pyspark/sql/dataframe.py
DataFrame.sampleBy
def sampleBy(self, col, fractions, seed=None): """ Returns a stratified sample without replacement based on the fraction given on each stratum. :param col: column that defines strata :param fractions: sampling fraction for each stratum. If a stratum is not specified, we treat its fraction as zero. :param seed: random seed :return: a new DataFrame that represents the stratified sample >>> from pyspark.sql.functions import col >>> dataset = sqlContext.range(0, 100).select((col("id") % 3).alias("key")) >>> sampled = dataset.sampleBy("key", fractions={0: 0.1, 1: 0.2}, seed=0) >>> sampled.groupBy("key").count().orderBy("key").show() +---+-----+ |key|count| +---+-----+ | 0| 3| | 1| 6| +---+-----+ >>> dataset.sampleBy(col("key"), fractions={2: 1.0}, seed=0).count() 33 .. versionchanged:: 3.0 Added sampling by a column of :class:`Column` """ if isinstance(col, basestring): col = Column(col) elif not isinstance(col, Column): raise ValueError("col must be a string or a column, but got %r" % type(col)) if not isinstance(fractions, dict): raise ValueError("fractions must be a dict but got %r" % type(fractions)) for k, v in fractions.items(): if not isinstance(k, (float, int, long, basestring)): raise ValueError("key must be float, int, long, or string, but got %r" % type(k)) fractions[k] = float(v) col = col._jc seed = seed if seed is not None else random.randint(0, sys.maxsize) return DataFrame(self._jdf.stat().sampleBy(col, self._jmap(fractions), seed), self.sql_ctx)
python
def sampleBy(self, col, fractions, seed=None):
    """
    Returns a stratified sample without replacement based on the
    fraction given on each stratum.

    :param col: column that defines strata
    :param fractions: sampling fraction for each stratum. If a stratum is not
        specified, we treat its fraction as zero.
    :param seed: random seed
    :return: a new DataFrame that represents the stratified sample

    >>> from pyspark.sql.functions import col
    >>> dataset = sqlContext.range(0, 100).select((col("id") % 3).alias("key"))
    >>> sampled = dataset.sampleBy("key", fractions={0: 0.1, 1: 0.2}, seed=0)
    >>> sampled.groupBy("key").count().orderBy("key").show()
    +---+-----+
    |key|count|
    +---+-----+
    |  0|    3|
    |  1|    6|
    +---+-----+
    >>> dataset.sampleBy(col("key"), fractions={2: 1.0}, seed=0).count()
    33

    .. versionchanged:: 3.0
       Added sampling by a column of :class:`Column`
    """
    if isinstance(col, basestring):
        col = Column(col)
    elif not isinstance(col, Column):
        raise ValueError("col must be a string or a column, but got %r" % type(col))
    if not isinstance(fractions, dict):
        raise ValueError("fractions must be a dict but got %r" % type(fractions))
    for k, v in fractions.items():
        if not isinstance(k, (float, int, long, basestring)):
            raise ValueError("key must be float, int, long, or string, but got %r" % type(k))
        fractions[k] = float(v)
    col = col._jc
    seed = seed if seed is not None else random.randint(0, sys.maxsize)
    return DataFrame(self._jdf.stat().sampleBy(col, self._jmap(fractions), seed), self.sql_ctx)
[ "def", "sampleBy", "(", "self", ",", "col", ",", "fractions", ",", "seed", "=", "None", ")", ":", "if", "isinstance", "(", "col", ",", "basestring", ")", ":", "col", "=", "Column", "(", "col", ")", "elif", "not", "isinstance", "(", "col", ",", "Column", ")", ":", "raise", "ValueError", "(", "\"col must be a string or a column, but got %r\"", "%", "type", "(", "col", ")", ")", "if", "not", "isinstance", "(", "fractions", ",", "dict", ")", ":", "raise", "ValueError", "(", "\"fractions must be a dict but got %r\"", "%", "type", "(", "fractions", ")", ")", "for", "k", ",", "v", "in", "fractions", ".", "items", "(", ")", ":", "if", "not", "isinstance", "(", "k", ",", "(", "float", ",", "int", ",", "long", ",", "basestring", ")", ")", ":", "raise", "ValueError", "(", "\"key must be float, int, long, or string, but got %r\"", "%", "type", "(", "k", ")", ")", "fractions", "[", "k", "]", "=", "float", "(", "v", ")", "col", "=", "col", ".", "_jc", "seed", "=", "seed", "if", "seed", "is", "not", "None", "else", "random", ".", "randint", "(", "0", ",", "sys", ".", "maxsize", ")", "return", "DataFrame", "(", "self", ".", "_jdf", ".", "stat", "(", ")", ".", "sampleBy", "(", "col", ",", "self", ".", "_jmap", "(", "fractions", ")", ",", "seed", ")", ",", "self", ".", "sql_ctx", ")" ]
Returns a stratified sample without replacement based on the fraction given on each stratum. :param col: column that defines strata :param fractions: sampling fraction for each stratum. If a stratum is not specified, we treat its fraction as zero. :param seed: random seed :return: a new DataFrame that represents the stratified sample >>> from pyspark.sql.functions import col >>> dataset = sqlContext.range(0, 100).select((col("id") % 3).alias("key")) >>> sampled = dataset.sampleBy("key", fractions={0: 0.1, 1: 0.2}, seed=0) >>> sampled.groupBy("key").count().orderBy("key").show() +---+-----+ |key|count| +---+-----+ | 0| 3| | 1| 6| +---+-----+ >>> dataset.sampleBy(col("key"), fractions={2: 1.0}, seed=0).count() 33 .. versionchanged:: 3.0 Added sampling by a column of :class:`Column`
[ "Returns", "a", "stratified", "sample", "without", "replacement", "based", "on", "the", "fraction", "given", "on", "each", "stratum", "." ]
618d6bff71073c8c93501ab7392c3cc579730f0b
https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/sql/dataframe.py#L849-L889
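The stratified-sampling record above already carries a doctest; the sketch below is a self-contained variant, assuming a local SparkSession (the 100-row key column and the fractions are illustrative, and per-stratum counts vary with the seed). Per the docstring, passing a Column for col is only supported from Spark 3.0.

from pyspark.sql import SparkSession
from pyspark.sql.functions import col

spark = SparkSession.builder.master("local[2]").getOrCreate()
dataset = spark.range(0, 100).select((col("id") % 3).alias("key"))

# Keep roughly 10% of the key=0 stratum and 20% of key=1; key=2 is not listed,
# so its fraction is treated as zero and those rows are dropped entirely.
sampled = dataset.sampleBy("key", fractions={0: 0.1, 1: 0.2}, seed=0)
sampled.groupBy("key").count().orderBy("key").show()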
19,119
apache/spark
python/pyspark/sql/dataframe.py
DataFrame.dtypes
def dtypes(self): """Returns all column names and their data types as a list. >>> df.dtypes [('age', 'int'), ('name', 'string')] """ return [(str(f.name), f.dataType.simpleString()) for f in self.schema.fields]
python
def dtypes(self): """Returns all column names and their data types as a list. >>> df.dtypes [('age', 'int'), ('name', 'string')] """ return [(str(f.name), f.dataType.simpleString()) for f in self.schema.fields]
[ "def", "dtypes", "(", "self", ")", ":", "return", "[", "(", "str", "(", "f", ".", "name", ")", ",", "f", ".", "dataType", ".", "simpleString", "(", ")", ")", "for", "f", "in", "self", ".", "schema", ".", "fields", "]" ]
Returns all column names and their data types as a list. >>> df.dtypes [('age', 'int'), ('name', 'string')]
[ "Returns", "all", "column", "names", "and", "their", "data", "types", "as", "a", "list", "." ]
618d6bff71073c8c93501ab7392c3cc579730f0b
https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/sql/dataframe.py#L915-L921
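A short sketch of dtypes, assuming a local SparkSession; note that schema inference from the sample Row yields 'bigint' rather than the 'int' shown by the doctest fixture.

from pyspark.sql import Row, SparkSession

spark = SparkSession.builder.master("local[2]").getOrCreate()
df = spark.createDataFrame([Row(age=2, name="Alice")])

# Each entry pairs a column name with the simple string form of its data type.
print(df.dtypes)   # e.g. [('age', 'bigint'), ('name', 'string')]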
19,120
apache/spark
python/pyspark/sql/dataframe.py
DataFrame._jseq
def _jseq(self, cols, converter=None): """Return a JVM Seq of Columns from a list of Column or names""" return _to_seq(self.sql_ctx._sc, cols, converter)
python
def _jseq(self, cols, converter=None): """Return a JVM Seq of Columns from a list of Column or names""" return _to_seq(self.sql_ctx._sc, cols, converter)
[ "def", "_jseq", "(", "self", ",", "cols", ",", "converter", "=", "None", ")", ":", "return", "_to_seq", "(", "self", ".", "sql_ctx", ".", "_sc", ",", "cols", ",", "converter", ")" ]
Return a JVM Seq of Columns from a list of Column or names
[ "Return", "a", "JVM", "Seq", "of", "Columns", "from", "a", "list", "of", "Column", "or", "names" ]
618d6bff71073c8c93501ab7392c3cc579730f0b
https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/sql/dataframe.py#L1097-L1099
19,121
apache/spark
python/pyspark/sql/dataframe.py
DataFrame._jcols
def _jcols(self, *cols): """Return a JVM Seq of Columns from a list of Column or column names If `cols` has only one list in it, cols[0] will be used as the list. """ if len(cols) == 1 and isinstance(cols[0], list): cols = cols[0] return self._jseq(cols, _to_java_column)
python
def _jcols(self, *cols): """Return a JVM Seq of Columns from a list of Column or column names If `cols` has only one list in it, cols[0] will be used as the list. """ if len(cols) == 1 and isinstance(cols[0], list): cols = cols[0] return self._jseq(cols, _to_java_column)
[ "def", "_jcols", "(", "self", ",", "*", "cols", ")", ":", "if", "len", "(", "cols", ")", "==", "1", "and", "isinstance", "(", "cols", "[", "0", "]", ",", "list", ")", ":", "cols", "=", "cols", "[", "0", "]", "return", "self", ".", "_jseq", "(", "cols", ",", "_to_java_column", ")" ]
Return a JVM Seq of Columns from a list of Column or column names If `cols` has only one list in it, cols[0] will be used as the list.
[ "Return", "a", "JVM", "Seq", "of", "Columns", "from", "a", "list", "of", "Column", "or", "column", "names" ]
618d6bff71073c8c93501ab7392c3cc579730f0b
https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/sql/dataframe.py#L1105-L1112
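_jseq and _jcols are internal plumbing that turn Python-side column arguments into a JVM Seq; the user-visible consequence is that DataFrame methods accept either varargs or a single list of columns/names. A hedged sketch, assuming a local SparkSession and a made-up DataFrame:

from pyspark.sql import Row, SparkSession

spark = SparkSession.builder.master("local[2]").getOrCreate()
df = spark.createDataFrame([Row(age=2, name="Alice")])

# Both calls reach the same JVM code path: _jcols unwraps the single-list form,
# then _jseq converts each name to a java Column and builds the Seq.
df.select("age", "name").show()
df.select(["age", "name"]).show()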
19,122
apache/spark
python/pyspark/sql/dataframe.py
DataFrame._sort_cols
def _sort_cols(self, cols, kwargs): """ Return a JVM Seq of Columns that describes the sort order """ if not cols: raise ValueError("should sort by at least one column") if len(cols) == 1 and isinstance(cols[0], list): cols = cols[0] jcols = [_to_java_column(c) for c in cols] ascending = kwargs.get('ascending', True) if isinstance(ascending, (bool, int)): if not ascending: jcols = [jc.desc() for jc in jcols] elif isinstance(ascending, list): jcols = [jc if asc else jc.desc() for asc, jc in zip(ascending, jcols)] else: raise TypeError("ascending can only be boolean or list, but got %s" % type(ascending)) return self._jseq(jcols)
python
def _sort_cols(self, cols, kwargs): """ Return a JVM Seq of Columns that describes the sort order """ if not cols: raise ValueError("should sort by at least one column") if len(cols) == 1 and isinstance(cols[0], list): cols = cols[0] jcols = [_to_java_column(c) for c in cols] ascending = kwargs.get('ascending', True) if isinstance(ascending, (bool, int)): if not ascending: jcols = [jc.desc() for jc in jcols] elif isinstance(ascending, list): jcols = [jc if asc else jc.desc() for asc, jc in zip(ascending, jcols)] else: raise TypeError("ascending can only be boolean or list, but got %s" % type(ascending)) return self._jseq(jcols)
[ "def", "_sort_cols", "(", "self", ",", "cols", ",", "kwargs", ")", ":", "if", "not", "cols", ":", "raise", "ValueError", "(", "\"should sort by at least one column\"", ")", "if", "len", "(", "cols", ")", "==", "1", "and", "isinstance", "(", "cols", "[", "0", "]", ",", "list", ")", ":", "cols", "=", "cols", "[", "0", "]", "jcols", "=", "[", "_to_java_column", "(", "c", ")", "for", "c", "in", "cols", "]", "ascending", "=", "kwargs", ".", "get", "(", "'ascending'", ",", "True", ")", "if", "isinstance", "(", "ascending", ",", "(", "bool", ",", "int", ")", ")", ":", "if", "not", "ascending", ":", "jcols", "=", "[", "jc", ".", "desc", "(", ")", "for", "jc", "in", "jcols", "]", "elif", "isinstance", "(", "ascending", ",", "list", ")", ":", "jcols", "=", "[", "jc", "if", "asc", "else", "jc", ".", "desc", "(", ")", "for", "asc", ",", "jc", "in", "zip", "(", "ascending", ",", "jcols", ")", "]", "else", ":", "raise", "TypeError", "(", "\"ascending can only be boolean or list, but got %s\"", "%", "type", "(", "ascending", ")", ")", "return", "self", ".", "_jseq", "(", "jcols", ")" ]
Return a JVM Seq of Columns that describes the sort order
[ "Return", "a", "JVM", "Seq", "of", "Columns", "that", "describes", "the", "sort", "order" ]
618d6bff71073c8c93501ab7392c3cc579730f0b
https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/sql/dataframe.py#L1114-L1131
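_sort_cols backs sort/orderBy, and its ascending handling is the part worth illustrating: a single boolean applies to every sort column, while a list is zipped column-by-column. A sketch under the same local-session assumption:

from pyspark.sql import Row, SparkSession

spark = SparkSession.builder.master("local[2]").getOrCreate()
df = spark.createDataFrame([Row(age=2, name="Alice"), Row(age=5, name="Bob")])

# One boolean for all sort columns ...
df.orderBy("age", ascending=False).show()
# ... or a per-column list, exactly as the zip() in the helper above suggests.
df.orderBy(["age", "name"], ascending=[False, True]).show()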
19,123
apache/spark
python/pyspark/sql/dataframe.py
DataFrame.describe
def describe(self, *cols): """Computes basic statistics for numeric and string columns. This include count, mean, stddev, min, and max. If no columns are given, this function computes statistics for all numerical or string columns. .. note:: This function is meant for exploratory data analysis, as we make no guarantee about the backward compatibility of the schema of the resulting DataFrame. >>> df.describe(['age']).show() +-------+------------------+ |summary| age| +-------+------------------+ | count| 2| | mean| 3.5| | stddev|2.1213203435596424| | min| 2| | max| 5| +-------+------------------+ >>> df.describe().show() +-------+------------------+-----+ |summary| age| name| +-------+------------------+-----+ | count| 2| 2| | mean| 3.5| null| | stddev|2.1213203435596424| null| | min| 2|Alice| | max| 5| Bob| +-------+------------------+-----+ Use summary for expanded statistics and control over which statistics to compute. """ if len(cols) == 1 and isinstance(cols[0], list): cols = cols[0] jdf = self._jdf.describe(self._jseq(cols)) return DataFrame(jdf, self.sql_ctx)
python
def describe(self, *cols): """Computes basic statistics for numeric and string columns. This include count, mean, stddev, min, and max. If no columns are given, this function computes statistics for all numerical or string columns. .. note:: This function is meant for exploratory data analysis, as we make no guarantee about the backward compatibility of the schema of the resulting DataFrame. >>> df.describe(['age']).show() +-------+------------------+ |summary| age| +-------+------------------+ | count| 2| | mean| 3.5| | stddev|2.1213203435596424| | min| 2| | max| 5| +-------+------------------+ >>> df.describe().show() +-------+------------------+-----+ |summary| age| name| +-------+------------------+-----+ | count| 2| 2| | mean| 3.5| null| | stddev|2.1213203435596424| null| | min| 2|Alice| | max| 5| Bob| +-------+------------------+-----+ Use summary for expanded statistics and control over which statistics to compute. """ if len(cols) == 1 and isinstance(cols[0], list): cols = cols[0] jdf = self._jdf.describe(self._jseq(cols)) return DataFrame(jdf, self.sql_ctx)
[ "def", "describe", "(", "self", ",", "*", "cols", ")", ":", "if", "len", "(", "cols", ")", "==", "1", "and", "isinstance", "(", "cols", "[", "0", "]", ",", "list", ")", ":", "cols", "=", "cols", "[", "0", "]", "jdf", "=", "self", ".", "_jdf", ".", "describe", "(", "self", ".", "_jseq", "(", "cols", ")", ")", "return", "DataFrame", "(", "jdf", ",", "self", ".", "sql_ctx", ")" ]
Computes basic statistics for numeric and string columns. This include count, mean, stddev, min, and max. If no columns are given, this function computes statistics for all numerical or string columns. .. note:: This function is meant for exploratory data analysis, as we make no guarantee about the backward compatibility of the schema of the resulting DataFrame. >>> df.describe(['age']).show() +-------+------------------+ |summary| age| +-------+------------------+ | count| 2| | mean| 3.5| | stddev|2.1213203435596424| | min| 2| | max| 5| +-------+------------------+ >>> df.describe().show() +-------+------------------+-----+ |summary| age| name| +-------+------------------+-----+ | count| 2| 2| | mean| 3.5| null| | stddev|2.1213203435596424| null| | min| 2|Alice| | max| 5| Bob| +-------+------------------+-----+ Use summary for expanded statistics and control over which statistics to compute.
[ "Computes", "basic", "statistics", "for", "numeric", "and", "string", "columns", "." ]
618d6bff71073c8c93501ab7392c3cc579730f0b
https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/sql/dataframe.py#L1134-L1169
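A self-contained sketch of describe, assuming a local SparkSession and the same invented two-row DataFrame used in the earlier sketches:

from pyspark.sql import Row, SparkSession

spark = SparkSession.builder.master("local[2]").getOrCreate()
df = spark.createDataFrame([Row(age=2, name="Alice"), Row(age=5, name="Bob")])

# With no arguments every numeric and string column is profiled; a list restricts it.
df.describe().show()
df.describe(["age"]).show()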
19,124
apache/spark
python/pyspark/sql/dataframe.py
DataFrame.head
def head(self, n=None): """Returns the first ``n`` rows. .. note:: This method should only be used if the resulting array is expected to be small, as all the data is loaded into the driver's memory. :param n: int, default 1. Number of rows to return. :return: If n is greater than 1, return a list of :class:`Row`. If n is 1, return a single Row. >>> df.head() Row(age=2, name=u'Alice') >>> df.head(1) [Row(age=2, name=u'Alice')] """ if n is None: rs = self.head(1) return rs[0] if rs else None return self.take(n)
python
def head(self, n=None): """Returns the first ``n`` rows. .. note:: This method should only be used if the resulting array is expected to be small, as all the data is loaded into the driver's memory. :param n: int, default 1. Number of rows to return. :return: If n is greater than 1, return a list of :class:`Row`. If n is 1, return a single Row. >>> df.head() Row(age=2, name=u'Alice') >>> df.head(1) [Row(age=2, name=u'Alice')] """ if n is None: rs = self.head(1) return rs[0] if rs else None return self.take(n)
[ "def", "head", "(", "self", ",", "n", "=", "None", ")", ":", "if", "n", "is", "None", ":", "rs", "=", "self", ".", "head", "(", "1", ")", "return", "rs", "[", "0", "]", "if", "rs", "else", "None", "return", "self", ".", "take", "(", "n", ")" ]
Returns the first ``n`` rows. .. note:: This method should only be used if the resulting array is expected to be small, as all the data is loaded into the driver's memory. :param n: int, default 1. Number of rows to return. :return: If n is greater than 1, return a list of :class:`Row`. If n is 1, return a single Row. >>> df.head() Row(age=2, name=u'Alice') >>> df.head(1) [Row(age=2, name=u'Alice')]
[ "Returns", "the", "first", "n", "rows", "." ]
618d6bff71073c8c93501ab7392c3cc579730f0b
https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/sql/dataframe.py#L1230-L1248
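A short sketch of head under the same local-session assumption; the point to notice is the asymmetric return type.

from pyspark.sql import Row, SparkSession

spark = SparkSession.builder.master("local[2]").getOrCreate()
df = spark.createDataFrame([Row(age=2, name="Alice"), Row(age=5, name="Bob")])

# head() returns a single Row (or None for an empty DataFrame);
# head(n) always returns a list, even when n == 1.
print(df.head())
print(df.head(1))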
19,125
apache/spark
python/pyspark/sql/dataframe.py
DataFrame.filter
def filter(self, condition): """Filters rows using the given condition. :func:`where` is an alias for :func:`filter`. :param condition: a :class:`Column` of :class:`types.BooleanType` or a string of SQL expression. >>> df.filter(df.age > 3).collect() [Row(age=5, name=u'Bob')] >>> df.where(df.age == 2).collect() [Row(age=2, name=u'Alice')] >>> df.filter("age > 3").collect() [Row(age=5, name=u'Bob')] >>> df.where("age = 2").collect() [Row(age=2, name=u'Alice')] """ if isinstance(condition, basestring): jdf = self._jdf.filter(condition) elif isinstance(condition, Column): jdf = self._jdf.filter(condition._jc) else: raise TypeError("condition should be string or Column") return DataFrame(jdf, self.sql_ctx)
python
def filter(self, condition): """Filters rows using the given condition. :func:`where` is an alias for :func:`filter`. :param condition: a :class:`Column` of :class:`types.BooleanType` or a string of SQL expression. >>> df.filter(df.age > 3).collect() [Row(age=5, name=u'Bob')] >>> df.where(df.age == 2).collect() [Row(age=2, name=u'Alice')] >>> df.filter("age > 3").collect() [Row(age=5, name=u'Bob')] >>> df.where("age = 2").collect() [Row(age=2, name=u'Alice')] """ if isinstance(condition, basestring): jdf = self._jdf.filter(condition) elif isinstance(condition, Column): jdf = self._jdf.filter(condition._jc) else: raise TypeError("condition should be string or Column") return DataFrame(jdf, self.sql_ctx)
[ "def", "filter", "(", "self", ",", "condition", ")", ":", "if", "isinstance", "(", "condition", ",", "basestring", ")", ":", "jdf", "=", "self", ".", "_jdf", ".", "filter", "(", "condition", ")", "elif", "isinstance", "(", "condition", ",", "Column", ")", ":", "jdf", "=", "self", ".", "_jdf", ".", "filter", "(", "condition", ".", "_jc", ")", "else", ":", "raise", "TypeError", "(", "\"condition should be string or Column\"", ")", "return", "DataFrame", "(", "jdf", ",", "self", ".", "sql_ctx", ")" ]
Filters rows using the given condition. :func:`where` is an alias for :func:`filter`. :param condition: a :class:`Column` of :class:`types.BooleanType` or a string of SQL expression. >>> df.filter(df.age > 3).collect() [Row(age=5, name=u'Bob')] >>> df.where(df.age == 2).collect() [Row(age=2, name=u'Alice')] >>> df.filter("age > 3").collect() [Row(age=5, name=u'Bob')] >>> df.where("age = 2").collect() [Row(age=2, name=u'Alice')]
[ "Filters", "rows", "using", "the", "given", "condition", "." ]
618d6bff71073c8c93501ab7392c3cc579730f0b
https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/sql/dataframe.py#L1335-L1359
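A sketch of filter/where, again assuming a local SparkSession and invented rows; it shows the two interchangeable condition forms from the docstring.

from pyspark.sql import Row, SparkSession

spark = SparkSession.builder.master("local[2]").getOrCreate()
df = spark.createDataFrame([Row(age=2, name="Alice"), Row(age=5, name="Bob")])

# A Column expression and a SQL string are equivalent; where() is an alias of filter().
print(df.filter(df.age > 3).collect())   # [Row(age=5, name='Bob')]
print(df.where("age = 2").collect())     # [Row(age=2, name='Alice')]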
19,126
apache/spark
python/pyspark/sql/dataframe.py
DataFrame.approxQuantile
def approxQuantile(self, col, probabilities, relativeError): """ Calculates the approximate quantiles of numerical columns of a DataFrame. The result of this algorithm has the following deterministic bound: If the DataFrame has N elements and if we request the quantile at probability `p` up to error `err`, then the algorithm will return a sample `x` from the DataFrame so that the *exact* rank of `x` is close to (p * N). More precisely, floor((p - err) * N) <= rank(x) <= ceil((p + err) * N). This method implements a variation of the Greenwald-Khanna algorithm (with some speed optimizations). The algorithm was first present in [[https://doi.org/10.1145/375663.375670 Space-efficient Online Computation of Quantile Summaries]] by Greenwald and Khanna. Note that null values will be ignored in numerical columns before calculation. For columns only containing null values, an empty list is returned. :param col: str, list. Can be a single column name, or a list of names for multiple columns. :param probabilities: a list of quantile probabilities Each number must belong to [0, 1]. For example 0 is the minimum, 0.5 is the median, 1 is the maximum. :param relativeError: The relative target precision to achieve (>= 0). If set to zero, the exact quantiles are computed, which could be very expensive. Note that values greater than 1 are accepted but give the same result as 1. :return: the approximate quantiles at the given probabilities. If the input `col` is a string, the output is a list of floats. If the input `col` is a list or tuple of strings, the output is also a list, but each element in it is a list of floats, i.e., the output is a list of list of floats. .. versionchanged:: 2.2 Added support for multiple columns. """ if not isinstance(col, (basestring, list, tuple)): raise ValueError("col should be a string, list or tuple, but got %r" % type(col)) isStr = isinstance(col, basestring) if isinstance(col, tuple): col = list(col) elif isStr: col = [col] for c in col: if not isinstance(c, basestring): raise ValueError("columns should be strings, but got %r" % type(c)) col = _to_list(self._sc, col) if not isinstance(probabilities, (list, tuple)): raise ValueError("probabilities should be a list or tuple") if isinstance(probabilities, tuple): probabilities = list(probabilities) for p in probabilities: if not isinstance(p, (float, int, long)) or p < 0 or p > 1: raise ValueError("probabilities should be numerical (float, int, long) in [0,1].") probabilities = _to_list(self._sc, probabilities) if not isinstance(relativeError, (float, int, long)) or relativeError < 0: raise ValueError("relativeError should be numerical (float, int, long) >= 0.") relativeError = float(relativeError) jaq = self._jdf.stat().approxQuantile(col, probabilities, relativeError) jaq_list = [list(j) for j in jaq] return jaq_list[0] if isStr else jaq_list
python
def approxQuantile(self, col, probabilities, relativeError): """ Calculates the approximate quantiles of numerical columns of a DataFrame. The result of this algorithm has the following deterministic bound: If the DataFrame has N elements and if we request the quantile at probability `p` up to error `err`, then the algorithm will return a sample `x` from the DataFrame so that the *exact* rank of `x` is close to (p * N). More precisely, floor((p - err) * N) <= rank(x) <= ceil((p + err) * N). This method implements a variation of the Greenwald-Khanna algorithm (with some speed optimizations). The algorithm was first present in [[https://doi.org/10.1145/375663.375670 Space-efficient Online Computation of Quantile Summaries]] by Greenwald and Khanna. Note that null values will be ignored in numerical columns before calculation. For columns only containing null values, an empty list is returned. :param col: str, list. Can be a single column name, or a list of names for multiple columns. :param probabilities: a list of quantile probabilities Each number must belong to [0, 1]. For example 0 is the minimum, 0.5 is the median, 1 is the maximum. :param relativeError: The relative target precision to achieve (>= 0). If set to zero, the exact quantiles are computed, which could be very expensive. Note that values greater than 1 are accepted but give the same result as 1. :return: the approximate quantiles at the given probabilities. If the input `col` is a string, the output is a list of floats. If the input `col` is a list or tuple of strings, the output is also a list, but each element in it is a list of floats, i.e., the output is a list of list of floats. .. versionchanged:: 2.2 Added support for multiple columns. """ if not isinstance(col, (basestring, list, tuple)): raise ValueError("col should be a string, list or tuple, but got %r" % type(col)) isStr = isinstance(col, basestring) if isinstance(col, tuple): col = list(col) elif isStr: col = [col] for c in col: if not isinstance(c, basestring): raise ValueError("columns should be strings, but got %r" % type(c)) col = _to_list(self._sc, col) if not isinstance(probabilities, (list, tuple)): raise ValueError("probabilities should be a list or tuple") if isinstance(probabilities, tuple): probabilities = list(probabilities) for p in probabilities: if not isinstance(p, (float, int, long)) or p < 0 or p > 1: raise ValueError("probabilities should be numerical (float, int, long) in [0,1].") probabilities = _to_list(self._sc, probabilities) if not isinstance(relativeError, (float, int, long)) or relativeError < 0: raise ValueError("relativeError should be numerical (float, int, long) >= 0.") relativeError = float(relativeError) jaq = self._jdf.stat().approxQuantile(col, probabilities, relativeError) jaq_list = [list(j) for j in jaq] return jaq_list[0] if isStr else jaq_list
[ "def", "approxQuantile", "(", "self", ",", "col", ",", "probabilities", ",", "relativeError", ")", ":", "if", "not", "isinstance", "(", "col", ",", "(", "basestring", ",", "list", ",", "tuple", ")", ")", ":", "raise", "ValueError", "(", "\"col should be a string, list or tuple, but got %r\"", "%", "type", "(", "col", ")", ")", "isStr", "=", "isinstance", "(", "col", ",", "basestring", ")", "if", "isinstance", "(", "col", ",", "tuple", ")", ":", "col", "=", "list", "(", "col", ")", "elif", "isStr", ":", "col", "=", "[", "col", "]", "for", "c", "in", "col", ":", "if", "not", "isinstance", "(", "c", ",", "basestring", ")", ":", "raise", "ValueError", "(", "\"columns should be strings, but got %r\"", "%", "type", "(", "c", ")", ")", "col", "=", "_to_list", "(", "self", ".", "_sc", ",", "col", ")", "if", "not", "isinstance", "(", "probabilities", ",", "(", "list", ",", "tuple", ")", ")", ":", "raise", "ValueError", "(", "\"probabilities should be a list or tuple\"", ")", "if", "isinstance", "(", "probabilities", ",", "tuple", ")", ":", "probabilities", "=", "list", "(", "probabilities", ")", "for", "p", "in", "probabilities", ":", "if", "not", "isinstance", "(", "p", ",", "(", "float", ",", "int", ",", "long", ")", ")", "or", "p", "<", "0", "or", "p", ">", "1", ":", "raise", "ValueError", "(", "\"probabilities should be numerical (float, int, long) in [0,1].\"", ")", "probabilities", "=", "_to_list", "(", "self", ".", "_sc", ",", "probabilities", ")", "if", "not", "isinstance", "(", "relativeError", ",", "(", "float", ",", "int", ",", "long", ")", ")", "or", "relativeError", "<", "0", ":", "raise", "ValueError", "(", "\"relativeError should be numerical (float, int, long) >= 0.\"", ")", "relativeError", "=", "float", "(", "relativeError", ")", "jaq", "=", "self", ".", "_jdf", ".", "stat", "(", ")", ".", "approxQuantile", "(", "col", ",", "probabilities", ",", "relativeError", ")", "jaq_list", "=", "[", "list", "(", "j", ")", "for", "j", "in", "jaq", "]", "return", "jaq_list", "[", "0", "]", "if", "isStr", "else", "jaq_list" ]
Calculates the approximate quantiles of numerical columns of a DataFrame. The result of this algorithm has the following deterministic bound: If the DataFrame has N elements and if we request the quantile at probability `p` up to error `err`, then the algorithm will return a sample `x` from the DataFrame so that the *exact* rank of `x` is close to (p * N). More precisely, floor((p - err) * N) <= rank(x) <= ceil((p + err) * N). This method implements a variation of the Greenwald-Khanna algorithm (with some speed optimizations). The algorithm was first present in [[https://doi.org/10.1145/375663.375670 Space-efficient Online Computation of Quantile Summaries]] by Greenwald and Khanna. Note that null values will be ignored in numerical columns before calculation. For columns only containing null values, an empty list is returned. :param col: str, list. Can be a single column name, or a list of names for multiple columns. :param probabilities: a list of quantile probabilities Each number must belong to [0, 1]. For example 0 is the minimum, 0.5 is the median, 1 is the maximum. :param relativeError: The relative target precision to achieve (>= 0). If set to zero, the exact quantiles are computed, which could be very expensive. Note that values greater than 1 are accepted but give the same result as 1. :return: the approximate quantiles at the given probabilities. If the input `col` is a string, the output is a list of floats. If the input `col` is a list or tuple of strings, the output is also a list, but each element in it is a list of floats, i.e., the output is a list of list of floats. .. versionchanged:: 2.2 Added support for multiple columns.
[ "Calculates", "the", "approximate", "quantiles", "of", "numerical", "columns", "of", "a", "DataFrame", "." ]
618d6bff71073c8c93501ab7392c3cc579730f0b
https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/sql/dataframe.py#L1808-L1879
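A sketch of approxQuantile, assuming a local SparkSession; the 1000-row range and the probabilities are illustrative. Note the shape of the result: a flat list for a single column name, a list of lists when a list of columns is passed.

from pyspark.sql import SparkSession

spark = SparkSession.builder.master("local[2]").getOrCreate()
df = spark.range(0, 1000)

# Approximate quartiles of the 'id' column with a 1% relative error bound.
print(df.approxQuantile("id", [0.25, 0.5, 0.75], 0.01))
# Exact median (relativeError=0.0), requested for a list of columns.
print(df.approxQuantile(["id"], [0.5], 0.0))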
19,127
apache/spark
python/pyspark/sql/dataframe.py
DataFrame._collectAsArrow
def _collectAsArrow(self): """ Returns all records as a list of ArrowRecordBatches, pyarrow must be installed and available on driver and worker Python environments. .. note:: Experimental. """ with SCCallSiteSync(self._sc) as css: sock_info = self._jdf.collectAsArrowToPython() # Collect list of un-ordered batches where last element is a list of correct order indices results = list(_load_from_socket(sock_info, ArrowCollectSerializer())) batches = results[:-1] batch_order = results[-1] # Re-order the batch list using the correct order return [batches[i] for i in batch_order]
python
def _collectAsArrow(self): """ Returns all records as a list of ArrowRecordBatches, pyarrow must be installed and available on driver and worker Python environments. .. note:: Experimental. """ with SCCallSiteSync(self._sc) as css: sock_info = self._jdf.collectAsArrowToPython() # Collect list of un-ordered batches where last element is a list of correct order indices results = list(_load_from_socket(sock_info, ArrowCollectSerializer())) batches = results[:-1] batch_order = results[-1] # Re-order the batch list using the correct order return [batches[i] for i in batch_order]
[ "def", "_collectAsArrow", "(", "self", ")", ":", "with", "SCCallSiteSync", "(", "self", ".", "_sc", ")", "as", "css", ":", "sock_info", "=", "self", ".", "_jdf", ".", "collectAsArrowToPython", "(", ")", "# Collect list of un-ordered batches where last element is a list of correct order indices", "results", "=", "list", "(", "_load_from_socket", "(", "sock_info", ",", "ArrowCollectSerializer", "(", ")", ")", ")", "batches", "=", "results", "[", ":", "-", "1", "]", "batch_order", "=", "results", "[", "-", "1", "]", "# Re-order the batch list using the correct order", "return", "[", "batches", "[", "i", "]", "for", "i", "in", "batch_order", "]" ]
Returns all records as a list of ArrowRecordBatches; pyarrow must be installed and available on driver and worker Python environments. .. note:: Experimental.
[ "Returns", "all", "records", "as", "a", "list", "of", "ArrowRecordBatches", "pyarrow", "must", "be", "installed", "and", "available", "on", "driver", "and", "worker", "Python", "environments", "." ]
618d6bff71073c8c93501ab7392c3cc579730f0b
https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/sql/dataframe.py#L2194-L2210
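_collectAsArrow is private plumbing behind the Arrow-accelerated toPandas path, so there is no public call to demonstrate directly; the hedged sketch below shows the usual way it gets exercised, assuming pyarrow and pandas are installed locally. The conf key shown is the pre-3.0 name (later releases use spark.sql.execution.arrow.pyspark.enabled).

from pyspark.sql import SparkSession

spark = SparkSession.builder.master("local[2]").getOrCreate()
# Opt in to Arrow-based transfer; toPandas then collects ArrowRecordBatches under the hood.
spark.conf.set("spark.sql.execution.arrow.enabled", "true")

pdf = spark.range(0, 10).toPandas()
print(type(pdf), len(pdf))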
19,128
apache/spark
sql/gen-sql-markdown.py
_list_function_infos
def _list_function_infos(jvm): """ Returns a list of function information via JVM. Sorts wrapped expression infos by name and returns them. """ jinfos = jvm.org.apache.spark.sql.api.python.PythonSQLUtils.listBuiltinFunctionInfos() infos = [] for jinfo in jinfos: name = jinfo.getName() usage = jinfo.getUsage() usage = usage.replace("_FUNC_", name) if usage is not None else usage infos.append(ExpressionInfo( className=jinfo.getClassName(), name=name, usage=usage, arguments=jinfo.getArguments().replace("_FUNC_", name), examples=jinfo.getExamples().replace("_FUNC_", name), note=jinfo.getNote(), since=jinfo.getSince(), deprecated=jinfo.getDeprecated())) return sorted(infos, key=lambda i: i.name)
python
def _list_function_infos(jvm): """ Returns a list of function information via JVM. Sorts wrapped expression infos by name and returns them. """ jinfos = jvm.org.apache.spark.sql.api.python.PythonSQLUtils.listBuiltinFunctionInfos() infos = [] for jinfo in jinfos: name = jinfo.getName() usage = jinfo.getUsage() usage = usage.replace("_FUNC_", name) if usage is not None else usage infos.append(ExpressionInfo( className=jinfo.getClassName(), name=name, usage=usage, arguments=jinfo.getArguments().replace("_FUNC_", name), examples=jinfo.getExamples().replace("_FUNC_", name), note=jinfo.getNote(), since=jinfo.getSince(), deprecated=jinfo.getDeprecated())) return sorted(infos, key=lambda i: i.name)
[ "def", "_list_function_infos", "(", "jvm", ")", ":", "jinfos", "=", "jvm", ".", "org", ".", "apache", ".", "spark", ".", "sql", ".", "api", ".", "python", ".", "PythonSQLUtils", ".", "listBuiltinFunctionInfos", "(", ")", "infos", "=", "[", "]", "for", "jinfo", "in", "jinfos", ":", "name", "=", "jinfo", ".", "getName", "(", ")", "usage", "=", "jinfo", ".", "getUsage", "(", ")", "usage", "=", "usage", ".", "replace", "(", "\"_FUNC_\"", ",", "name", ")", "if", "usage", "is", "not", "None", "else", "usage", "infos", ".", "append", "(", "ExpressionInfo", "(", "className", "=", "jinfo", ".", "getClassName", "(", ")", ",", "name", "=", "name", ",", "usage", "=", "usage", ",", "arguments", "=", "jinfo", ".", "getArguments", "(", ")", ".", "replace", "(", "\"_FUNC_\"", ",", "name", ")", ",", "examples", "=", "jinfo", ".", "getExamples", "(", ")", ".", "replace", "(", "\"_FUNC_\"", ",", "name", ")", ",", "note", "=", "jinfo", ".", "getNote", "(", ")", ",", "since", "=", "jinfo", ".", "getSince", "(", ")", ",", "deprecated", "=", "jinfo", ".", "getDeprecated", "(", ")", ")", ")", "return", "sorted", "(", "infos", ",", "key", "=", "lambda", "i", ":", "i", ".", "name", ")" ]
Returns a list of function information via JVM. Sorts wrapped expression infos by name and returns them.
[ "Returns", "a", "list", "of", "function", "information", "via", "JVM", ".", "Sorts", "wrapped", "expression", "infos", "by", "name", "and", "returns", "them", "." ]
618d6bff71073c8c93501ab7392c3cc579730f0b
https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/sql/gen-sql-markdown.py#L26-L47
19,129
apache/spark
sql/gen-sql-markdown.py
_make_pretty_usage
def _make_pretty_usage(usage): """ Makes the usage description pretty and returns a formatted string if `usage` is not an empty string. Otherwise, returns None. """ if usage is not None and usage.strip() != "": usage = "\n".join(map(lambda u: u.strip(), usage.split("\n"))) return "%s\n\n" % usage
python
def _make_pretty_usage(usage): """ Makes the usage description pretty and returns a formatted string if `usage` is not an empty string. Otherwise, returns None. """ if usage is not None and usage.strip() != "": usage = "\n".join(map(lambda u: u.strip(), usage.split("\n"))) return "%s\n\n" % usage
[ "def", "_make_pretty_usage", "(", "usage", ")", ":", "if", "usage", "is", "not", "None", "and", "usage", ".", "strip", "(", ")", "!=", "\"\"", ":", "usage", "=", "\"\\n\"", ".", "join", "(", "map", "(", "lambda", "u", ":", "u", ".", "strip", "(", ")", ",", "usage", ".", "split", "(", "\"\\n\"", ")", ")", ")", "return", "\"%s\\n\\n\"", "%", "usage" ]
Makes the usage description pretty and returns a formatted string if `usage` is not an empty string. Otherwise, returns None.
[ "Makes", "the", "usage", "description", "pretty", "and", "returns", "a", "formatted", "string", "if", "usage", "is", "not", "an", "empty", "string", ".", "Otherwise", "returns", "None", "." ]
618d6bff71073c8c93501ab7392c3cc579730f0b
https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/sql/gen-sql-markdown.py#L50-L58
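The gen-sql-markdown helpers are plain string transforms, so they can be tried without a JVM; the snippet below restates _make_pretty_usage so it runs standalone and feeds it a made-up usage string (the sample text is not from the record).

def _make_pretty_usage(usage):
    # Strip each line of a non-empty usage string and append a trailing blank line.
    if usage is not None and usage.strip() != "":
        usage = "\n".join(map(lambda u: u.strip(), usage.split("\n")))
        return "%s\n\n" % usage

sample = "\n    abs(expr) - Returns the absolute value of `expr`.\n  "
print(repr(_make_pretty_usage(sample)))   # per-line indentation is gone, extra newlines appended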
19,130
apache/spark
sql/gen-sql-markdown.py
_make_pretty_arguments
def _make_pretty_arguments(arguments): """ Makes the arguments description pretty and returns a formatted string if `arguments` starts with the argument prefix. Otherwise, returns None. Expected input: Arguments: * arg0 - ... ... * arg0 - ... ... Expected output: **Arguments:** * arg0 - ... ... * arg0 - ... ... """ if arguments.startswith("\n Arguments:"): arguments = "\n".join(map(lambda u: u[6:], arguments.strip().split("\n")[1:])) return "**Arguments:**\n\n%s\n\n" % arguments
python
def _make_pretty_arguments(arguments): """ Makes the arguments description pretty and returns a formatted string if `arguments` starts with the argument prefix. Otherwise, returns None. Expected input: Arguments: * arg0 - ... ... * arg0 - ... ... Expected output: **Arguments:** * arg0 - ... ... * arg0 - ... ... """ if arguments.startswith("\n Arguments:"): arguments = "\n".join(map(lambda u: u[6:], arguments.strip().split("\n")[1:])) return "**Arguments:**\n\n%s\n\n" % arguments
[ "def", "_make_pretty_arguments", "(", "arguments", ")", ":", "if", "arguments", ".", "startswith", "(", "\"\\n Arguments:\"", ")", ":", "arguments", "=", "\"\\n\"", ".", "join", "(", "map", "(", "lambda", "u", ":", "u", "[", "6", ":", "]", ",", "arguments", ".", "strip", "(", ")", ".", "split", "(", "\"\\n\"", ")", "[", "1", ":", "]", ")", ")", "return", "\"**Arguments:**\\n\\n%s\\n\\n\"", "%", "arguments" ]
Makes the arguments description pretty and returns a formatted string if `arguments` starts with the argument prefix. Otherwise, returns None. Expected input: Arguments: * arg0 - ... ... * arg0 - ... ... Expected output: **Arguments:** * arg0 - ... ... * arg0 - ... ...
[ "Makes", "the", "arguments", "description", "pretty", "and", "returns", "a", "formatted", "string", "if", "arguments", "starts", "with", "the", "argument", "prefix", ".", "Otherwise", "returns", "None", "." ]
618d6bff71073c8c93501ab7392c3cc579730f0b
https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/sql/gen-sql-markdown.py#L61-L86
19,131
apache/spark
sql/gen-sql-markdown.py
_make_pretty_examples
def _make_pretty_examples(examples): """ Makes the examples description pretty and returns a formatted string if `examples` starts with the example prefix. Otherwise, returns None. Expected input: Examples: > SELECT ...; ... > SELECT ...; ... Expected output: **Examples:** ``` > SELECT ...; ... > SELECT ...; ... ``` """ if examples.startswith("\n Examples:"): examples = "\n".join(map(lambda u: u[6:], examples.strip().split("\n")[1:])) return "**Examples:**\n\n```\n%s\n```\n\n" % examples
python
def _make_pretty_examples(examples): """ Makes the examples description pretty and returns a formatted string if `examples` starts with the example prefix. Otherwise, returns None. Expected input: Examples: > SELECT ...; ... > SELECT ...; ... Expected output: **Examples:** ``` > SELECT ...; ... > SELECT ...; ... ``` """ if examples.startswith("\n Examples:"): examples = "\n".join(map(lambda u: u[6:], examples.strip().split("\n")[1:])) return "**Examples:**\n\n```\n%s\n```\n\n" % examples
[ "def", "_make_pretty_examples", "(", "examples", ")", ":", "if", "examples", ".", "startswith", "(", "\"\\n Examples:\"", ")", ":", "examples", "=", "\"\\n\"", ".", "join", "(", "map", "(", "lambda", "u", ":", "u", "[", "6", ":", "]", ",", "examples", ".", "strip", "(", ")", ".", "split", "(", "\"\\n\"", ")", "[", "1", ":", "]", ")", ")", "return", "\"**Examples:**\\n\\n```\\n%s\\n```\\n\\n\"", "%", "examples" ]
Makes the examples description pretty and returns a formatted string if `examples` starts with the example prefix. Otherwise, returns None. Expected input: Examples: > SELECT ...; ... > SELECT ...; ... Expected output: **Examples:** ``` > SELECT ...; ... > SELECT ...; ... ```
[ "Makes", "the", "examples", "description", "pretty", "and", "returns", "a", "formatted", "string", "if", "examples", "starts", "with", "the", "example", "prefix", ".", "Otherwise", "returns", "None", "." ]
618d6bff71073c8c93501ab7392c3cc579730f0b
https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/sql/gen-sql-markdown.py#L89-L116
19,132
apache/spark
sql/gen-sql-markdown.py
_make_pretty_note
def _make_pretty_note(note): """ Makes the note description pretty and returns a formatted string if `note` is not an empty string. Otherwise, returns None. Expected input: ... Expected output: **Note:** ... """ if note != "": note = "\n".join(map(lambda n: n[4:], note.split("\n"))) return "**Note:**\n%s\n" % note
python
def _make_pretty_note(note): """ Makes the note description pretty and returns a formatted string if `note` is not an empty string. Otherwise, returns None. Expected input: ... Expected output: **Note:** ... """ if note != "": note = "\n".join(map(lambda n: n[4:], note.split("\n"))) return "**Note:**\n%s\n" % note
[ "def", "_make_pretty_note", "(", "note", ")", ":", "if", "note", "!=", "\"\"", ":", "note", "=", "\"\\n\"", ".", "join", "(", "map", "(", "lambda", "n", ":", "n", "[", "4", ":", "]", ",", "note", ".", "split", "(", "\"\\n\"", ")", ")", ")", "return", "\"**Note:**\\n%s\\n\"", "%", "note" ]
Makes the note description pretty and returns a formatted string if `note` is not an empty string. Otherwise, returns None. Expected input: ... Expected output: **Note:** ...
[ "Makes", "the", "note", "description", "pretty", "and", "returns", "a", "formatted", "string", "if", "note", "is", "not", "an", "empty", "string", ".", "Otherwise", "returns", "None", "." ]
618d6bff71073c8c93501ab7392c3cc579730f0b
https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/sql/gen-sql-markdown.py#L119-L137
19,133
apache/spark
sql/gen-sql-markdown.py
_make_pretty_deprecated
def _make_pretty_deprecated(deprecated): """ Makes the deprecated description pretty and returns a formatted string if `deprecated` is not an empty string. Otherwise, returns None. Expected input: ... Expected output: **Deprecated:** ... """ if deprecated != "": deprecated = "\n".join(map(lambda n: n[4:], deprecated.split("\n"))) return "**Deprecated:**\n%s\n" % deprecated
python
def _make_pretty_deprecated(deprecated): """ Makes the deprecated description pretty and returns a formatted string if `deprecated` is not an empty string. Otherwise, returns None. Expected input: ... Expected output: **Deprecated:** ... """ if deprecated != "": deprecated = "\n".join(map(lambda n: n[4:], deprecated.split("\n"))) return "**Deprecated:**\n%s\n" % deprecated
[ "def", "_make_pretty_deprecated", "(", "deprecated", ")", ":", "if", "deprecated", "!=", "\"\"", ":", "deprecated", "=", "\"\\n\"", ".", "join", "(", "map", "(", "lambda", "n", ":", "n", "[", "4", ":", "]", ",", "deprecated", ".", "split", "(", "\"\\n\"", ")", ")", ")", "return", "\"**Deprecated:**\\n%s\\n\"", "%", "deprecated" ]
Makes the deprecated description pretty and returns a formatted string if `deprecated` is not an empty string. Otherwise, returns None. Expected input: ... Expected output: **Deprecated:** ...
[ "Makes", "the", "deprecated", "description", "pretty", "and", "returns", "a", "formatted", "string", "if", "deprecated", "is", "not", "an", "empty", "string", ".", "Otherwise", "returns", "None", "." ]
618d6bff71073c8c93501ab7392c3cc579730f0b
https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/sql/gen-sql-markdown.py#L140-L158
19,134
apache/spark
sql/gen-sql-markdown.py
generate_sql_markdown
def generate_sql_markdown(jvm, path): """ Generates a markdown file after listing the function information. The output file is created in `path`. Expected output: ### NAME USAGE **Arguments:** ARGUMENTS **Examples:** ``` EXAMPLES ``` **Note:** NOTE **Since:** SINCE **Deprecated:** DEPRECATED <br/> """ with open(path, 'w') as mdfile: for info in _list_function_infos(jvm): name = info.name usage = _make_pretty_usage(info.usage) arguments = _make_pretty_arguments(info.arguments) examples = _make_pretty_examples(info.examples) note = _make_pretty_note(info.note) since = info.since deprecated = _make_pretty_deprecated(info.deprecated) mdfile.write("### %s\n\n" % name) if usage is not None: mdfile.write("%s\n\n" % usage.strip()) if arguments is not None: mdfile.write(arguments) if examples is not None: mdfile.write(examples) if note is not None: mdfile.write(note) if since is not None and since != "": mdfile.write("**Since:** %s\n\n" % since.strip()) if deprecated is not None: mdfile.write(deprecated) mdfile.write("<br/>\n\n")
python
def generate_sql_markdown(jvm, path): """ Generates a markdown file after listing the function information. The output file is created in `path`. Expected output: ### NAME USAGE **Arguments:** ARGUMENTS **Examples:** ``` EXAMPLES ``` **Note:** NOTE **Since:** SINCE **Deprecated:** DEPRECATED <br/> """ with open(path, 'w') as mdfile: for info in _list_function_infos(jvm): name = info.name usage = _make_pretty_usage(info.usage) arguments = _make_pretty_arguments(info.arguments) examples = _make_pretty_examples(info.examples) note = _make_pretty_note(info.note) since = info.since deprecated = _make_pretty_deprecated(info.deprecated) mdfile.write("### %s\n\n" % name) if usage is not None: mdfile.write("%s\n\n" % usage.strip()) if arguments is not None: mdfile.write(arguments) if examples is not None: mdfile.write(examples) if note is not None: mdfile.write(note) if since is not None and since != "": mdfile.write("**Since:** %s\n\n" % since.strip()) if deprecated is not None: mdfile.write(deprecated) mdfile.write("<br/>\n\n")
[ "def", "generate_sql_markdown", "(", "jvm", ",", "path", ")", ":", "with", "open", "(", "path", ",", "'w'", ")", "as", "mdfile", ":", "for", "info", "in", "_list_function_infos", "(", "jvm", ")", ":", "name", "=", "info", ".", "name", "usage", "=", "_make_pretty_usage", "(", "info", ".", "usage", ")", "arguments", "=", "_make_pretty_arguments", "(", "info", ".", "arguments", ")", "examples", "=", "_make_pretty_examples", "(", "info", ".", "examples", ")", "note", "=", "_make_pretty_note", "(", "info", ".", "note", ")", "since", "=", "info", ".", "since", "deprecated", "=", "_make_pretty_deprecated", "(", "info", ".", "deprecated", ")", "mdfile", ".", "write", "(", "\"### %s\\n\\n\"", "%", "name", ")", "if", "usage", "is", "not", "None", ":", "mdfile", ".", "write", "(", "\"%s\\n\\n\"", "%", "usage", ".", "strip", "(", ")", ")", "if", "arguments", "is", "not", "None", ":", "mdfile", ".", "write", "(", "arguments", ")", "if", "examples", "is", "not", "None", ":", "mdfile", ".", "write", "(", "examples", ")", "if", "note", "is", "not", "None", ":", "mdfile", ".", "write", "(", "note", ")", "if", "since", "is", "not", "None", "and", "since", "!=", "\"\"", ":", "mdfile", ".", "write", "(", "\"**Since:** %s\\n\\n\"", "%", "since", ".", "strip", "(", ")", ")", "if", "deprecated", "is", "not", "None", ":", "mdfile", ".", "write", "(", "deprecated", ")", "mdfile", ".", "write", "(", "\"<br/>\\n\\n\"", ")" ]
Generates a markdown file after listing the function information. The output file is created in `path`. Expected output: ### NAME USAGE **Arguments:** ARGUMENTS **Examples:** ``` EXAMPLES ``` **Note:** NOTE **Since:** SINCE **Deprecated:** DEPRECATED <br/>
[ "Generates", "a", "markdown", "file", "after", "listing", "the", "function", "information", ".", "The", "output", "file", "is", "created", "in", "path", "." ]
618d6bff71073c8c93501ab7392c3cc579730f0b
https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/sql/gen-sql-markdown.py#L161-L218
19,135
apache/spark
python/pyspark/mllib/classification.py
LogisticRegressionWithLBFGS.train
def train(cls, data, iterations=100, initialWeights=None, regParam=0.0, regType="l2", intercept=False, corrections=10, tolerance=1e-6, validateData=True, numClasses=2): """ Train a logistic regression model on the given data. :param data: The training data, an RDD of LabeledPoint. :param iterations: The number of iterations. (default: 100) :param initialWeights: The initial weights. (default: None) :param regParam: The regularizer parameter. (default: 0.0) :param regType: The type of regularizer used for training our model. Supported values: - "l1" for using L1 regularization - "l2" for using L2 regularization (default) - None for no regularization :param intercept: Boolean parameter which indicates the use or not of the augmented representation for training data (i.e., whether bias features are activated or not). (default: False) :param corrections: The number of corrections used in the LBFGS update. If a known updater is used for binary classification, it calls the ml implementation and this parameter will have no effect. (default: 10) :param tolerance: The convergence tolerance of iterations for L-BFGS. (default: 1e-6) :param validateData: Boolean parameter which indicates if the algorithm should validate data before training. (default: True) :param numClasses: The number of classes (i.e., outcomes) a label can take in Multinomial Logistic Regression. (default: 2) >>> data = [ ... LabeledPoint(0.0, [0.0, 1.0]), ... LabeledPoint(1.0, [1.0, 0.0]), ... ] >>> lrm = LogisticRegressionWithLBFGS.train(sc.parallelize(data), iterations=10) >>> lrm.predict([1.0, 0.0]) 1 >>> lrm.predict([0.0, 1.0]) 0 """ def train(rdd, i): return callMLlibFunc("trainLogisticRegressionModelWithLBFGS", rdd, int(iterations), i, float(regParam), regType, bool(intercept), int(corrections), float(tolerance), bool(validateData), int(numClasses)) if initialWeights is None: if numClasses == 2: initialWeights = [0.0] * len(data.first().features) else: if intercept: initialWeights = [0.0] * (len(data.first().features) + 1) * (numClasses - 1) else: initialWeights = [0.0] * len(data.first().features) * (numClasses - 1) return _regression_train_wrapper(train, LogisticRegressionModel, data, initialWeights)
python
def train(cls, data, iterations=100, initialWeights=None, regParam=0.0, regType="l2", intercept=False, corrections=10, tolerance=1e-6, validateData=True, numClasses=2): """ Train a logistic regression model on the given data. :param data: The training data, an RDD of LabeledPoint. :param iterations: The number of iterations. (default: 100) :param initialWeights: The initial weights. (default: None) :param regParam: The regularizer parameter. (default: 0.0) :param regType: The type of regularizer used for training our model. Supported values: - "l1" for using L1 regularization - "l2" for using L2 regularization (default) - None for no regularization :param intercept: Boolean parameter which indicates the use or not of the augmented representation for training data (i.e., whether bias features are activated or not). (default: False) :param corrections: The number of corrections used in the LBFGS update. If a known updater is used for binary classification, it calls the ml implementation and this parameter will have no effect. (default: 10) :param tolerance: The convergence tolerance of iterations for L-BFGS. (default: 1e-6) :param validateData: Boolean parameter which indicates if the algorithm should validate data before training. (default: True) :param numClasses: The number of classes (i.e., outcomes) a label can take in Multinomial Logistic Regression. (default: 2) >>> data = [ ... LabeledPoint(0.0, [0.0, 1.0]), ... LabeledPoint(1.0, [1.0, 0.0]), ... ] >>> lrm = LogisticRegressionWithLBFGS.train(sc.parallelize(data), iterations=10) >>> lrm.predict([1.0, 0.0]) 1 >>> lrm.predict([0.0, 1.0]) 0 """ def train(rdd, i): return callMLlibFunc("trainLogisticRegressionModelWithLBFGS", rdd, int(iterations), i, float(regParam), regType, bool(intercept), int(corrections), float(tolerance), bool(validateData), int(numClasses)) if initialWeights is None: if numClasses == 2: initialWeights = [0.0] * len(data.first().features) else: if intercept: initialWeights = [0.0] * (len(data.first().features) + 1) * (numClasses - 1) else: initialWeights = [0.0] * len(data.first().features) * (numClasses - 1) return _regression_train_wrapper(train, LogisticRegressionModel, data, initialWeights)
[ "def", "train", "(", "cls", ",", "data", ",", "iterations", "=", "100", ",", "initialWeights", "=", "None", ",", "regParam", "=", "0.0", ",", "regType", "=", "\"l2\"", ",", "intercept", "=", "False", ",", "corrections", "=", "10", ",", "tolerance", "=", "1e-6", ",", "validateData", "=", "True", ",", "numClasses", "=", "2", ")", ":", "def", "train", "(", "rdd", ",", "i", ")", ":", "return", "callMLlibFunc", "(", "\"trainLogisticRegressionModelWithLBFGS\"", ",", "rdd", ",", "int", "(", "iterations", ")", ",", "i", ",", "float", "(", "regParam", ")", ",", "regType", ",", "bool", "(", "intercept", ")", ",", "int", "(", "corrections", ")", ",", "float", "(", "tolerance", ")", ",", "bool", "(", "validateData", ")", ",", "int", "(", "numClasses", ")", ")", "if", "initialWeights", "is", "None", ":", "if", "numClasses", "==", "2", ":", "initialWeights", "=", "[", "0.0", "]", "*", "len", "(", "data", ".", "first", "(", ")", ".", "features", ")", "else", ":", "if", "intercept", ":", "initialWeights", "=", "[", "0.0", "]", "*", "(", "len", "(", "data", ".", "first", "(", ")", ".", "features", ")", "+", "1", ")", "*", "(", "numClasses", "-", "1", ")", "else", ":", "initialWeights", "=", "[", "0.0", "]", "*", "len", "(", "data", ".", "first", "(", ")", ".", "features", ")", "*", "(", "numClasses", "-", "1", ")", "return", "_regression_train_wrapper", "(", "train", ",", "LogisticRegressionModel", ",", "data", ",", "initialWeights", ")" ]
Train a logistic regression model on the given data. :param data: The training data, an RDD of LabeledPoint. :param iterations: The number of iterations. (default: 100) :param initialWeights: The initial weights. (default: None) :param regParam: The regularizer parameter. (default: 0.0) :param regType: The type of regularizer used for training our model. Supported values: - "l1" for using L1 regularization - "l2" for using L2 regularization (default) - None for no regularization :param intercept: Boolean parameter which indicates the use or not of the augmented representation for training data (i.e., whether bias features are activated or not). (default: False) :param corrections: The number of corrections used in the LBFGS update. If a known updater is used for binary classification, it calls the ml implementation and this parameter will have no effect. (default: 10) :param tolerance: The convergence tolerance of iterations for L-BFGS. (default: 1e-6) :param validateData: Boolean parameter which indicates if the algorithm should validate data before training. (default: True) :param numClasses: The number of classes (i.e., outcomes) a label can take in Multinomial Logistic Regression. (default: 2) >>> data = [ ... LabeledPoint(0.0, [0.0, 1.0]), ... LabeledPoint(1.0, [1.0, 0.0]), ... ] >>> lrm = LogisticRegressionWithLBFGS.train(sc.parallelize(data), iterations=10) >>> lrm.predict([1.0, 0.0]) 1 >>> lrm.predict([0.0, 1.0]) 0
[ "Train", "a", "logistic", "regression", "model", "on", "the", "given", "data", "." ]
618d6bff71073c8c93501ab7392c3cc579730f0b
https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/mllib/classification.py#L332-L400
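A runnable sketch mirroring the doctest above, assuming a local SparkContext; the two LabeledPoints are the doctest's own toy data.

from pyspark import SparkContext
from pyspark.mllib.classification import LogisticRegressionWithLBFGS
from pyspark.mllib.regression import LabeledPoint

sc = SparkContext.getOrCreate()
data = sc.parallelize([
    LabeledPoint(0.0, [0.0, 1.0]),
    LabeledPoint(1.0, [1.0, 0.0]),
])

# Binary logistic regression trained with L-BFGS; regType defaults to "l2".
model = LogisticRegressionWithLBFGS.train(data, iterations=10)
print(model.predict([1.0, 0.0]), model.predict([0.0, 1.0]))   # expected: 1 0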
19,136
apache/spark
python/pyspark/heapq3.py
heappush
def heappush(heap, item): """Push item onto heap, maintaining the heap invariant.""" heap.append(item) _siftdown(heap, 0, len(heap)-1)
python
def heappush(heap, item): """Push item onto heap, maintaining the heap invariant.""" heap.append(item) _siftdown(heap, 0, len(heap)-1)
[ "def", "heappush", "(", "heap", ",", "item", ")", ":", "heap", ".", "append", "(", "item", ")", "_siftdown", "(", "heap", ",", "0", ",", "len", "(", "heap", ")", "-", "1", ")" ]
Push item onto heap, maintaining the heap invariant.
[ "Push", "item", "onto", "heap", "maintaining", "the", "heap", "invariant", "." ]
618d6bff71073c8c93501ab7392c3cc579730f0b
https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/heapq3.py#L411-L414
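pyspark.heapq3 mirrors the standard-library heapq module, so its public functions behave like their stdlib counterparts; the sketches for the heap functions below therefore use plain heapq to stay runnable anywhere.

import heapq

heap = []
for value in [5, 1, 4, 2, 3]:
    heapq.heappush(heap, value)   # sift the new item up, preserving the min-heap invariant

print(heap[0])   # 1 -- the smallest item always sits at index 0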
19,137
apache/spark
python/pyspark/heapq3.py
heappop
def heappop(heap): """Pop the smallest item off the heap, maintaining the heap invariant.""" lastelt = heap.pop() # raises appropriate IndexError if heap is empty if heap: returnitem = heap[0] heap[0] = lastelt _siftup(heap, 0) return returnitem return lastelt
python
def heappop(heap): """Pop the smallest item off the heap, maintaining the heap invariant.""" lastelt = heap.pop() # raises appropriate IndexError if heap is empty if heap: returnitem = heap[0] heap[0] = lastelt _siftup(heap, 0) return returnitem return lastelt
[ "def", "heappop", "(", "heap", ")", ":", "lastelt", "=", "heap", ".", "pop", "(", ")", "# raises appropriate IndexError if heap is empty", "if", "heap", ":", "returnitem", "=", "heap", "[", "0", "]", "heap", "[", "0", "]", "=", "lastelt", "_siftup", "(", "heap", ",", "0", ")", "return", "returnitem", "return", "lastelt" ]
Pop the smallest item off the heap, maintaining the heap invariant.
[ "Pop", "the", "smallest", "item", "off", "the", "heap", "maintaining", "the", "heap", "invariant", "." ]
618d6bff71073c8c93501ab7392c3cc579730f0b
https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/heapq3.py#L416-L424
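Repeatedly popping the root is the classic heapsort pattern; a minimal sketch with stdlib heapq (same vendored-module assumption as above).

import heapq

heap = [5, 1, 4, 2, 3]
heapq.heapify(heap)

out = []
while heap:
    out.append(heapq.heappop(heap))   # each pop returns the current minimum
print(out)   # [1, 2, 3, 4, 5]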
19,138
apache/spark
python/pyspark/heapq3.py
heapreplace
def heapreplace(heap, item): """Pop and return the current smallest value, and add the new item. This is more efficient than heappop() followed by heappush(), and can be more appropriate when using a fixed-size heap. Note that the value returned may be larger than item! That constrains reasonable uses of this routine unless written as part of a conditional replacement: if item > heap[0]: item = heapreplace(heap, item) """ returnitem = heap[0] # raises appropriate IndexError if heap is empty heap[0] = item _siftup(heap, 0) return returnitem
python
def heapreplace(heap, item): """Pop and return the current smallest value, and add the new item. This is more efficient than heappop() followed by heappush(), and can be more appropriate when using a fixed-size heap. Note that the value returned may be larger than item! That constrains reasonable uses of this routine unless written as part of a conditional replacement: if item > heap[0]: item = heapreplace(heap, item) """ returnitem = heap[0] # raises appropriate IndexError if heap is empty heap[0] = item _siftup(heap, 0) return returnitem
[ "def", "heapreplace", "(", "heap", ",", "item", ")", ":", "returnitem", "=", "heap", "[", "0", "]", "# raises appropriate IndexError if heap is empty", "heap", "[", "0", "]", "=", "item", "_siftup", "(", "heap", ",", "0", ")", "return", "returnitem" ]
Pop and return the current smallest value, and add the new item. This is more efficient than heappop() followed by heappush(), and can be more appropriate when using a fixed-size heap. Note that the value returned may be larger than item! That constrains reasonable uses of this routine unless written as part of a conditional replacement: if item > heap[0]: item = heapreplace(heap, item)
[ "Pop", "and", "return", "the", "current", "smallest", "value", "and", "add", "the", "new", "item", "." ]
618d6bff71073c8c93501ab7392c3cc579730f0b
https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/heapq3.py#L426-L440
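The fixed-size-heap use case called out in the heapreplace docstring, sketched with stdlib heapq: keep the k largest items of a stream, guarding the call exactly as the docstring's conditional-replacement note suggests. The stream values are invented.

import heapq

stream = [7, 1, 9, 3, 8, 2, 10]
top3 = stream[:3]
heapq.heapify(top3)

for value in stream[3:]:
    if value > top3[0]:
        # Pop the current minimum and push the new value in a single sift.
        heapq.heapreplace(top3, value)

print(sorted(top3))   # [8, 9, 10]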
19,139
apache/spark
python/pyspark/heapq3.py
heappushpop
def heappushpop(heap, item): """Fast version of a heappush followed by a heappop.""" if heap and heap[0] < item: item, heap[0] = heap[0], item _siftup(heap, 0) return item
python
def heappushpop(heap, item): """Fast version of a heappush followed by a heappop.""" if heap and heap[0] < item: item, heap[0] = heap[0], item _siftup(heap, 0) return item
[ "def", "heappushpop", "(", "heap", ",", "item", ")", ":", "if", "heap", "and", "heap", "[", "0", "]", "<", "item", ":", "item", ",", "heap", "[", "0", "]", "=", "heap", "[", "0", "]", ",", "item", "_siftup", "(", "heap", ",", "0", ")", "return", "item" ]
Fast version of a heappush followed by a heappop.
[ "Fast", "version", "of", "a", "heappush", "followed", "by", "a", "heappop", "." ]
618d6bff71073c8c93501ab7392c3cc579730f0b
https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/heapq3.py#L442-L447
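A short illustrative doctest for heappushpop, showing both branches of the `heap[0] < item` test:

>>> from pyspark.heapq3 import heappushpop
>>> heap = [1, 3, 5]
>>> heappushpop(heap, 4)    # 4 is pushed and the old minimum 1 is popped
1
>>> heap
[3, 4, 5]
>>> heappushpop(heap, 0)    # 0 is not larger than the minimum, so the heap is left untouched
0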
19,140
apache/spark
python/pyspark/heapq3.py
_heappop_max
def _heappop_max(heap): """Maxheap version of a heappop.""" lastelt = heap.pop() # raises appropriate IndexError if heap is empty if heap: returnitem = heap[0] heap[0] = lastelt _siftup_max(heap, 0) return returnitem return lastelt
python
def _heappop_max(heap): """Maxheap version of a heappop.""" lastelt = heap.pop() # raises appropriate IndexError if heap is empty if heap: returnitem = heap[0] heap[0] = lastelt _siftup_max(heap, 0) return returnitem return lastelt
[ "def", "_heappop_max", "(", "heap", ")", ":", "lastelt", "=", "heap", ".", "pop", "(", ")", "# raises appropriate IndexError if heap is empty", "if", "heap", ":", "returnitem", "=", "heap", "[", "0", "]", "heap", "[", "0", "]", "=", "lastelt", "_siftup_max", "(", "heap", ",", "0", ")", "return", "returnitem", "return", "lastelt" ]
Maxheap version of a heappop.
[ "Maxheap", "version", "of", "a", "heappop", "." ]
618d6bff71073c8c93501ab7392c3cc579730f0b
https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/heapq3.py#L460-L468
19,141
apache/spark
python/pyspark/heapq3.py
_heapreplace_max
def _heapreplace_max(heap, item): """Maxheap version of a heappop followed by a heappush.""" returnitem = heap[0] # raises appropriate IndexError if heap is empty heap[0] = item _siftup_max(heap, 0) return returnitem
python
def _heapreplace_max(heap, item): """Maxheap version of a heappop followed by a heappush.""" returnitem = heap[0] # raises appropriate IndexError if heap is empty heap[0] = item _siftup_max(heap, 0) return returnitem
[ "def", "_heapreplace_max", "(", "heap", ",", "item", ")", ":", "returnitem", "=", "heap", "[", "0", "]", "# raises appropriate IndexError if heap is empty", "heap", "[", "0", "]", "=", "item", "_siftup_max", "(", "heap", ",", "0", ")", "return", "returnitem" ]
Maxheap version of a heappop followed by a heappush.
[ "Maxheap", "version", "of", "a", "heappop", "followed", "by", "a", "heappush", "." ]
618d6bff71073c8c93501ab7392c3cc579730f0b
https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/heapq3.py#L470-L475
19,142
apache/spark
python/pyspark/heapq3.py
_siftdown_max
def _siftdown_max(heap, startpos, pos): 'Maxheap variant of _siftdown' newitem = heap[pos] # Follow the path to the root, moving parents down until finding a place # newitem fits. while pos > startpos: parentpos = (pos - 1) >> 1 parent = heap[parentpos] if parent < newitem: heap[pos] = parent pos = parentpos continue break heap[pos] = newitem
python
def _siftdown_max(heap, startpos, pos): 'Maxheap variant of _siftdown' newitem = heap[pos] # Follow the path to the root, moving parents down until finding a place # newitem fits. while pos > startpos: parentpos = (pos - 1) >> 1 parent = heap[parentpos] if parent < newitem: heap[pos] = parent pos = parentpos continue break heap[pos] = newitem
[ "def", "_siftdown_max", "(", "heap", ",", "startpos", ",", "pos", ")", ":", "newitem", "=", "heap", "[", "pos", "]", "# Follow the path to the root, moving parents down until finding a place", "# newitem fits.", "while", "pos", ">", "startpos", ":", "parentpos", "=", "(", "pos", "-", "1", ")", ">>", "1", "parent", "=", "heap", "[", "parentpos", "]", "if", "parent", "<", "newitem", ":", "heap", "[", "pos", "]", "=", "parent", "pos", "=", "parentpos", "continue", "break", "heap", "[", "pos", "]", "=", "newitem" ]
Maxheap variant of _siftdown
[ "Maxheap", "variant", "of", "_siftdown" ]
618d6bff71073c8c93501ab7392c3cc579730f0b
https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/heapq3.py#L559-L572
19,143
apache/spark
python/pyspark/heapq3.py
_siftup_max
def _siftup_max(heap, pos): 'Maxheap variant of _siftup' endpos = len(heap) startpos = pos newitem = heap[pos] # Bubble up the larger child until hitting a leaf. childpos = 2*pos + 1 # leftmost child position while childpos < endpos: # Set childpos to index of larger child. rightpos = childpos + 1 if rightpos < endpos and not heap[rightpos] < heap[childpos]: childpos = rightpos # Move the larger child up. heap[pos] = heap[childpos] pos = childpos childpos = 2*pos + 1 # The leaf at pos is empty now. Put newitem there, and bubble it up # to its final resting place (by sifting its parents down). heap[pos] = newitem _siftdown_max(heap, startpos, pos)
python
def _siftup_max(heap, pos): 'Maxheap variant of _siftup' endpos = len(heap) startpos = pos newitem = heap[pos] # Bubble up the larger child until hitting a leaf. childpos = 2*pos + 1 # leftmost child position while childpos < endpos: # Set childpos to index of larger child. rightpos = childpos + 1 if rightpos < endpos and not heap[rightpos] < heap[childpos]: childpos = rightpos # Move the larger child up. heap[pos] = heap[childpos] pos = childpos childpos = 2*pos + 1 # The leaf at pos is empty now. Put newitem there, and bubble it up # to its final resting place (by sifting its parents down). heap[pos] = newitem _siftdown_max(heap, startpos, pos)
[ "def", "_siftup_max", "(", "heap", ",", "pos", ")", ":", "endpos", "=", "len", "(", "heap", ")", "startpos", "=", "pos", "newitem", "=", "heap", "[", "pos", "]", "# Bubble up the larger child until hitting a leaf.", "childpos", "=", "2", "*", "pos", "+", "1", "# leftmost child position", "while", "childpos", "<", "endpos", ":", "# Set childpos to index of larger child.", "rightpos", "=", "childpos", "+", "1", "if", "rightpos", "<", "endpos", "and", "not", "heap", "[", "rightpos", "]", "<", "heap", "[", "childpos", "]", ":", "childpos", "=", "rightpos", "# Move the larger child up.", "heap", "[", "pos", "]", "=", "heap", "[", "childpos", "]", "pos", "=", "childpos", "childpos", "=", "2", "*", "pos", "+", "1", "# The leaf at pos is empty now. Put newitem there, and bubble it up", "# to its final resting place (by sifting its parents down).", "heap", "[", "pos", "]", "=", "newitem", "_siftdown_max", "(", "heap", ",", "startpos", ",", "pos", ")" ]
Maxheap variant of _siftup
[ "Maxheap", "variant", "of", "_siftup" ]
618d6bff71073c8c93501ab7392c3cc579730f0b
https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/heapq3.py#L574-L593
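The records above cover the module-private max-heap helpers; a hedged sketch of how they fit together (these underscore-prefixed names are internal to pyspark.heapq3 and are used here only for illustration):

>>> from pyspark.heapq3 import _heapify_max, _heappop_max
>>> data = [3, 1, 4, 1, 5]
>>> _heapify_max(data)      # rearrange the list into a max-heap in place
>>> _heappop_max(data)      # _siftup_max/_siftdown_max restore the invariant after the pop
5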
19,144
apache/spark
python/pyspark/heapq3.py
merge
def merge(iterables, key=None, reverse=False): '''Merge multiple sorted inputs into a single sorted output. Similar to sorted(itertools.chain(*iterables)) but returns a generator, does not pull the data into memory all at once, and assumes that each of the input streams is already sorted (smallest to largest). >>> list(merge([1,3,5,7], [0,2,4,8], [5,10,15,20], [], [25])) [0, 1, 2, 3, 4, 5, 5, 7, 8, 10, 15, 20, 25] If *key* is not None, applies a key function to each element to determine its sort order. >>> list(merge(['dog', 'horse'], ['cat', 'fish', 'kangaroo'], key=len)) ['dog', 'cat', 'fish', 'horse', 'kangaroo'] ''' h = [] h_append = h.append if reverse: _heapify = _heapify_max _heappop = _heappop_max _heapreplace = _heapreplace_max direction = -1 else: _heapify = heapify _heappop = heappop _heapreplace = heapreplace direction = 1 if key is None: for order, it in enumerate(map(iter, iterables)): try: h_append([next(it), order * direction, it]) except StopIteration: pass _heapify(h) while len(h) > 1: try: while True: value, order, it = s = h[0] yield value s[0] = next(it) # raises StopIteration when exhausted _heapreplace(h, s) # restore heap condition except StopIteration: _heappop(h) # remove empty iterator if h: # fast case when only a single iterator remains value, order, it = h[0] yield value for value in it: yield value return for order, it in enumerate(map(iter, iterables)): try: value = next(it) h_append([key(value), order * direction, value, it]) except StopIteration: pass _heapify(h) while len(h) > 1: try: while True: key_value, order, value, it = s = h[0] yield value value = next(it) s[0] = key(value) s[2] = value _heapreplace(h, s) except StopIteration: _heappop(h) if h: key_value, order, value, it = h[0] yield value for value in it: yield value
python
def merge(iterables, key=None, reverse=False): '''Merge multiple sorted inputs into a single sorted output. Similar to sorted(itertools.chain(*iterables)) but returns a generator, does not pull the data into memory all at once, and assumes that each of the input streams is already sorted (smallest to largest). >>> list(merge([1,3,5,7], [0,2,4,8], [5,10,15,20], [], [25])) [0, 1, 2, 3, 4, 5, 5, 7, 8, 10, 15, 20, 25] If *key* is not None, applies a key function to each element to determine its sort order. >>> list(merge(['dog', 'horse'], ['cat', 'fish', 'kangaroo'], key=len)) ['dog', 'cat', 'fish', 'horse', 'kangaroo'] ''' h = [] h_append = h.append if reverse: _heapify = _heapify_max _heappop = _heappop_max _heapreplace = _heapreplace_max direction = -1 else: _heapify = heapify _heappop = heappop _heapreplace = heapreplace direction = 1 if key is None: for order, it in enumerate(map(iter, iterables)): try: h_append([next(it), order * direction, it]) except StopIteration: pass _heapify(h) while len(h) > 1: try: while True: value, order, it = s = h[0] yield value s[0] = next(it) # raises StopIteration when exhausted _heapreplace(h, s) # restore heap condition except StopIteration: _heappop(h) # remove empty iterator if h: # fast case when only a single iterator remains value, order, it = h[0] yield value for value in it: yield value return for order, it in enumerate(map(iter, iterables)): try: value = next(it) h_append([key(value), order * direction, value, it]) except StopIteration: pass _heapify(h) while len(h) > 1: try: while True: key_value, order, value, it = s = h[0] yield value value = next(it) s[0] = key(value) s[2] = value _heapreplace(h, s) except StopIteration: _heappop(h) if h: key_value, order, value, it = h[0] yield value for value in it: yield value
[ "def", "merge", "(", "iterables", ",", "key", "=", "None", ",", "reverse", "=", "False", ")", ":", "h", "=", "[", "]", "h_append", "=", "h", ".", "append", "if", "reverse", ":", "_heapify", "=", "_heapify_max", "_heappop", "=", "_heappop_max", "_heapreplace", "=", "_heapreplace_max", "direction", "=", "-", "1", "else", ":", "_heapify", "=", "heapify", "_heappop", "=", "heappop", "_heapreplace", "=", "heapreplace", "direction", "=", "1", "if", "key", "is", "None", ":", "for", "order", ",", "it", "in", "enumerate", "(", "map", "(", "iter", ",", "iterables", ")", ")", ":", "try", ":", "h_append", "(", "[", "next", "(", "it", ")", ",", "order", "*", "direction", ",", "it", "]", ")", "except", "StopIteration", ":", "pass", "_heapify", "(", "h", ")", "while", "len", "(", "h", ")", ">", "1", ":", "try", ":", "while", "True", ":", "value", ",", "order", ",", "it", "=", "s", "=", "h", "[", "0", "]", "yield", "value", "s", "[", "0", "]", "=", "next", "(", "it", ")", "# raises StopIteration when exhausted", "_heapreplace", "(", "h", ",", "s", ")", "# restore heap condition", "except", "StopIteration", ":", "_heappop", "(", "h", ")", "# remove empty iterator", "if", "h", ":", "# fast case when only a single iterator remains", "value", ",", "order", ",", "it", "=", "h", "[", "0", "]", "yield", "value", "for", "value", "in", "it", ":", "yield", "value", "return", "for", "order", ",", "it", "in", "enumerate", "(", "map", "(", "iter", ",", "iterables", ")", ")", ":", "try", ":", "value", "=", "next", "(", "it", ")", "h_append", "(", "[", "key", "(", "value", ")", ",", "order", "*", "direction", ",", "value", ",", "it", "]", ")", "except", "StopIteration", ":", "pass", "_heapify", "(", "h", ")", "while", "len", "(", "h", ")", ">", "1", ":", "try", ":", "while", "True", ":", "key_value", ",", "order", ",", "value", ",", "it", "=", "s", "=", "h", "[", "0", "]", "yield", "value", "value", "=", "next", "(", "it", ")", "s", "[", "0", "]", "=", "key", "(", "value", ")", "s", "[", "2", "]", "=", "value", "_heapreplace", "(", "h", ",", "s", ")", "except", "StopIteration", ":", "_heappop", "(", "h", ")", "if", "h", ":", "key_value", ",", "order", ",", "value", ",", "it", "=", "h", "[", "0", "]", "yield", "value", "for", "value", "in", "it", ":", "yield", "value" ]
Merge multiple sorted inputs into a single sorted output. Similar to sorted(itertools.chain(*iterables)) but returns a generator, does not pull the data into memory all at once, and assumes that each of the input streams is already sorted (smallest to largest). >>> list(merge([1,3,5,7], [0,2,4,8], [5,10,15,20], [], [25])) [0, 1, 2, 3, 4, 5, 5, 7, 8, 10, 15, 20, 25] If *key* is not None, applies a key function to each element to determine its sort order. >>> list(merge(['dog', 'horse'], ['cat', 'fish', 'kangaroo'], key=len)) ['dog', 'cat', 'fish', 'horse', 'kangaroo']
[ "Merge", "multiple", "sorted", "inputs", "into", "a", "single", "sorted", "output", "." ]
618d6bff71073c8c93501ab7392c3cc579730f0b
https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/heapq3.py#L595-L673
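The docstring doctests cover ascending merges; a small hedged addition for the reverse flag (inputs must then be sorted largest-to-smallest):

>>> from pyspark.heapq3 import merge
>>> list(merge([9, 5, 1], [8, 4, 2], reverse=True))
[9, 8, 5, 4, 2, 1]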
19,145
apache/spark
python/pyspark/heapq3.py
nsmallest
def nsmallest(n, iterable, key=None): """Find the n smallest elements in a dataset. Equivalent to: sorted(iterable, key=key)[:n] """ # Short-cut for n==1 is to use min() if n == 1: it = iter(iterable) sentinel = object() if key is None: result = min(it, default=sentinel) else: result = min(it, default=sentinel, key=key) return [] if result is sentinel else [result] # When n>=size, it's faster to use sorted() try: size = len(iterable) except (TypeError, AttributeError): pass else: if n >= size: return sorted(iterable, key=key)[:n] # When key is none, use simpler decoration if key is None: it = iter(iterable) # put the range(n) first so that zip() doesn't # consume one too many elements from the iterator result = [(elem, i) for i, elem in zip(range(n), it)] if not result: return result _heapify_max(result) top = result[0][0] order = n _heapreplace = _heapreplace_max for elem in it: if elem < top: _heapreplace(result, (elem, order)) top = result[0][0] order += 1 result.sort() return [r[0] for r in result] # General case, slowest method it = iter(iterable) result = [(key(elem), i, elem) for i, elem in zip(range(n), it)] if not result: return result _heapify_max(result) top = result[0][0] order = n _heapreplace = _heapreplace_max for elem in it: k = key(elem) if k < top: _heapreplace(result, (k, order, elem)) top = result[0][0] order += 1 result.sort() return [r[2] for r in result]
python
def nsmallest(n, iterable, key=None): """Find the n smallest elements in a dataset. Equivalent to: sorted(iterable, key=key)[:n] """ # Short-cut for n==1 is to use min() if n == 1: it = iter(iterable) sentinel = object() if key is None: result = min(it, default=sentinel) else: result = min(it, default=sentinel, key=key) return [] if result is sentinel else [result] # When n>=size, it's faster to use sorted() try: size = len(iterable) except (TypeError, AttributeError): pass else: if n >= size: return sorted(iterable, key=key)[:n] # When key is none, use simpler decoration if key is None: it = iter(iterable) # put the range(n) first so that zip() doesn't # consume one too many elements from the iterator result = [(elem, i) for i, elem in zip(range(n), it)] if not result: return result _heapify_max(result) top = result[0][0] order = n _heapreplace = _heapreplace_max for elem in it: if elem < top: _heapreplace(result, (elem, order)) top = result[0][0] order += 1 result.sort() return [r[0] for r in result] # General case, slowest method it = iter(iterable) result = [(key(elem), i, elem) for i, elem in zip(range(n), it)] if not result: return result _heapify_max(result) top = result[0][0] order = n _heapreplace = _heapreplace_max for elem in it: k = key(elem) if k < top: _heapreplace(result, (k, order, elem)) top = result[0][0] order += 1 result.sort() return [r[2] for r in result]
[ "def", "nsmallest", "(", "n", ",", "iterable", ",", "key", "=", "None", ")", ":", "# Short-cut for n==1 is to use min()", "if", "n", "==", "1", ":", "it", "=", "iter", "(", "iterable", ")", "sentinel", "=", "object", "(", ")", "if", "key", "is", "None", ":", "result", "=", "min", "(", "it", ",", "default", "=", "sentinel", ")", "else", ":", "result", "=", "min", "(", "it", ",", "default", "=", "sentinel", ",", "key", "=", "key", ")", "return", "[", "]", "if", "result", "is", "sentinel", "else", "[", "result", "]", "# When n>=size, it's faster to use sorted()", "try", ":", "size", "=", "len", "(", "iterable", ")", "except", "(", "TypeError", ",", "AttributeError", ")", ":", "pass", "else", ":", "if", "n", ">=", "size", ":", "return", "sorted", "(", "iterable", ",", "key", "=", "key", ")", "[", ":", "n", "]", "# When key is none, use simpler decoration", "if", "key", "is", "None", ":", "it", "=", "iter", "(", "iterable", ")", "# put the range(n) first so that zip() doesn't", "# consume one too many elements from the iterator", "result", "=", "[", "(", "elem", ",", "i", ")", "for", "i", ",", "elem", "in", "zip", "(", "range", "(", "n", ")", ",", "it", ")", "]", "if", "not", "result", ":", "return", "result", "_heapify_max", "(", "result", ")", "top", "=", "result", "[", "0", "]", "[", "0", "]", "order", "=", "n", "_heapreplace", "=", "_heapreplace_max", "for", "elem", "in", "it", ":", "if", "elem", "<", "top", ":", "_heapreplace", "(", "result", ",", "(", "elem", ",", "order", ")", ")", "top", "=", "result", "[", "0", "]", "[", "0", "]", "order", "+=", "1", "result", ".", "sort", "(", ")", "return", "[", "r", "[", "0", "]", "for", "r", "in", "result", "]", "# General case, slowest method", "it", "=", "iter", "(", "iterable", ")", "result", "=", "[", "(", "key", "(", "elem", ")", ",", "i", ",", "elem", ")", "for", "i", ",", "elem", "in", "zip", "(", "range", "(", "n", ")", ",", "it", ")", "]", "if", "not", "result", ":", "return", "result", "_heapify_max", "(", "result", ")", "top", "=", "result", "[", "0", "]", "[", "0", "]", "order", "=", "n", "_heapreplace", "=", "_heapreplace_max", "for", "elem", "in", "it", ":", "k", "=", "key", "(", "elem", ")", "if", "k", "<", "top", ":", "_heapreplace", "(", "result", ",", "(", "k", ",", "order", ",", "elem", ")", ")", "top", "=", "result", "[", "0", "]", "[", "0", "]", "order", "+=", "1", "result", ".", "sort", "(", ")", "return", "[", "r", "[", "2", "]", "for", "r", "in", "result", "]" ]
Find the n smallest elements in a dataset. Equivalent to: sorted(iterable, key=key)[:n]
[ "Find", "the", "n", "smallest", "elements", "in", "a", "dataset", "." ]
618d6bff71073c8c93501ab7392c3cc579730f0b
https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/heapq3.py#L742-L803
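A brief illustrative doctest for nsmallest, including the key decoration described in the code comments:

>>> from pyspark.heapq3 import nsmallest
>>> nsmallest(2, [7, 3, 9, 1, 5])
[1, 3]
>>> nsmallest(2, ['kangaroo', 'cat', 'horse'], key=len)
['cat', 'horse']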
19,146
apache/spark
python/pyspark/heapq3.py
nlargest
def nlargest(n, iterable, key=None): """Find the n largest elements in a dataset. Equivalent to: sorted(iterable, key=key, reverse=True)[:n] """ # Short-cut for n==1 is to use max() if n == 1: it = iter(iterable) sentinel = object() if key is None: result = max(it, default=sentinel) else: result = max(it, default=sentinel, key=key) return [] if result is sentinel else [result] # When n>=size, it's faster to use sorted() try: size = len(iterable) except (TypeError, AttributeError): pass else: if n >= size: return sorted(iterable, key=key, reverse=True)[:n] # When key is none, use simpler decoration if key is None: it = iter(iterable) result = [(elem, i) for i, elem in zip(range(0, -n, -1), it)] if not result: return result heapify(result) top = result[0][0] order = -n _heapreplace = heapreplace for elem in it: if top < elem: _heapreplace(result, (elem, order)) top = result[0][0] order -= 1 result.sort(reverse=True) return [r[0] for r in result] # General case, slowest method it = iter(iterable) result = [(key(elem), i, elem) for i, elem in zip(range(0, -n, -1), it)] if not result: return result heapify(result) top = result[0][0] order = -n _heapreplace = heapreplace for elem in it: k = key(elem) if top < k: _heapreplace(result, (k, order, elem)) top = result[0][0] order -= 1 result.sort(reverse=True) return [r[2] for r in result]
python
def nlargest(n, iterable, key=None): """Find the n largest elements in a dataset. Equivalent to: sorted(iterable, key=key, reverse=True)[:n] """ # Short-cut for n==1 is to use max() if n == 1: it = iter(iterable) sentinel = object() if key is None: result = max(it, default=sentinel) else: result = max(it, default=sentinel, key=key) return [] if result is sentinel else [result] # When n>=size, it's faster to use sorted() try: size = len(iterable) except (TypeError, AttributeError): pass else: if n >= size: return sorted(iterable, key=key, reverse=True)[:n] # When key is none, use simpler decoration if key is None: it = iter(iterable) result = [(elem, i) for i, elem in zip(range(0, -n, -1), it)] if not result: return result heapify(result) top = result[0][0] order = -n _heapreplace = heapreplace for elem in it: if top < elem: _heapreplace(result, (elem, order)) top = result[0][0] order -= 1 result.sort(reverse=True) return [r[0] for r in result] # General case, slowest method it = iter(iterable) result = [(key(elem), i, elem) for i, elem in zip(range(0, -n, -1), it)] if not result: return result heapify(result) top = result[0][0] order = -n _heapreplace = heapreplace for elem in it: k = key(elem) if top < k: _heapreplace(result, (k, order, elem)) top = result[0][0] order -= 1 result.sort(reverse=True) return [r[2] for r in result]
[ "def", "nlargest", "(", "n", ",", "iterable", ",", "key", "=", "None", ")", ":", "# Short-cut for n==1 is to use max()", "if", "n", "==", "1", ":", "it", "=", "iter", "(", "iterable", ")", "sentinel", "=", "object", "(", ")", "if", "key", "is", "None", ":", "result", "=", "max", "(", "it", ",", "default", "=", "sentinel", ")", "else", ":", "result", "=", "max", "(", "it", ",", "default", "=", "sentinel", ",", "key", "=", "key", ")", "return", "[", "]", "if", "result", "is", "sentinel", "else", "[", "result", "]", "# When n>=size, it's faster to use sorted()", "try", ":", "size", "=", "len", "(", "iterable", ")", "except", "(", "TypeError", ",", "AttributeError", ")", ":", "pass", "else", ":", "if", "n", ">=", "size", ":", "return", "sorted", "(", "iterable", ",", "key", "=", "key", ",", "reverse", "=", "True", ")", "[", ":", "n", "]", "# When key is none, use simpler decoration", "if", "key", "is", "None", ":", "it", "=", "iter", "(", "iterable", ")", "result", "=", "[", "(", "elem", ",", "i", ")", "for", "i", ",", "elem", "in", "zip", "(", "range", "(", "0", ",", "-", "n", ",", "-", "1", ")", ",", "it", ")", "]", "if", "not", "result", ":", "return", "result", "heapify", "(", "result", ")", "top", "=", "result", "[", "0", "]", "[", "0", "]", "order", "=", "-", "n", "_heapreplace", "=", "heapreplace", "for", "elem", "in", "it", ":", "if", "top", "<", "elem", ":", "_heapreplace", "(", "result", ",", "(", "elem", ",", "order", ")", ")", "top", "=", "result", "[", "0", "]", "[", "0", "]", "order", "-=", "1", "result", ".", "sort", "(", "reverse", "=", "True", ")", "return", "[", "r", "[", "0", "]", "for", "r", "in", "result", "]", "# General case, slowest method", "it", "=", "iter", "(", "iterable", ")", "result", "=", "[", "(", "key", "(", "elem", ")", ",", "i", ",", "elem", ")", "for", "i", ",", "elem", "in", "zip", "(", "range", "(", "0", ",", "-", "n", ",", "-", "1", ")", ",", "it", ")", "]", "if", "not", "result", ":", "return", "result", "heapify", "(", "result", ")", "top", "=", "result", "[", "0", "]", "[", "0", "]", "order", "=", "-", "n", "_heapreplace", "=", "heapreplace", "for", "elem", "in", "it", ":", "k", "=", "key", "(", "elem", ")", "if", "top", "<", "k", ":", "_heapreplace", "(", "result", ",", "(", "k", ",", "order", ",", "elem", ")", ")", "top", "=", "result", "[", "0", "]", "[", "0", "]", "order", "-=", "1", "result", ".", "sort", "(", "reverse", "=", "True", ")", "return", "[", "r", "[", "2", "]", "for", "r", "in", "result", "]" ]
Find the n largest elements in a dataset. Equivalent to: sorted(iterable, key=key, reverse=True)[:n]
[ "Find", "the", "n", "largest", "elements", "in", "a", "dataset", "." ]
618d6bff71073c8c93501ab7392c3cc579730f0b
https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/heapq3.py#L805-L864
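A brief illustrative doctest for nlargest, exercising the two short-cuts the implementation documents (n == 1 falls back to max(), n >= size falls back to sorted(..., reverse=True)):

>>> from pyspark.heapq3 import nlargest
>>> nlargest(1, [7, 3, 9])
[9]
>>> nlargest(5, [7, 3, 9])
[9, 7, 3]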
19,147
apache/spark
python/pyspark/ml/stat.py
Correlation.corr
def corr(dataset, column, method="pearson"): """ Compute the correlation matrix with specified method using dataset. :param dataset: A Dataset or a DataFrame. :param column: The name of the column of vectors for which the correlation coefficient needs to be computed. This must be a column of the dataset, and it must contain Vector objects. :param method: String specifying the method to use for computing correlation. Supported: `pearson` (default), `spearman`. :return: A DataFrame that contains the correlation matrix of the column of vectors. This DataFrame contains a single row and a single column of name '$METHODNAME($COLUMN)'. >>> from pyspark.ml.linalg import Vectors >>> from pyspark.ml.stat import Correlation >>> dataset = [[Vectors.dense([1, 0, 0, -2])], ... [Vectors.dense([4, 5, 0, 3])], ... [Vectors.dense([6, 7, 0, 8])], ... [Vectors.dense([9, 0, 0, 1])]] >>> dataset = spark.createDataFrame(dataset, ['features']) >>> pearsonCorr = Correlation.corr(dataset, 'features', 'pearson').collect()[0][0] >>> print(str(pearsonCorr).replace('nan', 'NaN')) DenseMatrix([[ 1. , 0.0556..., NaN, 0.4004...], [ 0.0556..., 1. , NaN, 0.9135...], [ NaN, NaN, 1. , NaN], [ 0.4004..., 0.9135..., NaN, 1. ]]) >>> spearmanCorr = Correlation.corr(dataset, 'features', method='spearman').collect()[0][0] >>> print(str(spearmanCorr).replace('nan', 'NaN')) DenseMatrix([[ 1. , 0.1054..., NaN, 0.4 ], [ 0.1054..., 1. , NaN, 0.9486... ], [ NaN, NaN, 1. , NaN], [ 0.4 , 0.9486... , NaN, 1. ]]) """ sc = SparkContext._active_spark_context javaCorrObj = _jvm().org.apache.spark.ml.stat.Correlation args = [_py2java(sc, arg) for arg in (dataset, column, method)] return _java2py(sc, javaCorrObj.corr(*args))
python
def corr(dataset, column, method="pearson"): """ Compute the correlation matrix with specified method using dataset. :param dataset: A Dataset or a DataFrame. :param column: The name of the column of vectors for which the correlation coefficient needs to be computed. This must be a column of the dataset, and it must contain Vector objects. :param method: String specifying the method to use for computing correlation. Supported: `pearson` (default), `spearman`. :return: A DataFrame that contains the correlation matrix of the column of vectors. This DataFrame contains a single row and a single column of name '$METHODNAME($COLUMN)'. >>> from pyspark.ml.linalg import Vectors >>> from pyspark.ml.stat import Correlation >>> dataset = [[Vectors.dense([1, 0, 0, -2])], ... [Vectors.dense([4, 5, 0, 3])], ... [Vectors.dense([6, 7, 0, 8])], ... [Vectors.dense([9, 0, 0, 1])]] >>> dataset = spark.createDataFrame(dataset, ['features']) >>> pearsonCorr = Correlation.corr(dataset, 'features', 'pearson').collect()[0][0] >>> print(str(pearsonCorr).replace('nan', 'NaN')) DenseMatrix([[ 1. , 0.0556..., NaN, 0.4004...], [ 0.0556..., 1. , NaN, 0.9135...], [ NaN, NaN, 1. , NaN], [ 0.4004..., 0.9135..., NaN, 1. ]]) >>> spearmanCorr = Correlation.corr(dataset, 'features', method='spearman').collect()[0][0] >>> print(str(spearmanCorr).replace('nan', 'NaN')) DenseMatrix([[ 1. , 0.1054..., NaN, 0.4 ], [ 0.1054..., 1. , NaN, 0.9486... ], [ NaN, NaN, 1. , NaN], [ 0.4 , 0.9486... , NaN, 1. ]]) """ sc = SparkContext._active_spark_context javaCorrObj = _jvm().org.apache.spark.ml.stat.Correlation args = [_py2java(sc, arg) for arg in (dataset, column, method)] return _java2py(sc, javaCorrObj.corr(*args))
[ "def", "corr", "(", "dataset", ",", "column", ",", "method", "=", "\"pearson\"", ")", ":", "sc", "=", "SparkContext", ".", "_active_spark_context", "javaCorrObj", "=", "_jvm", "(", ")", ".", "org", ".", "apache", ".", "spark", ".", "ml", ".", "stat", ".", "Correlation", "args", "=", "[", "_py2java", "(", "sc", ",", "arg", ")", "for", "arg", "in", "(", "dataset", ",", "column", ",", "method", ")", "]", "return", "_java2py", "(", "sc", ",", "javaCorrObj", ".", "corr", "(", "*", "args", ")", ")" ]
Compute the correlation matrix with specified method using dataset. :param dataset: A Dataset or a DataFrame. :param column: The name of the column of vectors for which the correlation coefficient needs to be computed. This must be a column of the dataset, and it must contain Vector objects. :param method: String specifying the method to use for computing correlation. Supported: `pearson` (default), `spearman`. :return: A DataFrame that contains the correlation matrix of the column of vectors. This DataFrame contains a single row and a single column of name '$METHODNAME($COLUMN)'. >>> from pyspark.ml.linalg import Vectors >>> from pyspark.ml.stat import Correlation >>> dataset = [[Vectors.dense([1, 0, 0, -2])], ... [Vectors.dense([4, 5, 0, 3])], ... [Vectors.dense([6, 7, 0, 8])], ... [Vectors.dense([9, 0, 0, 1])]] >>> dataset = spark.createDataFrame(dataset, ['features']) >>> pearsonCorr = Correlation.corr(dataset, 'features', 'pearson').collect()[0][0] >>> print(str(pearsonCorr).replace('nan', 'NaN')) DenseMatrix([[ 1. , 0.0556..., NaN, 0.4004...], [ 0.0556..., 1. , NaN, 0.9135...], [ NaN, NaN, 1. , NaN], [ 0.4004..., 0.9135..., NaN, 1. ]]) >>> spearmanCorr = Correlation.corr(dataset, 'features', method='spearman').collect()[0][0] >>> print(str(spearmanCorr).replace('nan', 'NaN')) DenseMatrix([[ 1. , 0.1054..., NaN, 0.4 ], [ 0.1054..., 1. , NaN, 0.9486... ], [ NaN, NaN, 1. , NaN], [ 0.4 , 0.9486... , NaN, 1. ]])
[ "Compute", "the", "correlation", "matrix", "with", "specified", "method", "using", "dataset", "." ]
618d6bff71073c8c93501ab7392c3cc579730f0b
https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/ml/stat.py#L95-L136
19,148
apache/spark
python/pyspark/ml/stat.py
Summarizer.metrics
def metrics(*metrics): """ Given a list of metrics, provides a builder that it turns computes metrics from a column. See the documentation of [[Summarizer]] for an example. The following metrics are accepted (case sensitive): - mean: a vector that contains the coefficient-wise mean. - variance: a vector tha contains the coefficient-wise variance. - count: the count of all vectors seen. - numNonzeros: a vector with the number of non-zeros for each coefficients - max: the maximum for each coefficient. - min: the minimum for each coefficient. - normL2: the Euclidean norm for each coefficient. - normL1: the L1 norm of each coefficient (sum of the absolute values). :param metrics: metrics that can be provided. :return: an object of :py:class:`pyspark.ml.stat.SummaryBuilder` Note: Currently, the performance of this interface is about 2x~3x slower then using the RDD interface. """ sc = SparkContext._active_spark_context js = JavaWrapper._new_java_obj("org.apache.spark.ml.stat.Summarizer.metrics", _to_seq(sc, metrics)) return SummaryBuilder(js)
python
def metrics(*metrics): """ Given a list of metrics, provides a builder that it turns computes metrics from a column. See the documentation of [[Summarizer]] for an example. The following metrics are accepted (case sensitive): - mean: a vector that contains the coefficient-wise mean. - variance: a vector tha contains the coefficient-wise variance. - count: the count of all vectors seen. - numNonzeros: a vector with the number of non-zeros for each coefficients - max: the maximum for each coefficient. - min: the minimum for each coefficient. - normL2: the Euclidean norm for each coefficient. - normL1: the L1 norm of each coefficient (sum of the absolute values). :param metrics: metrics that can be provided. :return: an object of :py:class:`pyspark.ml.stat.SummaryBuilder` Note: Currently, the performance of this interface is about 2x~3x slower then using the RDD interface. """ sc = SparkContext._active_spark_context js = JavaWrapper._new_java_obj("org.apache.spark.ml.stat.Summarizer.metrics", _to_seq(sc, metrics)) return SummaryBuilder(js)
[ "def", "metrics", "(", "*", "metrics", ")", ":", "sc", "=", "SparkContext", ".", "_active_spark_context", "js", "=", "JavaWrapper", ".", "_new_java_obj", "(", "\"org.apache.spark.ml.stat.Summarizer.metrics\"", ",", "_to_seq", "(", "sc", ",", "metrics", ")", ")", "return", "SummaryBuilder", "(", "js", ")" ]
Given a list of metrics, provides a builder that in turn computes metrics from a column. See the documentation of [[Summarizer]] for an example. The following metrics are accepted (case sensitive): - mean: a vector that contains the coefficient-wise mean. - variance: a vector that contains the coefficient-wise variance. - count: the count of all vectors seen. - numNonzeros: a vector with the number of non-zeros for each coefficient. - max: the maximum for each coefficient. - min: the minimum for each coefficient. - normL2: the Euclidean norm for each coefficient. - normL1: the L1 norm of each coefficient (sum of the absolute values). :param metrics: metrics that can be provided. :return: an object of :py:class:`pyspark.ml.stat.SummaryBuilder` Note: Currently, the performance of this interface is about 2x~3x slower than using the RDD interface.
[ "Given", "a", "list", "of", "metrics", "provides", "a", "builder", "that", "it", "turns", "computes", "metrics", "from", "a", "column", "." ]
618d6bff71073c8c93501ab7392c3cc579730f0b
https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/ml/stat.py#L326-L353
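A minimal usage sketch for the metrics builder, assuming an active SparkSession named `spark` (the DataFrame and column names below are illustrative, not part of the record):

    from pyspark.ml.linalg import Vectors
    from pyspark.ml.stat import Summarizer

    df = spark.createDataFrame([(Vectors.dense([1.0, 2.0]),),
                                (Vectors.dense([3.0, 4.0]),)], ["features"])
    summarizer = Summarizer.metrics("mean", "count")   # build once, reuse as an aggregate
    df.select(summarizer.summary(df.features)).show(truncate=False)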
19,149
apache/spark
python/pyspark/ml/stat.py
SummaryBuilder.summary
def summary(self, featuresCol, weightCol=None): """ Returns an aggregate object that contains the summary of the column with the requested metrics. :param featuresCol: a column that contains features Vector object. :param weightCol: a column that contains weight value. Default weight is 1.0. :return: an aggregate column that contains the statistics. The exact content of this structure is determined during the creation of the builder. """ featuresCol, weightCol = Summarizer._check_param(featuresCol, weightCol) return Column(self._java_obj.summary(featuresCol._jc, weightCol._jc))
python
def summary(self, featuresCol, weightCol=None): """ Returns an aggregate object that contains the summary of the column with the requested metrics. :param featuresCol: a column that contains features Vector object. :param weightCol: a column that contains weight value. Default weight is 1.0. :return: an aggregate column that contains the statistics. The exact content of this structure is determined during the creation of the builder. """ featuresCol, weightCol = Summarizer._check_param(featuresCol, weightCol) return Column(self._java_obj.summary(featuresCol._jc, weightCol._jc))
[ "def", "summary", "(", "self", ",", "featuresCol", ",", "weightCol", "=", "None", ")", ":", "featuresCol", ",", "weightCol", "=", "Summarizer", ".", "_check_param", "(", "featuresCol", ",", "weightCol", ")", "return", "Column", "(", "self", ".", "_java_obj", ".", "summary", "(", "featuresCol", ".", "_jc", ",", "weightCol", ".", "_jc", ")", ")" ]
Returns an aggregate object that contains the summary of the column with the requested metrics. :param featuresCol: a column that contains features Vector object. :param weightCol: a column that contains weight value. Default weight is 1.0. :return: an aggregate column that contains the statistics. The exact content of this structure is determined during the creation of the builder.
[ "Returns", "an", "aggregate", "object", "that", "contains", "the", "summary", "of", "the", "column", "with", "the", "requested", "metrics", "." ]
618d6bff71073c8c93501ab7392c3cc579730f0b
https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/ml/stat.py#L372-L386
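A hedged follow-up to the previous sketch showing the optional weight column (reusing the hypothetical `df` and `summarizer` from above; the column name `weight` is illustrative):

    from pyspark.sql import functions as F

    weighted = df.withColumn("weight", F.lit(2.0))
    weighted.select(summarizer.summary(weighted.features, weighted.weight)).show(truncate=False)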
19,150
apache/spark
python/pyspark/ml/tuning.py
ParamGridBuilder.build
def build(self): """ Builds and returns all combinations of parameters specified by the param grid. """ keys = self._param_grid.keys() grid_values = self._param_grid.values() def to_key_value_pairs(keys, values): return [(key, key.typeConverter(value)) for key, value in zip(keys, values)] return [dict(to_key_value_pairs(keys, prod)) for prod in itertools.product(*grid_values)]
python
def build(self): """ Builds and returns all combinations of parameters specified by the param grid. """ keys = self._param_grid.keys() grid_values = self._param_grid.values() def to_key_value_pairs(keys, values): return [(key, key.typeConverter(value)) for key, value in zip(keys, values)] return [dict(to_key_value_pairs(keys, prod)) for prod in itertools.product(*grid_values)]
[ "def", "build", "(", "self", ")", ":", "keys", "=", "self", ".", "_param_grid", ".", "keys", "(", ")", "grid_values", "=", "self", ".", "_param_grid", ".", "values", "(", ")", "def", "to_key_value_pairs", "(", "keys", ",", "values", ")", ":", "return", "[", "(", "key", ",", "key", ".", "typeConverter", "(", "value", ")", ")", "for", "key", ",", "value", "in", "zip", "(", "keys", ",", "values", ")", "]", "return", "[", "dict", "(", "to_key_value_pairs", "(", "keys", ",", "prod", ")", ")", "for", "prod", "in", "itertools", ".", "product", "(", "*", "grid_values", ")", "]" ]
Builds and returns all combinations of parameters specified by the param grid.
[ "Builds", "and", "returns", "all", "combinations", "of", "parameters", "specified", "by", "the", "param", "grid", "." ]
618d6bff71073c8c93501ab7392c3cc579730f0b
https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/ml/tuning.py#L111-L122
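A minimal sketch of how build() expands the added grids into their Cartesian product, assuming an active SparkSession (instantiating the estimator touches the JVM); the estimator and the grid values are illustrative:

    from pyspark.ml.classification import LogisticRegression
    from pyspark.ml.tuning import ParamGridBuilder

    lr = LogisticRegression()
    grid = (ParamGridBuilder()
            .addGrid(lr.regParam, [0.01, 0.1])
            .addGrid(lr.maxIter, [10, 50])
            .build())
    len(grid)   # 4 param maps: every combination of regParam and maxIter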
19,151
apache/spark
python/pyspark/ml/tuning.py
ValidatorParams._from_java_impl
def _from_java_impl(cls, java_stage): """ Return Python estimator, estimatorParamMaps, and evaluator from a Java ValidatorParams. """ # Load information from java_stage to the instance. estimator = JavaParams._from_java(java_stage.getEstimator()) evaluator = JavaParams._from_java(java_stage.getEvaluator()) epms = [estimator._transfer_param_map_from_java(epm) for epm in java_stage.getEstimatorParamMaps()] return estimator, epms, evaluator
python
def _from_java_impl(cls, java_stage): """ Return Python estimator, estimatorParamMaps, and evaluator from a Java ValidatorParams. """ # Load information from java_stage to the instance. estimator = JavaParams._from_java(java_stage.getEstimator()) evaluator = JavaParams._from_java(java_stage.getEvaluator()) epms = [estimator._transfer_param_map_from_java(epm) for epm in java_stage.getEstimatorParamMaps()] return estimator, epms, evaluator
[ "def", "_from_java_impl", "(", "cls", ",", "java_stage", ")", ":", "# Load information from java_stage to the instance.", "estimator", "=", "JavaParams", ".", "_from_java", "(", "java_stage", ".", "getEstimator", "(", ")", ")", "evaluator", "=", "JavaParams", ".", "_from_java", "(", "java_stage", ".", "getEvaluator", "(", ")", ")", "epms", "=", "[", "estimator", ".", "_transfer_param_map_from_java", "(", "epm", ")", "for", "epm", "in", "java_stage", ".", "getEstimatorParamMaps", "(", ")", "]", "return", "estimator", ",", "epms", ",", "evaluator" ]
Return Python estimator, estimatorParamMaps, and evaluator from a Java ValidatorParams.
[ "Return", "Python", "estimator", "estimatorParamMaps", "and", "evaluator", "from", "a", "Java", "ValidatorParams", "." ]
618d6bff71073c8c93501ab7392c3cc579730f0b
https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/ml/tuning.py#L173-L183
19,152
apache/spark
python/pyspark/ml/tuning.py
ValidatorParams._to_java_impl
def _to_java_impl(self): """ Return Java estimator, estimatorParamMaps, and evaluator from this Python instance. """ gateway = SparkContext._gateway cls = SparkContext._jvm.org.apache.spark.ml.param.ParamMap java_epms = gateway.new_array(cls, len(self.getEstimatorParamMaps())) for idx, epm in enumerate(self.getEstimatorParamMaps()): java_epms[idx] = self.getEstimator()._transfer_param_map_to_java(epm) java_estimator = self.getEstimator()._to_java() java_evaluator = self.getEvaluator()._to_java() return java_estimator, java_epms, java_evaluator
python
def _to_java_impl(self): """ Return Java estimator, estimatorParamMaps, and evaluator from this Python instance. """ gateway = SparkContext._gateway cls = SparkContext._jvm.org.apache.spark.ml.param.ParamMap java_epms = gateway.new_array(cls, len(self.getEstimatorParamMaps())) for idx, epm in enumerate(self.getEstimatorParamMaps()): java_epms[idx] = self.getEstimator()._transfer_param_map_to_java(epm) java_estimator = self.getEstimator()._to_java() java_evaluator = self.getEvaluator()._to_java() return java_estimator, java_epms, java_evaluator
[ "def", "_to_java_impl", "(", "self", ")", ":", "gateway", "=", "SparkContext", ".", "_gateway", "cls", "=", "SparkContext", ".", "_jvm", ".", "org", ".", "apache", ".", "spark", ".", "ml", ".", "param", ".", "ParamMap", "java_epms", "=", "gateway", ".", "new_array", "(", "cls", ",", "len", "(", "self", ".", "getEstimatorParamMaps", "(", ")", ")", ")", "for", "idx", ",", "epm", "in", "enumerate", "(", "self", ".", "getEstimatorParamMaps", "(", ")", ")", ":", "java_epms", "[", "idx", "]", "=", "self", ".", "getEstimator", "(", ")", ".", "_transfer_param_map_to_java", "(", "epm", ")", "java_estimator", "=", "self", ".", "getEstimator", "(", ")", ".", "_to_java", "(", ")", "java_evaluator", "=", "self", ".", "getEvaluator", "(", ")", ".", "_to_java", "(", ")", "return", "java_estimator", ",", "java_epms", ",", "java_evaluator" ]
Return Java estimator, estimatorParamMaps, and evaluator from this Python instance.
[ "Return", "Java", "estimator", "estimatorParamMaps", "and", "evaluator", "from", "this", "Python", "instance", "." ]
618d6bff71073c8c93501ab7392c3cc579730f0b
https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/ml/tuning.py#L185-L199
19,153
apache/spark
python/pyspark/ml/tuning.py
CrossValidator._from_java
def _from_java(cls, java_stage): """ Given a Java CrossValidator, create and return a Python wrapper of it. Used for ML persistence. """ estimator, epms, evaluator = super(CrossValidator, cls)._from_java_impl(java_stage) numFolds = java_stage.getNumFolds() seed = java_stage.getSeed() parallelism = java_stage.getParallelism() collectSubModels = java_stage.getCollectSubModels() # Create a new instance of this stage. py_stage = cls(estimator=estimator, estimatorParamMaps=epms, evaluator=evaluator, numFolds=numFolds, seed=seed, parallelism=parallelism, collectSubModels=collectSubModels) py_stage._resetUid(java_stage.uid()) return py_stage
python
def _from_java(cls, java_stage): """ Given a Java CrossValidator, create and return a Python wrapper of it. Used for ML persistence. """ estimator, epms, evaluator = super(CrossValidator, cls)._from_java_impl(java_stage) numFolds = java_stage.getNumFolds() seed = java_stage.getSeed() parallelism = java_stage.getParallelism() collectSubModels = java_stage.getCollectSubModels() # Create a new instance of this stage. py_stage = cls(estimator=estimator, estimatorParamMaps=epms, evaluator=evaluator, numFolds=numFolds, seed=seed, parallelism=parallelism, collectSubModels=collectSubModels) py_stage._resetUid(java_stage.uid()) return py_stage
[ "def", "_from_java", "(", "cls", ",", "java_stage", ")", ":", "estimator", ",", "epms", ",", "evaluator", "=", "super", "(", "CrossValidator", ",", "cls", ")", ".", "_from_java_impl", "(", "java_stage", ")", "numFolds", "=", "java_stage", ".", "getNumFolds", "(", ")", "seed", "=", "java_stage", ".", "getSeed", "(", ")", "parallelism", "=", "java_stage", ".", "getParallelism", "(", ")", "collectSubModels", "=", "java_stage", ".", "getCollectSubModels", "(", ")", "# Create a new instance of this stage.", "py_stage", "=", "cls", "(", "estimator", "=", "estimator", ",", "estimatorParamMaps", "=", "epms", ",", "evaluator", "=", "evaluator", ",", "numFolds", "=", "numFolds", ",", "seed", "=", "seed", ",", "parallelism", "=", "parallelism", ",", "collectSubModels", "=", "collectSubModels", ")", "py_stage", ".", "_resetUid", "(", "java_stage", ".", "uid", "(", ")", ")", "return", "py_stage" ]
Given a Java CrossValidator, create and return a Python wrapper of it. Used for ML persistence.
[ "Given", "a", "Java", "CrossValidator", "create", "and", "return", "a", "Python", "wrapper", "of", "it", ".", "Used", "for", "ML", "persistence", "." ]
618d6bff71073c8c93501ab7392c3cc579730f0b
https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/ml/tuning.py#L351-L367
19,154
apache/spark
python/pyspark/ml/tuning.py
CrossValidator._to_java
def _to_java(self): """ Transfer this instance to a Java CrossValidator. Used for ML persistence. :return: Java object equivalent to this instance. """ estimator, epms, evaluator = super(CrossValidator, self)._to_java_impl() _java_obj = JavaParams._new_java_obj("org.apache.spark.ml.tuning.CrossValidator", self.uid) _java_obj.setEstimatorParamMaps(epms) _java_obj.setEvaluator(evaluator) _java_obj.setEstimator(estimator) _java_obj.setSeed(self.getSeed()) _java_obj.setNumFolds(self.getNumFolds()) _java_obj.setParallelism(self.getParallelism()) _java_obj.setCollectSubModels(self.getCollectSubModels()) return _java_obj
python
def _to_java(self): """ Transfer this instance to a Java CrossValidator. Used for ML persistence. :return: Java object equivalent to this instance. """ estimator, epms, evaluator = super(CrossValidator, self)._to_java_impl() _java_obj = JavaParams._new_java_obj("org.apache.spark.ml.tuning.CrossValidator", self.uid) _java_obj.setEstimatorParamMaps(epms) _java_obj.setEvaluator(evaluator) _java_obj.setEstimator(estimator) _java_obj.setSeed(self.getSeed()) _java_obj.setNumFolds(self.getNumFolds()) _java_obj.setParallelism(self.getParallelism()) _java_obj.setCollectSubModels(self.getCollectSubModels()) return _java_obj
[ "def", "_to_java", "(", "self", ")", ":", "estimator", ",", "epms", ",", "evaluator", "=", "super", "(", "CrossValidator", ",", "self", ")", ".", "_to_java_impl", "(", ")", "_java_obj", "=", "JavaParams", ".", "_new_java_obj", "(", "\"org.apache.spark.ml.tuning.CrossValidator\"", ",", "self", ".", "uid", ")", "_java_obj", ".", "setEstimatorParamMaps", "(", "epms", ")", "_java_obj", ".", "setEvaluator", "(", "evaluator", ")", "_java_obj", ".", "setEstimator", "(", "estimator", ")", "_java_obj", ".", "setSeed", "(", "self", ".", "getSeed", "(", ")", ")", "_java_obj", ".", "setNumFolds", "(", "self", ".", "getNumFolds", "(", ")", ")", "_java_obj", ".", "setParallelism", "(", "self", ".", "getParallelism", "(", ")", ")", "_java_obj", ".", "setCollectSubModels", "(", "self", ".", "getCollectSubModels", "(", ")", ")", "return", "_java_obj" ]
Transfer this instance to a Java CrossValidator. Used for ML persistence. :return: Java object equivalent to this instance.
[ "Transfer", "this", "instance", "to", "a", "Java", "CrossValidator", ".", "Used", "for", "ML", "persistence", "." ]
618d6bff71073c8c93501ab7392c3cc579730f0b
https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/ml/tuning.py#L369-L387
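The _from_java/_to_java pair above backs ML persistence; a hedged sketch of the user-facing round trip, reusing the hypothetical `lr` and `grid` from the previous sketch and an illustrative output path:

    from pyspark.ml.evaluation import BinaryClassificationEvaluator
    from pyspark.ml.tuning import CrossValidator

    cv = CrossValidator(estimator=lr, estimatorParamMaps=grid,
                        evaluator=BinaryClassificationEvaluator(), numFolds=3)
    cv.save("/tmp/cv_demo")                          # serialised via _to_java()
    restored = CrossValidator.load("/tmp/cv_demo")   # reconstructed via _from_java()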
19,155
apache/spark
python/pyspark/ml/tuning.py
CrossValidatorModel.copy
def copy(self, extra=None): """ Creates a copy of this instance with a randomly generated uid and some extra params. This copies the underlying bestModel, creates a deep copy of the embedded paramMap, and copies the embedded and extra parameters over. It does not copy the extra Params into the subModels. :param extra: Extra parameters to copy to the new instance :return: Copy of this instance """ if extra is None: extra = dict() bestModel = self.bestModel.copy(extra) avgMetrics = self.avgMetrics subModels = self.subModels return CrossValidatorModel(bestModel, avgMetrics, subModels)
python
def copy(self, extra=None): """ Creates a copy of this instance with a randomly generated uid and some extra params. This copies the underlying bestModel, creates a deep copy of the embedded paramMap, and copies the embedded and extra parameters over. It does not copy the extra Params into the subModels. :param extra: Extra parameters to copy to the new instance :return: Copy of this instance """ if extra is None: extra = dict() bestModel = self.bestModel.copy(extra) avgMetrics = self.avgMetrics subModels = self.subModels return CrossValidatorModel(bestModel, avgMetrics, subModels)
[ "def", "copy", "(", "self", ",", "extra", "=", "None", ")", ":", "if", "extra", "is", "None", ":", "extra", "=", "dict", "(", ")", "bestModel", "=", "self", ".", "bestModel", ".", "copy", "(", "extra", ")", "avgMetrics", "=", "self", ".", "avgMetrics", "subModels", "=", "self", ".", "subModels", "return", "CrossValidatorModel", "(", "bestModel", ",", "avgMetrics", ",", "subModels", ")" ]
Creates a copy of this instance with a randomly generated uid and some extra params. This copies the underlying bestModel, creates a deep copy of the embedded paramMap, and copies the embedded and extra parameters over. It does not copy the extra Params into the subModels. :param extra: Extra parameters to copy to the new instance :return: Copy of this instance
[ "Creates", "a", "copy", "of", "this", "instance", "with", "a", "randomly", "generated", "uid", "and", "some", "extra", "params", ".", "This", "copies", "the", "underlying", "bestModel", "creates", "a", "deep", "copy", "of", "the", "embedded", "paramMap", "and", "copies", "the", "embedded", "and", "extra", "parameters", "over", ".", "It", "does", "not", "copy", "the", "extra", "Params", "into", "the", "subModels", "." ]
618d6bff71073c8c93501ab7392c3cc579730f0b
https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/ml/tuning.py#L414-L430
19,156
apache/spark
python/pyspark/ml/tuning.py
TrainValidationSplit.copy
def copy(self, extra=None): """ Creates a copy of this instance with a randomly generated uid and some extra params. This copies creates a deep copy of the embedded paramMap, and copies the embedded and extra parameters over. :param extra: Extra parameters to copy to the new instance :return: Copy of this instance """ if extra is None: extra = dict() newTVS = Params.copy(self, extra) if self.isSet(self.estimator): newTVS.setEstimator(self.getEstimator().copy(extra)) # estimatorParamMaps remain the same if self.isSet(self.evaluator): newTVS.setEvaluator(self.getEvaluator().copy(extra)) return newTVS
python
def copy(self, extra=None): """ Creates a copy of this instance with a randomly generated uid and some extra params. This copies creates a deep copy of the embedded paramMap, and copies the embedded and extra parameters over. :param extra: Extra parameters to copy to the new instance :return: Copy of this instance """ if extra is None: extra = dict() newTVS = Params.copy(self, extra) if self.isSet(self.estimator): newTVS.setEstimator(self.getEstimator().copy(extra)) # estimatorParamMaps remain the same if self.isSet(self.evaluator): newTVS.setEvaluator(self.getEvaluator().copy(extra)) return newTVS
[ "def", "copy", "(", "self", ",", "extra", "=", "None", ")", ":", "if", "extra", "is", "None", ":", "extra", "=", "dict", "(", ")", "newTVS", "=", "Params", ".", "copy", "(", "self", ",", "extra", ")", "if", "self", ".", "isSet", "(", "self", ".", "estimator", ")", ":", "newTVS", ".", "setEstimator", "(", "self", ".", "getEstimator", "(", ")", ".", "copy", "(", "extra", ")", ")", "# estimatorParamMaps remain the same", "if", "self", ".", "isSet", "(", "self", ".", "evaluator", ")", ":", "newTVS", ".", "setEvaluator", "(", "self", ".", "getEvaluator", "(", ")", ".", "copy", "(", "extra", ")", ")", "return", "newTVS" ]
Creates a copy of this instance with a randomly generated uid and some extra params. This creates a deep copy of the embedded paramMap, and copies the embedded and extra parameters over. :param extra: Extra parameters to copy to the new instance :return: Copy of this instance
[ "Creates", "a", "copy", "of", "this", "instance", "with", "a", "randomly", "generated", "uid", "and", "some", "extra", "params", ".", "This", "copies", "creates", "a", "deep", "copy", "of", "the", "embedded", "paramMap", "and", "copies", "the", "embedded", "and", "extra", "parameters", "over", "." ]
618d6bff71073c8c93501ab7392c3cc579730f0b
https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/ml/tuning.py#L598-L615
19,157
apache/spark
python/pyspark/ml/tuning.py
TrainValidationSplit._from_java
def _from_java(cls, java_stage): """ Given a Java TrainValidationSplit, create and return a Python wrapper of it. Used for ML persistence. """ estimator, epms, evaluator = super(TrainValidationSplit, cls)._from_java_impl(java_stage) trainRatio = java_stage.getTrainRatio() seed = java_stage.getSeed() parallelism = java_stage.getParallelism() collectSubModels = java_stage.getCollectSubModels() # Create a new instance of this stage. py_stage = cls(estimator=estimator, estimatorParamMaps=epms, evaluator=evaluator, trainRatio=trainRatio, seed=seed, parallelism=parallelism, collectSubModels=collectSubModels) py_stage._resetUid(java_stage.uid()) return py_stage
python
def _from_java(cls, java_stage): """ Given a Java TrainValidationSplit, create and return a Python wrapper of it. Used for ML persistence. """ estimator, epms, evaluator = super(TrainValidationSplit, cls)._from_java_impl(java_stage) trainRatio = java_stage.getTrainRatio() seed = java_stage.getSeed() parallelism = java_stage.getParallelism() collectSubModels = java_stage.getCollectSubModels() # Create a new instance of this stage. py_stage = cls(estimator=estimator, estimatorParamMaps=epms, evaluator=evaluator, trainRatio=trainRatio, seed=seed, parallelism=parallelism, collectSubModels=collectSubModels) py_stage._resetUid(java_stage.uid()) return py_stage
[ "def", "_from_java", "(", "cls", ",", "java_stage", ")", ":", "estimator", ",", "epms", ",", "evaluator", "=", "super", "(", "TrainValidationSplit", ",", "cls", ")", ".", "_from_java_impl", "(", "java_stage", ")", "trainRatio", "=", "java_stage", ".", "getTrainRatio", "(", ")", "seed", "=", "java_stage", ".", "getSeed", "(", ")", "parallelism", "=", "java_stage", ".", "getParallelism", "(", ")", "collectSubModels", "=", "java_stage", ".", "getCollectSubModels", "(", ")", "# Create a new instance of this stage.", "py_stage", "=", "cls", "(", "estimator", "=", "estimator", ",", "estimatorParamMaps", "=", "epms", ",", "evaluator", "=", "evaluator", ",", "trainRatio", "=", "trainRatio", ",", "seed", "=", "seed", ",", "parallelism", "=", "parallelism", ",", "collectSubModels", "=", "collectSubModels", ")", "py_stage", ".", "_resetUid", "(", "java_stage", ".", "uid", "(", ")", ")", "return", "py_stage" ]
Given a Java TrainValidationSplit, create and return a Python wrapper of it. Used for ML persistence.
[ "Given", "a", "Java", "TrainValidationSplit", "create", "and", "return", "a", "Python", "wrapper", "of", "it", ".", "Used", "for", "ML", "persistence", "." ]
618d6bff71073c8c93501ab7392c3cc579730f0b
https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/ml/tuning.py#L629-L645
19,158
apache/spark
python/pyspark/ml/tuning.py
TrainValidationSplitModel.copy
def copy(self, extra=None): """ Creates a copy of this instance with a randomly generated uid and some extra params. This copies the underlying bestModel, creates a deep copy of the embedded paramMap, and copies the embedded and extra parameters over. And, this creates a shallow copy of the validationMetrics. It does not copy the extra Params into the subModels. :param extra: Extra parameters to copy to the new instance :return: Copy of this instance """ if extra is None: extra = dict() bestModel = self.bestModel.copy(extra) validationMetrics = list(self.validationMetrics) subModels = self.subModels return TrainValidationSplitModel(bestModel, validationMetrics, subModels)
python
def copy(self, extra=None): """ Creates a copy of this instance with a randomly generated uid and some extra params. This copies the underlying bestModel, creates a deep copy of the embedded paramMap, and copies the embedded and extra parameters over. And, this creates a shallow copy of the validationMetrics. It does not copy the extra Params into the subModels. :param extra: Extra parameters to copy to the new instance :return: Copy of this instance """ if extra is None: extra = dict() bestModel = self.bestModel.copy(extra) validationMetrics = list(self.validationMetrics) subModels = self.subModels return TrainValidationSplitModel(bestModel, validationMetrics, subModels)
[ "def", "copy", "(", "self", ",", "extra", "=", "None", ")", ":", "if", "extra", "is", "None", ":", "extra", "=", "dict", "(", ")", "bestModel", "=", "self", ".", "bestModel", ".", "copy", "(", "extra", ")", "validationMetrics", "=", "list", "(", "self", ".", "validationMetrics", ")", "subModels", "=", "self", ".", "subModels", "return", "TrainValidationSplitModel", "(", "bestModel", ",", "validationMetrics", ",", "subModels", ")" ]
Creates a copy of this instance with a randomly generated uid and some extra params. This copies the underlying bestModel, creates a deep copy of the embedded paramMap, and copies the embedded and extra parameters over. And, this creates a shallow copy of the validationMetrics. It does not copy the extra Params into the subModels. :param extra: Extra parameters to copy to the new instance :return: Copy of this instance
[ "Creates", "a", "copy", "of", "this", "instance", "with", "a", "randomly", "generated", "uid", "and", "some", "extra", "params", ".", "This", "copies", "the", "underlying", "bestModel", "creates", "a", "deep", "copy", "of", "the", "embedded", "paramMap", "and", "copies", "the", "embedded", "and", "extra", "parameters", "over", ".", "And", "this", "creates", "a", "shallow", "copy", "of", "the", "validationMetrics", ".", "It", "does", "not", "copy", "the", "extra", "Params", "into", "the", "subModels", "." ]
618d6bff71073c8c93501ab7392c3cc579730f0b
https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/ml/tuning.py#L689-L706
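A short sketch of copy() on a fitted model, continuing the hypothetical tvs and train objects from the sketch above (so it is not self-contained on its own):

tvs_model = tvs.fit(train)          # TrainValidationSplitModel

model_copy = tvs_model.copy()
# bestModel is copied; validationMetrics is a new list with the same values;
# subModels (if collected) are shared rather than copied, as the docstring notes.
print(model_copy.validationMetrics == tvs_model.validationMetrics)   # True: same metric values
print(model_copy.validationMetrics is tvs_model.validationMetrics)   # False: shallow list copy
print(model_copy.bestModel is tvs_model.bestModel)                   # False: bestModel.copy() was used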
19,159
apache/spark
python/pyspark/ml/tuning.py
TrainValidationSplitModel._from_java
def _from_java(cls, java_stage): """ Given a Java TrainValidationSplitModel, create and return a Python wrapper of it. Used for ML persistence. """ # Load information from java_stage to the instance. bestModel = JavaParams._from_java(java_stage.bestModel()) estimator, epms, evaluator = super(TrainValidationSplitModel, cls)._from_java_impl(java_stage) # Create a new instance of this stage. py_stage = cls(bestModel=bestModel).setEstimator(estimator) py_stage = py_stage.setEstimatorParamMaps(epms).setEvaluator(evaluator) if java_stage.hasSubModels(): py_stage.subModels = [JavaParams._from_java(sub_model) for sub_model in java_stage.subModels()] py_stage._resetUid(java_stage.uid()) return py_stage
python
def _from_java(cls, java_stage): """ Given a Java TrainValidationSplitModel, create and return a Python wrapper of it. Used for ML persistence. """ # Load information from java_stage to the instance. bestModel = JavaParams._from_java(java_stage.bestModel()) estimator, epms, evaluator = super(TrainValidationSplitModel, cls)._from_java_impl(java_stage) # Create a new instance of this stage. py_stage = cls(bestModel=bestModel).setEstimator(estimator) py_stage = py_stage.setEstimatorParamMaps(epms).setEvaluator(evaluator) if java_stage.hasSubModels(): py_stage.subModels = [JavaParams._from_java(sub_model) for sub_model in java_stage.subModels()] py_stage._resetUid(java_stage.uid()) return py_stage
[ "def", "_from_java", "(", "cls", ",", "java_stage", ")", ":", "# Load information from java_stage to the instance.", "bestModel", "=", "JavaParams", ".", "_from_java", "(", "java_stage", ".", "bestModel", "(", ")", ")", "estimator", ",", "epms", ",", "evaluator", "=", "super", "(", "TrainValidationSplitModel", ",", "cls", ")", ".", "_from_java_impl", "(", "java_stage", ")", "# Create a new instance of this stage.", "py_stage", "=", "cls", "(", "bestModel", "=", "bestModel", ")", ".", "setEstimator", "(", "estimator", ")", "py_stage", "=", "py_stage", ".", "setEstimatorParamMaps", "(", "epms", ")", ".", "setEvaluator", "(", "evaluator", ")", "if", "java_stage", ".", "hasSubModels", "(", ")", ":", "py_stage", ".", "subModels", "=", "[", "JavaParams", ".", "_from_java", "(", "sub_model", ")", "for", "sub_model", "in", "java_stage", ".", "subModels", "(", ")", "]", "py_stage", ".", "_resetUid", "(", "java_stage", ".", "uid", "(", ")", ")", "return", "py_stage" ]
Given a Java TrainValidationSplitModel, create and return a Python wrapper of it. Used for ML persistence.
[ "Given", "a", "Java", "TrainValidationSplitModel", "create", "and", "return", "a", "Python", "wrapper", "of", "it", ".", "Used", "for", "ML", "persistence", "." ]
618d6bff71073c8c93501ab7392c3cc579730f0b
https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/ml/tuning.py#L720-L739
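Continuing the same hypothetical example, a sketch of persisting the fitted model and loading it back, which is the path that calls TrainValidationSplitModel._from_java; the path is again made up.

from pyspark.ml.tuning import TrainValidationSplitModel

tvs_model.write().overwrite().save("/tmp/tvs_model_example")
reloaded = TrainValidationSplitModel.load("/tmp/tvs_model_example")

# The reloaded wrapper exposes the best model and can score new data directly.
print(type(reloaded.bestModel).__name__)        # e.g. LogisticRegressionModel
reloaded.transform(train).select("label", "prediction").show()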
19,160
apache/spark
python/pyspark/sql/conf.py
RuntimeConfig.get
def get(self, key, default=_NoValue): """Returns the value of Spark runtime configuration property for the given key, assuming it is set. """ self._checkType(key, "key") if default is _NoValue: return self._jconf.get(key) else: if default is not None: self._checkType(default, "default") return self._jconf.get(key, default)
python
def get(self, key, default=_NoValue): """Returns the value of Spark runtime configuration property for the given key, assuming it is set. """ self._checkType(key, "key") if default is _NoValue: return self._jconf.get(key) else: if default is not None: self._checkType(default, "default") return self._jconf.get(key, default)
[ "def", "get", "(", "self", ",", "key", ",", "default", "=", "_NoValue", ")", ":", "self", ".", "_checkType", "(", "key", ",", "\"key\"", ")", "if", "default", "is", "_NoValue", ":", "return", "self", ".", "_jconf", ".", "get", "(", "key", ")", "else", ":", "if", "default", "is", "not", "None", ":", "self", ".", "_checkType", "(", "default", ",", "\"default\"", ")", "return", "self", ".", "_jconf", ".", "get", "(", "key", ",", "default", ")" ]
Returns the value of Spark runtime configuration property for the given key, assuming it is set.
[ "Returns", "the", "value", "of", "Spark", "runtime", "configuration", "property", "for", "the", "given", "key", "assuming", "it", "is", "set", "." ]
618d6bff71073c8c93501ab7392c3cc579730f0b
https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/sql/conf.py#L45-L55
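A minimal sketch of the runtime-config accessor documented above, as exposed through spark.conf; the keys and values are arbitrary examples.

from pyspark.sql import SparkSession

spark = SparkSession.builder.getOrCreate()

spark.conf.set("spark.sql.shuffle.partitions", "64")
print(spark.conf.get("spark.sql.shuffle.partitions"))        # '64'
print(spark.conf.get("spark.some.unset.key", "fallback"))    # default is returned
print(spark.conf.get("spark.some.unset.key", None))          # None is an allowed default
# With no default at all, an unset key raises an error from the JVM side.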
19,161
apache/spark
python/pyspark/sql/conf.py
RuntimeConfig._checkType
def _checkType(self, obj, identifier): """Assert that an object is of type str.""" if not isinstance(obj, basestring): raise TypeError("expected %s '%s' to be a string (was '%s')" % (identifier, obj, type(obj).__name__))
python
def _checkType(self, obj, identifier): """Assert that an object is of type str.""" if not isinstance(obj, basestring): raise TypeError("expected %s '%s' to be a string (was '%s')" % (identifier, obj, type(obj).__name__))
[ "def", "_checkType", "(", "self", ",", "obj", ",", "identifier", ")", ":", "if", "not", "isinstance", "(", "obj", ",", "basestring", ")", ":", "raise", "TypeError", "(", "\"expected %s '%s' to be a string (was '%s')\"", "%", "(", "identifier", ",", "obj", ",", "type", "(", "obj", ")", ".", "__name__", ")", ")" ]
Assert that an object is of type str.
[ "Assert", "that", "an", "object", "is", "of", "type", "str", "." ]
618d6bff71073c8c93501ab7392c3cc579730f0b
https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/sql/conf.py#L63-L67
19,162
apache/spark
python/pyspark/sql/functions.py
_create_function
def _create_function(name, doc=""): """Create a PySpark function by its name""" def _(col): sc = SparkContext._active_spark_context jc = getattr(sc._jvm.functions, name)(col._jc if isinstance(col, Column) else col) return Column(jc) _.__name__ = name _.__doc__ = doc return _
python
def _create_function(name, doc=""): """Create a PySpark function by its name""" def _(col): sc = SparkContext._active_spark_context jc = getattr(sc._jvm.functions, name)(col._jc if isinstance(col, Column) else col) return Column(jc) _.__name__ = name _.__doc__ = doc return _
[ "def", "_create_function", "(", "name", ",", "doc", "=", "\"\"", ")", ":", "def", "_", "(", "col", ")", ":", "sc", "=", "SparkContext", ".", "_active_spark_context", "jc", "=", "getattr", "(", "sc", ".", "_jvm", ".", "functions", ",", "name", ")", "(", "col", ".", "_jc", "if", "isinstance", "(", "col", ",", "Column", ")", "else", "col", ")", "return", "Column", "(", "jc", ")", "_", ".", "__name__", "=", "name", "_", ".", "__doc__", "=", "doc", "return", "_" ]
Create a PySpark function by its name
[ "Create", "a", "PySpark", "function", "by", "its", "name" ]
618d6bff71073c8c93501ab7392c3cc579730f0b
https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/sql/functions.py#L47-L55
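_create_function is an internal factory, not meant to be called by users; the pattern it implements (look up a callable by name, wrap it in a closure, fix __name__ and __doc__) can be sketched stand-alone like this, with Python's math module standing in for the JVM functions object.

import math

def _make_unary(name, doc=""):
    """Build a named wrapper around math.<name>, mirroring the factory above."""
    def _(x):
        return getattr(math, name)(x)
    _.__name__ = name
    _.__doc__ = doc
    return _

sqrt = _make_unary("sqrt", "Return the square root of x.")
print(sqrt.__name__, sqrt.__doc__)
print(sqrt(16.0))   # 4.0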
19,163
apache/spark
python/pyspark/sql/functions.py
_wrap_deprecated_function
def _wrap_deprecated_function(func, message): """ Wrap the deprecated function to print out deprecation warnings""" def _(col): warnings.warn(message, DeprecationWarning) return func(col) return functools.wraps(func)(_)
python
def _wrap_deprecated_function(func, message): """ Wrap the deprecated function to print out deprecation warnings""" def _(col): warnings.warn(message, DeprecationWarning) return func(col) return functools.wraps(func)(_)
[ "def", "_wrap_deprecated_function", "(", "func", ",", "message", ")", ":", "def", "_", "(", "col", ")", ":", "warnings", ".", "warn", "(", "message", ",", "DeprecationWarning", ")", "return", "func", "(", "col", ")", "return", "functools", ".", "wraps", "(", "func", ")", "(", "_", ")" ]
Wrap the deprecated function to print out deprecation warnings
[ "Wrap", "the", "deprecated", "function", "to", "print", "out", "deprecation", "warnings" ]
618d6bff71073c8c93501ab7392c3cc579730f0b
https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/sql/functions.py#L72-L77
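The deprecation wrapper above follows a standard pattern; a self-contained sketch (with a made-up function standing in for a real PySpark one) shows the warning being emitted before delegation.

import functools
import warnings

def _wrap_deprecated(func, message):
    # Same shape as the wrapper in the record: warn, then delegate unchanged.
    def _(col):
        warnings.warn(message, DeprecationWarning)
        return func(col)
    return functools.wraps(func)(_)

def degrees(x):
    """Convert radians to degrees."""
    return x * 180.0 / 3.141592653589793

toDegrees = _wrap_deprecated(degrees, "Deprecated, use degrees instead.")

with warnings.catch_warnings(record=True) as caught:
    warnings.simplefilter("always")
    print(toDegrees(3.141592653589793))       # 180.0
    print(caught[0].category.__name__)        # DeprecationWarning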
19,164
apache/spark
python/pyspark/sql/functions.py
_create_binary_mathfunction
def _create_binary_mathfunction(name, doc=""): """ Create a binary mathfunction by name""" def _(col1, col2): sc = SparkContext._active_spark_context # For legacy reasons, the arguments here can be implicitly converted into floats, # if they are not columns or strings. if isinstance(col1, Column): arg1 = col1._jc elif isinstance(col1, basestring): arg1 = _create_column_from_name(col1) else: arg1 = float(col1) if isinstance(col2, Column): arg2 = col2._jc elif isinstance(col2, basestring): arg2 = _create_column_from_name(col2) else: arg2 = float(col2) jc = getattr(sc._jvm.functions, name)(arg1, arg2) return Column(jc) _.__name__ = name _.__doc__ = doc return _
python
def _create_binary_mathfunction(name, doc=""): """ Create a binary mathfunction by name""" def _(col1, col2): sc = SparkContext._active_spark_context # For legacy reasons, the arguments here can be implicitly converted into floats, # if they are not columns or strings. if isinstance(col1, Column): arg1 = col1._jc elif isinstance(col1, basestring): arg1 = _create_column_from_name(col1) else: arg1 = float(col1) if isinstance(col2, Column): arg2 = col2._jc elif isinstance(col2, basestring): arg2 = _create_column_from_name(col2) else: arg2 = float(col2) jc = getattr(sc._jvm.functions, name)(arg1, arg2) return Column(jc) _.__name__ = name _.__doc__ = doc return _
[ "def", "_create_binary_mathfunction", "(", "name", ",", "doc", "=", "\"\"", ")", ":", "def", "_", "(", "col1", ",", "col2", ")", ":", "sc", "=", "SparkContext", ".", "_active_spark_context", "# For legacy reasons, the arguments here can be implicitly converted into floats,", "# if they are not columns or strings.", "if", "isinstance", "(", "col1", ",", "Column", ")", ":", "arg1", "=", "col1", ".", "_jc", "elif", "isinstance", "(", "col1", ",", "basestring", ")", ":", "arg1", "=", "_create_column_from_name", "(", "col1", ")", "else", ":", "arg1", "=", "float", "(", "col1", ")", "if", "isinstance", "(", "col2", ",", "Column", ")", ":", "arg2", "=", "col2", ".", "_jc", "elif", "isinstance", "(", "col2", ",", "basestring", ")", ":", "arg2", "=", "_create_column_from_name", "(", "col2", ")", "else", ":", "arg2", "=", "float", "(", "col2", ")", "jc", "=", "getattr", "(", "sc", ".", "_jvm", ".", "functions", ",", "name", ")", "(", "arg1", ",", "arg2", ")", "return", "Column", "(", "jc", ")", "_", ".", "__name__", "=", "name", "_", ".", "__doc__", "=", "doc", "return", "_" ]
Create a binary mathfunction by name
[ "Create", "a", "binary", "mathfunction", "by", "name" ]
618d6bff71073c8c93501ab7392c3cc579730f0b
https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/sql/functions.py#L80-L104
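A usage sketch for the binary math functions this factory generates (atan2, hypot, pow, ...), showing that each argument may be a Column, a column name, or a plain float; the DataFrame is made up.

from pyspark.sql import SparkSession
from pyspark.sql.functions import atan2, hypot, pow

spark = SparkSession.builder.getOrCreate()
df = spark.createDataFrame([(3.0, 4.0)], ["x", "y"])

df.select(hypot(df.x, df.y).alias("h"),       # Column, Column
          atan2("y", "x").alias("theta"),     # column names
          pow(df.x, 2.0).alias("x_squared")   # Column, float literal
          ).show()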
19,165
apache/spark
python/pyspark/sql/functions.py
_create_window_function
def _create_window_function(name, doc=''): """ Create a window function by name """ def _(): sc = SparkContext._active_spark_context jc = getattr(sc._jvm.functions, name)() return Column(jc) _.__name__ = name _.__doc__ = 'Window function: ' + doc return _
python
def _create_window_function(name, doc=''): """ Create a window function by name """ def _(): sc = SparkContext._active_spark_context jc = getattr(sc._jvm.functions, name)() return Column(jc) _.__name__ = name _.__doc__ = 'Window function: ' + doc return _
[ "def", "_create_window_function", "(", "name", ",", "doc", "=", "''", ")", ":", "def", "_", "(", ")", ":", "sc", "=", "SparkContext", ".", "_active_spark_context", "jc", "=", "getattr", "(", "sc", ".", "_jvm", ".", "functions", ",", "name", ")", "(", ")", "return", "Column", "(", "jc", ")", "_", ".", "__name__", "=", "name", "_", ".", "__doc__", "=", "'Window function: '", "+", "doc", "return", "_" ]
Create a window function by name
[ "Create", "a", "window", "function", "by", "name" ]
618d6bff71073c8c93501ab7392c3cc579730f0b
https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/sql/functions.py#L107-L115
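The window functions produced by this factory (row_number, dense_rank, cume_dist, ...) take no arguments and must be combined with a WindowSpec via .over(); a small sketch with made-up data follows.

from pyspark.sql import SparkSession, Window
from pyspark.sql.functions import row_number, dense_rank

spark = SparkSession.builder.getOrCreate()
df = spark.createDataFrame([("a", 1), ("a", 2), ("b", 3)], ["grp", "val"])

w = Window.partitionBy("grp").orderBy("val")
df.select("grp", "val",
          row_number().over(w).alias("rn"),
          dense_rank().over(w).alias("rank")).show()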
19,166
apache/spark
python/pyspark/sql/functions.py
broadcast
def broadcast(df): """Marks a DataFrame as small enough for use in broadcast joins.""" sc = SparkContext._active_spark_context return DataFrame(sc._jvm.functions.broadcast(df._jdf), df.sql_ctx)
python
def broadcast(df): """Marks a DataFrame as small enough for use in broadcast joins.""" sc = SparkContext._active_spark_context return DataFrame(sc._jvm.functions.broadcast(df._jdf), df.sql_ctx)
[ "def", "broadcast", "(", "df", ")", ":", "sc", "=", "SparkContext", ".", "_active_spark_context", "return", "DataFrame", "(", "sc", ".", "_jvm", ".", "functions", ".", "broadcast", "(", "df", ".", "_jdf", ")", ",", "df", ".", "sql_ctx", ")" ]
Marks a DataFrame as small enough for use in broadcast joins.
[ "Marks", "a", "DataFrame", "as", "small", "enough", "for", "use", "in", "broadcast", "joins", "." ]
618d6bff71073c8c93501ab7392c3cc579730f0b
https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/sql/functions.py#L333-L337
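A usage sketch for broadcast(): marking the small dimension table so the planner prefers a broadcast hash join; the tables are made up.

from pyspark.sql import SparkSession
from pyspark.sql.functions import broadcast

spark = SparkSession.builder.getOrCreate()
facts = spark.createDataFrame([(1, 10.0), (2, 20.0)], ["dim_id", "amount"])
dims = spark.createDataFrame([(1, "one"), (2, "two")], ["dim_id", "name"])

joined = facts.join(broadcast(dims), "dim_id")
joined.explain()     # the physical plan should show a broadcast hash join
joined.show()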
19,167
apache/spark
python/pyspark/sql/functions.py
nanvl
def nanvl(col1, col2): """Returns col1 if it is not NaN, or col2 if col1 is NaN. Both inputs should be floating point columns (:class:`DoubleType` or :class:`FloatType`). >>> df = spark.createDataFrame([(1.0, float('nan')), (float('nan'), 2.0)], ("a", "b")) >>> df.select(nanvl("a", "b").alias("r1"), nanvl(df.a, df.b).alias("r2")).collect() [Row(r1=1.0, r2=1.0), Row(r1=2.0, r2=2.0)] """ sc = SparkContext._active_spark_context return Column(sc._jvm.functions.nanvl(_to_java_column(col1), _to_java_column(col2)))
python
def nanvl(col1, col2): """Returns col1 if it is not NaN, or col2 if col1 is NaN. Both inputs should be floating point columns (:class:`DoubleType` or :class:`FloatType`). >>> df = spark.createDataFrame([(1.0, float('nan')), (float('nan'), 2.0)], ("a", "b")) >>> df.select(nanvl("a", "b").alias("r1"), nanvl(df.a, df.b).alias("r2")).collect() [Row(r1=1.0, r2=1.0), Row(r1=2.0, r2=2.0)] """ sc = SparkContext._active_spark_context return Column(sc._jvm.functions.nanvl(_to_java_column(col1), _to_java_column(col2)))
[ "def", "nanvl", "(", "col1", ",", "col2", ")", ":", "sc", "=", "SparkContext", ".", "_active_spark_context", "return", "Column", "(", "sc", ".", "_jvm", ".", "functions", ".", "nanvl", "(", "_to_java_column", "(", "col1", ")", ",", "_to_java_column", "(", "col2", ")", ")", ")" ]
Returns col1 if it is not NaN, or col2 if col1 is NaN. Both inputs should be floating point columns (:class:`DoubleType` or :class:`FloatType`). >>> df = spark.createDataFrame([(1.0, float('nan')), (float('nan'), 2.0)], ("a", "b")) >>> df.select(nanvl("a", "b").alias("r1"), nanvl(df.a, df.b).alias("r2")).collect() [Row(r1=1.0, r2=1.0), Row(r1=2.0, r2=2.0)]
[ "Returns", "col1", "if", "it", "is", "not", "NaN", "or", "col2", "if", "col1", "is", "NaN", "." ]
618d6bff71073c8c93501ab7392c3cc579730f0b
https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/sql/functions.py#L565-L575
19,168
apache/spark
python/pyspark/sql/functions.py
shiftLeft
def shiftLeft(col, numBits): """Shift the given value numBits left. >>> spark.createDataFrame([(21,)], ['a']).select(shiftLeft('a', 1).alias('r')).collect() [Row(r=42)] """ sc = SparkContext._active_spark_context return Column(sc._jvm.functions.shiftLeft(_to_java_column(col), numBits))
python
def shiftLeft(col, numBits): """Shift the given value numBits left. >>> spark.createDataFrame([(21,)], ['a']).select(shiftLeft('a', 1).alias('r')).collect() [Row(r=42)] """ sc = SparkContext._active_spark_context return Column(sc._jvm.functions.shiftLeft(_to_java_column(col), numBits))
[ "def", "shiftLeft", "(", "col", ",", "numBits", ")", ":", "sc", "=", "SparkContext", ".", "_active_spark_context", "return", "Column", "(", "sc", ".", "_jvm", ".", "functions", ".", "shiftLeft", "(", "_to_java_column", "(", "col", ")", ",", "numBits", ")", ")" ]
Shift the given value numBits left. >>> spark.createDataFrame([(21,)], ['a']).select(shiftLeft('a', 1).alias('r')).collect() [Row(r=42)]
[ "Shift", "the", "given", "value", "numBits", "left", "." ]
618d6bff71073c8c93501ab7392c3cc579730f0b
https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/sql/functions.py#L645-L652
19,169
apache/spark
python/pyspark/sql/functions.py
expr
def expr(str): """Parses the expression string into the column that it represents >>> df.select(expr("length(name)")).collect() [Row(length(name)=5), Row(length(name)=3)] """ sc = SparkContext._active_spark_context return Column(sc._jvm.functions.expr(str))
python
def expr(str): """Parses the expression string into the column that it represents >>> df.select(expr("length(name)")).collect() [Row(length(name)=5), Row(length(name)=3)] """ sc = SparkContext._active_spark_context return Column(sc._jvm.functions.expr(str))
[ "def", "expr", "(", "str", ")", ":", "sc", "=", "SparkContext", ".", "_active_spark_context", "return", "Column", "(", "sc", ".", "_jvm", ".", "functions", ".", "expr", "(", "str", ")", ")" ]
Parses the expression string into the column that it represents >>> df.select(expr("length(name)")).collect() [Row(length(name)=5), Row(length(name)=3)]
[ "Parses", "the", "expression", "string", "into", "the", "column", "that", "it", "represents" ]
618d6bff71073c8c93501ab7392c3cc579730f0b
https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/sql/functions.py#L694-L701
19,170
apache/spark
python/pyspark/sql/functions.py
log
def log(arg1, arg2=None): """Returns the first argument-based logarithm of the second argument. If there is only one argument, then this takes the natural logarithm of the argument. >>> df.select(log(10.0, df.age).alias('ten')).rdd.map(lambda l: str(l.ten)[:7]).collect() ['0.30102', '0.69897'] >>> df.select(log(df.age).alias('e')).rdd.map(lambda l: str(l.e)[:7]).collect() ['0.69314', '1.60943'] """ sc = SparkContext._active_spark_context if arg2 is None: jc = sc._jvm.functions.log(_to_java_column(arg1)) else: jc = sc._jvm.functions.log(arg1, _to_java_column(arg2)) return Column(jc)
python
def log(arg1, arg2=None): """Returns the first argument-based logarithm of the second argument. If there is only one argument, then this takes the natural logarithm of the argument. >>> df.select(log(10.0, df.age).alias('ten')).rdd.map(lambda l: str(l.ten)[:7]).collect() ['0.30102', '0.69897'] >>> df.select(log(df.age).alias('e')).rdd.map(lambda l: str(l.e)[:7]).collect() ['0.69314', '1.60943'] """ sc = SparkContext._active_spark_context if arg2 is None: jc = sc._jvm.functions.log(_to_java_column(arg1)) else: jc = sc._jvm.functions.log(arg1, _to_java_column(arg2)) return Column(jc)
[ "def", "log", "(", "arg1", ",", "arg2", "=", "None", ")", ":", "sc", "=", "SparkContext", ".", "_active_spark_context", "if", "arg2", "is", "None", ":", "jc", "=", "sc", ".", "_jvm", ".", "functions", ".", "log", "(", "_to_java_column", "(", "arg1", ")", ")", "else", ":", "jc", "=", "sc", ".", "_jvm", ".", "functions", ".", "log", "(", "arg1", ",", "_to_java_column", "(", "arg2", ")", ")", "return", "Column", "(", "jc", ")" ]
Returns the first argument-based logarithm of the second argument. If there is only one argument, then this takes the natural logarithm of the argument. >>> df.select(log(10.0, df.age).alias('ten')).rdd.map(lambda l: str(l.ten)[:7]).collect() ['0.30102', '0.69897'] >>> df.select(log(df.age).alias('e')).rdd.map(lambda l: str(l.e)[:7]).collect() ['0.69314', '1.60943']
[ "Returns", "the", "first", "argument", "-", "based", "logarithm", "of", "the", "second", "argument", "." ]
618d6bff71073c8c93501ab7392c3cc579730f0b
https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/sql/functions.py#L778-L794
19,171
apache/spark
python/pyspark/sql/functions.py
conv
def conv(col, fromBase, toBase): """ Convert a number in a string column from one base to another. >>> df = spark.createDataFrame([("010101",)], ['n']) >>> df.select(conv(df.n, 2, 16).alias('hex')).collect() [Row(hex=u'15')] """ sc = SparkContext._active_spark_context return Column(sc._jvm.functions.conv(_to_java_column(col), fromBase, toBase))
python
def conv(col, fromBase, toBase): """ Convert a number in a string column from one base to another. >>> df = spark.createDataFrame([("010101",)], ['n']) >>> df.select(conv(df.n, 2, 16).alias('hex')).collect() [Row(hex=u'15')] """ sc = SparkContext._active_spark_context return Column(sc._jvm.functions.conv(_to_java_column(col), fromBase, toBase))
[ "def", "conv", "(", "col", ",", "fromBase", ",", "toBase", ")", ":", "sc", "=", "SparkContext", ".", "_active_spark_context", "return", "Column", "(", "sc", ".", "_jvm", ".", "functions", ".", "conv", "(", "_to_java_column", "(", "col", ")", ",", "fromBase", ",", "toBase", ")", ")" ]
Convert a number in a string column from one base to another. >>> df = spark.createDataFrame([("010101",)], ['n']) >>> df.select(conv(df.n, 2, 16).alias('hex')).collect() [Row(hex=u'15')]
[ "Convert", "a", "number", "in", "a", "string", "column", "from", "one", "base", "to", "another", "." ]
618d6bff71073c8c93501ab7392c3cc579730f0b
https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/sql/functions.py#L810-L819
19,172
apache/spark
python/pyspark/sql/functions.py
date_add
def date_add(start, days): """ Returns the date that is `days` days after `start` >>> df = spark.createDataFrame([('2015-04-08',)], ['dt']) >>> df.select(date_add(df.dt, 1).alias('next_date')).collect() [Row(next_date=datetime.date(2015, 4, 9))] """ sc = SparkContext._active_spark_context return Column(sc._jvm.functions.date_add(_to_java_column(start), days))
python
def date_add(start, days): """ Returns the date that is `days` days after `start` >>> df = spark.createDataFrame([('2015-04-08',)], ['dt']) >>> df.select(date_add(df.dt, 1).alias('next_date')).collect() [Row(next_date=datetime.date(2015, 4, 9))] """ sc = SparkContext._active_spark_context return Column(sc._jvm.functions.date_add(_to_java_column(start), days))
[ "def", "date_add", "(", "start", ",", "days", ")", ":", "sc", "=", "SparkContext", ".", "_active_spark_context", "return", "Column", "(", "sc", ".", "_jvm", ".", "functions", ".", "date_add", "(", "_to_java_column", "(", "start", ")", ",", "days", ")", ")" ]
Returns the date that is `days` days after `start` >>> df = spark.createDataFrame([('2015-04-08',)], ['dt']) >>> df.select(date_add(df.dt, 1).alias('next_date')).collect() [Row(next_date=datetime.date(2015, 4, 9))]
[ "Returns", "the", "date", "that", "is", "days", "days", "after", "start" ]
618d6bff71073c8c93501ab7392c3cc579730f0b
https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/sql/functions.py#L1058-L1067
19,173
apache/spark
python/pyspark/sql/functions.py
datediff
def datediff(end, start): """ Returns the number of days from `start` to `end`. >>> df = spark.createDataFrame([('2015-04-08','2015-05-10')], ['d1', 'd2']) >>> df.select(datediff(df.d2, df.d1).alias('diff')).collect() [Row(diff=32)] """ sc = SparkContext._active_spark_context return Column(sc._jvm.functions.datediff(_to_java_column(end), _to_java_column(start)))
python
def datediff(end, start): """ Returns the number of days from `start` to `end`. >>> df = spark.createDataFrame([('2015-04-08','2015-05-10')], ['d1', 'd2']) >>> df.select(datediff(df.d2, df.d1).alias('diff')).collect() [Row(diff=32)] """ sc = SparkContext._active_spark_context return Column(sc._jvm.functions.datediff(_to_java_column(end), _to_java_column(start)))
[ "def", "datediff", "(", "end", ",", "start", ")", ":", "sc", "=", "SparkContext", ".", "_active_spark_context", "return", "Column", "(", "sc", ".", "_jvm", ".", "functions", ".", "datediff", "(", "_to_java_column", "(", "end", ")", ",", "_to_java_column", "(", "start", ")", ")", ")" ]
Returns the number of days from `start` to `end`. >>> df = spark.createDataFrame([('2015-04-08','2015-05-10')], ['d1', 'd2']) >>> df.select(datediff(df.d2, df.d1).alias('diff')).collect() [Row(diff=32)]
[ "Returns", "the", "number", "of", "days", "from", "start", "to", "end", "." ]
618d6bff71073c8c93501ab7392c3cc579730f0b
https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/sql/functions.py#L1084-L1093
19,174
apache/spark
python/pyspark/sql/functions.py
add_months
def add_months(start, months): """ Returns the date that is `months` months after `start` >>> df = spark.createDataFrame([('2015-04-08',)], ['dt']) >>> df.select(add_months(df.dt, 1).alias('next_month')).collect() [Row(next_month=datetime.date(2015, 5, 8))] """ sc = SparkContext._active_spark_context return Column(sc._jvm.functions.add_months(_to_java_column(start), months))
python
def add_months(start, months): """ Returns the date that is `months` months after `start` >>> df = spark.createDataFrame([('2015-04-08',)], ['dt']) >>> df.select(add_months(df.dt, 1).alias('next_month')).collect() [Row(next_month=datetime.date(2015, 5, 8))] """ sc = SparkContext._active_spark_context return Column(sc._jvm.functions.add_months(_to_java_column(start), months))
[ "def", "add_months", "(", "start", ",", "months", ")", ":", "sc", "=", "SparkContext", ".", "_active_spark_context", "return", "Column", "(", "sc", ".", "_jvm", ".", "functions", ".", "add_months", "(", "_to_java_column", "(", "start", ")", ",", "months", ")", ")" ]
Returns the date that is `months` months after `start` >>> df = spark.createDataFrame([('2015-04-08',)], ['dt']) >>> df.select(add_months(df.dt, 1).alias('next_month')).collect() [Row(next_month=datetime.date(2015, 5, 8))]
[ "Returns", "the", "date", "that", "is", "months", "months", "after", "start" ]
618d6bff71073c8c93501ab7392c3cc579730f0b
https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/sql/functions.py#L1097-L1106
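A combined usage sketch for the date arithmetic helpers in the last three records (date_add, datediff, add_months); the dates are the same arbitrary examples used in their doctests.

from pyspark.sql import SparkSession
from pyspark.sql.functions import date_add, datediff, add_months

spark = SparkSession.builder.getOrCreate()
df = spark.createDataFrame([("2015-04-08", "2015-05-10")], ["d1", "d2"])

df.select(date_add(df.d1, 7).alias("plus_week"),
          add_months(df.d1, 3).alias("plus_quarter"),
          datediff(df.d2, df.d1).alias("gap_days")).show()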
19,175
apache/spark
python/pyspark/sql/functions.py
date_trunc
def date_trunc(format, timestamp): """ Returns timestamp truncated to the unit specified by the format. :param format: 'year', 'yyyy', 'yy', 'month', 'mon', 'mm', 'day', 'dd', 'hour', 'minute', 'second', 'week', 'quarter' >>> df = spark.createDataFrame([('1997-02-28 05:02:11',)], ['t']) >>> df.select(date_trunc('year', df.t).alias('year')).collect() [Row(year=datetime.datetime(1997, 1, 1, 0, 0))] >>> df.select(date_trunc('mon', df.t).alias('month')).collect() [Row(month=datetime.datetime(1997, 2, 1, 0, 0))] """ sc = SparkContext._active_spark_context return Column(sc._jvm.functions.date_trunc(format, _to_java_column(timestamp)))
python
def date_trunc(format, timestamp): """ Returns timestamp truncated to the unit specified by the format. :param format: 'year', 'yyyy', 'yy', 'month', 'mon', 'mm', 'day', 'dd', 'hour', 'minute', 'second', 'week', 'quarter' >>> df = spark.createDataFrame([('1997-02-28 05:02:11',)], ['t']) >>> df.select(date_trunc('year', df.t).alias('year')).collect() [Row(year=datetime.datetime(1997, 1, 1, 0, 0))] >>> df.select(date_trunc('mon', df.t).alias('month')).collect() [Row(month=datetime.datetime(1997, 2, 1, 0, 0))] """ sc = SparkContext._active_spark_context return Column(sc._jvm.functions.date_trunc(format, _to_java_column(timestamp)))
[ "def", "date_trunc", "(", "format", ",", "timestamp", ")", ":", "sc", "=", "SparkContext", ".", "_active_spark_context", "return", "Column", "(", "sc", ".", "_jvm", ".", "functions", ".", "date_trunc", "(", "format", ",", "_to_java_column", "(", "timestamp", ")", ")", ")" ]
Returns timestamp truncated to the unit specified by the format. :param format: 'year', 'yyyy', 'yy', 'month', 'mon', 'mm', 'day', 'dd', 'hour', 'minute', 'second', 'week', 'quarter' >>> df = spark.createDataFrame([('1997-02-28 05:02:11',)], ['t']) >>> df.select(date_trunc('year', df.t).alias('year')).collect() [Row(year=datetime.datetime(1997, 1, 1, 0, 0))] >>> df.select(date_trunc('mon', df.t).alias('month')).collect() [Row(month=datetime.datetime(1997, 2, 1, 0, 0))]
[ "Returns", "timestamp", "truncated", "to", "the", "unit", "specified", "by", "the", "format", "." ]
618d6bff71073c8c93501ab7392c3cc579730f0b
https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/sql/functions.py#L1197-L1211
19,176
apache/spark
python/pyspark/sql/functions.py
next_day
def next_day(date, dayOfWeek): """ Returns the first date which is later than the value of the date column. Day of the week parameter is case insensitive, and accepts: "Mon", "Tue", "Wed", "Thu", "Fri", "Sat", "Sun". >>> df = spark.createDataFrame([('2015-07-27',)], ['d']) >>> df.select(next_day(df.d, 'Sun').alias('date')).collect() [Row(date=datetime.date(2015, 8, 2))] """ sc = SparkContext._active_spark_context return Column(sc._jvm.functions.next_day(_to_java_column(date), dayOfWeek))
python
def next_day(date, dayOfWeek): """ Returns the first date which is later than the value of the date column. Day of the week parameter is case insensitive, and accepts: "Mon", "Tue", "Wed", "Thu", "Fri", "Sat", "Sun". >>> df = spark.createDataFrame([('2015-07-27',)], ['d']) >>> df.select(next_day(df.d, 'Sun').alias('date')).collect() [Row(date=datetime.date(2015, 8, 2))] """ sc = SparkContext._active_spark_context return Column(sc._jvm.functions.next_day(_to_java_column(date), dayOfWeek))
[ "def", "next_day", "(", "date", ",", "dayOfWeek", ")", ":", "sc", "=", "SparkContext", ".", "_active_spark_context", "return", "Column", "(", "sc", ".", "_jvm", ".", "functions", ".", "next_day", "(", "_to_java_column", "(", "date", ")", ",", "dayOfWeek", ")", ")" ]
Returns the first date which is later than the value of the date column. Day of the week parameter is case insensitive, and accepts: "Mon", "Tue", "Wed", "Thu", "Fri", "Sat", "Sun". >>> df = spark.createDataFrame([('2015-07-27',)], ['d']) >>> df.select(next_day(df.d, 'Sun').alias('date')).collect() [Row(date=datetime.date(2015, 8, 2))]
[ "Returns", "the", "first", "date", "which", "is", "later", "than", "the", "value", "of", "the", "date", "column", "." ]
618d6bff71073c8c93501ab7392c3cc579730f0b
https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/sql/functions.py#L1215-L1227
19,177
apache/spark
python/pyspark/sql/functions.py
last_day
def last_day(date): """ Returns the last day of the month which the given date belongs to. >>> df = spark.createDataFrame([('1997-02-10',)], ['d']) >>> df.select(last_day(df.d).alias('date')).collect() [Row(date=datetime.date(1997, 2, 28))] """ sc = SparkContext._active_spark_context return Column(sc._jvm.functions.last_day(_to_java_column(date)))
python
def last_day(date): """ Returns the last day of the month which the given date belongs to. >>> df = spark.createDataFrame([('1997-02-10',)], ['d']) >>> df.select(last_day(df.d).alias('date')).collect() [Row(date=datetime.date(1997, 2, 28))] """ sc = SparkContext._active_spark_context return Column(sc._jvm.functions.last_day(_to_java_column(date)))
[ "def", "last_day", "(", "date", ")", ":", "sc", "=", "SparkContext", ".", "_active_spark_context", "return", "Column", "(", "sc", ".", "_jvm", ".", "functions", ".", "last_day", "(", "_to_java_column", "(", "date", ")", ")", ")" ]
Returns the last day of the month which the given date belongs to. >>> df = spark.createDataFrame([('1997-02-10',)], ['d']) >>> df.select(last_day(df.d).alias('date')).collect() [Row(date=datetime.date(1997, 2, 28))]
[ "Returns", "the", "last", "day", "of", "the", "month", "which", "the", "given", "date", "belongs", "to", "." ]
618d6bff71073c8c93501ab7392c3cc579730f0b
https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/sql/functions.py#L1231-L1240
19,178
apache/spark
python/pyspark/sql/functions.py
from_utc_timestamp
def from_utc_timestamp(timestamp, tz): """ This is a common function for databases supporting TIMESTAMP WITHOUT TIMEZONE. This function takes a timestamp which is timezone-agnostic, and interprets it as a timestamp in UTC, and renders that timestamp as a timestamp in the given time zone. However, timestamp in Spark represents number of microseconds from the Unix epoch, which is not timezone-agnostic. So in Spark this function just shift the timestamp value from UTC timezone to the given timezone. This function may return confusing result if the input is a string with timezone, e.g. '2018-03-13T06:18:23+00:00'. The reason is that, Spark firstly cast the string to timestamp according to the timezone in the string, and finally display the result by converting the timestamp to string according to the session local timezone. :param timestamp: the column that contains timestamps :param tz: a string that has the ID of timezone, e.g. "GMT", "America/Los_Angeles", etc .. versionchanged:: 2.4 `tz` can take a :class:`Column` containing timezone ID strings. >>> df = spark.createDataFrame([('1997-02-28 10:30:00', 'JST')], ['ts', 'tz']) >>> df.select(from_utc_timestamp(df.ts, "PST").alias('local_time')).collect() [Row(local_time=datetime.datetime(1997, 2, 28, 2, 30))] >>> df.select(from_utc_timestamp(df.ts, df.tz).alias('local_time')).collect() [Row(local_time=datetime.datetime(1997, 2, 28, 19, 30))] .. note:: Deprecated in 3.0. See SPARK-25496 """ warnings.warn("Deprecated in 3.0. See SPARK-25496", DeprecationWarning) sc = SparkContext._active_spark_context if isinstance(tz, Column): tz = _to_java_column(tz) return Column(sc._jvm.functions.from_utc_timestamp(_to_java_column(timestamp), tz))
python
def from_utc_timestamp(timestamp, tz): """ This is a common function for databases supporting TIMESTAMP WITHOUT TIMEZONE. This function takes a timestamp which is timezone-agnostic, and interprets it as a timestamp in UTC, and renders that timestamp as a timestamp in the given time zone. However, timestamp in Spark represents number of microseconds from the Unix epoch, which is not timezone-agnostic. So in Spark this function just shift the timestamp value from UTC timezone to the given timezone. This function may return confusing result if the input is a string with timezone, e.g. '2018-03-13T06:18:23+00:00'. The reason is that, Spark firstly cast the string to timestamp according to the timezone in the string, and finally display the result by converting the timestamp to string according to the session local timezone. :param timestamp: the column that contains timestamps :param tz: a string that has the ID of timezone, e.g. "GMT", "America/Los_Angeles", etc .. versionchanged:: 2.4 `tz` can take a :class:`Column` containing timezone ID strings. >>> df = spark.createDataFrame([('1997-02-28 10:30:00', 'JST')], ['ts', 'tz']) >>> df.select(from_utc_timestamp(df.ts, "PST").alias('local_time')).collect() [Row(local_time=datetime.datetime(1997, 2, 28, 2, 30))] >>> df.select(from_utc_timestamp(df.ts, df.tz).alias('local_time')).collect() [Row(local_time=datetime.datetime(1997, 2, 28, 19, 30))] .. note:: Deprecated in 3.0. See SPARK-25496 """ warnings.warn("Deprecated in 3.0. See SPARK-25496", DeprecationWarning) sc = SparkContext._active_spark_context if isinstance(tz, Column): tz = _to_java_column(tz) return Column(sc._jvm.functions.from_utc_timestamp(_to_java_column(timestamp), tz))
[ "def", "from_utc_timestamp", "(", "timestamp", ",", "tz", ")", ":", "warnings", ".", "warn", "(", "\"Deprecated in 3.0. See SPARK-25496\"", ",", "DeprecationWarning", ")", "sc", "=", "SparkContext", ".", "_active_spark_context", "if", "isinstance", "(", "tz", ",", "Column", ")", ":", "tz", "=", "_to_java_column", "(", "tz", ")", "return", "Column", "(", "sc", ".", "_jvm", ".", "functions", ".", "from_utc_timestamp", "(", "_to_java_column", "(", "timestamp", ")", ",", "tz", ")", ")" ]
This is a common function for databases supporting TIMESTAMP WITHOUT TIMEZONE. This function takes a timestamp which is timezone-agnostic, and interprets it as a timestamp in UTC, and renders that timestamp as a timestamp in the given time zone. However, timestamp in Spark represents number of microseconds from the Unix epoch, which is not timezone-agnostic. So in Spark this function just shift the timestamp value from UTC timezone to the given timezone. This function may return confusing result if the input is a string with timezone, e.g. '2018-03-13T06:18:23+00:00'. The reason is that, Spark firstly cast the string to timestamp according to the timezone in the string, and finally display the result by converting the timestamp to string according to the session local timezone. :param timestamp: the column that contains timestamps :param tz: a string that has the ID of timezone, e.g. "GMT", "America/Los_Angeles", etc .. versionchanged:: 2.4 `tz` can take a :class:`Column` containing timezone ID strings. >>> df = spark.createDataFrame([('1997-02-28 10:30:00', 'JST')], ['ts', 'tz']) >>> df.select(from_utc_timestamp(df.ts, "PST").alias('local_time')).collect() [Row(local_time=datetime.datetime(1997, 2, 28, 2, 30))] >>> df.select(from_utc_timestamp(df.ts, df.tz).alias('local_time')).collect() [Row(local_time=datetime.datetime(1997, 2, 28, 19, 30))] .. note:: Deprecated in 3.0. See SPARK-25496
[ "This", "is", "a", "common", "function", "for", "databases", "supporting", "TIMESTAMP", "WITHOUT", "TIMEZONE", ".", "This", "function", "takes", "a", "timestamp", "which", "is", "timezone", "-", "agnostic", "and", "interprets", "it", "as", "a", "timestamp", "in", "UTC", "and", "renders", "that", "timestamp", "as", "a", "timestamp", "in", "the", "given", "time", "zone", "." ]
618d6bff71073c8c93501ab7392c3cc579730f0b
https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/sql/functions.py#L1283-L1316
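A usage sketch for from_utc_timestamp that also surfaces the DeprecationWarning the wrapper emits at this version; the input values are arbitrary.

import warnings
from pyspark.sql import SparkSession
from pyspark.sql.functions import from_utc_timestamp

spark = SparkSession.builder.getOrCreate()
df = spark.createDataFrame([("1997-02-28 10:30:00", "JST")], ["ts", "tz"])

with warnings.catch_warnings(record=True) as caught:
    warnings.simplefilter("always")
    out = df.select(from_utc_timestamp(df.ts, "PST").alias("local_time"),
                    from_utc_timestamp(df.ts, df.tz).alias("tz_from_column"))
    assert any(issubclass(w.category, DeprecationWarning) for w in caught)

out.show()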
19,179
apache/spark
python/pyspark/sql/functions.py
hash
def hash(*cols): """Calculates the hash code of given columns, and returns the result as an int column. >>> spark.createDataFrame([('ABC',)], ['a']).select(hash('a').alias('hash')).collect() [Row(hash=-757602832)] """ sc = SparkContext._active_spark_context jc = sc._jvm.functions.hash(_to_seq(sc, cols, _to_java_column)) return Column(jc)
python
def hash(*cols): """Calculates the hash code of given columns, and returns the result as an int column. >>> spark.createDataFrame([('ABC',)], ['a']).select(hash('a').alias('hash')).collect() [Row(hash=-757602832)] """ sc = SparkContext._active_spark_context jc = sc._jvm.functions.hash(_to_seq(sc, cols, _to_java_column)) return Column(jc)
[ "def", "hash", "(", "*", "cols", ")", ":", "sc", "=", "SparkContext", ".", "_active_spark_context", "jc", "=", "sc", ".", "_jvm", ".", "functions", ".", "hash", "(", "_to_seq", "(", "sc", ",", "cols", ",", "_to_java_column", ")", ")", "return", "Column", "(", "jc", ")" ]
Calculates the hash code of given columns, and returns the result as an int column. >>> spark.createDataFrame([('ABC',)], ['a']).select(hash('a').alias('hash')).collect() [Row(hash=-757602832)]
[ "Calculates", "the", "hash", "code", "of", "given", "columns", "and", "returns", "the", "result", "as", "an", "int", "column", "." ]
618d6bff71073c8c93501ab7392c3cc579730f0b
https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/sql/functions.py#L1466-L1474
19,180
apache/spark
python/pyspark/sql/functions.py
concat_ws
def concat_ws(sep, *cols): """ Concatenates multiple input string columns together into a single string column, using the given separator. >>> df = spark.createDataFrame([('abcd','123')], ['s', 'd']) >>> df.select(concat_ws('-', df.s, df.d).alias('s')).collect() [Row(s=u'abcd-123')] """ sc = SparkContext._active_spark_context return Column(sc._jvm.functions.concat_ws(sep, _to_seq(sc, cols, _to_java_column)))
python
def concat_ws(sep, *cols): """ Concatenates multiple input string columns together into a single string column, using the given separator. >>> df = spark.createDataFrame([('abcd','123')], ['s', 'd']) >>> df.select(concat_ws('-', df.s, df.d).alias('s')).collect() [Row(s=u'abcd-123')] """ sc = SparkContext._active_spark_context return Column(sc._jvm.functions.concat_ws(sep, _to_seq(sc, cols, _to_java_column)))
[ "def", "concat_ws", "(", "sep", ",", "*", "cols", ")", ":", "sc", "=", "SparkContext", ".", "_active_spark_context", "return", "Column", "(", "sc", ".", "_jvm", ".", "functions", ".", "concat_ws", "(", "sep", ",", "_to_seq", "(", "sc", ",", "cols", ",", "_to_java_column", ")", ")", ")" ]
Concatenates multiple input string columns together into a single string column, using the given separator. >>> df = spark.createDataFrame([('abcd','123')], ['s', 'd']) >>> df.select(concat_ws('-', df.s, df.d).alias('s')).collect() [Row(s=u'abcd-123')]
[ "Concatenates", "multiple", "input", "string", "columns", "together", "into", "a", "single", "string", "column", "using", "the", "given", "separator", "." ]
618d6bff71073c8c93501ab7392c3cc579730f0b
https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/sql/functions.py#L1511-L1521
19,181
apache/spark
python/pyspark/sql/functions.py
format_string
def format_string(format, *cols): """ Formats the arguments in printf-style and returns the result as a string column. :param format: string that can contain embedded format tags and used as result column's value :param cols: list of column names (string) or list of :class:`Column` expressions of columns to be used in formatting >>> df = spark.createDataFrame([(5, "hello")], ['a', 'b']) >>> df.select(format_string('%d %s', df.a, df.b).alias('v')).collect() [Row(v=u'5 hello')] """ sc = SparkContext._active_spark_context return Column(sc._jvm.functions.format_string(format, _to_seq(sc, cols, _to_java_column)))
python
def format_string(format, *cols): """ Formats the arguments in printf-style and returns the result as a string column. :param format: string that can contain embedded format tags and used as result column's value :param cols: list of column names (string) or list of :class:`Column` expressions of columns to be used in formatting >>> df = spark.createDataFrame([(5, "hello")], ['a', 'b']) >>> df.select(format_string('%d %s', df.a, df.b).alias('v')).collect() [Row(v=u'5 hello')] """ sc = SparkContext._active_spark_context return Column(sc._jvm.functions.format_string(format, _to_seq(sc, cols, _to_java_column)))
[ "def", "format_string", "(", "format", ",", "*", "cols", ")", ":", "sc", "=", "SparkContext", ".", "_active_spark_context", "return", "Column", "(", "sc", ".", "_jvm", ".", "functions", ".", "format_string", "(", "format", ",", "_to_seq", "(", "sc", ",", "cols", ",", "_to_java_column", ")", ")", ")" ]
Formats the arguments in printf-style and returns the result as a string column. :param format: string that can contain embedded format tags and used as result column's value :param cols: list of column names (string) or list of :class:`Column` expressions of columns to be used in formatting >>> df = spark.createDataFrame([(5, "hello")], ['a', 'b']) >>> df.select(format_string('%d %s', df.a, df.b).alias('v')).collect() [Row(v=u'5 hello')]
[ "Formats", "the", "arguments", "in", "printf", "-", "style", "and", "returns", "the", "result", "as", "a", "string", "column", "." ]
618d6bff71073c8c93501ab7392c3cc579730f0b
https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/sql/functions.py#L1563-L1575
19,182
apache/spark
python/pyspark/sql/functions.py
instr
def instr(str, substr): """ Locate the position of the first occurrence of substr column in the given string. Returns null if either of the arguments are null. .. note:: The position is not zero based, but 1 based index. Returns 0 if substr could not be found in str. >>> df = spark.createDataFrame([('abcd',)], ['s',]) >>> df.select(instr(df.s, 'b').alias('s')).collect() [Row(s=2)] """ sc = SparkContext._active_spark_context return Column(sc._jvm.functions.instr(_to_java_column(str), substr))
python
def instr(str, substr): """ Locate the position of the first occurrence of substr column in the given string. Returns null if either of the arguments are null. .. note:: The position is not zero based, but 1 based index. Returns 0 if substr could not be found in str. >>> df = spark.createDataFrame([('abcd',)], ['s',]) >>> df.select(instr(df.s, 'b').alias('s')).collect() [Row(s=2)] """ sc = SparkContext._active_spark_context return Column(sc._jvm.functions.instr(_to_java_column(str), substr))
[ "def", "instr", "(", "str", ",", "substr", ")", ":", "sc", "=", "SparkContext", ".", "_active_spark_context", "return", "Column", "(", "sc", ".", "_jvm", ".", "functions", ".", "instr", "(", "_to_java_column", "(", "str", ")", ",", "substr", ")", ")" ]
Locate the position of the first occurrence of substr column in the given string. Returns null if either of the arguments are null. .. note:: The position is not zero based, but 1 based index. Returns 0 if substr could not be found in str. >>> df = spark.createDataFrame([('abcd',)], ['s',]) >>> df.select(instr(df.s, 'b').alias('s')).collect() [Row(s=2)]
[ "Locate", "the", "position", "of", "the", "first", "occurrence", "of", "substr", "column", "in", "the", "given", "string", ".", "Returns", "null", "if", "either", "of", "the", "arguments", "are", "null", "." ]
618d6bff71073c8c93501ab7392c3cc579730f0b
https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/sql/functions.py#L1579-L1592
19,183
apache/spark
python/pyspark/sql/functions.py
substring
def substring(str, pos, len): """ Substring starts at `pos` and is of length `len` when str is String type or returns the slice of byte array that starts at `pos` in byte and is of length `len` when str is Binary type. .. note:: The position is not zero based, but 1 based index. >>> df = spark.createDataFrame([('abcd',)], ['s',]) >>> df.select(substring(df.s, 1, 2).alias('s')).collect() [Row(s=u'ab')] """ sc = SparkContext._active_spark_context return Column(sc._jvm.functions.substring(_to_java_column(str), pos, len))
python
def substring(str, pos, len): """ Substring starts at `pos` and is of length `len` when str is String type or returns the slice of byte array that starts at `pos` in byte and is of length `len` when str is Binary type. .. note:: The position is not zero based, but 1 based index. >>> df = spark.createDataFrame([('abcd',)], ['s',]) >>> df.select(substring(df.s, 1, 2).alias('s')).collect() [Row(s=u'ab')] """ sc = SparkContext._active_spark_context return Column(sc._jvm.functions.substring(_to_java_column(str), pos, len))
[ "def", "substring", "(", "str", ",", "pos", ",", "len", ")", ":", "sc", "=", "SparkContext", ".", "_active_spark_context", "return", "Column", "(", "sc", ".", "_jvm", ".", "functions", ".", "substring", "(", "_to_java_column", "(", "str", ")", ",", "pos", ",", "len", ")", ")" ]
Substring starts at `pos` and is of length `len` when str is String type or returns the slice of byte array that starts at `pos` in byte and is of length `len` when str is Binary type. .. note:: The position is not zero based, but 1 based index. >>> df = spark.createDataFrame([('abcd',)], ['s',]) >>> df.select(substring(df.s, 1, 2).alias('s')).collect() [Row(s=u'ab')]
[ "Substring", "starts", "at", "pos", "and", "is", "of", "length", "len", "when", "str", "is", "String", "type", "or", "returns", "the", "slice", "of", "byte", "array", "that", "starts", "at", "pos", "in", "byte", "and", "is", "of", "length", "len", "when", "str", "is", "Binary", "type", "." ]
618d6bff71073c8c93501ab7392c3cc579730f0b
https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/sql/functions.py#L1597-L1610
19,184
apache/spark
python/pyspark/sql/functions.py
levenshtein
def levenshtein(left, right): """Computes the Levenshtein distance of the two given strings. >>> df0 = spark.createDataFrame([('kitten', 'sitting',)], ['l', 'r']) >>> df0.select(levenshtein('l', 'r').alias('d')).collect() [Row(d=3)] """ sc = SparkContext._active_spark_context jc = sc._jvm.functions.levenshtein(_to_java_column(left), _to_java_column(right)) return Column(jc)
python
def levenshtein(left, right): """Computes the Levenshtein distance of the two given strings. >>> df0 = spark.createDataFrame([('kitten', 'sitting',)], ['l', 'r']) >>> df0.select(levenshtein('l', 'r').alias('d')).collect() [Row(d=3)] """ sc = SparkContext._active_spark_context jc = sc._jvm.functions.levenshtein(_to_java_column(left), _to_java_column(right)) return Column(jc)
[ "def", "levenshtein", "(", "left", ",", "right", ")", ":", "sc", "=", "SparkContext", ".", "_active_spark_context", "jc", "=", "sc", ".", "_jvm", ".", "functions", ".", "levenshtein", "(", "_to_java_column", "(", "left", ")", ",", "_to_java_column", "(", "right", ")", ")", "return", "Column", "(", "jc", ")" ]
Computes the Levenshtein distance of the two given strings. >>> df0 = spark.createDataFrame([('kitten', 'sitting',)], ['l', 'r']) >>> df0.select(levenshtein('l', 'r').alias('d')).collect() [Row(d=3)]
[ "Computes", "the", "Levenshtein", "distance", "of", "the", "two", "given", "strings", "." ]
618d6bff71073c8c93501ab7392c3cc579730f0b
https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/sql/functions.py#L1634-L1643
19,185
apache/spark
python/pyspark/sql/functions.py
locate
def locate(substr, str, pos=1): """ Locate the position of the first occurrence of substr in a string column, after position pos. .. note:: The position is not zero based, but 1 based index. Returns 0 if substr could not be found in str. :param substr: a string :param str: a Column of :class:`pyspark.sql.types.StringType` :param pos: start position (1 based) >>> df = spark.createDataFrame([('abcd',)], ['s',]) >>> df.select(locate('b', df.s, 1).alias('s')).collect() [Row(s=2)] """ sc = SparkContext._active_spark_context return Column(sc._jvm.functions.locate(substr, _to_java_column(str), pos))
python
def locate(substr, str, pos=1): """ Locate the position of the first occurrence of substr in a string column, after position pos. .. note:: The position is not zero based, but 1 based index. Returns 0 if substr could not be found in str. :param substr: a string :param str: a Column of :class:`pyspark.sql.types.StringType` :param pos: start position (1 based) >>> df = spark.createDataFrame([('abcd',)], ['s',]) >>> df.select(locate('b', df.s, 1).alias('s')).collect() [Row(s=2)] """ sc = SparkContext._active_spark_context return Column(sc._jvm.functions.locate(substr, _to_java_column(str), pos))
[ "def", "locate", "(", "substr", ",", "str", ",", "pos", "=", "1", ")", ":", "sc", "=", "SparkContext", ".", "_active_spark_context", "return", "Column", "(", "sc", ".", "_jvm", ".", "functions", ".", "locate", "(", "substr", ",", "_to_java_column", "(", "str", ")", ",", "pos", ")", ")" ]
Locate the position of the first occurrence of substr in a string column, after position pos. .. note:: The position is not zero based, but 1 based index. Returns 0 if substr could not be found in str. :param substr: a string :param str: a Column of :class:`pyspark.sql.types.StringType` :param pos: start position (1 based) >>> df = spark.createDataFrame([('abcd',)], ['s',]) >>> df.select(locate('b', df.s, 1).alias('s')).collect() [Row(s=2)]
[ "Locate", "the", "position", "of", "the", "first", "occurrence", "of", "substr", "in", "a", "string", "column", "after", "position", "pos", "." ]
618d6bff71073c8c93501ab7392c3cc579730f0b
https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/sql/functions.py#L1647-L1663
19,186
apache/spark
python/pyspark/sql/functions.py
lpad
def lpad(col, len, pad): """ Left-pad the string column to width `len` with `pad`. >>> df = spark.createDataFrame([('abcd',)], ['s',]) >>> df.select(lpad(df.s, 6, '#').alias('s')).collect() [Row(s=u'##abcd')] """ sc = SparkContext._active_spark_context return Column(sc._jvm.functions.lpad(_to_java_column(col), len, pad))
python
def lpad(col, len, pad): """ Left-pad the string column to width `len` with `pad`. >>> df = spark.createDataFrame([('abcd',)], ['s',]) >>> df.select(lpad(df.s, 6, '#').alias('s')).collect() [Row(s=u'##abcd')] """ sc = SparkContext._active_spark_context return Column(sc._jvm.functions.lpad(_to_java_column(col), len, pad))
[ "def", "lpad", "(", "col", ",", "len", ",", "pad", ")", ":", "sc", "=", "SparkContext", ".", "_active_spark_context", "return", "Column", "(", "sc", ".", "_jvm", ".", "functions", ".", "lpad", "(", "_to_java_column", "(", "col", ")", ",", "len", ",", "pad", ")", ")" ]
Left-pad the string column to width `len` with `pad`. >>> df = spark.createDataFrame([('abcd',)], ['s',]) >>> df.select(lpad(df.s, 6, '#').alias('s')).collect() [Row(s=u'##abcd')]
[ "Left", "-", "pad", "the", "string", "column", "to", "width", "len", "with", "pad", "." ]
618d6bff71073c8c93501ab7392c3cc579730f0b
https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/sql/functions.py#L1668-L1677
19,187
apache/spark
python/pyspark/sql/functions.py
repeat
def repeat(col, n): """ Repeats a string column n times, and returns it as a new string column. >>> df = spark.createDataFrame([('ab',)], ['s',]) >>> df.select(repeat(df.s, 3).alias('s')).collect() [Row(s=u'ababab')] """ sc = SparkContext._active_spark_context return Column(sc._jvm.functions.repeat(_to_java_column(col), n))
python
def repeat(col, n): """ Repeats a string column n times, and returns it as a new string column. >>> df = spark.createDataFrame([('ab',)], ['s',]) >>> df.select(repeat(df.s, 3).alias('s')).collect() [Row(s=u'ababab')] """ sc = SparkContext._active_spark_context return Column(sc._jvm.functions.repeat(_to_java_column(col), n))
[ "def", "repeat", "(", "col", ",", "n", ")", ":", "sc", "=", "SparkContext", ".", "_active_spark_context", "return", "Column", "(", "sc", ".", "_jvm", ".", "functions", ".", "repeat", "(", "_to_java_column", "(", "col", ")", ",", "n", ")", ")" ]
Repeats a string column n times, and returns it as a new string column. >>> df = spark.createDataFrame([('ab',)], ['s',]) >>> df.select(repeat(df.s, 3).alias('s')).collect() [Row(s=u'ababab')]
[ "Repeats", "a", "string", "column", "n", "times", "and", "returns", "it", "as", "a", "new", "string", "column", "." ]
618d6bff71073c8c93501ab7392c3cc579730f0b
https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/sql/functions.py#L1696-L1705
19,188
apache/spark
python/pyspark/sql/functions.py
split
def split(str, pattern, limit=-1): """ Splits str around matches of the given pattern. :param str: a string expression to split :param pattern: a string representing a regular expression. The regex string should be a Java regular expression. :param limit: an integer which controls the number of times `pattern` is applied. * ``limit > 0``: The resulting array's length will not be more than `limit`, and the resulting array's last entry will contain all input beyond the last matched pattern. * ``limit <= 0``: `pattern` will be applied as many times as possible, and the resulting array can be of any size. .. versionchanged:: 3.0 `split` now takes an optional `limit` field. If not provided, default limit value is -1. >>> df = spark.createDataFrame([('oneAtwoBthreeC',)], ['s',]) >>> df.select(split(df.s, '[ABC]', 2).alias('s')).collect() [Row(s=[u'one', u'twoBthreeC'])] >>> df.select(split(df.s, '[ABC]', -1).alias('s')).collect() [Row(s=[u'one', u'two', u'three', u''])] """ sc = SparkContext._active_spark_context return Column(sc._jvm.functions.split(_to_java_column(str), pattern, limit))
python
def split(str, pattern, limit=-1): """ Splits str around matches of the given pattern. :param str: a string expression to split :param pattern: a string representing a regular expression. The regex string should be a Java regular expression. :param limit: an integer which controls the number of times `pattern` is applied. * ``limit > 0``: The resulting array's length will not be more than `limit`, and the resulting array's last entry will contain all input beyond the last matched pattern. * ``limit <= 0``: `pattern` will be applied as many times as possible, and the resulting array can be of any size. .. versionchanged:: 3.0 `split` now takes an optional `limit` field. If not provided, default limit value is -1. >>> df = spark.createDataFrame([('oneAtwoBthreeC',)], ['s',]) >>> df.select(split(df.s, '[ABC]', 2).alias('s')).collect() [Row(s=[u'one', u'twoBthreeC'])] >>> df.select(split(df.s, '[ABC]', -1).alias('s')).collect() [Row(s=[u'one', u'two', u'three', u''])] """ sc = SparkContext._active_spark_context return Column(sc._jvm.functions.split(_to_java_column(str), pattern, limit))
[ "def", "split", "(", "str", ",", "pattern", ",", "limit", "=", "-", "1", ")", ":", "sc", "=", "SparkContext", ".", "_active_spark_context", "return", "Column", "(", "sc", ".", "_jvm", ".", "functions", ".", "split", "(", "_to_java_column", "(", "str", ")", ",", "pattern", ",", "limit", ")", ")" ]
Splits str around matches of the given pattern.

    :param str: a string expression to split
    :param pattern: a string representing a regular expression. The regex string should be
        a Java regular expression.
    :param limit: an integer which controls the number of times `pattern` is applied.

        * ``limit > 0``: The resulting array's length will not be more than `limit`, and the
                         resulting array's last entry will contain all input beyond the last
                         matched pattern.
        * ``limit <= 0``: `pattern` will be applied as many times as possible, and the resulting
                          array can be of any size.

    .. versionchanged:: 3.0
       `split` now takes an optional `limit` field. If not provided, default limit value is -1.

    >>> df = spark.createDataFrame([('oneAtwoBthreeC',)], ['s',])
    >>> df.select(split(df.s, '[ABC]', 2).alias('s')).collect()
    [Row(s=[u'one', u'twoBthreeC'])]
    >>> df.select(split(df.s, '[ABC]', -1).alias('s')).collect()
    [Row(s=[u'one', u'two', u'three', u''])]
[ "Splits", "str", "around", "matches", "of", "the", "given", "pattern", "." ]
618d6bff71073c8c93501ab7392c3cc579730f0b
https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/sql/functions.py#L1710-L1735
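As a quick illustration of how the `split` column function composes with `explode` from the same module, here is a minimal, self-contained sketch; it is not taken from the repository, and the session and app names are arbitrary:

# Illustrative sketch: split a delimited string column, then explode the array into rows.
from pyspark.sql import SparkSession
import pyspark.sql.functions as F

spark = SparkSession.builder.master("local[1]").appName("split_sketch").getOrCreate()
df = spark.createDataFrame([("one,two,three",)], ["csv"])
# split() returns an array column; explode() emits one row per array element.
rows = df.select(F.explode(F.split(df.csv, ",")).alias("item")).collect()
# rows -> [Row(item='one'), Row(item='two'), Row(item='three')]
spark.stop()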
19,189
apache/spark
python/pyspark/sql/functions.py
regexp_extract
def regexp_extract(str, pattern, idx):
    r"""Extract a specific group matched by a Java regex, from the specified string column.
    If the regex did not match, or the specified group did not match, an empty string is returned.

    >>> df = spark.createDataFrame([('100-200',)], ['str'])
    >>> df.select(regexp_extract('str', r'(\d+)-(\d+)', 1).alias('d')).collect()
    [Row(d=u'100')]
    >>> df = spark.createDataFrame([('foo',)], ['str'])
    >>> df.select(regexp_extract('str', r'(\d+)', 1).alias('d')).collect()
    [Row(d=u'')]
    >>> df = spark.createDataFrame([('aaaac',)], ['str'])
    >>> df.select(regexp_extract('str', '(a+)(b)?(c)', 2).alias('d')).collect()
    [Row(d=u'')]
    """
    sc = SparkContext._active_spark_context
    jc = sc._jvm.functions.regexp_extract(_to_java_column(str), pattern, idx)
    return Column(jc)
python
def regexp_extract(str, pattern, idx):
    r"""Extract a specific group matched by a Java regex, from the specified string column.
    If the regex did not match, or the specified group did not match, an empty string is returned.

    >>> df = spark.createDataFrame([('100-200',)], ['str'])
    >>> df.select(regexp_extract('str', r'(\d+)-(\d+)', 1).alias('d')).collect()
    [Row(d=u'100')]
    >>> df = spark.createDataFrame([('foo',)], ['str'])
    >>> df.select(regexp_extract('str', r'(\d+)', 1).alias('d')).collect()
    [Row(d=u'')]
    >>> df = spark.createDataFrame([('aaaac',)], ['str'])
    >>> df.select(regexp_extract('str', '(a+)(b)?(c)', 2).alias('d')).collect()
    [Row(d=u'')]
    """
    sc = SparkContext._active_spark_context
    jc = sc._jvm.functions.regexp_extract(_to_java_column(str), pattern, idx)
    return Column(jc)
[ "def", "regexp_extract", "(", "str", ",", "pattern", ",", "idx", ")", ":", "sc", "=", "SparkContext", ".", "_active_spark_context", "jc", "=", "sc", ".", "_jvm", ".", "functions", ".", "regexp_extract", "(", "_to_java_column", "(", "str", ")", ",", "pattern", ",", "idx", ")", "return", "Column", "(", "jc", ")" ]
r"""Extract a specific group matched by a Java regex, from the specified string column. If the regex did not match, or the specified group did not match, an empty string is returned. >>> df = spark.createDataFrame([('100-200',)], ['str']) >>> df.select(regexp_extract('str', r'(\d+)-(\d+)', 1).alias('d')).collect() [Row(d=u'100')] >>> df = spark.createDataFrame([('foo',)], ['str']) >>> df.select(regexp_extract('str', r'(\d+)', 1).alias('d')).collect() [Row(d=u'')] >>> df = spark.createDataFrame([('aaaac',)], ['str']) >>> df.select(regexp_extract('str', '(a+)(b)?(c)', 2).alias('d')).collect() [Row(d=u'')]
[ "r", "Extract", "a", "specific", "group", "matched", "by", "a", "Java", "regex", "from", "the", "specified", "string", "column", ".", "If", "the", "regex", "did", "not", "match", "or", "the", "specified", "group", "did", "not", "match", "an", "empty", "string", "is", "returned", "." ]
618d6bff71073c8c93501ab7392c3cc579730f0b
https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/sql/functions.py#L1740-L1756
19,190
apache/spark
python/pyspark/sql/functions.py
regexp_replace
def regexp_replace(str, pattern, replacement):
    r"""Replace all substrings of the specified string value that match regexp with rep.

    >>> df = spark.createDataFrame([('100-200',)], ['str'])
    >>> df.select(regexp_replace('str', r'(\d+)', '--').alias('d')).collect()
    [Row(d=u'-----')]
    """
    sc = SparkContext._active_spark_context
    jc = sc._jvm.functions.regexp_replace(_to_java_column(str), pattern, replacement)
    return Column(jc)
python
def regexp_replace(str, pattern, replacement):
    r"""Replace all substrings of the specified string value that match regexp with rep.

    >>> df = spark.createDataFrame([('100-200',)], ['str'])
    >>> df.select(regexp_replace('str', r'(\d+)', '--').alias('d')).collect()
    [Row(d=u'-----')]
    """
    sc = SparkContext._active_spark_context
    jc = sc._jvm.functions.regexp_replace(_to_java_column(str), pattern, replacement)
    return Column(jc)
[ "def", "regexp_replace", "(", "str", ",", "pattern", ",", "replacement", ")", ":", "sc", "=", "SparkContext", ".", "_active_spark_context", "jc", "=", "sc", ".", "_jvm", ".", "functions", ".", "regexp_replace", "(", "_to_java_column", "(", "str", ")", ",", "pattern", ",", "replacement", ")", "return", "Column", "(", "jc", ")" ]
r"""Replace all substrings of the specified string value that match regexp with rep. >>> df = spark.createDataFrame([('100-200',)], ['str']) >>> df.select(regexp_replace('str', r'(\d+)', '--').alias('d')).collect() [Row(d=u'-----')]
[ "r", "Replace", "all", "substrings", "of", "the", "specified", "string", "value", "that", "match", "regexp", "with", "rep", "." ]
618d6bff71073c8c93501ab7392c3cc579730f0b
https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/sql/functions.py#L1761-L1770
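A small combined sketch of the two regexp helpers above, illustrative only; it assumes an already running `SparkSession` bound to `spark`, as the doctests do:

import pyspark.sql.functions as F

df = spark.createDataFrame([('order-1234',)], ['raw'])
df.select(
    F.regexp_extract('raw', r'(\d+)', 1).alias('order_id'),  # keeps only the digits: '1234'
    F.regexp_replace('raw', r'\d', '#').alias('masked'),     # masks each digit: 'order-####'
).show()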
19,191
apache/spark
python/pyspark/sql/functions.py
translate
def translate(srcCol, matching, replace):
    """A function translate any character in the `srcCol` by a character in `matching`.
    The characters in `replace` is corresponding to the characters in `matching`.
    The translate will happen when any character in the string matching with the character
    in the `matching`.

    >>> spark.createDataFrame([('translate',)], ['a']).select(translate('a', "rnlt", "123") \\
    ...     .alias('r')).collect()
    [Row(r=u'1a2s3ae')]
    """
    sc = SparkContext._active_spark_context
    return Column(sc._jvm.functions.translate(_to_java_column(srcCol), matching, replace))
python
def translate(srcCol, matching, replace):
    """A function translate any character in the `srcCol` by a character in `matching`.
    The characters in `replace` is corresponding to the characters in `matching`.
    The translate will happen when any character in the string matching with the character
    in the `matching`.

    >>> spark.createDataFrame([('translate',)], ['a']).select(translate('a', "rnlt", "123") \\
    ...     .alias('r')).collect()
    [Row(r=u'1a2s3ae')]
    """
    sc = SparkContext._active_spark_context
    return Column(sc._jvm.functions.translate(_to_java_column(srcCol), matching, replace))
[ "def", "translate", "(", "srcCol", ",", "matching", ",", "replace", ")", ":", "sc", "=", "SparkContext", ".", "_active_spark_context", "return", "Column", "(", "sc", ".", "_jvm", ".", "functions", ".", "translate", "(", "_to_java_column", "(", "srcCol", ")", ",", "matching", ",", "replace", ")", ")" ]
A function translate any character in the `srcCol` by a character in `matching`.
    The characters in `replace` is corresponding to the characters in `matching`.
    The translate will happen when any character in the string matching with the character
    in the `matching`.

    >>> spark.createDataFrame([('translate',)], ['a']).select(translate('a', "rnlt", "123") \\
    ...     .alias('r')).collect()
    [Row(r=u'1a2s3ae')]
[ "A", "function", "translate", "any", "character", "in", "the", "srcCol", "by", "a", "character", "in", "matching", ".", "The", "characters", "in", "replace", "is", "corresponding", "to", "the", "characters", "in", "matching", ".", "The", "translate", "will", "happen", "when", "any", "character", "in", "the", "string", "matching", "with", "the", "character", "in", "the", "matching", "." ]
618d6bff71073c8c93501ab7392c3cc579730f0b
https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/sql/functions.py#L1856-L1867
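The mapping in `translate` is positional: the i-th character of `matching` is replaced by the i-th character of `replace`, and matching characters with no counterpart in `replace` are dropped, which is why 't' disappears from the doctest output. A minimal sketch of that behaviour, assuming an active `SparkSession` bound to `spark`:

import pyspark.sql.functions as F

df = spark.createDataFrame([('translate',)], ['a'])
# 'r' -> '1', 'n' -> '2', 'l' -> '3'; 't' has no replacement character, so it is removed.
df.select(F.translate('a', 'rnlt', '123').alias('r')).collect()
# -> [Row(r=u'1a2s3ae')]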
19,192
apache/spark
python/pyspark/sql/functions.py
array_join
def array_join(col, delimiter, null_replacement=None):
    """
    Concatenates the elements of `column` using the `delimiter`. Null values are replaced with
    `null_replacement` if set, otherwise they are ignored.

    >>> df = spark.createDataFrame([(["a", "b", "c"],), (["a", None],)], ['data'])
    >>> df.select(array_join(df.data, ",").alias("joined")).collect()
    [Row(joined=u'a,b,c'), Row(joined=u'a')]
    >>> df.select(array_join(df.data, ",", "NULL").alias("joined")).collect()
    [Row(joined=u'a,b,c'), Row(joined=u'a,NULL')]
    """
    sc = SparkContext._active_spark_context
    if null_replacement is None:
        return Column(sc._jvm.functions.array_join(_to_java_column(col), delimiter))
    else:
        return Column(sc._jvm.functions.array_join(
            _to_java_column(col), delimiter, null_replacement))
python
def array_join(col, delimiter, null_replacement=None):
    """
    Concatenates the elements of `column` using the `delimiter`. Null values are replaced with
    `null_replacement` if set, otherwise they are ignored.

    >>> df = spark.createDataFrame([(["a", "b", "c"],), (["a", None],)], ['data'])
    >>> df.select(array_join(df.data, ",").alias("joined")).collect()
    [Row(joined=u'a,b,c'), Row(joined=u'a')]
    >>> df.select(array_join(df.data, ",", "NULL").alias("joined")).collect()
    [Row(joined=u'a,b,c'), Row(joined=u'a,NULL')]
    """
    sc = SparkContext._active_spark_context
    if null_replacement is None:
        return Column(sc._jvm.functions.array_join(_to_java_column(col), delimiter))
    else:
        return Column(sc._jvm.functions.array_join(
            _to_java_column(col), delimiter, null_replacement))
[ "def", "array_join", "(", "col", ",", "delimiter", ",", "null_replacement", "=", "None", ")", ":", "sc", "=", "SparkContext", ".", "_active_spark_context", "if", "null_replacement", "is", "None", ":", "return", "Column", "(", "sc", ".", "_jvm", ".", "functions", ".", "array_join", "(", "_to_java_column", "(", "col", ")", ",", "delimiter", ")", ")", "else", ":", "return", "Column", "(", "sc", ".", "_jvm", ".", "functions", ".", "array_join", "(", "_to_java_column", "(", "col", ")", ",", "delimiter", ",", "null_replacement", ")", ")" ]
Concatenates the elements of `column` using the `delimiter`. Null values are replaced with
    `null_replacement` if set, otherwise they are ignored.

    >>> df = spark.createDataFrame([(["a", "b", "c"],), (["a", None],)], ['data'])
    >>> df.select(array_join(df.data, ",").alias("joined")).collect()
    [Row(joined=u'a,b,c'), Row(joined=u'a')]
    >>> df.select(array_join(df.data, ",", "NULL").alias("joined")).collect()
    [Row(joined=u'a,b,c'), Row(joined=u'a,NULL')]
[ "Concatenates", "the", "elements", "of", "column", "using", "the", "delimiter", ".", "Null", "values", "are", "replaced", "with", "null_replacement", "if", "set", "otherwise", "they", "are", "ignored", "." ]
618d6bff71073c8c93501ab7392c3cc579730f0b
https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/sql/functions.py#L1977-L1993
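To see how `array_join` pairs with `split`, a round-trip sketch that rewrites the delimiter of a delimited string; illustrative only, assuming an active `SparkSession` bound to `spark`:

import pyspark.sql.functions as F

df = spark.createDataFrame([('a,b,c',)], ['csv'])
# Split into an array on ',', then join the elements back with ' | '.
df.select(F.array_join(F.split(df.csv, ','), ' | ').alias('joined')).collect()
# -> [Row(joined=u'a | b | c')]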
19,193
apache/spark
python/pyspark/sql/functions.py
concat
def concat(*cols):
    """
    Concatenates multiple input columns together into a single column.
    The function works with strings, binary and compatible array columns.

    >>> df = spark.createDataFrame([('abcd','123')], ['s', 'd'])
    >>> df.select(concat(df.s, df.d).alias('s')).collect()
    [Row(s=u'abcd123')]

    >>> df = spark.createDataFrame([([1, 2], [3, 4], [5]), ([1, 2], None, [3])], ['a', 'b', 'c'])
    >>> df.select(concat(df.a, df.b, df.c).alias("arr")).collect()
    [Row(arr=[1, 2, 3, 4, 5]), Row(arr=None)]
    """
    sc = SparkContext._active_spark_context
    return Column(sc._jvm.functions.concat(_to_seq(sc, cols, _to_java_column)))
python
def concat(*cols):
    """
    Concatenates multiple input columns together into a single column.
    The function works with strings, binary and compatible array columns.

    >>> df = spark.createDataFrame([('abcd','123')], ['s', 'd'])
    >>> df.select(concat(df.s, df.d).alias('s')).collect()
    [Row(s=u'abcd123')]

    >>> df = spark.createDataFrame([([1, 2], [3, 4], [5]), ([1, 2], None, [3])], ['a', 'b', 'c'])
    >>> df.select(concat(df.a, df.b, df.c).alias("arr")).collect()
    [Row(arr=[1, 2, 3, 4, 5]), Row(arr=None)]
    """
    sc = SparkContext._active_spark_context
    return Column(sc._jvm.functions.concat(_to_seq(sc, cols, _to_java_column)))
[ "def", "concat", "(", "*", "cols", ")", ":", "sc", "=", "SparkContext", ".", "_active_spark_context", "return", "Column", "(", "sc", ".", "_jvm", ".", "functions", ".", "concat", "(", "_to_seq", "(", "sc", ",", "cols", ",", "_to_java_column", ")", ")", ")" ]
Concatenates multiple input columns together into a single column.
    The function works with strings, binary and compatible array columns.

    >>> df = spark.createDataFrame([('abcd','123')], ['s', 'd'])
    >>> df.select(concat(df.s, df.d).alias('s')).collect()
    [Row(s=u'abcd123')]

    >>> df = spark.createDataFrame([([1, 2], [3, 4], [5]), ([1, 2], None, [3])], ['a', 'b', 'c'])
    >>> df.select(concat(df.a, df.b, df.c).alias("arr")).collect()
    [Row(arr=[1, 2, 3, 4, 5]), Row(arr=None)]
[ "Concatenates", "multiple", "input", "columns", "together", "into", "a", "single", "column", ".", "The", "function", "works", "with", "strings", "binary", "and", "compatible", "array", "columns", "." ]
618d6bff71073c8c93501ab7392c3cc579730f0b
https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/sql/functions.py#L1998-L2012
19,194
apache/spark
python/pyspark/sql/functions.py
explode
def explode(col):
    """
    Returns a new row for each element in the given array or map.
    Uses the default column name `col` for elements in the array and
    `key` and `value` for elements in the map unless specified otherwise.

    >>> from pyspark.sql import Row
    >>> eDF = spark.createDataFrame([Row(a=1, intlist=[1,2,3], mapfield={"a": "b"})])
    >>> eDF.select(explode(eDF.intlist).alias("anInt")).collect()
    [Row(anInt=1), Row(anInt=2), Row(anInt=3)]

    >>> eDF.select(explode(eDF.mapfield).alias("key", "value")).show()
    +---+-----+
    |key|value|
    +---+-----+
    |  a|    b|
    +---+-----+
    """
    sc = SparkContext._active_spark_context
    jc = sc._jvm.functions.explode(_to_java_column(col))
    return Column(jc)
python
def explode(col):
    """
    Returns a new row for each element in the given array or map.
    Uses the default column name `col` for elements in the array and
    `key` and `value` for elements in the map unless specified otherwise.

    >>> from pyspark.sql import Row
    >>> eDF = spark.createDataFrame([Row(a=1, intlist=[1,2,3], mapfield={"a": "b"})])
    >>> eDF.select(explode(eDF.intlist).alias("anInt")).collect()
    [Row(anInt=1), Row(anInt=2), Row(anInt=3)]

    >>> eDF.select(explode(eDF.mapfield).alias("key", "value")).show()
    +---+-----+
    |key|value|
    +---+-----+
    |  a|    b|
    +---+-----+
    """
    sc = SparkContext._active_spark_context
    jc = sc._jvm.functions.explode(_to_java_column(col))
    return Column(jc)
[ "def", "explode", "(", "col", ")", ":", "sc", "=", "SparkContext", ".", "_active_spark_context", "jc", "=", "sc", ".", "_jvm", ".", "functions", ".", "explode", "(", "_to_java_column", "(", "col", ")", ")", "return", "Column", "(", "jc", ")" ]
Returns a new row for each element in the given array or map.
    Uses the default column name `col` for elements in the array and
    `key` and `value` for elements in the map unless specified otherwise.

    >>> from pyspark.sql import Row
    >>> eDF = spark.createDataFrame([Row(a=1, intlist=[1,2,3], mapfield={"a": "b"})])
    >>> eDF.select(explode(eDF.intlist).alias("anInt")).collect()
    [Row(anInt=1), Row(anInt=2), Row(anInt=3)]

    >>> eDF.select(explode(eDF.mapfield).alias("key", "value")).show()
    +---+-----+
    |key|value|
    +---+-----+
    |  a|    b|
    +---+-----+
[ "Returns", "a", "new", "row", "for", "each", "element", "in", "the", "given", "array", "or", "map", ".", "Uses", "the", "default", "column", "name", "col", "for", "elements", "in", "the", "array", "and", "key", "and", "value", "for", "elements", "in", "the", "map", "unless", "specified", "otherwise", "." ]
618d6bff71073c8c93501ab7392c3cc579730f0b
https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/sql/functions.py#L2144-L2164
19,195
apache/spark
python/pyspark/sql/functions.py
get_json_object
def get_json_object(col, path):
    """
    Extracts json object from a json string based on json path specified, and returns json string
    of the extracted json object. It will return null if the input json string is invalid.

    :param col: string column in json format
    :param path: path to the json object to extract

    >>> data = [("1", '''{"f1": "value1", "f2": "value2"}'''), ("2", '''{"f1": "value12"}''')]
    >>> df = spark.createDataFrame(data, ("key", "jstring"))
    >>> df.select(df.key, get_json_object(df.jstring, '$.f1').alias("c0"), \\
    ...                   get_json_object(df.jstring, '$.f2').alias("c1") ).collect()
    [Row(key=u'1', c0=u'value1', c1=u'value2'), Row(key=u'2', c0=u'value12', c1=None)]
    """
    sc = SparkContext._active_spark_context
    jc = sc._jvm.functions.get_json_object(_to_java_column(col), path)
    return Column(jc)
python
def get_json_object(col, path):
    """
    Extracts json object from a json string based on json path specified, and returns json string
    of the extracted json object. It will return null if the input json string is invalid.

    :param col: string column in json format
    :param path: path to the json object to extract

    >>> data = [("1", '''{"f1": "value1", "f2": "value2"}'''), ("2", '''{"f1": "value12"}''')]
    >>> df = spark.createDataFrame(data, ("key", "jstring"))
    >>> df.select(df.key, get_json_object(df.jstring, '$.f1').alias("c0"), \\
    ...                   get_json_object(df.jstring, '$.f2').alias("c1") ).collect()
    [Row(key=u'1', c0=u'value1', c1=u'value2'), Row(key=u'2', c0=u'value12', c1=None)]
    """
    sc = SparkContext._active_spark_context
    jc = sc._jvm.functions.get_json_object(_to_java_column(col), path)
    return Column(jc)
[ "def", "get_json_object", "(", "col", ",", "path", ")", ":", "sc", "=", "SparkContext", ".", "_active_spark_context", "jc", "=", "sc", ".", "_jvm", ".", "functions", ".", "get_json_object", "(", "_to_java_column", "(", "col", ")", ",", "path", ")", "return", "Column", "(", "jc", ")" ]
Extracts json object from a json string based on json path specified, and returns json string
    of the extracted json object. It will return null if the input json string is invalid.

    :param col: string column in json format
    :param path: path to the json object to extract

    >>> data = [("1", '''{"f1": "value1", "f2": "value2"}'''), ("2", '''{"f1": "value12"}''')]
    >>> df = spark.createDataFrame(data, ("key", "jstring"))
    >>> df.select(df.key, get_json_object(df.jstring, '$.f1').alias("c0"), \\
    ...                   get_json_object(df.jstring, '$.f2').alias("c1") ).collect()
    [Row(key=u'1', c0=u'value1', c1=u'value2'), Row(key=u'2', c0=u'value12', c1=None)]
[ "Extracts", "json", "object", "from", "a", "json", "string", "based", "on", "json", "path", "specified", "and", "returns", "json", "string", "of", "the", "extracted", "json", "object", ".", "It", "will", "return", "null", "if", "the", "input", "json", "string", "is", "invalid", "." ]
618d6bff71073c8c93501ab7392c3cc579730f0b
https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/sql/functions.py#L2264-L2280
19,196
apache/spark
python/pyspark/sql/functions.py
json_tuple
def json_tuple(col, *fields):
    """Creates a new row for a json column according to the given field names.

    :param col: string column in json format
    :param fields: list of fields to extract

    >>> data = [("1", '''{"f1": "value1", "f2": "value2"}'''), ("2", '''{"f1": "value12"}''')]
    >>> df = spark.createDataFrame(data, ("key", "jstring"))
    >>> df.select(df.key, json_tuple(df.jstring, 'f1', 'f2')).collect()
    [Row(key=u'1', c0=u'value1', c1=u'value2'), Row(key=u'2', c0=u'value12', c1=None)]
    """
    sc = SparkContext._active_spark_context
    jc = sc._jvm.functions.json_tuple(_to_java_column(col), _to_seq(sc, fields))
    return Column(jc)
python
def json_tuple(col, *fields):
    """Creates a new row for a json column according to the given field names.

    :param col: string column in json format
    :param fields: list of fields to extract

    >>> data = [("1", '''{"f1": "value1", "f2": "value2"}'''), ("2", '''{"f1": "value12"}''')]
    >>> df = spark.createDataFrame(data, ("key", "jstring"))
    >>> df.select(df.key, json_tuple(df.jstring, 'f1', 'f2')).collect()
    [Row(key=u'1', c0=u'value1', c1=u'value2'), Row(key=u'2', c0=u'value12', c1=None)]
    """
    sc = SparkContext._active_spark_context
    jc = sc._jvm.functions.json_tuple(_to_java_column(col), _to_seq(sc, fields))
    return Column(jc)
[ "def", "json_tuple", "(", "col", ",", "*", "fields", ")", ":", "sc", "=", "SparkContext", ".", "_active_spark_context", "jc", "=", "sc", ".", "_jvm", ".", "functions", ".", "json_tuple", "(", "_to_java_column", "(", "col", ")", ",", "_to_seq", "(", "sc", ",", "fields", ")", ")", "return", "Column", "(", "jc", ")" ]
Creates a new row for a json column according to the given field names.

    :param col: string column in json format
    :param fields: list of fields to extract

    >>> data = [("1", '''{"f1": "value1", "f2": "value2"}'''), ("2", '''{"f1": "value12"}''')]
    >>> df = spark.createDataFrame(data, ("key", "jstring"))
    >>> df.select(df.key, json_tuple(df.jstring, 'f1', 'f2')).collect()
    [Row(key=u'1', c0=u'value1', c1=u'value2'), Row(key=u'2', c0=u'value12', c1=None)]
[ "Creates", "a", "new", "row", "for", "a", "json", "column", "according", "to", "the", "given", "field", "names", "." ]
618d6bff71073c8c93501ab7392c3cc579730f0b
https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/sql/functions.py#L2285-L2298
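`json_tuple` and `get_json_object` cover similar ground: the former returns one output column per requested field in a single call, while the latter extracts one JSON path at a time. A side-by-side sketch, illustrative only and assuming an active `SparkSession` bound to `spark`:

import pyspark.sql.functions as F

df = spark.createDataFrame([('{"f1": "value1", "f2": "value2"}',)], ['jstring'])
df.select(
    F.json_tuple(df.jstring, 'f1', 'f2'),                    # produces columns c0 and c1
    F.get_json_object(df.jstring, '$.f2').alias('f2_only'),  # produces a single column
).show()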
19,197
apache/spark
python/pyspark/sql/functions.py
schema_of_json
def schema_of_json(json, options={}):
    """
    Parses a JSON string and infers its schema in DDL format.

    :param json: a JSON string or a string literal containing a JSON string.
    :param options: options to control parsing. accepts the same options as the JSON datasource

    .. versionchanged:: 3.0
       It accepts `options` parameter to control schema inferring.

    >>> df = spark.range(1)
    >>> df.select(schema_of_json(lit('{"a": 0}')).alias("json")).collect()
    [Row(json=u'struct<a:bigint>')]
    >>> schema = schema_of_json('{a: 1}', {'allowUnquotedFieldNames':'true'})
    >>> df.select(schema.alias("json")).collect()
    [Row(json=u'struct<a:bigint>')]
    """
    if isinstance(json, basestring):
        col = _create_column_from_literal(json)
    elif isinstance(json, Column):
        col = _to_java_column(json)
    else:
        raise TypeError("schema argument should be a column or string")

    sc = SparkContext._active_spark_context
    jc = sc._jvm.functions.schema_of_json(col, options)
    return Column(jc)
python
def schema_of_json(json, options={}):
    """
    Parses a JSON string and infers its schema in DDL format.

    :param json: a JSON string or a string literal containing a JSON string.
    :param options: options to control parsing. accepts the same options as the JSON datasource

    .. versionchanged:: 3.0
       It accepts `options` parameter to control schema inferring.

    >>> df = spark.range(1)
    >>> df.select(schema_of_json(lit('{"a": 0}')).alias("json")).collect()
    [Row(json=u'struct<a:bigint>')]
    >>> schema = schema_of_json('{a: 1}', {'allowUnquotedFieldNames':'true'})
    >>> df.select(schema.alias("json")).collect()
    [Row(json=u'struct<a:bigint>')]
    """
    if isinstance(json, basestring):
        col = _create_column_from_literal(json)
    elif isinstance(json, Column):
        col = _to_java_column(json)
    else:
        raise TypeError("schema argument should be a column or string")

    sc = SparkContext._active_spark_context
    jc = sc._jvm.functions.schema_of_json(col, options)
    return Column(jc)
[ "def", "schema_of_json", "(", "json", ",", "options", "=", "{", "}", ")", ":", "if", "isinstance", "(", "json", ",", "basestring", ")", ":", "col", "=", "_create_column_from_literal", "(", "json", ")", "elif", "isinstance", "(", "json", ",", "Column", ")", ":", "col", "=", "_to_java_column", "(", "json", ")", "else", ":", "raise", "TypeError", "(", "\"schema argument should be a column or string\"", ")", "sc", "=", "SparkContext", ".", "_active_spark_context", "jc", "=", "sc", ".", "_jvm", ".", "functions", ".", "schema_of_json", "(", "col", ",", "options", ")", "return", "Column", "(", "jc", ")" ]
Parses a JSON string and infers its schema in DDL format.

    :param json: a JSON string or a string literal containing a JSON string.
    :param options: options to control parsing. accepts the same options as the JSON datasource

    .. versionchanged:: 3.0
       It accepts `options` parameter to control schema inferring.

    >>> df = spark.range(1)
    >>> df.select(schema_of_json(lit('{"a": 0}')).alias("json")).collect()
    [Row(json=u'struct<a:bigint>')]
    >>> schema = schema_of_json('{a: 1}', {'allowUnquotedFieldNames':'true'})
    >>> df.select(schema.alias("json")).collect()
    [Row(json=u'struct<a:bigint>')]
[ "Parses", "a", "JSON", "string", "and", "infers", "its", "schema", "in", "DDL", "format", "." ]
618d6bff71073c8c93501ab7392c3cc579730f0b
https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/sql/functions.py#L2393-L2419
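The DDL string produced by `schema_of_json` is typically handed to a JSON parser such as `from_json`, which lives elsewhere in the same module; the chaining below is a sketch rather than code from the record, and it assumes an active `SparkSession` bound to `spark`:

import pyspark.sql.functions as F

df = spark.createDataFrame([('{"a": 1}',)], ['payload'])
# The DDL schema below matches the doctest output above; in practice it would come
# from schema_of_json applied to a representative sample literal.
inferred = 'struct<a:bigint>'
df.select(F.from_json(df.payload, inferred).alias('parsed')).select('parsed.a').collect()
# -> [Row(a=1)]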
19,198
apache/spark
python/pyspark/sql/functions.py
schema_of_csv
def schema_of_csv(csv, options={}):
    """
    Parses a CSV string and infers its schema in DDL format.

    :param col: a CSV string or a string literal containing a CSV string.
    :param options: options to control parsing. accepts the same options as the CSV datasource

    >>> df = spark.range(1)
    >>> df.select(schema_of_csv(lit('1|a'), {'sep':'|'}).alias("csv")).collect()
    [Row(csv=u'struct<_c0:int,_c1:string>')]
    >>> df.select(schema_of_csv('1|a', {'sep':'|'}).alias("csv")).collect()
    [Row(csv=u'struct<_c0:int,_c1:string>')]
    """
    if isinstance(csv, basestring):
        col = _create_column_from_literal(csv)
    elif isinstance(csv, Column):
        col = _to_java_column(csv)
    else:
        raise TypeError("schema argument should be a column or string")

    sc = SparkContext._active_spark_context
    jc = sc._jvm.functions.schema_of_csv(col, options)
    return Column(jc)
python
def schema_of_csv(csv, options={}):
    """
    Parses a CSV string and infers its schema in DDL format.

    :param col: a CSV string or a string literal containing a CSV string.
    :param options: options to control parsing. accepts the same options as the CSV datasource

    >>> df = spark.range(1)
    >>> df.select(schema_of_csv(lit('1|a'), {'sep':'|'}).alias("csv")).collect()
    [Row(csv=u'struct<_c0:int,_c1:string>')]
    >>> df.select(schema_of_csv('1|a', {'sep':'|'}).alias("csv")).collect()
    [Row(csv=u'struct<_c0:int,_c1:string>')]
    """
    if isinstance(csv, basestring):
        col = _create_column_from_literal(csv)
    elif isinstance(csv, Column):
        col = _to_java_column(csv)
    else:
        raise TypeError("schema argument should be a column or string")

    sc = SparkContext._active_spark_context
    jc = sc._jvm.functions.schema_of_csv(col, options)
    return Column(jc)
[ "def", "schema_of_csv", "(", "csv", ",", "options", "=", "{", "}", ")", ":", "if", "isinstance", "(", "csv", ",", "basestring", ")", ":", "col", "=", "_create_column_from_literal", "(", "csv", ")", "elif", "isinstance", "(", "csv", ",", "Column", ")", ":", "col", "=", "_to_java_column", "(", "csv", ")", "else", ":", "raise", "TypeError", "(", "\"schema argument should be a column or string\"", ")", "sc", "=", "SparkContext", ".", "_active_spark_context", "jc", "=", "sc", ".", "_jvm", ".", "functions", ".", "schema_of_csv", "(", "col", ",", "options", ")", "return", "Column", "(", "jc", ")" ]
Parses a CSV string and infers its schema in DDL format.

    :param col: a CSV string or a string literal containing a CSV string.
    :param options: options to control parsing. accepts the same options as the CSV datasource

    >>> df = spark.range(1)
    >>> df.select(schema_of_csv(lit('1|a'), {'sep':'|'}).alias("csv")).collect()
    [Row(csv=u'struct<_c0:int,_c1:string>')]
    >>> df.select(schema_of_csv('1|a', {'sep':'|'}).alias("csv")).collect()
    [Row(csv=u'struct<_c0:int,_c1:string>')]
[ "Parses", "a", "CSV", "string", "and", "infers", "its", "schema", "in", "DDL", "format", "." ]
618d6bff71073c8c93501ab7392c3cc579730f0b
https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/sql/functions.py#L2424-L2446
19,199
apache/spark
python/pyspark/sql/functions.py
map_concat
def map_concat(*cols):
    """Returns the union of all the given maps.

    :param cols: list of column names (string) or list of :class:`Column` expressions

    >>> from pyspark.sql.functions import map_concat
    >>> df = spark.sql("SELECT map(1, 'a', 2, 'b') as map1, map(3, 'c', 1, 'd') as map2")
    >>> df.select(map_concat("map1", "map2").alias("map3")).show(truncate=False)
    +------------------------+
    |map3                    |
    +------------------------+
    |[1 -> d, 2 -> b, 3 -> c]|
    +------------------------+
    """
    sc = SparkContext._active_spark_context
    if len(cols) == 1 and isinstance(cols[0], (list, set)):
        cols = cols[0]
    jc = sc._jvm.functions.map_concat(_to_seq(sc, cols, _to_java_column))
    return Column(jc)
python
def map_concat(*cols):
    """Returns the union of all the given maps.

    :param cols: list of column names (string) or list of :class:`Column` expressions

    >>> from pyspark.sql.functions import map_concat
    >>> df = spark.sql("SELECT map(1, 'a', 2, 'b') as map1, map(3, 'c', 1, 'd') as map2")
    >>> df.select(map_concat("map1", "map2").alias("map3")).show(truncate=False)
    +------------------------+
    |map3                    |
    +------------------------+
    |[1 -> d, 2 -> b, 3 -> c]|
    +------------------------+
    """
    sc = SparkContext._active_spark_context
    if len(cols) == 1 and isinstance(cols[0], (list, set)):
        cols = cols[0]
    jc = sc._jvm.functions.map_concat(_to_seq(sc, cols, _to_java_column))
    return Column(jc)
[ "def", "map_concat", "(", "*", "cols", ")", ":", "sc", "=", "SparkContext", ".", "_active_spark_context", "if", "len", "(", "cols", ")", "==", "1", "and", "isinstance", "(", "cols", "[", "0", "]", ",", "(", "list", ",", "set", ")", ")", ":", "cols", "=", "cols", "[", "0", "]", "jc", "=", "sc", ".", "_jvm", ".", "functions", ".", "map_concat", "(", "_to_seq", "(", "sc", ",", "cols", ",", "_to_java_column", ")", ")", "return", "Column", "(", "jc", ")" ]
Returns the union of all the given maps.

    :param cols: list of column names (string) or list of :class:`Column` expressions

    >>> from pyspark.sql.functions import map_concat
    >>> df = spark.sql("SELECT map(1, 'a', 2, 'b') as map1, map(3, 'c', 1, 'd') as map2")
    >>> df.select(map_concat("map1", "map2").alias("map3")).show(truncate=False)
    +------------------------+
    |map3                    |
    +------------------------+
    |[1 -> d, 2 -> b, 3 -> c]|
    +------------------------+
[ "Returns", "the", "union", "of", "all", "the", "given", "maps", "." ]
618d6bff71073c8c93501ab7392c3cc579730f0b
https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/sql/functions.py#L2717-L2735
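As a closing sketch, the merged map from `map_concat` can be unpacked with `explode`; note in the doctest above that the duplicate key 1 ends up with the value from the later map. Illustrative only, assuming an active `SparkSession` bound to `spark`:

import pyspark.sql.functions as F

df = spark.sql("SELECT map(1, 'a', 2, 'b') as map1, map(3, 'c', 1, 'd') as map2")
merged = df.select(F.map_concat('map1', 'map2').alias('map3'))
# explode() on a map column yields one (key, value) row per entry.
merged.select(F.explode('map3').alias('key', 'value')).show()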