Dataset columns:

id: int32 (values 0 to 252k)
repo: string (lengths 7 to 55)
path: string (lengths 4 to 127)
func_name: string (lengths 1 to 88)
original_string: string (lengths 75 to 19.8k)
language: string (1 value)
code: string (lengths 75 to 19.8k)
code_tokens: list
docstring: string (lengths 3 to 17.3k)
docstring_tokens: list
sha: string (lengths 40 to 40)
url: string (lengths 87 to 242)
19,000
apache/spark
python/pyspark/rdd.py
RDD._computeFractionForSampleSize
def _computeFractionForSampleSize(sampleSizeLowerBound, total, withReplacement): """ Returns a sampling rate that guarantees a sample of size >= sampleSizeLowerBound 99.99% of the time. How the sampling rate is determined: Let p = num / total, where num is the sample size and total is the total number of data points in the RDD. We're trying to compute q > p such that - when sampling with replacement, we're drawing each data point with prob_i ~ Pois(q), where we want to guarantee Pr[s < num] < 0.0001 for s = sum(prob_i for i from 0 to total), i.e. the failure rate of not having a sufficiently large sample < 0.0001. Setting q = p + 5 * sqrt(p/total) is sufficient to guarantee 0.9999 success rate for num > 12, but we need a slightly larger q (9 empirically determined). - when sampling without replacement, we're drawing each data point with prob_i ~ Binomial(total, fraction) and our choice of q guarantees 1-delta, or 0.9999 success rate, where success rate is defined the same as in sampling with replacement. """ fraction = float(sampleSizeLowerBound) / total if withReplacement: numStDev = 5 if (sampleSizeLowerBound < 12): numStDev = 9 return fraction + numStDev * sqrt(fraction / total) else: delta = 0.00005 gamma = - log(delta) / total return min(1, fraction + gamma + sqrt(gamma * gamma + 2 * gamma * fraction))
python
def _computeFractionForSampleSize(sampleSizeLowerBound, total, withReplacement): """ Returns a sampling rate that guarantees a sample of size >= sampleSizeLowerBound 99.99% of the time. How the sampling rate is determined: Let p = num / total, where num is the sample size and total is the total number of data points in the RDD. We're trying to compute q > p such that - when sampling with replacement, we're drawing each data point with prob_i ~ Pois(q), where we want to guarantee Pr[s < num] < 0.0001 for s = sum(prob_i for i from 0 to total), i.e. the failure rate of not having a sufficiently large sample < 0.0001. Setting q = p + 5 * sqrt(p/total) is sufficient to guarantee 0.9999 success rate for num > 12, but we need a slightly larger q (9 empirically determined). - when sampling without replacement, we're drawing each data point with prob_i ~ Binomial(total, fraction) and our choice of q guarantees 1-delta, or 0.9999 success rate, where success rate is defined the same as in sampling with replacement. """ fraction = float(sampleSizeLowerBound) / total if withReplacement: numStDev = 5 if (sampleSizeLowerBound < 12): numStDev = 9 return fraction + numStDev * sqrt(fraction / total) else: delta = 0.00005 gamma = - log(delta) / total return min(1, fraction + gamma + sqrt(gamma * gamma + 2 * gamma * fraction))
[ "def", "_computeFractionForSampleSize", "(", "sampleSizeLowerBound", ",", "total", ",", "withReplacement", ")", ":", "fraction", "=", "float", "(", "sampleSizeLowerBound", ")", "/", "total", "if", "withReplacement", ":", "numStDev", "=", "5", "if", "(", "sampleSizeLowerBound", "<", "12", ")", ":", "numStDev", "=", "9", "return", "fraction", "+", "numStDev", "*", "sqrt", "(", "fraction", "/", "total", ")", "else", ":", "delta", "=", "0.00005", "gamma", "=", "-", "log", "(", "delta", ")", "/", "total", "return", "min", "(", "1", ",", "fraction", "+", "gamma", "+", "sqrt", "(", "gamma", "*", "gamma", "+", "2", "*", "gamma", "*", "fraction", ")", ")" ]
Returns a sampling rate that guarantees a sample of size >= sampleSizeLowerBound 99.99% of the time. How the sampling rate is determined: Let p = num / total, where num is the sample size and total is the total number of data points in the RDD. We're trying to compute q > p such that - when sampling with replacement, we're drawing each data point with prob_i ~ Pois(q), where we want to guarantee Pr[s < num] < 0.0001 for s = sum(prob_i for i from 0 to total), i.e. the failure rate of not having a sufficiently large sample < 0.0001. Setting q = p + 5 * sqrt(p/total) is sufficient to guarantee 0.9999 success rate for num > 12, but we need a slightly larger q (9 empirically determined). - when sampling without replacement, we're drawing each data point with prob_i ~ Binomial(total, fraction) and our choice of q guarantees 1-delta, or 0.9999 success rate, where success rate is defined the same as in sampling with replacement.
[ "Returns", "a", "sampling", "rate", "that", "guarantees", "a", "sample", "of", "size", ">", "=", "sampleSizeLowerBound", "99", ".", "99%", "of", "the", "time", "." ]
618d6bff71073c8c93501ab7392c3cc579730f0b
https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/rdd.py#L521-L551
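The docstring above derives the sampling rate from p = num / total. As a rough standalone illustration (plain Python, no Spark; the helper name compute_fraction is made up here), the same two formulas can be evaluated on concrete numbers:

```python
# Standalone sketch, no Spark needed: evaluates the two formulas described in the
# docstring so the resulting rates can be inspected on concrete numbers.
from math import sqrt, log

def compute_fraction(sample_size_lower_bound, total, with_replacement):
    # p = num / total, the naive sampling rate
    fraction = float(sample_size_lower_bound) / total
    if with_replacement:
        # Poisson case: pad p by 5 standard deviations, or 9 for very small samples
        num_st_dev = 5 if sample_size_lower_bound >= 12 else 9
        return fraction + num_st_dev * sqrt(fraction / total)
    # Binomial case: delta = 0.00005 corresponds to the advertised 99.99% success rate
    delta = 0.00005
    gamma = -log(delta) / total
    return min(1, fraction + gamma + sqrt(gamma * gamma + 2 * gamma * fraction))

# Sampling 1,000 of 1,000,000 records needs only a slightly padded rate:
print(compute_fraction(1000, 1000000, True))    # ~0.00116
print(compute_fraction(1000, 1000000, False))   # ~0.00115
```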
19,001
apache/spark
python/pyspark/rdd.py
RDD.union
def union(self, other): """ Return the union of this RDD and another one. >>> rdd = sc.parallelize([1, 1, 2, 3]) >>> rdd.union(rdd).collect() [1, 1, 2, 3, 1, 1, 2, 3] """ if self._jrdd_deserializer == other._jrdd_deserializer: rdd = RDD(self._jrdd.union(other._jrdd), self.ctx, self._jrdd_deserializer) else: # These RDDs contain data in different serialized formats, so we # must normalize them to the default serializer. self_copy = self._reserialize() other_copy = other._reserialize() rdd = RDD(self_copy._jrdd.union(other_copy._jrdd), self.ctx, self.ctx.serializer) if (self.partitioner == other.partitioner and self.getNumPartitions() == rdd.getNumPartitions()): rdd.partitioner = self.partitioner return rdd
python
def union(self, other): """ Return the union of this RDD and another one. >>> rdd = sc.parallelize([1, 1, 2, 3]) >>> rdd.union(rdd).collect() [1, 1, 2, 3, 1, 1, 2, 3] """ if self._jrdd_deserializer == other._jrdd_deserializer: rdd = RDD(self._jrdd.union(other._jrdd), self.ctx, self._jrdd_deserializer) else: # These RDDs contain data in different serialized formats, so we # must normalize them to the default serializer. self_copy = self._reserialize() other_copy = other._reserialize() rdd = RDD(self_copy._jrdd.union(other_copy._jrdd), self.ctx, self.ctx.serializer) if (self.partitioner == other.partitioner and self.getNumPartitions() == rdd.getNumPartitions()): rdd.partitioner = self.partitioner return rdd
[ "def", "union", "(", "self", ",", "other", ")", ":", "if", "self", ".", "_jrdd_deserializer", "==", "other", ".", "_jrdd_deserializer", ":", "rdd", "=", "RDD", "(", "self", ".", "_jrdd", ".", "union", "(", "other", ".", "_jrdd", ")", ",", "self", ".", "ctx", ",", "self", ".", "_jrdd_deserializer", ")", "else", ":", "# These RDDs contain data in different serialized formats, so we", "# must normalize them to the default serializer.", "self_copy", "=", "self", ".", "_reserialize", "(", ")", "other_copy", "=", "other", ".", "_reserialize", "(", ")", "rdd", "=", "RDD", "(", "self_copy", ".", "_jrdd", ".", "union", "(", "other_copy", ".", "_jrdd", ")", ",", "self", ".", "ctx", ",", "self", ".", "ctx", ".", "serializer", ")", "if", "(", "self", ".", "partitioner", "==", "other", ".", "partitioner", "and", "self", ".", "getNumPartitions", "(", ")", "==", "rdd", ".", "getNumPartitions", "(", ")", ")", ":", "rdd", ".", "partitioner", "=", "self", ".", "partitioner", "return", "rdd" ]
Return the union of this RDD and another one. >>> rdd = sc.parallelize([1, 1, 2, 3]) >>> rdd.union(rdd).collect() [1, 1, 2, 3, 1, 1, 2, 3]
[ "Return", "the", "union", "of", "this", "RDD", "and", "another", "one", "." ]
618d6bff71073c8c93501ab7392c3cc579730f0b
https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/rdd.py#L553-L574
19,002
apache/spark
python/pyspark/rdd.py
RDD.intersection
def intersection(self, other): """ Return the intersection of this RDD and another one. The output will not contain any duplicate elements, even if the input RDDs did. .. note:: This method performs a shuffle internally. >>> rdd1 = sc.parallelize([1, 10, 2, 3, 4, 5]) >>> rdd2 = sc.parallelize([1, 6, 2, 3, 7, 8]) >>> rdd1.intersection(rdd2).collect() [1, 2, 3] """ return self.map(lambda v: (v, None)) \ .cogroup(other.map(lambda v: (v, None))) \ .filter(lambda k_vs: all(k_vs[1])) \ .keys()
python
def intersection(self, other): """ Return the intersection of this RDD and another one. The output will not contain any duplicate elements, even if the input RDDs did. .. note:: This method performs a shuffle internally. >>> rdd1 = sc.parallelize([1, 10, 2, 3, 4, 5]) >>> rdd2 = sc.parallelize([1, 6, 2, 3, 7, 8]) >>> rdd1.intersection(rdd2).collect() [1, 2, 3] """ return self.map(lambda v: (v, None)) \ .cogroup(other.map(lambda v: (v, None))) \ .filter(lambda k_vs: all(k_vs[1])) \ .keys()
[ "def", "intersection", "(", "self", ",", "other", ")", ":", "return", "self", ".", "map", "(", "lambda", "v", ":", "(", "v", ",", "None", ")", ")", ".", "cogroup", "(", "other", ".", "map", "(", "lambda", "v", ":", "(", "v", ",", "None", ")", ")", ")", ".", "filter", "(", "lambda", "k_vs", ":", "all", "(", "k_vs", "[", "1", "]", ")", ")", ".", "keys", "(", ")" ]
Return the intersection of this RDD and another one. The output will not contain any duplicate elements, even if the input RDDs did. .. note:: This method performs a shuffle internally. >>> rdd1 = sc.parallelize([1, 10, 2, 3, 4, 5]) >>> rdd2 = sc.parallelize([1, 6, 2, 3, 7, 8]) >>> rdd1.intersection(rdd2).collect() [1, 2, 3]
[ "Return", "the", "intersection", "of", "this", "RDD", "and", "another", "one", ".", "The", "output", "will", "not", "contain", "any", "duplicate", "elements", "even", "if", "the", "input", "RDDs", "did", "." ]
618d6bff71073c8c93501ab7392c3cc579730f0b
https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/rdd.py#L576-L591
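The intersection above is built from cogroup plus a filter that keeps a key only when both groups are non-empty. A minimal in-memory sketch of that idea, assuming plain Python lists in place of RDDs and a made-up helper name:

```python
# Local sketch (no Spark; small in-memory lists) of the cogroup-based intersection:
# key every value with None, group both sides by key, keep keys present on both sides.
# Duplicates disappear because grouping is keyed.
def local_intersection(left, right):
    grouped = {}
    for v in left:
        grouped.setdefault(v, [[], []])[0].append(None)
    for v in right:
        grouped.setdefault(v, [[], []])[1].append(None)
    # all(groups) is the same test as `all(k_vs[1])` above: both groups must be non-empty
    return [k for k, groups in grouped.items() if all(groups)]

print(sorted(local_intersection([1, 10, 2, 3, 4, 5], [1, 6, 2, 3, 7, 8])))  # [1, 2, 3]
```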
19,003
apache/spark
python/pyspark/rdd.py
RDD.repartitionAndSortWithinPartitions
def repartitionAndSortWithinPartitions(self, numPartitions=None, partitionFunc=portable_hash, ascending=True, keyfunc=lambda x: x): """ Repartition the RDD according to the given partitioner and, within each resulting partition, sort records by their keys. >>> rdd = sc.parallelize([(0, 5), (3, 8), (2, 6), (0, 8), (3, 8), (1, 3)]) >>> rdd2 = rdd.repartitionAndSortWithinPartitions(2, lambda x: x % 2, True) >>> rdd2.glom().collect() [[(0, 5), (0, 8), (2, 6)], [(1, 3), (3, 8), (3, 8)]] """ if numPartitions is None: numPartitions = self._defaultReducePartitions() memory = _parse_memory(self.ctx._conf.get("spark.python.worker.memory", "512m")) serializer = self._jrdd_deserializer def sortPartition(iterator): sort = ExternalSorter(memory * 0.9, serializer).sorted return iter(sort(iterator, key=lambda k_v: keyfunc(k_v[0]), reverse=(not ascending))) return self.partitionBy(numPartitions, partitionFunc).mapPartitions(sortPartition, True)
python
def repartitionAndSortWithinPartitions(self, numPartitions=None, partitionFunc=portable_hash, ascending=True, keyfunc=lambda x: x): """ Repartition the RDD according to the given partitioner and, within each resulting partition, sort records by their keys. >>> rdd = sc.parallelize([(0, 5), (3, 8), (2, 6), (0, 8), (3, 8), (1, 3)]) >>> rdd2 = rdd.repartitionAndSortWithinPartitions(2, lambda x: x % 2, True) >>> rdd2.glom().collect() [[(0, 5), (0, 8), (2, 6)], [(1, 3), (3, 8), (3, 8)]] """ if numPartitions is None: numPartitions = self._defaultReducePartitions() memory = _parse_memory(self.ctx._conf.get("spark.python.worker.memory", "512m")) serializer = self._jrdd_deserializer def sortPartition(iterator): sort = ExternalSorter(memory * 0.9, serializer).sorted return iter(sort(iterator, key=lambda k_v: keyfunc(k_v[0]), reverse=(not ascending))) return self.partitionBy(numPartitions, partitionFunc).mapPartitions(sortPartition, True)
[ "def", "repartitionAndSortWithinPartitions", "(", "self", ",", "numPartitions", "=", "None", ",", "partitionFunc", "=", "portable_hash", ",", "ascending", "=", "True", ",", "keyfunc", "=", "lambda", "x", ":", "x", ")", ":", "if", "numPartitions", "is", "None", ":", "numPartitions", "=", "self", ".", "_defaultReducePartitions", "(", ")", "memory", "=", "_parse_memory", "(", "self", ".", "ctx", ".", "_conf", ".", "get", "(", "\"spark.python.worker.memory\"", ",", "\"512m\"", ")", ")", "serializer", "=", "self", ".", "_jrdd_deserializer", "def", "sortPartition", "(", "iterator", ")", ":", "sort", "=", "ExternalSorter", "(", "memory", "*", "0.9", ",", "serializer", ")", ".", "sorted", "return", "iter", "(", "sort", "(", "iterator", ",", "key", "=", "lambda", "k_v", ":", "keyfunc", "(", "k_v", "[", "0", "]", ")", ",", "reverse", "=", "(", "not", "ascending", ")", ")", ")", "return", "self", ".", "partitionBy", "(", "numPartitions", ",", "partitionFunc", ")", ".", "mapPartitions", "(", "sortPartition", ",", "True", ")" ]
Repartition the RDD according to the given partitioner and, within each resulting partition, sort records by their keys. >>> rdd = sc.parallelize([(0, 5), (3, 8), (2, 6), (0, 8), (3, 8), (1, 3)]) >>> rdd2 = rdd.repartitionAndSortWithinPartitions(2, lambda x: x % 2, True) >>> rdd2.glom().collect() [[(0, 5), (0, 8), (2, 6)], [(1, 3), (3, 8), (3, 8)]]
[ "Repartition", "the", "RDD", "according", "to", "the", "given", "partitioner", "and", "within", "each", "resulting", "partition", "sort", "records", "by", "their", "keys", "." ]
618d6bff71073c8c93501ab7392c3cc579730f0b
https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/rdd.py#L612-L633
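The doctest above shows keys being routed to a partition first and sorted only within that partition, never globally. A small local sketch of the same behaviour, assuming in-memory lists as partitions (all names here are illustrative):

```python
# In-memory sketch: route each key-value pair to a partition with partition_func,
# then sort each partition independently by the (optionally transformed) key.
def repartition_and_sort(pairs, num_partitions, partition_func,
                         ascending=True, keyfunc=lambda x: x):
    partitions = [[] for _ in range(num_partitions)]
    for k, v in pairs:
        partitions[partition_func(k) % num_partitions].append((k, v))
    for part in partitions:
        part.sort(key=lambda kv: keyfunc(kv[0]), reverse=not ascending)
    return partitions

pairs = [(0, 5), (3, 8), (2, 6), (0, 8), (3, 8), (1, 3)]
print(repartition_and_sort(pairs, 2, lambda x: x % 2))
# [[(0, 5), (0, 8), (2, 6)], [(1, 3), (3, 8), (3, 8)]]
```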
19,004
apache/spark
python/pyspark/rdd.py
RDD.sortBy
def sortBy(self, keyfunc, ascending=True, numPartitions=None): """ Sorts this RDD by the given keyfunc >>> tmp = [('a', 1), ('b', 2), ('1', 3), ('d', 4), ('2', 5)] >>> sc.parallelize(tmp).sortBy(lambda x: x[0]).collect() [('1', 3), ('2', 5), ('a', 1), ('b', 2), ('d', 4)] >>> sc.parallelize(tmp).sortBy(lambda x: x[1]).collect() [('a', 1), ('b', 2), ('1', 3), ('d', 4), ('2', 5)] """ return self.keyBy(keyfunc).sortByKey(ascending, numPartitions).values()
python
def sortBy(self, keyfunc, ascending=True, numPartitions=None): """ Sorts this RDD by the given keyfunc >>> tmp = [('a', 1), ('b', 2), ('1', 3), ('d', 4), ('2', 5)] >>> sc.parallelize(tmp).sortBy(lambda x: x[0]).collect() [('1', 3), ('2', 5), ('a', 1), ('b', 2), ('d', 4)] >>> sc.parallelize(tmp).sortBy(lambda x: x[1]).collect() [('a', 1), ('b', 2), ('1', 3), ('d', 4), ('2', 5)] """ return self.keyBy(keyfunc).sortByKey(ascending, numPartitions).values()
[ "def", "sortBy", "(", "self", ",", "keyfunc", ",", "ascending", "=", "True", ",", "numPartitions", "=", "None", ")", ":", "return", "self", ".", "keyBy", "(", "keyfunc", ")", ".", "sortByKey", "(", "ascending", ",", "numPartitions", ")", ".", "values", "(", ")" ]
Sorts this RDD by the given keyfunc >>> tmp = [('a', 1), ('b', 2), ('1', 3), ('d', 4), ('2', 5)] >>> sc.parallelize(tmp).sortBy(lambda x: x[0]).collect() [('1', 3), ('2', 5), ('a', 1), ('b', 2), ('d', 4)] >>> sc.parallelize(tmp).sortBy(lambda x: x[1]).collect() [('a', 1), ('b', 2), ('1', 3), ('d', 4), ('2', 5)]
[ "Sorts", "this", "RDD", "by", "the", "given", "keyfunc" ]
618d6bff71073c8c93501ab7392c3cc579730f0b
https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/rdd.py#L691-L701
19,005
apache/spark
python/pyspark/rdd.py
RDD.groupBy
def groupBy(self, f, numPartitions=None, partitionFunc=portable_hash): """ Return an RDD of grouped items. >>> rdd = sc.parallelize([1, 1, 2, 3, 5, 8]) >>> result = rdd.groupBy(lambda x: x % 2).collect() >>> sorted([(x, sorted(y)) for (x, y) in result]) [(0, [2, 8]), (1, [1, 1, 3, 5])] """ return self.map(lambda x: (f(x), x)).groupByKey(numPartitions, partitionFunc)
python
def groupBy(self, f, numPartitions=None, partitionFunc=portable_hash): """ Return an RDD of grouped items. >>> rdd = sc.parallelize([1, 1, 2, 3, 5, 8]) >>> result = rdd.groupBy(lambda x: x % 2).collect() >>> sorted([(x, sorted(y)) for (x, y) in result]) [(0, [2, 8]), (1, [1, 1, 3, 5])] """ return self.map(lambda x: (f(x), x)).groupByKey(numPartitions, partitionFunc)
[ "def", "groupBy", "(", "self", ",", "f", ",", "numPartitions", "=", "None", ",", "partitionFunc", "=", "portable_hash", ")", ":", "return", "self", ".", "map", "(", "lambda", "x", ":", "(", "f", "(", "x", ")", ",", "x", ")", ")", ".", "groupByKey", "(", "numPartitions", ",", "partitionFunc", ")" ]
Return an RDD of grouped items. >>> rdd = sc.parallelize([1, 1, 2, 3, 5, 8]) >>> result = rdd.groupBy(lambda x: x % 2).collect() >>> sorted([(x, sorted(y)) for (x, y) in result]) [(0, [2, 8]), (1, [1, 1, 3, 5])]
[ "Return", "an", "RDD", "of", "grouped", "items", "." ]
618d6bff71073c8c93501ab7392c3cc579730f0b
https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/rdd.py#L731-L740
19,006
apache/spark
python/pyspark/rdd.py
RDD.pipe
def pipe(self, command, env=None, checkCode=False): """ Return an RDD created by piping elements to a forked external process. >>> sc.parallelize(['1', '2', '', '3']).pipe('cat').collect() [u'1', u'2', u'', u'3'] :param checkCode: whether or not to check the return value of the shell command. """ if env is None: env = dict() def func(iterator): pipe = Popen( shlex.split(command), env=env, stdin=PIPE, stdout=PIPE) def pipe_objs(out): for obj in iterator: s = unicode(obj).rstrip('\n') + '\n' out.write(s.encode('utf-8')) out.close() Thread(target=pipe_objs, args=[pipe.stdin]).start() def check_return_code(): pipe.wait() if checkCode and pipe.returncode: raise Exception("Pipe function `%s' exited " "with error code %d" % (command, pipe.returncode)) else: for i in range(0): yield i return (x.rstrip(b'\n').decode('utf-8') for x in chain(iter(pipe.stdout.readline, b''), check_return_code())) return self.mapPartitions(func)
python
def pipe(self, command, env=None, checkCode=False): """ Return an RDD created by piping elements to a forked external process. >>> sc.parallelize(['1', '2', '', '3']).pipe('cat').collect() [u'1', u'2', u'', u'3'] :param checkCode: whether or not to check the return value of the shell command. """ if env is None: env = dict() def func(iterator): pipe = Popen( shlex.split(command), env=env, stdin=PIPE, stdout=PIPE) def pipe_objs(out): for obj in iterator: s = unicode(obj).rstrip('\n') + '\n' out.write(s.encode('utf-8')) out.close() Thread(target=pipe_objs, args=[pipe.stdin]).start() def check_return_code(): pipe.wait() if checkCode and pipe.returncode: raise Exception("Pipe function `%s' exited " "with error code %d" % (command, pipe.returncode)) else: for i in range(0): yield i return (x.rstrip(b'\n').decode('utf-8') for x in chain(iter(pipe.stdout.readline, b''), check_return_code())) return self.mapPartitions(func)
[ "def", "pipe", "(", "self", ",", "command", ",", "env", "=", "None", ",", "checkCode", "=", "False", ")", ":", "if", "env", "is", "None", ":", "env", "=", "dict", "(", ")", "def", "func", "(", "iterator", ")", ":", "pipe", "=", "Popen", "(", "shlex", ".", "split", "(", "command", ")", ",", "env", "=", "env", ",", "stdin", "=", "PIPE", ",", "stdout", "=", "PIPE", ")", "def", "pipe_objs", "(", "out", ")", ":", "for", "obj", "in", "iterator", ":", "s", "=", "unicode", "(", "obj", ")", ".", "rstrip", "(", "'\\n'", ")", "+", "'\\n'", "out", ".", "write", "(", "s", ".", "encode", "(", "'utf-8'", ")", ")", "out", ".", "close", "(", ")", "Thread", "(", "target", "=", "pipe_objs", ",", "args", "=", "[", "pipe", ".", "stdin", "]", ")", ".", "start", "(", ")", "def", "check_return_code", "(", ")", ":", "pipe", ".", "wait", "(", ")", "if", "checkCode", "and", "pipe", ".", "returncode", ":", "raise", "Exception", "(", "\"Pipe function `%s' exited \"", "\"with error code %d\"", "%", "(", "command", ",", "pipe", ".", "returncode", ")", ")", "else", ":", "for", "i", "in", "range", "(", "0", ")", ":", "yield", "i", "return", "(", "x", ".", "rstrip", "(", "b'\\n'", ")", ".", "decode", "(", "'utf-8'", ")", "for", "x", "in", "chain", "(", "iter", "(", "pipe", ".", "stdout", ".", "readline", ",", "b''", ")", ",", "check_return_code", "(", ")", ")", ")", "return", "self", ".", "mapPartitions", "(", "func", ")" ]
Return an RDD created by piping elements to a forked external process. >>> sc.parallelize(['1', '2', '', '3']).pipe('cat').collect() [u'1', u'2', u'', u'3'] :param checkCode: whether or not to check the return value of the shell command.
[ "Return", "an", "RDD", "created", "by", "piping", "elements", "to", "a", "forked", "external", "process", "." ]
618d6bff71073c8c93501ab7392c3cc579730f0b
https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/rdd.py#L743-L776
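pipe forks an external process, feeds elements to its stdin and streams its stdout back as the new elements. A self-contained sketch of that pattern outside Spark, assuming a Unix-like environment where cat is available:

```python
# Minimal standalone sketch: write each element to a forked process on stdin and
# read its stdout back line by line. Uses `cat`, so the output mirrors the input.
import shlex
from subprocess import Popen, PIPE

def pipe_through(elements, command):
    proc = Popen(shlex.split(command), stdin=PIPE, stdout=PIPE)
    stdin_data = "".join(str(e).rstrip("\n") + "\n" for e in elements).encode("utf-8")
    out, _ = proc.communicate(stdin_data)
    return [line.decode("utf-8") for line in out.split(b"\n")[:-1]]

print(pipe_through(["1", "2", "", "3"], "cat"))  # ['1', '2', '', '3']
```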
19,007
apache/spark
python/pyspark/rdd.py
RDD.foreach
def foreach(self, f): """ Applies a function to all elements of this RDD. >>> def f(x): print(x) >>> sc.parallelize([1, 2, 3, 4, 5]).foreach(f) """ f = fail_on_stopiteration(f) def processPartition(iterator): for x in iterator: f(x) return iter([]) self.mapPartitions(processPartition).count()
python
def foreach(self, f): """ Applies a function to all elements of this RDD. >>> def f(x): print(x) >>> sc.parallelize([1, 2, 3, 4, 5]).foreach(f) """ f = fail_on_stopiteration(f) def processPartition(iterator): for x in iterator: f(x) return iter([]) self.mapPartitions(processPartition).count()
[ "def", "foreach", "(", "self", ",", "f", ")", ":", "f", "=", "fail_on_stopiteration", "(", "f", ")", "def", "processPartition", "(", "iterator", ")", ":", "for", "x", "in", "iterator", ":", "f", "(", "x", ")", "return", "iter", "(", "[", "]", ")", "self", ".", "mapPartitions", "(", "processPartition", ")", ".", "count", "(", ")" ]
Applies a function to all elements of this RDD. >>> def f(x): print(x) >>> sc.parallelize([1, 2, 3, 4, 5]).foreach(f)
[ "Applies", "a", "function", "to", "all", "elements", "of", "this", "RDD", "." ]
618d6bff71073c8c93501ab7392c3cc579730f0b
https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/rdd.py#L778-L791
19,008
apache/spark
python/pyspark/rdd.py
RDD.foreachPartition
def foreachPartition(self, f): """ Applies a function to each partition of this RDD. >>> def f(iterator): ... for x in iterator: ... print(x) >>> sc.parallelize([1, 2, 3, 4, 5]).foreachPartition(f) """ def func(it): r = f(it) try: return iter(r) except TypeError: return iter([]) self.mapPartitions(func).count()
python
def foreachPartition(self, f): """ Applies a function to each partition of this RDD. >>> def f(iterator): ... for x in iterator: ... print(x) >>> sc.parallelize([1, 2, 3, 4, 5]).foreachPartition(f) """ def func(it): r = f(it) try: return iter(r) except TypeError: return iter([]) self.mapPartitions(func).count()
[ "def", "foreachPartition", "(", "self", ",", "f", ")", ":", "def", "func", "(", "it", ")", ":", "r", "=", "f", "(", "it", ")", "try", ":", "return", "iter", "(", "r", ")", "except", "TypeError", ":", "return", "iter", "(", "[", "]", ")", "self", ".", "mapPartitions", "(", "func", ")", ".", "count", "(", ")" ]
Applies a function to each partition of this RDD. >>> def f(iterator): ... for x in iterator: ... print(x) >>> sc.parallelize([1, 2, 3, 4, 5]).foreachPartition(f)
[ "Applies", "a", "function", "to", "each", "partition", "of", "this", "RDD", "." ]
618d6bff71073c8c93501ab7392c3cc579730f0b
https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/rdd.py#L793-L808
19,009
apache/spark
python/pyspark/rdd.py
RDD.collect
def collect(self): """ Return a list that contains all of the elements in this RDD. .. note:: This method should only be used if the resulting array is expected to be small, as all the data is loaded into the driver's memory. """ with SCCallSiteSync(self.context) as css: sock_info = self.ctx._jvm.PythonRDD.collectAndServe(self._jrdd.rdd()) return list(_load_from_socket(sock_info, self._jrdd_deserializer))
python
def collect(self): """ Return a list that contains all of the elements in this RDD. .. note:: This method should only be used if the resulting array is expected to be small, as all the data is loaded into the driver's memory. """ with SCCallSiteSync(self.context) as css: sock_info = self.ctx._jvm.PythonRDD.collectAndServe(self._jrdd.rdd()) return list(_load_from_socket(sock_info, self._jrdd_deserializer))
[ "def", "collect", "(", "self", ")", ":", "with", "SCCallSiteSync", "(", "self", ".", "context", ")", "as", "css", ":", "sock_info", "=", "self", ".", "ctx", ".", "_jvm", ".", "PythonRDD", ".", "collectAndServe", "(", "self", ".", "_jrdd", ".", "rdd", "(", ")", ")", "return", "list", "(", "_load_from_socket", "(", "sock_info", ",", "self", ".", "_jrdd_deserializer", ")", ")" ]
Return a list that contains all of the elements in this RDD. .. note:: This method should only be used if the resulting array is expected to be small, as all the data is loaded into the driver's memory.
[ "Return", "a", "list", "that", "contains", "all", "of", "the", "elements", "in", "this", "RDD", "." ]
618d6bff71073c8c93501ab7392c3cc579730f0b
https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/rdd.py#L810-L819
19,010
apache/spark
python/pyspark/rdd.py
RDD.reduce
def reduce(self, f): """ Reduces the elements of this RDD using the specified commutative and associative binary operator. Currently reduces partitions locally. >>> from operator import add >>> sc.parallelize([1, 2, 3, 4, 5]).reduce(add) 15 >>> sc.parallelize((2 for _ in range(10))).map(lambda x: 1).cache().reduce(add) 10 >>> sc.parallelize([]).reduce(add) Traceback (most recent call last): ... ValueError: Can not reduce() empty RDD """ f = fail_on_stopiteration(f) def func(iterator): iterator = iter(iterator) try: initial = next(iterator) except StopIteration: return yield reduce(f, iterator, initial) vals = self.mapPartitions(func).collect() if vals: return reduce(f, vals) raise ValueError("Can not reduce() empty RDD")
python
def reduce(self, f): """ Reduces the elements of this RDD using the specified commutative and associative binary operator. Currently reduces partitions locally. >>> from operator import add >>> sc.parallelize([1, 2, 3, 4, 5]).reduce(add) 15 >>> sc.parallelize((2 for _ in range(10))).map(lambda x: 1).cache().reduce(add) 10 >>> sc.parallelize([]).reduce(add) Traceback (most recent call last): ... ValueError: Can not reduce() empty RDD """ f = fail_on_stopiteration(f) def func(iterator): iterator = iter(iterator) try: initial = next(iterator) except StopIteration: return yield reduce(f, iterator, initial) vals = self.mapPartitions(func).collect() if vals: return reduce(f, vals) raise ValueError("Can not reduce() empty RDD")
[ "def", "reduce", "(", "self", ",", "f", ")", ":", "f", "=", "fail_on_stopiteration", "(", "f", ")", "def", "func", "(", "iterator", ")", ":", "iterator", "=", "iter", "(", "iterator", ")", "try", ":", "initial", "=", "next", "(", "iterator", ")", "except", "StopIteration", ":", "return", "yield", "reduce", "(", "f", ",", "iterator", ",", "initial", ")", "vals", "=", "self", ".", "mapPartitions", "(", "func", ")", ".", "collect", "(", ")", "if", "vals", ":", "return", "reduce", "(", "f", ",", "vals", ")", "raise", "ValueError", "(", "\"Can not reduce() empty RDD\"", ")" ]
Reduces the elements of this RDD using the specified commutative and associative binary operator. Currently reduces partitions locally. >>> from operator import add >>> sc.parallelize([1, 2, 3, 4, 5]).reduce(add) 15 >>> sc.parallelize((2 for _ in range(10))).map(lambda x: 1).cache().reduce(add) 10 >>> sc.parallelize([]).reduce(add) Traceback (most recent call last): ... ValueError: Can not reduce() empty RDD
[ "Reduces", "the", "elements", "of", "this", "RDD", "using", "the", "specified", "commutative", "and", "associative", "binary", "operator", ".", "Currently", "reduces", "partitions", "locally", "." ]
618d6bff71073c8c93501ab7392c3cc579730f0b
https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/rdd.py#L821-L849
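reduce first collapses each partition to a single partial result and then reduces those partials on the driver. A plain-Python sketch of that two-stage pattern, with lists standing in for partitions and a hypothetical helper name:

```python
# Two-stage reduce: one pass per "partition", then one pass over the partial results,
# raising on empty input just like the RDD version above.
from functools import reduce as _reduce
from operator import add

def distributed_reduce(partitions, f):
    partials = []
    for part in partitions:
        it = iter(part)
        try:
            initial = next(it)
        except StopIteration:
            continue  # an empty partition contributes nothing
        partials.append(_reduce(f, it, initial))
    if partials:
        return _reduce(f, partials)
    raise ValueError("Can not reduce() empty RDD")

print(distributed_reduce([[1, 2], [], [3, 4, 5]], add))  # 15
```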
19,011
apache/spark
python/pyspark/rdd.py
RDD.treeReduce
def treeReduce(self, f, depth=2): """ Reduces the elements of this RDD in a multi-level tree pattern. :param depth: suggested depth of the tree (default: 2) >>> add = lambda x, y: x + y >>> rdd = sc.parallelize([-5, -4, -3, -2, -1, 1, 2, 3, 4], 10) >>> rdd.treeReduce(add) -5 >>> rdd.treeReduce(add, 1) -5 >>> rdd.treeReduce(add, 2) -5 >>> rdd.treeReduce(add, 5) -5 >>> rdd.treeReduce(add, 10) -5 """ if depth < 1: raise ValueError("Depth cannot be smaller than 1 but got %d." % depth) zeroValue = None, True # Use the second entry to indicate whether this is a dummy value. def op(x, y): if x[1]: return y elif y[1]: return x else: return f(x[0], y[0]), False reduced = self.map(lambda x: (x, False)).treeAggregate(zeroValue, op, op, depth) if reduced[1]: raise ValueError("Cannot reduce empty RDD.") return reduced[0]
python
def treeReduce(self, f, depth=2): """ Reduces the elements of this RDD in a multi-level tree pattern. :param depth: suggested depth of the tree (default: 2) >>> add = lambda x, y: x + y >>> rdd = sc.parallelize([-5, -4, -3, -2, -1, 1, 2, 3, 4], 10) >>> rdd.treeReduce(add) -5 >>> rdd.treeReduce(add, 1) -5 >>> rdd.treeReduce(add, 2) -5 >>> rdd.treeReduce(add, 5) -5 >>> rdd.treeReduce(add, 10) -5 """ if depth < 1: raise ValueError("Depth cannot be smaller than 1 but got %d." % depth) zeroValue = None, True # Use the second entry to indicate whether this is a dummy value. def op(x, y): if x[1]: return y elif y[1]: return x else: return f(x[0], y[0]), False reduced = self.map(lambda x: (x, False)).treeAggregate(zeroValue, op, op, depth) if reduced[1]: raise ValueError("Cannot reduce empty RDD.") return reduced[0]
[ "def", "treeReduce", "(", "self", ",", "f", ",", "depth", "=", "2", ")", ":", "if", "depth", "<", "1", ":", "raise", "ValueError", "(", "\"Depth cannot be smaller than 1 but got %d.\"", "%", "depth", ")", "zeroValue", "=", "None", ",", "True", "# Use the second entry to indicate whether this is a dummy value.", "def", "op", "(", "x", ",", "y", ")", ":", "if", "x", "[", "1", "]", ":", "return", "y", "elif", "y", "[", "1", "]", ":", "return", "x", "else", ":", "return", "f", "(", "x", "[", "0", "]", ",", "y", "[", "0", "]", ")", ",", "False", "reduced", "=", "self", ".", "map", "(", "lambda", "x", ":", "(", "x", ",", "False", ")", ")", ".", "treeAggregate", "(", "zeroValue", ",", "op", ",", "op", ",", "depth", ")", "if", "reduced", "[", "1", "]", ":", "raise", "ValueError", "(", "\"Cannot reduce empty RDD.\"", ")", "return", "reduced", "[", "0", "]" ]
Reduces the elements of this RDD in a multi-level tree pattern. :param depth: suggested depth of the tree (default: 2) >>> add = lambda x, y: x + y >>> rdd = sc.parallelize([-5, -4, -3, -2, -1, 1, 2, 3, 4], 10) >>> rdd.treeReduce(add) -5 >>> rdd.treeReduce(add, 1) -5 >>> rdd.treeReduce(add, 2) -5 >>> rdd.treeReduce(add, 5) -5 >>> rdd.treeReduce(add, 10) -5
[ "Reduces", "the", "elements", "of", "this", "RDD", "in", "a", "multi", "-", "level", "tree", "pattern", "." ]
618d6bff71073c8c93501ab7392c3cc579730f0b
https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/rdd.py#L851-L886
19,012
apache/spark
python/pyspark/rdd.py
RDD.fold
def fold(self, zeroValue, op): """ Aggregate the elements of each partition, and then the results for all the partitions, using a given associative function and a neutral "zero value." The function C{op(t1, t2)} is allowed to modify C{t1} and return it as its result value to avoid object allocation; however, it should not modify C{t2}. This behaves somewhat differently from fold operations implemented for non-distributed collections in functional languages like Scala. This fold operation may be applied to partitions individually, and then fold those results into the final result, rather than apply the fold to each element sequentially in some defined ordering. For functions that are not commutative, the result may differ from that of a fold applied to a non-distributed collection. >>> from operator import add >>> sc.parallelize([1, 2, 3, 4, 5]).fold(0, add) 15 """ op = fail_on_stopiteration(op) def func(iterator): acc = zeroValue for obj in iterator: acc = op(acc, obj) yield acc # collecting result of mapPartitions here ensures that the copy of # zeroValue provided to each partition is unique from the one provided # to the final reduce call vals = self.mapPartitions(func).collect() return reduce(op, vals, zeroValue)
python
def fold(self, zeroValue, op): """ Aggregate the elements of each partition, and then the results for all the partitions, using a given associative function and a neutral "zero value." The function C{op(t1, t2)} is allowed to modify C{t1} and return it as its result value to avoid object allocation; however, it should not modify C{t2}. This behaves somewhat differently from fold operations implemented for non-distributed collections in functional languages like Scala. This fold operation may be applied to partitions individually, and then fold those results into the final result, rather than apply the fold to each element sequentially in some defined ordering. For functions that are not commutative, the result may differ from that of a fold applied to a non-distributed collection. >>> from operator import add >>> sc.parallelize([1, 2, 3, 4, 5]).fold(0, add) 15 """ op = fail_on_stopiteration(op) def func(iterator): acc = zeroValue for obj in iterator: acc = op(acc, obj) yield acc # collecting result of mapPartitions here ensures that the copy of # zeroValue provided to each partition is unique from the one provided # to the final reduce call vals = self.mapPartitions(func).collect() return reduce(op, vals, zeroValue)
[ "def", "fold", "(", "self", ",", "zeroValue", ",", "op", ")", ":", "op", "=", "fail_on_stopiteration", "(", "op", ")", "def", "func", "(", "iterator", ")", ":", "acc", "=", "zeroValue", "for", "obj", "in", "iterator", ":", "acc", "=", "op", "(", "acc", ",", "obj", ")", "yield", "acc", "# collecting result of mapPartitions here ensures that the copy of", "# zeroValue provided to each partition is unique from the one provided", "# to the final reduce call", "vals", "=", "self", ".", "mapPartitions", "(", "func", ")", ".", "collect", "(", ")", "return", "reduce", "(", "op", ",", "vals", ",", "zeroValue", ")" ]
Aggregate the elements of each partition, and then the results for all the partitions, using a given associative function and a neutral "zero value." The function C{op(t1, t2)} is allowed to modify C{t1} and return it as its result value to avoid object allocation; however, it should not modify C{t2}. This behaves somewhat differently from fold operations implemented for non-distributed collections in functional languages like Scala. This fold operation may be applied to partitions individually, and then fold those results into the final result, rather than apply the fold to each element sequentially in some defined ordering. For functions that are not commutative, the result may differ from that of a fold applied to a non-distributed collection. >>> from operator import add >>> sc.parallelize([1, 2, 3, 4, 5]).fold(0, add) 15
[ "Aggregate", "the", "elements", "of", "each", "partition", "and", "then", "the", "results", "for", "all", "the", "partitions", "using", "a", "given", "associative", "function", "and", "a", "neutral", "zero", "value", "." ]
618d6bff71073c8c93501ab7392c3cc579730f0b
https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/rdd.py#L888-L920
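The docstring warns that fold applies the zero value once per partition and once more for the final merge, so results can differ from a single-machine fold when the zero value is not a true identity. A small sketch that makes the caveat concrete (plain Python lists stand in for partitions):

```python
# Partition-wise fold followed by a driver-side fold; the zero value is folded in
# once per partition and once more at the end.
from functools import reduce as _reduce
from operator import add

def distributed_fold(partitions, zero, op):
    partials = [_reduce(op, part, zero) for part in partitions]  # one pass per partition
    return _reduce(op, partials, zero)                           # final pass on the driver

data = [[1, 2], [3, 4, 5]]
print(distributed_fold(data, 0, add))  # 15, same as a local fold
print(distributed_fold(data, 1, add))  # 18: the non-identity zero is applied 3 times
```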
19,013
apache/spark
python/pyspark/rdd.py
RDD.aggregate
def aggregate(self, zeroValue, seqOp, combOp): """ Aggregate the elements of each partition, and then the results for all the partitions, using a given combine functions and a neutral "zero value." The functions C{op(t1, t2)} is allowed to modify C{t1} and return it as its result value to avoid object allocation; however, it should not modify C{t2}. The first function (seqOp) can return a different result type, U, than the type of this RDD. Thus, we need one operation for merging a T into an U and one operation for merging two U >>> seqOp = (lambda x, y: (x[0] + y, x[1] + 1)) >>> combOp = (lambda x, y: (x[0] + y[0], x[1] + y[1])) >>> sc.parallelize([1, 2, 3, 4]).aggregate((0, 0), seqOp, combOp) (10, 4) >>> sc.parallelize([]).aggregate((0, 0), seqOp, combOp) (0, 0) """ seqOp = fail_on_stopiteration(seqOp) combOp = fail_on_stopiteration(combOp) def func(iterator): acc = zeroValue for obj in iterator: acc = seqOp(acc, obj) yield acc # collecting result of mapPartitions here ensures that the copy of # zeroValue provided to each partition is unique from the one provided # to the final reduce call vals = self.mapPartitions(func).collect() return reduce(combOp, vals, zeroValue)
python
def aggregate(self, zeroValue, seqOp, combOp): """ Aggregate the elements of each partition, and then the results for all the partitions, using a given combine functions and a neutral "zero value." The functions C{op(t1, t2)} is allowed to modify C{t1} and return it as its result value to avoid object allocation; however, it should not modify C{t2}. The first function (seqOp) can return a different result type, U, than the type of this RDD. Thus, we need one operation for merging a T into an U and one operation for merging two U >>> seqOp = (lambda x, y: (x[0] + y, x[1] + 1)) >>> combOp = (lambda x, y: (x[0] + y[0], x[1] + y[1])) >>> sc.parallelize([1, 2, 3, 4]).aggregate((0, 0), seqOp, combOp) (10, 4) >>> sc.parallelize([]).aggregate((0, 0), seqOp, combOp) (0, 0) """ seqOp = fail_on_stopiteration(seqOp) combOp = fail_on_stopiteration(combOp) def func(iterator): acc = zeroValue for obj in iterator: acc = seqOp(acc, obj) yield acc # collecting result of mapPartitions here ensures that the copy of # zeroValue provided to each partition is unique from the one provided # to the final reduce call vals = self.mapPartitions(func).collect() return reduce(combOp, vals, zeroValue)
[ "def", "aggregate", "(", "self", ",", "zeroValue", ",", "seqOp", ",", "combOp", ")", ":", "seqOp", "=", "fail_on_stopiteration", "(", "seqOp", ")", "combOp", "=", "fail_on_stopiteration", "(", "combOp", ")", "def", "func", "(", "iterator", ")", ":", "acc", "=", "zeroValue", "for", "obj", "in", "iterator", ":", "acc", "=", "seqOp", "(", "acc", ",", "obj", ")", "yield", "acc", "# collecting result of mapPartitions here ensures that the copy of", "# zeroValue provided to each partition is unique from the one provided", "# to the final reduce call", "vals", "=", "self", ".", "mapPartitions", "(", "func", ")", ".", "collect", "(", ")", "return", "reduce", "(", "combOp", ",", "vals", ",", "zeroValue", ")" ]
Aggregate the elements of each partition, and then the results for all the partitions, using a given combine functions and a neutral "zero value." The functions C{op(t1, t2)} is allowed to modify C{t1} and return it as its result value to avoid object allocation; however, it should not modify C{t2}. The first function (seqOp) can return a different result type, U, than the type of this RDD. Thus, we need one operation for merging a T into an U and one operation for merging two U >>> seqOp = (lambda x, y: (x[0] + y, x[1] + 1)) >>> combOp = (lambda x, y: (x[0] + y[0], x[1] + y[1])) >>> sc.parallelize([1, 2, 3, 4]).aggregate((0, 0), seqOp, combOp) (10, 4) >>> sc.parallelize([]).aggregate((0, 0), seqOp, combOp) (0, 0)
[ "Aggregate", "the", "elements", "of", "each", "partition", "and", "then", "the", "results", "for", "all", "the", "partitions", "using", "a", "given", "combine", "functions", "and", "a", "neutral", "zero", "value", "." ]
618d6bff71073c8c93501ab7392c3cc579730f0b
https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/rdd.py#L922-L955
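aggregate differs from fold in that seqOp may return a different type (U) than the RDD's elements (T), with combOp merging those accumulators. A local sketch of the classic (sum, count) accumulator, assuming in-memory partitions and made-up helper names:

```python
# seqOp folds one element into a (sum, count) pair; combOp merges two pairs.
from functools import reduce as _reduce

def seq_op(acc, x):
    return (acc[0] + x, acc[1] + 1)

def comb_op(a, b):
    return (a[0] + b[0], a[1] + b[1])

def distributed_aggregate(partitions, zero, seq, comb):
    partials = [_reduce(seq, part, zero) for part in partitions]
    return _reduce(comb, partials, zero)

total, count = distributed_aggregate([[1, 2], [3, 4]], (0, 0), seq_op, comb_op)
print(total, count, total / count)  # 10 4 2.5
```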
19,014
apache/spark
python/pyspark/rdd.py
RDD.treeAggregate
def treeAggregate(self, zeroValue, seqOp, combOp, depth=2): """ Aggregates the elements of this RDD in a multi-level tree pattern. :param depth: suggested depth of the tree (default: 2) >>> add = lambda x, y: x + y >>> rdd = sc.parallelize([-5, -4, -3, -2, -1, 1, 2, 3, 4], 10) >>> rdd.treeAggregate(0, add, add) -5 >>> rdd.treeAggregate(0, add, add, 1) -5 >>> rdd.treeAggregate(0, add, add, 2) -5 >>> rdd.treeAggregate(0, add, add, 5) -5 >>> rdd.treeAggregate(0, add, add, 10) -5 """ if depth < 1: raise ValueError("Depth cannot be smaller than 1 but got %d." % depth) if self.getNumPartitions() == 0: return zeroValue def aggregatePartition(iterator): acc = zeroValue for obj in iterator: acc = seqOp(acc, obj) yield acc partiallyAggregated = self.mapPartitions(aggregatePartition) numPartitions = partiallyAggregated.getNumPartitions() scale = max(int(ceil(pow(numPartitions, 1.0 / depth))), 2) # If creating an extra level doesn't help reduce the wall-clock time, we stop the tree # aggregation. while numPartitions > scale + numPartitions / scale: numPartitions /= scale curNumPartitions = int(numPartitions) def mapPartition(i, iterator): for obj in iterator: yield (i % curNumPartitions, obj) partiallyAggregated = partiallyAggregated \ .mapPartitionsWithIndex(mapPartition) \ .reduceByKey(combOp, curNumPartitions) \ .values() return partiallyAggregated.reduce(combOp)
python
def treeAggregate(self, zeroValue, seqOp, combOp, depth=2): """ Aggregates the elements of this RDD in a multi-level tree pattern. :param depth: suggested depth of the tree (default: 2) >>> add = lambda x, y: x + y >>> rdd = sc.parallelize([-5, -4, -3, -2, -1, 1, 2, 3, 4], 10) >>> rdd.treeAggregate(0, add, add) -5 >>> rdd.treeAggregate(0, add, add, 1) -5 >>> rdd.treeAggregate(0, add, add, 2) -5 >>> rdd.treeAggregate(0, add, add, 5) -5 >>> rdd.treeAggregate(0, add, add, 10) -5 """ if depth < 1: raise ValueError("Depth cannot be smaller than 1 but got %d." % depth) if self.getNumPartitions() == 0: return zeroValue def aggregatePartition(iterator): acc = zeroValue for obj in iterator: acc = seqOp(acc, obj) yield acc partiallyAggregated = self.mapPartitions(aggregatePartition) numPartitions = partiallyAggregated.getNumPartitions() scale = max(int(ceil(pow(numPartitions, 1.0 / depth))), 2) # If creating an extra level doesn't help reduce the wall-clock time, we stop the tree # aggregation. while numPartitions > scale + numPartitions / scale: numPartitions /= scale curNumPartitions = int(numPartitions) def mapPartition(i, iterator): for obj in iterator: yield (i % curNumPartitions, obj) partiallyAggregated = partiallyAggregated \ .mapPartitionsWithIndex(mapPartition) \ .reduceByKey(combOp, curNumPartitions) \ .values() return partiallyAggregated.reduce(combOp)
[ "def", "treeAggregate", "(", "self", ",", "zeroValue", ",", "seqOp", ",", "combOp", ",", "depth", "=", "2", ")", ":", "if", "depth", "<", "1", ":", "raise", "ValueError", "(", "\"Depth cannot be smaller than 1 but got %d.\"", "%", "depth", ")", "if", "self", ".", "getNumPartitions", "(", ")", "==", "0", ":", "return", "zeroValue", "def", "aggregatePartition", "(", "iterator", ")", ":", "acc", "=", "zeroValue", "for", "obj", "in", "iterator", ":", "acc", "=", "seqOp", "(", "acc", ",", "obj", ")", "yield", "acc", "partiallyAggregated", "=", "self", ".", "mapPartitions", "(", "aggregatePartition", ")", "numPartitions", "=", "partiallyAggregated", ".", "getNumPartitions", "(", ")", "scale", "=", "max", "(", "int", "(", "ceil", "(", "pow", "(", "numPartitions", ",", "1.0", "/", "depth", ")", ")", ")", ",", "2", ")", "# If creating an extra level doesn't help reduce the wall-clock time, we stop the tree", "# aggregation.", "while", "numPartitions", ">", "scale", "+", "numPartitions", "/", "scale", ":", "numPartitions", "/=", "scale", "curNumPartitions", "=", "int", "(", "numPartitions", ")", "def", "mapPartition", "(", "i", ",", "iterator", ")", ":", "for", "obj", "in", "iterator", ":", "yield", "(", "i", "%", "curNumPartitions", ",", "obj", ")", "partiallyAggregated", "=", "partiallyAggregated", ".", "mapPartitionsWithIndex", "(", "mapPartition", ")", ".", "reduceByKey", "(", "combOp", ",", "curNumPartitions", ")", ".", "values", "(", ")", "return", "partiallyAggregated", ".", "reduce", "(", "combOp", ")" ]
Aggregates the elements of this RDD in a multi-level tree pattern. :param depth: suggested depth of the tree (default: 2) >>> add = lambda x, y: x + y >>> rdd = sc.parallelize([-5, -4, -3, -2, -1, 1, 2, 3, 4], 10) >>> rdd.treeAggregate(0, add, add) -5 >>> rdd.treeAggregate(0, add, add, 1) -5 >>> rdd.treeAggregate(0, add, add, 2) -5 >>> rdd.treeAggregate(0, add, add, 5) -5 >>> rdd.treeAggregate(0, add, add, 10) -5
[ "Aggregates", "the", "elements", "of", "this", "RDD", "in", "a", "multi", "-", "level", "tree", "pattern", "." ]
618d6bff71073c8c93501ab7392c3cc579730f0b
https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/rdd.py#L957-L1007
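treeAggregate collapses the per-partition partials level by level instead of sending them all to the driver at once. The loop below sketches that collapsing schedule in plain Python (bucket assignment stands in for reduceByKey; names are illustrative):

```python
# Starting from one partial per partition, repeatedly hash partials into roughly
# numPartitions/scale buckets and combine within each bucket, until another level
# would not meaningfully shrink the work; then combine what is left.
from functools import reduce as _reduce
from math import ceil
from operator import add

def tree_combine(partials, comb_op, depth=2):
    num_partitions = len(partials)
    scale = max(int(ceil(pow(num_partitions, 1.0 / depth))), 2)
    while num_partitions > scale + num_partitions / scale:
        num_partitions = int(num_partitions / scale)
        buckets = [[] for _ in range(num_partitions)]
        for i, value in enumerate(partials):
            buckets[i % num_partitions].append(value)   # stand-in for reduceByKey
        partials = [_reduce(comb_op, bucket) for bucket in buckets]
    return _reduce(comb_op, partials)

print(tree_combine(list(range(10)), add))  # 45, combined over two levels
```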
19,015
apache/spark
python/pyspark/rdd.py
RDD.max
def max(self, key=None): """ Find the maximum item in this RDD. :param key: A function used to generate key for comparing >>> rdd = sc.parallelize([1.0, 5.0, 43.0, 10.0]) >>> rdd.max() 43.0 >>> rdd.max(key=str) 5.0 """ if key is None: return self.reduce(max) return self.reduce(lambda a, b: max(a, b, key=key))
python
def max(self, key=None): """ Find the maximum item in this RDD. :param key: A function used to generate key for comparing >>> rdd = sc.parallelize([1.0, 5.0, 43.0, 10.0]) >>> rdd.max() 43.0 >>> rdd.max(key=str) 5.0 """ if key is None: return self.reduce(max) return self.reduce(lambda a, b: max(a, b, key=key))
[ "def", "max", "(", "self", ",", "key", "=", "None", ")", ":", "if", "key", "is", "None", ":", "return", "self", ".", "reduce", "(", "max", ")", "return", "self", ".", "reduce", "(", "lambda", "a", ",", "b", ":", "max", "(", "a", ",", "b", ",", "key", "=", "key", ")", ")" ]
Find the maximum item in this RDD. :param key: A function used to generate key for comparing >>> rdd = sc.parallelize([1.0, 5.0, 43.0, 10.0]) >>> rdd.max() 43.0 >>> rdd.max(key=str) 5.0
[ "Find", "the", "maximum", "item", "in", "this", "RDD", "." ]
618d6bff71073c8c93501ab7392c3cc579730f0b
https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/rdd.py#L1009-L1023
19,016
apache/spark
python/pyspark/rdd.py
RDD.min
def min(self, key=None): """ Find the minimum item in this RDD. :param key: A function used to generate key for comparing >>> rdd = sc.parallelize([2.0, 5.0, 43.0, 10.0]) >>> rdd.min() 2.0 >>> rdd.min(key=str) 10.0 """ if key is None: return self.reduce(min) return self.reduce(lambda a, b: min(a, b, key=key))
python
def min(self, key=None): """ Find the minimum item in this RDD. :param key: A function used to generate key for comparing >>> rdd = sc.parallelize([2.0, 5.0, 43.0, 10.0]) >>> rdd.min() 2.0 >>> rdd.min(key=str) 10.0 """ if key is None: return self.reduce(min) return self.reduce(lambda a, b: min(a, b, key=key))
[ "def", "min", "(", "self", ",", "key", "=", "None", ")", ":", "if", "key", "is", "None", ":", "return", "self", ".", "reduce", "(", "min", ")", "return", "self", ".", "reduce", "(", "lambda", "a", ",", "b", ":", "min", "(", "a", ",", "b", ",", "key", "=", "key", ")", ")" ]
Find the minimum item in this RDD. :param key: A function used to generate key for comparing >>> rdd = sc.parallelize([2.0, 5.0, 43.0, 10.0]) >>> rdd.min() 2.0 >>> rdd.min(key=str) 10.0
[ "Find", "the", "minimum", "item", "in", "this", "RDD", "." ]
618d6bff71073c8c93501ab7392c3cc579730f0b
https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/rdd.py#L1025-L1039
19,017
apache/spark
python/pyspark/rdd.py
RDD.sum
def sum(self): """ Add up the elements in this RDD. >>> sc.parallelize([1.0, 2.0, 3.0]).sum() 6.0 """ return self.mapPartitions(lambda x: [sum(x)]).fold(0, operator.add)
python
def sum(self): """ Add up the elements in this RDD. >>> sc.parallelize([1.0, 2.0, 3.0]).sum() 6.0 """ return self.mapPartitions(lambda x: [sum(x)]).fold(0, operator.add)
[ "def", "sum", "(", "self", ")", ":", "return", "self", ".", "mapPartitions", "(", "lambda", "x", ":", "[", "sum", "(", "x", ")", "]", ")", ".", "fold", "(", "0", ",", "operator", ".", "add", ")" ]
Add up the elements in this RDD. >>> sc.parallelize([1.0, 2.0, 3.0]).sum() 6.0
[ "Add", "up", "the", "elements", "in", "this", "RDD", "." ]
618d6bff71073c8c93501ab7392c3cc579730f0b
https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/rdd.py#L1041-L1048
19,018
apache/spark
python/pyspark/rdd.py
RDD.top
def top(self, num, key=None): """ Get the top N elements from an RDD. .. note:: This method should only be used if the resulting array is expected to be small, as all the data is loaded into the driver's memory. .. note:: It returns the list sorted in descending order. >>> sc.parallelize([10, 4, 2, 12, 3]).top(1) [12] >>> sc.parallelize([2, 3, 4, 5, 6], 2).top(2) [6, 5] >>> sc.parallelize([10, 4, 2, 12, 3]).top(3, key=str) [4, 3, 2] """ def topIterator(iterator): yield heapq.nlargest(num, iterator, key=key) def merge(a, b): return heapq.nlargest(num, a + b, key=key) return self.mapPartitions(topIterator).reduce(merge)
python
def top(self, num, key=None): """ Get the top N elements from an RDD. .. note:: This method should only be used if the resulting array is expected to be small, as all the data is loaded into the driver's memory. .. note:: It returns the list sorted in descending order. >>> sc.parallelize([10, 4, 2, 12, 3]).top(1) [12] >>> sc.parallelize([2, 3, 4, 5, 6], 2).top(2) [6, 5] >>> sc.parallelize([10, 4, 2, 12, 3]).top(3, key=str) [4, 3, 2] """ def topIterator(iterator): yield heapq.nlargest(num, iterator, key=key) def merge(a, b): return heapq.nlargest(num, a + b, key=key) return self.mapPartitions(topIterator).reduce(merge)
[ "def", "top", "(", "self", ",", "num", ",", "key", "=", "None", ")", ":", "def", "topIterator", "(", "iterator", ")", ":", "yield", "heapq", ".", "nlargest", "(", "num", ",", "iterator", ",", "key", "=", "key", ")", "def", "merge", "(", "a", ",", "b", ")", ":", "return", "heapq", ".", "nlargest", "(", "num", ",", "a", "+", "b", ",", "key", "=", "key", ")", "return", "self", ".", "mapPartitions", "(", "topIterator", ")", ".", "reduce", "(", "merge", ")" ]
Get the top N elements from an RDD. .. note:: This method should only be used if the resulting array is expected to be small, as all the data is loaded into the driver's memory. .. note:: It returns the list sorted in descending order. >>> sc.parallelize([10, 4, 2, 12, 3]).top(1) [12] >>> sc.parallelize([2, 3, 4, 5, 6], 2).top(2) [6, 5] >>> sc.parallelize([10, 4, 2, 12, 3]).top(3, key=str) [4, 3, 2]
[ "Get", "the", "top", "N", "elements", "from", "an", "RDD", "." ]
618d6bff71073c8c93501ab7392c3cc579730f0b
https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/rdd.py#L1265-L1287
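top keeps only the N largest items of each partition with heapq.nlargest and then merges the per-partition winners the same way. A standalone sketch of that pattern, with lists standing in for partitions:

```python
# Per-partition top-N followed by a pairwise merge of the winners.
import heapq
from functools import reduce as _reduce

def distributed_top(partitions, num, key=None):
    per_partition = [heapq.nlargest(num, part, key=key) for part in partitions]
    return _reduce(lambda a, b: heapq.nlargest(num, a + b, key=key), per_partition)

print(distributed_top([[10, 4, 2], [12, 3]], 2))           # [12, 10]
print(distributed_top([[10, 4, 2], [12, 3]], 3, key=str))  # [4, 3, 2]
```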
19,019
apache/spark
python/pyspark/rdd.py
RDD.takeOrdered
def takeOrdered(self, num, key=None): """ Get the N elements from an RDD ordered in ascending order or as specified by the optional key function. .. note:: this method should only be used if the resulting array is expected to be small, as all the data is loaded into the driver's memory. >>> sc.parallelize([10, 1, 2, 9, 3, 4, 5, 6, 7]).takeOrdered(6) [1, 2, 3, 4, 5, 6] >>> sc.parallelize([10, 1, 2, 9, 3, 4, 5, 6, 7], 2).takeOrdered(6, key=lambda x: -x) [10, 9, 7, 6, 5, 4] """ def merge(a, b): return heapq.nsmallest(num, a + b, key) return self.mapPartitions(lambda it: [heapq.nsmallest(num, it, key)]).reduce(merge)
python
def takeOrdered(self, num, key=None): """ Get the N elements from an RDD ordered in ascending order or as specified by the optional key function. .. note:: this method should only be used if the resulting array is expected to be small, as all the data is loaded into the driver's memory. >>> sc.parallelize([10, 1, 2, 9, 3, 4, 5, 6, 7]).takeOrdered(6) [1, 2, 3, 4, 5, 6] >>> sc.parallelize([10, 1, 2, 9, 3, 4, 5, 6, 7], 2).takeOrdered(6, key=lambda x: -x) [10, 9, 7, 6, 5, 4] """ def merge(a, b): return heapq.nsmallest(num, a + b, key) return self.mapPartitions(lambda it: [heapq.nsmallest(num, it, key)]).reduce(merge)
[ "def", "takeOrdered", "(", "self", ",", "num", ",", "key", "=", "None", ")", ":", "def", "merge", "(", "a", ",", "b", ")", ":", "return", "heapq", ".", "nsmallest", "(", "num", ",", "a", "+", "b", ",", "key", ")", "return", "self", ".", "mapPartitions", "(", "lambda", "it", ":", "[", "heapq", ".", "nsmallest", "(", "num", ",", "it", ",", "key", ")", "]", ")", ".", "reduce", "(", "merge", ")" ]
Get the N elements from an RDD ordered in ascending order or as specified by the optional key function. .. note:: this method should only be used if the resulting array is expected to be small, as all the data is loaded into the driver's memory. >>> sc.parallelize([10, 1, 2, 9, 3, 4, 5, 6, 7]).takeOrdered(6) [1, 2, 3, 4, 5, 6] >>> sc.parallelize([10, 1, 2, 9, 3, 4, 5, 6, 7], 2).takeOrdered(6, key=lambda x: -x) [10, 9, 7, 6, 5, 4]
[ "Get", "the", "N", "elements", "from", "an", "RDD", "ordered", "in", "ascending", "order", "or", "as", "specified", "by", "the", "optional", "key", "function", "." ]
618d6bff71073c8c93501ab7392c3cc579730f0b
https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/rdd.py#L1289-L1306
19,020
apache/spark
python/pyspark/rdd.py
RDD.take
def take(self, num): """ Take the first num elements of the RDD. It works by first scanning one partition, and use the results from that partition to estimate the number of additional partitions needed to satisfy the limit. Translated from the Scala implementation in RDD#take(). .. note:: this method should only be used if the resulting array is expected to be small, as all the data is loaded into the driver's memory. >>> sc.parallelize([2, 3, 4, 5, 6]).cache().take(2) [2, 3] >>> sc.parallelize([2, 3, 4, 5, 6]).take(10) [2, 3, 4, 5, 6] >>> sc.parallelize(range(100), 100).filter(lambda x: x > 90).take(3) [91, 92, 93] """ items = [] totalParts = self.getNumPartitions() partsScanned = 0 while len(items) < num and partsScanned < totalParts: # The number of partitions to try in this iteration. # It is ok for this number to be greater than totalParts because # we actually cap it at totalParts in runJob. numPartsToTry = 1 if partsScanned > 0: # If we didn't find any rows after the previous iteration, # quadruple and retry. Otherwise, interpolate the number of # partitions we need to try, but overestimate it by 50%. # We also cap the estimation in the end. if len(items) == 0: numPartsToTry = partsScanned * 4 else: # the first parameter of max is >=1 whenever partsScanned >= 2 numPartsToTry = int(1.5 * num * partsScanned / len(items)) - partsScanned numPartsToTry = min(max(numPartsToTry, 1), partsScanned * 4) left = num - len(items) def takeUpToNumLeft(iterator): iterator = iter(iterator) taken = 0 while taken < left: try: yield next(iterator) except StopIteration: return taken += 1 p = range(partsScanned, min(partsScanned + numPartsToTry, totalParts)) res = self.context.runJob(self, takeUpToNumLeft, p) items += res partsScanned += numPartsToTry return items[:num]
python
def take(self, num): """ Take the first num elements of the RDD. It works by first scanning one partition, and use the results from that partition to estimate the number of additional partitions needed to satisfy the limit. Translated from the Scala implementation in RDD#take(). .. note:: this method should only be used if the resulting array is expected to be small, as all the data is loaded into the driver's memory. >>> sc.parallelize([2, 3, 4, 5, 6]).cache().take(2) [2, 3] >>> sc.parallelize([2, 3, 4, 5, 6]).take(10) [2, 3, 4, 5, 6] >>> sc.parallelize(range(100), 100).filter(lambda x: x > 90).take(3) [91, 92, 93] """ items = [] totalParts = self.getNumPartitions() partsScanned = 0 while len(items) < num and partsScanned < totalParts: # The number of partitions to try in this iteration. # It is ok for this number to be greater than totalParts because # we actually cap it at totalParts in runJob. numPartsToTry = 1 if partsScanned > 0: # If we didn't find any rows after the previous iteration, # quadruple and retry. Otherwise, interpolate the number of # partitions we need to try, but overestimate it by 50%. # We also cap the estimation in the end. if len(items) == 0: numPartsToTry = partsScanned * 4 else: # the first parameter of max is >=1 whenever partsScanned >= 2 numPartsToTry = int(1.5 * num * partsScanned / len(items)) - partsScanned numPartsToTry = min(max(numPartsToTry, 1), partsScanned * 4) left = num - len(items) def takeUpToNumLeft(iterator): iterator = iter(iterator) taken = 0 while taken < left: try: yield next(iterator) except StopIteration: return taken += 1 p = range(partsScanned, min(partsScanned + numPartsToTry, totalParts)) res = self.context.runJob(self, takeUpToNumLeft, p) items += res partsScanned += numPartsToTry return items[:num]
[ "def", "take", "(", "self", ",", "num", ")", ":", "items", "=", "[", "]", "totalParts", "=", "self", ".", "getNumPartitions", "(", ")", "partsScanned", "=", "0", "while", "len", "(", "items", ")", "<", "num", "and", "partsScanned", "<", "totalParts", ":", "# The number of partitions to try in this iteration.", "# It is ok for this number to be greater than totalParts because", "# we actually cap it at totalParts in runJob.", "numPartsToTry", "=", "1", "if", "partsScanned", ">", "0", ":", "# If we didn't find any rows after the previous iteration,", "# quadruple and retry. Otherwise, interpolate the number of", "# partitions we need to try, but overestimate it by 50%.", "# We also cap the estimation in the end.", "if", "len", "(", "items", ")", "==", "0", ":", "numPartsToTry", "=", "partsScanned", "*", "4", "else", ":", "# the first parameter of max is >=1 whenever partsScanned >= 2", "numPartsToTry", "=", "int", "(", "1.5", "*", "num", "*", "partsScanned", "/", "len", "(", "items", ")", ")", "-", "partsScanned", "numPartsToTry", "=", "min", "(", "max", "(", "numPartsToTry", ",", "1", ")", ",", "partsScanned", "*", "4", ")", "left", "=", "num", "-", "len", "(", "items", ")", "def", "takeUpToNumLeft", "(", "iterator", ")", ":", "iterator", "=", "iter", "(", "iterator", ")", "taken", "=", "0", "while", "taken", "<", "left", ":", "try", ":", "yield", "next", "(", "iterator", ")", "except", "StopIteration", ":", "return", "taken", "+=", "1", "p", "=", "range", "(", "partsScanned", ",", "min", "(", "partsScanned", "+", "numPartsToTry", ",", "totalParts", ")", ")", "res", "=", "self", ".", "context", ".", "runJob", "(", "self", ",", "takeUpToNumLeft", ",", "p", ")", "items", "+=", "res", "partsScanned", "+=", "numPartsToTry", "return", "items", "[", ":", "num", "]" ]
Take the first num elements of the RDD. It works by first scanning one partition, and using the results from that partition to estimate the number of additional partitions needed to satisfy the limit. Translated from the Scala implementation in RDD#take(). .. note:: this method should only be used if the resulting array is expected to be small, as all the data is loaded into the driver's memory. >>> sc.parallelize([2, 3, 4, 5, 6]).cache().take(2) [2, 3] >>> sc.parallelize([2, 3, 4, 5, 6]).take(10) [2, 3, 4, 5, 6] >>> sc.parallelize(range(100), 100).filter(lambda x: x > 90).take(3) [91, 92, 93]
[ "Take", "the", "first", "num", "elements", "of", "the", "RDD", "." ]
618d6bff71073c8c93501ab7392c3cc579730f0b
https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/rdd.py#L1308-L1367
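The take() record above describes a partition-growth heuristic: quadruple the scan when the previous pass found nothing, otherwise interpolate from the hit rate, overshoot by 50%, and cap at four times the partitions already scanned. Below is a minimal standalone sketch of that rule; the helper name next_parts_to_try is invented here for illustration and is not part of the RDD API.

# Toy re-creation of the partition-growth heuristic described in RDD.take();
# this only mirrors the arithmetic, it does not run any Spark job.
def next_parts_to_try(num, parts_scanned, items_found):
    if parts_scanned == 0:
        return 1                                 # first pass: scan a single partition
    if items_found == 0:
        return parts_scanned * 4                 # nothing found yet: quadruple the scan
    estimate = int(1.5 * num * parts_scanned / items_found) - parts_scanned
    return min(max(estimate, 1), parts_scanned * 4)   # overshoot by 50%, but cap the growth

print(next_parts_to_try(1000, 1, 0))    # 4  -> found nothing, quadruple the scanned count
print(next_parts_to_try(200, 20, 100))  # 40 -> ~60 partitions estimated in total, 40 more to scan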
19,021
apache/spark
python/pyspark/rdd.py
RDD.saveAsTextFile
def saveAsTextFile(self, path, compressionCodecClass=None): """ Save this RDD as a text file, using string representations of elements. @param path: path to text file @param compressionCodecClass: (None by default) string i.e. "org.apache.hadoop.io.compress.GzipCodec" >>> tempFile = NamedTemporaryFile(delete=True) >>> tempFile.close() >>> sc.parallelize(range(10)).saveAsTextFile(tempFile.name) >>> from fileinput import input >>> from glob import glob >>> ''.join(sorted(input(glob(tempFile.name + "/part-0000*")))) '0\\n1\\n2\\n3\\n4\\n5\\n6\\n7\\n8\\n9\\n' Empty lines are tolerated when saving to text files. >>> tempFile2 = NamedTemporaryFile(delete=True) >>> tempFile2.close() >>> sc.parallelize(['', 'foo', '', 'bar', '']).saveAsTextFile(tempFile2.name) >>> ''.join(sorted(input(glob(tempFile2.name + "/part-0000*")))) '\\n\\n\\nbar\\nfoo\\n' Using compressionCodecClass >>> tempFile3 = NamedTemporaryFile(delete=True) >>> tempFile3.close() >>> codec = "org.apache.hadoop.io.compress.GzipCodec" >>> sc.parallelize(['foo', 'bar']).saveAsTextFile(tempFile3.name, codec) >>> from fileinput import input, hook_compressed >>> result = sorted(input(glob(tempFile3.name + "/part*.gz"), openhook=hook_compressed)) >>> b''.join(result).decode('utf-8') u'bar\\nfoo\\n' """ def func(split, iterator): for x in iterator: if not isinstance(x, (unicode, bytes)): x = unicode(x) if isinstance(x, unicode): x = x.encode("utf-8") yield x keyed = self.mapPartitionsWithIndex(func) keyed._bypass_serializer = True if compressionCodecClass: compressionCodec = self.ctx._jvm.java.lang.Class.forName(compressionCodecClass) keyed._jrdd.map(self.ctx._jvm.BytesToString()).saveAsTextFile(path, compressionCodec) else: keyed._jrdd.map(self.ctx._jvm.BytesToString()).saveAsTextFile(path)
python
def saveAsTextFile(self, path, compressionCodecClass=None): """ Save this RDD as a text file, using string representations of elements. @param path: path to text file @param compressionCodecClass: (None by default) string i.e. "org.apache.hadoop.io.compress.GzipCodec" >>> tempFile = NamedTemporaryFile(delete=True) >>> tempFile.close() >>> sc.parallelize(range(10)).saveAsTextFile(tempFile.name) >>> from fileinput import input >>> from glob import glob >>> ''.join(sorted(input(glob(tempFile.name + "/part-0000*")))) '0\\n1\\n2\\n3\\n4\\n5\\n6\\n7\\n8\\n9\\n' Empty lines are tolerated when saving to text files. >>> tempFile2 = NamedTemporaryFile(delete=True) >>> tempFile2.close() >>> sc.parallelize(['', 'foo', '', 'bar', '']).saveAsTextFile(tempFile2.name) >>> ''.join(sorted(input(glob(tempFile2.name + "/part-0000*")))) '\\n\\n\\nbar\\nfoo\\n' Using compressionCodecClass >>> tempFile3 = NamedTemporaryFile(delete=True) >>> tempFile3.close() >>> codec = "org.apache.hadoop.io.compress.GzipCodec" >>> sc.parallelize(['foo', 'bar']).saveAsTextFile(tempFile3.name, codec) >>> from fileinput import input, hook_compressed >>> result = sorted(input(glob(tempFile3.name + "/part*.gz"), openhook=hook_compressed)) >>> b''.join(result).decode('utf-8') u'bar\\nfoo\\n' """ def func(split, iterator): for x in iterator: if not isinstance(x, (unicode, bytes)): x = unicode(x) if isinstance(x, unicode): x = x.encode("utf-8") yield x keyed = self.mapPartitionsWithIndex(func) keyed._bypass_serializer = True if compressionCodecClass: compressionCodec = self.ctx._jvm.java.lang.Class.forName(compressionCodecClass) keyed._jrdd.map(self.ctx._jvm.BytesToString()).saveAsTextFile(path, compressionCodec) else: keyed._jrdd.map(self.ctx._jvm.BytesToString()).saveAsTextFile(path)
[ "def", "saveAsTextFile", "(", "self", ",", "path", ",", "compressionCodecClass", "=", "None", ")", ":", "def", "func", "(", "split", ",", "iterator", ")", ":", "for", "x", "in", "iterator", ":", "if", "not", "isinstance", "(", "x", ",", "(", "unicode", ",", "bytes", ")", ")", ":", "x", "=", "unicode", "(", "x", ")", "if", "isinstance", "(", "x", ",", "unicode", ")", ":", "x", "=", "x", ".", "encode", "(", "\"utf-8\"", ")", "yield", "x", "keyed", "=", "self", ".", "mapPartitionsWithIndex", "(", "func", ")", "keyed", ".", "_bypass_serializer", "=", "True", "if", "compressionCodecClass", ":", "compressionCodec", "=", "self", ".", "ctx", ".", "_jvm", ".", "java", ".", "lang", ".", "Class", ".", "forName", "(", "compressionCodecClass", ")", "keyed", ".", "_jrdd", ".", "map", "(", "self", ".", "ctx", ".", "_jvm", ".", "BytesToString", "(", ")", ")", ".", "saveAsTextFile", "(", "path", ",", "compressionCodec", ")", "else", ":", "keyed", ".", "_jrdd", ".", "map", "(", "self", ".", "ctx", ".", "_jvm", ".", "BytesToString", "(", ")", ")", ".", "saveAsTextFile", "(", "path", ")" ]
Save this RDD as a text file, using string representations of elements. @param path: path to text file @param compressionCodecClass: (None by default) string i.e. "org.apache.hadoop.io.compress.GzipCodec" >>> tempFile = NamedTemporaryFile(delete=True) >>> tempFile.close() >>> sc.parallelize(range(10)).saveAsTextFile(tempFile.name) >>> from fileinput import input >>> from glob import glob >>> ''.join(sorted(input(glob(tempFile.name + "/part-0000*")))) '0\\n1\\n2\\n3\\n4\\n5\\n6\\n7\\n8\\n9\\n' Empty lines are tolerated when saving to text files. >>> tempFile2 = NamedTemporaryFile(delete=True) >>> tempFile2.close() >>> sc.parallelize(['', 'foo', '', 'bar', '']).saveAsTextFile(tempFile2.name) >>> ''.join(sorted(input(glob(tempFile2.name + "/part-0000*")))) '\\n\\n\\nbar\\nfoo\\n' Using compressionCodecClass >>> tempFile3 = NamedTemporaryFile(delete=True) >>> tempFile3.close() >>> codec = "org.apache.hadoop.io.compress.GzipCodec" >>> sc.parallelize(['foo', 'bar']).saveAsTextFile(tempFile3.name, codec) >>> from fileinput import input, hook_compressed >>> result = sorted(input(glob(tempFile3.name + "/part*.gz"), openhook=hook_compressed)) >>> b''.join(result).decode('utf-8') u'bar\\nfoo\\n'
[ "Save", "this", "RDD", "as", "a", "text", "file", "using", "string", "representations", "of", "elements", "." ]
618d6bff71073c8c93501ab7392c3cc579730f0b
https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/rdd.py#L1524-L1572
19,022
apache/spark
python/pyspark/rdd.py
RDD.reduceByKey
def reduceByKey(self, func, numPartitions=None, partitionFunc=portable_hash): """ Merge the values for each key using an associative and commutative reduce function. This will also perform the merging locally on each mapper before sending results to a reducer, similarly to a "combiner" in MapReduce. Output will be partitioned with C{numPartitions} partitions, or the default parallelism level if C{numPartitions} is not specified. Default partitioner is hash-partition. >>> from operator import add >>> rdd = sc.parallelize([("a", 1), ("b", 1), ("a", 1)]) >>> sorted(rdd.reduceByKey(add).collect()) [('a', 2), ('b', 1)] """ return self.combineByKey(lambda x: x, func, func, numPartitions, partitionFunc)
python
def reduceByKey(self, func, numPartitions=None, partitionFunc=portable_hash): """ Merge the values for each key using an associative and commutative reduce function. This will also perform the merging locally on each mapper before sending results to a reducer, similarly to a "combiner" in MapReduce. Output will be partitioned with C{numPartitions} partitions, or the default parallelism level if C{numPartitions} is not specified. Default partitioner is hash-partition. >>> from operator import add >>> rdd = sc.parallelize([("a", 1), ("b", 1), ("a", 1)]) >>> sorted(rdd.reduceByKey(add).collect()) [('a', 2), ('b', 1)] """ return self.combineByKey(lambda x: x, func, func, numPartitions, partitionFunc)
[ "def", "reduceByKey", "(", "self", ",", "func", ",", "numPartitions", "=", "None", ",", "partitionFunc", "=", "portable_hash", ")", ":", "return", "self", ".", "combineByKey", "(", "lambda", "x", ":", "x", ",", "func", ",", "func", ",", "numPartitions", ",", "partitionFunc", ")" ]
Merge the values for each key using an associative and commutative reduce function. This will also perform the merging locally on each mapper before sending results to a reducer, similarly to a "combiner" in MapReduce. Output will be partitioned with C{numPartitions} partitions, or the default parallelism level if C{numPartitions} is not specified. Default partitioner is hash-partition. >>> from operator import add >>> rdd = sc.parallelize([("a", 1), ("b", 1), ("a", 1)]) >>> sorted(rdd.reduceByKey(add).collect()) [('a', 2), ('b', 1)]
[ "Merge", "the", "values", "for", "each", "key", "using", "an", "associative", "and", "commutative", "reduce", "function", "." ]
618d6bff71073c8c93501ab7392c3cc579730f0b
https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/rdd.py#L1611-L1627
19,023
apache/spark
python/pyspark/rdd.py
RDD.reduceByKeyLocally
def reduceByKeyLocally(self, func): """ Merge the values for each key using an associative and commutative reduce function, but return the results immediately to the master as a dictionary. This will also perform the merging locally on each mapper before sending results to a reducer, similarly to a "combiner" in MapReduce. >>> from operator import add >>> rdd = sc.parallelize([("a", 1), ("b", 1), ("a", 1)]) >>> sorted(rdd.reduceByKeyLocally(add).items()) [('a', 2), ('b', 1)] """ func = fail_on_stopiteration(func) def reducePartition(iterator): m = {} for k, v in iterator: m[k] = func(m[k], v) if k in m else v yield m def mergeMaps(m1, m2): for k, v in m2.items(): m1[k] = func(m1[k], v) if k in m1 else v return m1 return self.mapPartitions(reducePartition).reduce(mergeMaps)
python
def reduceByKeyLocally(self, func): """ Merge the values for each key using an associative and commutative reduce function, but return the results immediately to the master as a dictionary. This will also perform the merging locally on each mapper before sending results to a reducer, similarly to a "combiner" in MapReduce. >>> from operator import add >>> rdd = sc.parallelize([("a", 1), ("b", 1), ("a", 1)]) >>> sorted(rdd.reduceByKeyLocally(add).items()) [('a', 2), ('b', 1)] """ func = fail_on_stopiteration(func) def reducePartition(iterator): m = {} for k, v in iterator: m[k] = func(m[k], v) if k in m else v yield m def mergeMaps(m1, m2): for k, v in m2.items(): m1[k] = func(m1[k], v) if k in m1 else v return m1 return self.mapPartitions(reducePartition).reduce(mergeMaps)
[ "def", "reduceByKeyLocally", "(", "self", ",", "func", ")", ":", "func", "=", "fail_on_stopiteration", "(", "func", ")", "def", "reducePartition", "(", "iterator", ")", ":", "m", "=", "{", "}", "for", "k", ",", "v", "in", "iterator", ":", "m", "[", "k", "]", "=", "func", "(", "m", "[", "k", "]", ",", "v", ")", "if", "k", "in", "m", "else", "v", "yield", "m", "def", "mergeMaps", "(", "m1", ",", "m2", ")", ":", "for", "k", ",", "v", "in", "m2", ".", "items", "(", ")", ":", "m1", "[", "k", "]", "=", "func", "(", "m1", "[", "k", "]", ",", "v", ")", "if", "k", "in", "m1", "else", "v", "return", "m1", "return", "self", ".", "mapPartitions", "(", "reducePartition", ")", ".", "reduce", "(", "mergeMaps", ")" ]
Merge the values for each key using an associative and commutative reduce function, but return the results immediately to the master as a dictionary. This will also perform the merging locally on each mapper before sending results to a reducer, similarly to a "combiner" in MapReduce. >>> from operator import add >>> rdd = sc.parallelize([("a", 1), ("b", 1), ("a", 1)]) >>> sorted(rdd.reduceByKeyLocally(add).items()) [('a', 2), ('b', 1)]
[ "Merge", "the", "values", "for", "each", "key", "using", "an", "associative", "and", "commutative", "reduce", "function", "but", "return", "the", "results", "immediately", "to", "the", "master", "as", "a", "dictionary", "." ]
618d6bff71073c8c93501ab7392c3cc579730f0b
https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/rdd.py#L1629-L1654
19,024
apache/spark
python/pyspark/rdd.py
RDD.partitionBy
def partitionBy(self, numPartitions, partitionFunc=portable_hash): """ Return a copy of the RDD partitioned using the specified partitioner. >>> pairs = sc.parallelize([1, 2, 3, 4, 2, 4, 1]).map(lambda x: (x, x)) >>> sets = pairs.partitionBy(2).glom().collect() >>> len(set(sets[0]).intersection(set(sets[1]))) 0 """ if numPartitions is None: numPartitions = self._defaultReducePartitions() partitioner = Partitioner(numPartitions, partitionFunc) if self.partitioner == partitioner: return self # Transferring O(n) objects to Java is too expensive. # Instead, we'll form the hash buckets in Python, # transferring O(numPartitions) objects to Java. # Each object is a (splitNumber, [objects]) pair. # In order to avoid too huge objects, the objects are # grouped into chunks. outputSerializer = self.ctx._unbatched_serializer limit = (_parse_memory(self.ctx._conf.get( "spark.python.worker.memory", "512m")) / 2) def add_shuffle_key(split, iterator): buckets = defaultdict(list) c, batch = 0, min(10 * numPartitions, 1000) for k, v in iterator: buckets[partitionFunc(k) % numPartitions].append((k, v)) c += 1 # check used memory and avg size of chunk of objects if (c % 1000 == 0 and get_used_memory() > limit or c > batch): n, size = len(buckets), 0 for split in list(buckets.keys()): yield pack_long(split) d = outputSerializer.dumps(buckets[split]) del buckets[split] yield d size += len(d) avg = int(size / n) >> 20 # let 1M < avg < 10M if avg < 1: batch *= 1.5 elif avg > 10: batch = max(int(batch / 1.5), 1) c = 0 for split, items in buckets.items(): yield pack_long(split) yield outputSerializer.dumps(items) keyed = self.mapPartitionsWithIndex(add_shuffle_key, preservesPartitioning=True) keyed._bypass_serializer = True with SCCallSiteSync(self.context) as css: pairRDD = self.ctx._jvm.PairwiseRDD( keyed._jrdd.rdd()).asJavaPairRDD() jpartitioner = self.ctx._jvm.PythonPartitioner(numPartitions, id(partitionFunc)) jrdd = self.ctx._jvm.PythonRDD.valueOfPair(pairRDD.partitionBy(jpartitioner)) rdd = RDD(jrdd, self.ctx, BatchedSerializer(outputSerializer)) rdd.partitioner = partitioner return rdd
python
def partitionBy(self, numPartitions, partitionFunc=portable_hash): """ Return a copy of the RDD partitioned using the specified partitioner. >>> pairs = sc.parallelize([1, 2, 3, 4, 2, 4, 1]).map(lambda x: (x, x)) >>> sets = pairs.partitionBy(2).glom().collect() >>> len(set(sets[0]).intersection(set(sets[1]))) 0 """ if numPartitions is None: numPartitions = self._defaultReducePartitions() partitioner = Partitioner(numPartitions, partitionFunc) if self.partitioner == partitioner: return self # Transferring O(n) objects to Java is too expensive. # Instead, we'll form the hash buckets in Python, # transferring O(numPartitions) objects to Java. # Each object is a (splitNumber, [objects]) pair. # In order to avoid too huge objects, the objects are # grouped into chunks. outputSerializer = self.ctx._unbatched_serializer limit = (_parse_memory(self.ctx._conf.get( "spark.python.worker.memory", "512m")) / 2) def add_shuffle_key(split, iterator): buckets = defaultdict(list) c, batch = 0, min(10 * numPartitions, 1000) for k, v in iterator: buckets[partitionFunc(k) % numPartitions].append((k, v)) c += 1 # check used memory and avg size of chunk of objects if (c % 1000 == 0 and get_used_memory() > limit or c > batch): n, size = len(buckets), 0 for split in list(buckets.keys()): yield pack_long(split) d = outputSerializer.dumps(buckets[split]) del buckets[split] yield d size += len(d) avg = int(size / n) >> 20 # let 1M < avg < 10M if avg < 1: batch *= 1.5 elif avg > 10: batch = max(int(batch / 1.5), 1) c = 0 for split, items in buckets.items(): yield pack_long(split) yield outputSerializer.dumps(items) keyed = self.mapPartitionsWithIndex(add_shuffle_key, preservesPartitioning=True) keyed._bypass_serializer = True with SCCallSiteSync(self.context) as css: pairRDD = self.ctx._jvm.PairwiseRDD( keyed._jrdd.rdd()).asJavaPairRDD() jpartitioner = self.ctx._jvm.PythonPartitioner(numPartitions, id(partitionFunc)) jrdd = self.ctx._jvm.PythonRDD.valueOfPair(pairRDD.partitionBy(jpartitioner)) rdd = RDD(jrdd, self.ctx, BatchedSerializer(outputSerializer)) rdd.partitioner = partitioner return rdd
[ "def", "partitionBy", "(", "self", ",", "numPartitions", ",", "partitionFunc", "=", "portable_hash", ")", ":", "if", "numPartitions", "is", "None", ":", "numPartitions", "=", "self", ".", "_defaultReducePartitions", "(", ")", "partitioner", "=", "Partitioner", "(", "numPartitions", ",", "partitionFunc", ")", "if", "self", ".", "partitioner", "==", "partitioner", ":", "return", "self", "# Transferring O(n) objects to Java is too expensive.", "# Instead, we'll form the hash buckets in Python,", "# transferring O(numPartitions) objects to Java.", "# Each object is a (splitNumber, [objects]) pair.", "# In order to avoid too huge objects, the objects are", "# grouped into chunks.", "outputSerializer", "=", "self", ".", "ctx", ".", "_unbatched_serializer", "limit", "=", "(", "_parse_memory", "(", "self", ".", "ctx", ".", "_conf", ".", "get", "(", "\"spark.python.worker.memory\"", ",", "\"512m\"", ")", ")", "/", "2", ")", "def", "add_shuffle_key", "(", "split", ",", "iterator", ")", ":", "buckets", "=", "defaultdict", "(", "list", ")", "c", ",", "batch", "=", "0", ",", "min", "(", "10", "*", "numPartitions", ",", "1000", ")", "for", "k", ",", "v", "in", "iterator", ":", "buckets", "[", "partitionFunc", "(", "k", ")", "%", "numPartitions", "]", ".", "append", "(", "(", "k", ",", "v", ")", ")", "c", "+=", "1", "# check used memory and avg size of chunk of objects", "if", "(", "c", "%", "1000", "==", "0", "and", "get_used_memory", "(", ")", ">", "limit", "or", "c", ">", "batch", ")", ":", "n", ",", "size", "=", "len", "(", "buckets", ")", ",", "0", "for", "split", "in", "list", "(", "buckets", ".", "keys", "(", ")", ")", ":", "yield", "pack_long", "(", "split", ")", "d", "=", "outputSerializer", ".", "dumps", "(", "buckets", "[", "split", "]", ")", "del", "buckets", "[", "split", "]", "yield", "d", "size", "+=", "len", "(", "d", ")", "avg", "=", "int", "(", "size", "/", "n", ")", ">>", "20", "# let 1M < avg < 10M", "if", "avg", "<", "1", ":", "batch", "*=", "1.5", "elif", "avg", ">", "10", ":", "batch", "=", "max", "(", "int", "(", "batch", "/", "1.5", ")", ",", "1", ")", "c", "=", "0", "for", "split", ",", "items", "in", "buckets", ".", "items", "(", ")", ":", "yield", "pack_long", "(", "split", ")", "yield", "outputSerializer", ".", "dumps", "(", "items", ")", "keyed", "=", "self", ".", "mapPartitionsWithIndex", "(", "add_shuffle_key", ",", "preservesPartitioning", "=", "True", ")", "keyed", ".", "_bypass_serializer", "=", "True", "with", "SCCallSiteSync", "(", "self", ".", "context", ")", "as", "css", ":", "pairRDD", "=", "self", ".", "ctx", ".", "_jvm", ".", "PairwiseRDD", "(", "keyed", ".", "_jrdd", ".", "rdd", "(", ")", ")", ".", "asJavaPairRDD", "(", ")", "jpartitioner", "=", "self", ".", "ctx", ".", "_jvm", ".", "PythonPartitioner", "(", "numPartitions", ",", "id", "(", "partitionFunc", ")", ")", "jrdd", "=", "self", ".", "ctx", ".", "_jvm", ".", "PythonRDD", ".", "valueOfPair", "(", "pairRDD", ".", "partitionBy", "(", "jpartitioner", ")", ")", "rdd", "=", "RDD", "(", "jrdd", ",", "self", ".", "ctx", ",", "BatchedSerializer", "(", "outputSerializer", ")", ")", "rdd", ".", "partitioner", "=", "partitioner", "return", "rdd" ]
Return a copy of the RDD partitioned using the specified partitioner. >>> pairs = sc.parallelize([1, 2, 3, 4, 2, 4, 1]).map(lambda x: (x, x)) >>> sets = pairs.partitionBy(2).glom().collect() >>> len(set(sets[0]).intersection(set(sets[1]))) 0
[ "Return", "a", "copy", "of", "the", "RDD", "partitioned", "using", "the", "specified", "partitioner", "." ]
618d6bff71073c8c93501ab7392c3cc579730f0b
https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/rdd.py#L1742-L1810
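Beyond the default portable_hash shown in the partitionBy docstring above, partitionFunc can be any key-to-int function. The following is a hedged usage sketch, assuming an active SparkContext bound to sc as in the doctests; the sample data and the first-letter routing function are made up for illustration.

# Route pairs into 2 partitions by the parity of the key's first character,
# instead of the default portable_hash.
pairs = sc.parallelize([("apple", 1), ("banana", 2), ("avocado", 3)])
by_initial = pairs.partitionBy(2, partitionFunc=lambda k: ord(k[0]) % 2)
print(by_initial.glom().collect())  # "apple" and "avocado" land in the same partition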
19,025
apache/spark
python/pyspark/rdd.py
RDD.combineByKey
def combineByKey(self, createCombiner, mergeValue, mergeCombiners, numPartitions=None, partitionFunc=portable_hash): """ Generic function to combine the elements for each key using a custom set of aggregation functions. Turns an RDD[(K, V)] into a result of type RDD[(K, C)], for a "combined type" C. Users provide three functions: - C{createCombiner}, which turns a V into a C (e.g., creates a one-element list) - C{mergeValue}, to merge a V into a C (e.g., adds it to the end of a list) - C{mergeCombiners}, to combine two C's into a single one (e.g., merges the lists) To avoid memory allocation, both mergeValue and mergeCombiners are allowed to modify and return their first argument instead of creating a new C. In addition, users can control the partitioning of the output RDD. .. note:: V and C can be different -- for example, one might group an RDD of type (Int, Int) into an RDD of type (Int, List[Int]). >>> x = sc.parallelize([("a", 1), ("b", 1), ("a", 2)]) >>> def to_list(a): ... return [a] ... >>> def append(a, b): ... a.append(b) ... return a ... >>> def extend(a, b): ... a.extend(b) ... return a ... >>> sorted(x.combineByKey(to_list, append, extend).collect()) [('a', [1, 2]), ('b', [1])] """ if numPartitions is None: numPartitions = self._defaultReducePartitions() serializer = self.ctx.serializer memory = self._memory_limit() agg = Aggregator(createCombiner, mergeValue, mergeCombiners) def combineLocally(iterator): merger = ExternalMerger(agg, memory * 0.9, serializer) merger.mergeValues(iterator) return merger.items() locally_combined = self.mapPartitions(combineLocally, preservesPartitioning=True) shuffled = locally_combined.partitionBy(numPartitions, partitionFunc) def _mergeCombiners(iterator): merger = ExternalMerger(agg, memory, serializer) merger.mergeCombiners(iterator) return merger.items() return shuffled.mapPartitions(_mergeCombiners, preservesPartitioning=True)
python
def combineByKey(self, createCombiner, mergeValue, mergeCombiners, numPartitions=None, partitionFunc=portable_hash): """ Generic function to combine the elements for each key using a custom set of aggregation functions. Turns an RDD[(K, V)] into a result of type RDD[(K, C)], for a "combined type" C. Users provide three functions: - C{createCombiner}, which turns a V into a C (e.g., creates a one-element list) - C{mergeValue}, to merge a V into a C (e.g., adds it to the end of a list) - C{mergeCombiners}, to combine two C's into a single one (e.g., merges the lists) To avoid memory allocation, both mergeValue and mergeCombiners are allowed to modify and return their first argument instead of creating a new C. In addition, users can control the partitioning of the output RDD. .. note:: V and C can be different -- for example, one might group an RDD of type (Int, Int) into an RDD of type (Int, List[Int]). >>> x = sc.parallelize([("a", 1), ("b", 1), ("a", 2)]) >>> def to_list(a): ... return [a] ... >>> def append(a, b): ... a.append(b) ... return a ... >>> def extend(a, b): ... a.extend(b) ... return a ... >>> sorted(x.combineByKey(to_list, append, extend).collect()) [('a', [1, 2]), ('b', [1])] """ if numPartitions is None: numPartitions = self._defaultReducePartitions() serializer = self.ctx.serializer memory = self._memory_limit() agg = Aggregator(createCombiner, mergeValue, mergeCombiners) def combineLocally(iterator): merger = ExternalMerger(agg, memory * 0.9, serializer) merger.mergeValues(iterator) return merger.items() locally_combined = self.mapPartitions(combineLocally, preservesPartitioning=True) shuffled = locally_combined.partitionBy(numPartitions, partitionFunc) def _mergeCombiners(iterator): merger = ExternalMerger(agg, memory, serializer) merger.mergeCombiners(iterator) return merger.items() return shuffled.mapPartitions(_mergeCombiners, preservesPartitioning=True)
[ "def", "combineByKey", "(", "self", ",", "createCombiner", ",", "mergeValue", ",", "mergeCombiners", ",", "numPartitions", "=", "None", ",", "partitionFunc", "=", "portable_hash", ")", ":", "if", "numPartitions", "is", "None", ":", "numPartitions", "=", "self", ".", "_defaultReducePartitions", "(", ")", "serializer", "=", "self", ".", "ctx", ".", "serializer", "memory", "=", "self", ".", "_memory_limit", "(", ")", "agg", "=", "Aggregator", "(", "createCombiner", ",", "mergeValue", ",", "mergeCombiners", ")", "def", "combineLocally", "(", "iterator", ")", ":", "merger", "=", "ExternalMerger", "(", "agg", ",", "memory", "*", "0.9", ",", "serializer", ")", "merger", ".", "mergeValues", "(", "iterator", ")", "return", "merger", ".", "items", "(", ")", "locally_combined", "=", "self", ".", "mapPartitions", "(", "combineLocally", ",", "preservesPartitioning", "=", "True", ")", "shuffled", "=", "locally_combined", ".", "partitionBy", "(", "numPartitions", ",", "partitionFunc", ")", "def", "_mergeCombiners", "(", "iterator", ")", ":", "merger", "=", "ExternalMerger", "(", "agg", ",", "memory", ",", "serializer", ")", "merger", ".", "mergeCombiners", "(", "iterator", ")", "return", "merger", ".", "items", "(", ")", "return", "shuffled", ".", "mapPartitions", "(", "_mergeCombiners", ",", "preservesPartitioning", "=", "True", ")" ]
Generic function to combine the elements for each key using a custom set of aggregation functions. Turns an RDD[(K, V)] into a result of type RDD[(K, C)], for a "combined type" C. Users provide three functions: - C{createCombiner}, which turns a V into a C (e.g., creates a one-element list) - C{mergeValue}, to merge a V into a C (e.g., adds it to the end of a list) - C{mergeCombiners}, to combine two C's into a single one (e.g., merges the lists) To avoid memory allocation, both mergeValue and mergeCombiners are allowed to modify and return their first argument instead of creating a new C. In addition, users can control the partitioning of the output RDD. .. note:: V and C can be different -- for example, one might group an RDD of type (Int, Int) into an RDD of type (Int, List[Int]). >>> x = sc.parallelize([("a", 1), ("b", 1), ("a", 2)]) >>> def to_list(a): ... return [a] ... >>> def append(a, b): ... a.append(b) ... return a ... >>> def extend(a, b): ... a.extend(b) ... return a ... >>> sorted(x.combineByKey(to_list, append, extend).collect()) [('a', [1, 2]), ('b', [1])]
[ "Generic", "function", "to", "combine", "the", "elements", "for", "each", "key", "using", "a", "custom", "set", "of", "aggregation", "functions", "." ]
618d6bff71073c8c93501ab7392c3cc579730f0b
https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/rdd.py#L1813-L1874
19,026
apache/spark
python/pyspark/rdd.py
RDD.aggregateByKey
def aggregateByKey(self, zeroValue, seqFunc, combFunc, numPartitions=None, partitionFunc=portable_hash): """ Aggregate the values of each key, using given combine functions and a neutral "zero value". This function can return a different result type, U, than the type of the values in this RDD, V. Thus, we need one operation for merging a V into a U and one operation for merging two U's, The former operation is used for merging values within a partition, and the latter is used for merging values between partitions. To avoid memory allocation, both of these functions are allowed to modify and return their first argument instead of creating a new U. """ def createZero(): return copy.deepcopy(zeroValue) return self.combineByKey( lambda v: seqFunc(createZero(), v), seqFunc, combFunc, numPartitions, partitionFunc)
python
def aggregateByKey(self, zeroValue, seqFunc, combFunc, numPartitions=None, partitionFunc=portable_hash): """ Aggregate the values of each key, using given combine functions and a neutral "zero value". This function can return a different result type, U, than the type of the values in this RDD, V. Thus, we need one operation for merging a V into a U and one operation for merging two U's, The former operation is used for merging values within a partition, and the latter is used for merging values between partitions. To avoid memory allocation, both of these functions are allowed to modify and return their first argument instead of creating a new U. """ def createZero(): return copy.deepcopy(zeroValue) return self.combineByKey( lambda v: seqFunc(createZero(), v), seqFunc, combFunc, numPartitions, partitionFunc)
[ "def", "aggregateByKey", "(", "self", ",", "zeroValue", ",", "seqFunc", ",", "combFunc", ",", "numPartitions", "=", "None", ",", "partitionFunc", "=", "portable_hash", ")", ":", "def", "createZero", "(", ")", ":", "return", "copy", ".", "deepcopy", "(", "zeroValue", ")", "return", "self", ".", "combineByKey", "(", "lambda", "v", ":", "seqFunc", "(", "createZero", "(", ")", ",", "v", ")", ",", "seqFunc", ",", "combFunc", ",", "numPartitions", ",", "partitionFunc", ")" ]
Aggregate the values of each key, using the given combine functions and a neutral "zero value". This function can return a different result type, U, than the type of the values in this RDD, V. Thus, we need one operation for merging a V into a U and one operation for merging two U's. The former operation is used for merging values within a partition, and the latter is used for merging values between partitions. To avoid memory allocation, both of these functions are allowed to modify and return their first argument instead of creating a new U.
[ "Aggregate", "the", "values", "of", "each", "key", "using", "given", "combine", "functions", "and", "a", "neutral", "zero", "value", ".", "This", "function", "can", "return", "a", "different", "result", "type", "U", "than", "the", "type", "of", "the", "values", "in", "this", "RDD", "V", ".", "Thus", "we", "need", "one", "operation", "for", "merging", "a", "V", "into", "a", "U", "and", "one", "operation", "for", "merging", "two", "U", "s", "The", "former", "operation", "is", "used", "for", "merging", "values", "within", "a", "partition", "and", "the", "latter", "is", "used", "for", "merging", "values", "between", "partitions", ".", "To", "avoid", "memory", "allocation", "both", "of", "these", "functions", "are", "allowed", "to", "modify", "and", "return", "their", "first", "argument", "instead", "of", "creating", "a", "new", "U", "." ]
618d6bff71073c8c93501ab7392c3cc579730f0b
https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/rdd.py#L1876-L1891
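The aggregateByKey docstring above carries no usage example, so here is a hedged sketch that computes a per-key (sum, count) pair, illustrating a result type U (a tuple) that differs from the value type V (an int). It assumes an active SparkContext bound to sc, as in the surrounding doctests.

rdd = sc.parallelize([("a", 1), ("a", 3), ("b", 5)])
zero = (0, 0)                                     # neutral (sum, count)
seq = lambda acc, v: (acc[0] + v, acc[1] + 1)     # merge a V into a U within a partition
comb = lambda a, b: (a[0] + b[0], a[1] + b[1])    # merge two U's across partitions
print(sorted(rdd.aggregateByKey(zero, seq, comb).collect()))
# [('a', (4, 2)), ('b', (5, 1))]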
19,027
apache/spark
python/pyspark/rdd.py
RDD.groupByKey
def groupByKey(self, numPartitions=None, partitionFunc=portable_hash): """ Group the values for each key in the RDD into a single sequence. Hash-partitions the resulting RDD with numPartitions partitions. .. note:: If you are grouping in order to perform an aggregation (such as a sum or average) over each key, using reduceByKey or aggregateByKey will provide much better performance. >>> rdd = sc.parallelize([("a", 1), ("b", 1), ("a", 1)]) >>> sorted(rdd.groupByKey().mapValues(len).collect()) [('a', 2), ('b', 1)] >>> sorted(rdd.groupByKey().mapValues(list).collect()) [('a', [1, 1]), ('b', [1])] """ def createCombiner(x): return [x] def mergeValue(xs, x): xs.append(x) return xs def mergeCombiners(a, b): a.extend(b) return a memory = self._memory_limit() serializer = self._jrdd_deserializer agg = Aggregator(createCombiner, mergeValue, mergeCombiners) def combine(iterator): merger = ExternalMerger(agg, memory * 0.9, serializer) merger.mergeValues(iterator) return merger.items() locally_combined = self.mapPartitions(combine, preservesPartitioning=True) shuffled = locally_combined.partitionBy(numPartitions, partitionFunc) def groupByKey(it): merger = ExternalGroupBy(agg, memory, serializer) merger.mergeCombiners(it) return merger.items() return shuffled.mapPartitions(groupByKey, True).mapValues(ResultIterable)
python
def groupByKey(self, numPartitions=None, partitionFunc=portable_hash): """ Group the values for each key in the RDD into a single sequence. Hash-partitions the resulting RDD with numPartitions partitions. .. note:: If you are grouping in order to perform an aggregation (such as a sum or average) over each key, using reduceByKey or aggregateByKey will provide much better performance. >>> rdd = sc.parallelize([("a", 1), ("b", 1), ("a", 1)]) >>> sorted(rdd.groupByKey().mapValues(len).collect()) [('a', 2), ('b', 1)] >>> sorted(rdd.groupByKey().mapValues(list).collect()) [('a', [1, 1]), ('b', [1])] """ def createCombiner(x): return [x] def mergeValue(xs, x): xs.append(x) return xs def mergeCombiners(a, b): a.extend(b) return a memory = self._memory_limit() serializer = self._jrdd_deserializer agg = Aggregator(createCombiner, mergeValue, mergeCombiners) def combine(iterator): merger = ExternalMerger(agg, memory * 0.9, serializer) merger.mergeValues(iterator) return merger.items() locally_combined = self.mapPartitions(combine, preservesPartitioning=True) shuffled = locally_combined.partitionBy(numPartitions, partitionFunc) def groupByKey(it): merger = ExternalGroupBy(agg, memory, serializer) merger.mergeCombiners(it) return merger.items() return shuffled.mapPartitions(groupByKey, True).mapValues(ResultIterable)
[ "def", "groupByKey", "(", "self", ",", "numPartitions", "=", "None", ",", "partitionFunc", "=", "portable_hash", ")", ":", "def", "createCombiner", "(", "x", ")", ":", "return", "[", "x", "]", "def", "mergeValue", "(", "xs", ",", "x", ")", ":", "xs", ".", "append", "(", "x", ")", "return", "xs", "def", "mergeCombiners", "(", "a", ",", "b", ")", ":", "a", ".", "extend", "(", "b", ")", "return", "a", "memory", "=", "self", ".", "_memory_limit", "(", ")", "serializer", "=", "self", ".", "_jrdd_deserializer", "agg", "=", "Aggregator", "(", "createCombiner", ",", "mergeValue", ",", "mergeCombiners", ")", "def", "combine", "(", "iterator", ")", ":", "merger", "=", "ExternalMerger", "(", "agg", ",", "memory", "*", "0.9", ",", "serializer", ")", "merger", ".", "mergeValues", "(", "iterator", ")", "return", "merger", ".", "items", "(", ")", "locally_combined", "=", "self", ".", "mapPartitions", "(", "combine", ",", "preservesPartitioning", "=", "True", ")", "shuffled", "=", "locally_combined", ".", "partitionBy", "(", "numPartitions", ",", "partitionFunc", ")", "def", "groupByKey", "(", "it", ")", ":", "merger", "=", "ExternalGroupBy", "(", "agg", ",", "memory", ",", "serializer", ")", "merger", ".", "mergeCombiners", "(", "it", ")", "return", "merger", ".", "items", "(", ")", "return", "shuffled", ".", "mapPartitions", "(", "groupByKey", ",", "True", ")", ".", "mapValues", "(", "ResultIterable", ")" ]
Group the values for each key in the RDD into a single sequence. Hash-partitions the resulting RDD with numPartitions partitions. .. note:: If you are grouping in order to perform an aggregation (such as a sum or average) over each key, using reduceByKey or aggregateByKey will provide much better performance. >>> rdd = sc.parallelize([("a", 1), ("b", 1), ("a", 1)]) >>> sorted(rdd.groupByKey().mapValues(len).collect()) [('a', 2), ('b', 1)] >>> sorted(rdd.groupByKey().mapValues(list).collect()) [('a', [1, 1]), ('b', [1])]
[ "Group", "the", "values", "for", "each", "key", "in", "the", "RDD", "into", "a", "single", "sequence", ".", "Hash", "-", "partitions", "the", "resulting", "RDD", "with", "numPartitions", "partitions", "." ]
618d6bff71073c8c93501ab7392c3cc579730f0b
https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/rdd.py#L1915-L1958
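To make the performance note in the groupByKey docstring concrete, the hedged sketch below computes the same per-key sums two ways; reduceByKey combines values map-side before the shuffle, so it moves far less data than groupByKey on large inputs. Again this assumes an sc SparkContext as in the doctests.

from operator import add

rdd = sc.parallelize([("a", 1), ("b", 1), ("a", 1)])
via_group = sorted(rdd.groupByKey().mapValues(sum).collect())   # shuffles every value
via_reduce = sorted(rdd.reduceByKey(add).collect())             # pre-aggregates per partition
assert via_group == via_reduce == [("a", 2), ("b", 1)]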
19,028
apache/spark
python/pyspark/rdd.py
RDD.flatMapValues
def flatMapValues(self, f): """ Pass each value in the key-value pair RDD through a flatMap function without changing the keys; this also retains the original RDD's partitioning. >>> x = sc.parallelize([("a", ["x", "y", "z"]), ("b", ["p", "r"])]) >>> def f(x): return x >>> x.flatMapValues(f).collect() [('a', 'x'), ('a', 'y'), ('a', 'z'), ('b', 'p'), ('b', 'r')] """ flat_map_fn = lambda kv: ((kv[0], x) for x in f(kv[1])) return self.flatMap(flat_map_fn, preservesPartitioning=True)
python
def flatMapValues(self, f): """ Pass each value in the key-value pair RDD through a flatMap function without changing the keys; this also retains the original RDD's partitioning. >>> x = sc.parallelize([("a", ["x", "y", "z"]), ("b", ["p", "r"])]) >>> def f(x): return x >>> x.flatMapValues(f).collect() [('a', 'x'), ('a', 'y'), ('a', 'z'), ('b', 'p'), ('b', 'r')] """ flat_map_fn = lambda kv: ((kv[0], x) for x in f(kv[1])) return self.flatMap(flat_map_fn, preservesPartitioning=True)
[ "def", "flatMapValues", "(", "self", ",", "f", ")", ":", "flat_map_fn", "=", "lambda", "kv", ":", "(", "(", "kv", "[", "0", "]", ",", "x", ")", "for", "x", "in", "f", "(", "kv", "[", "1", "]", ")", ")", "return", "self", ".", "flatMap", "(", "flat_map_fn", ",", "preservesPartitioning", "=", "True", ")" ]
Pass each value in the key-value pair RDD through a flatMap function without changing the keys; this also retains the original RDD's partitioning. >>> x = sc.parallelize([("a", ["x", "y", "z"]), ("b", ["p", "r"])]) >>> def f(x): return x >>> x.flatMapValues(f).collect() [('a', 'x'), ('a', 'y'), ('a', 'z'), ('b', 'p'), ('b', 'r')]
[ "Pass", "each", "value", "in", "the", "key", "-", "value", "pair", "RDD", "through", "a", "flatMap", "function", "without", "changing", "the", "keys", ";", "this", "also", "retains", "the", "original", "RDD", "s", "partitioning", "." ]
618d6bff71073c8c93501ab7392c3cc579730f0b
https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/rdd.py#L1960-L1972
19,029
apache/spark
python/pyspark/rdd.py
RDD.mapValues
def mapValues(self, f): """ Pass each value in the key-value pair RDD through a map function without changing the keys; this also retains the original RDD's partitioning. >>> x = sc.parallelize([("a", ["apple", "banana", "lemon"]), ("b", ["grapes"])]) >>> def f(x): return len(x) >>> x.mapValues(f).collect() [('a', 3), ('b', 1)] """ map_values_fn = lambda kv: (kv[0], f(kv[1])) return self.map(map_values_fn, preservesPartitioning=True)
python
def mapValues(self, f): """ Pass each value in the key-value pair RDD through a map function without changing the keys; this also retains the original RDD's partitioning. >>> x = sc.parallelize([("a", ["apple", "banana", "lemon"]), ("b", ["grapes"])]) >>> def f(x): return len(x) >>> x.mapValues(f).collect() [('a', 3), ('b', 1)] """ map_values_fn = lambda kv: (kv[0], f(kv[1])) return self.map(map_values_fn, preservesPartitioning=True)
[ "def", "mapValues", "(", "self", ",", "f", ")", ":", "map_values_fn", "=", "lambda", "kv", ":", "(", "kv", "[", "0", "]", ",", "f", "(", "kv", "[", "1", "]", ")", ")", "return", "self", ".", "map", "(", "map_values_fn", ",", "preservesPartitioning", "=", "True", ")" ]
Pass each value in the key-value pair RDD through a map function without changing the keys; this also retains the original RDD's partitioning. >>> x = sc.parallelize([("a", ["apple", "banana", "lemon"]), ("b", ["grapes"])]) >>> def f(x): return len(x) >>> x.mapValues(f).collect() [('a', 3), ('b', 1)]
[ "Pass", "each", "value", "in", "the", "key", "-", "value", "pair", "RDD", "through", "a", "map", "function", "without", "changing", "the", "keys", ";", "this", "also", "retains", "the", "original", "RDD", "s", "partitioning", "." ]
618d6bff71073c8c93501ab7392c3cc579730f0b
https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/rdd.py#L1974-L1986
19,030
apache/spark
python/pyspark/rdd.py
RDD.coalesce
def coalesce(self, numPartitions, shuffle=False): """ Return a new RDD that is reduced into `numPartitions` partitions. >>> sc.parallelize([1, 2, 3, 4, 5], 3).glom().collect() [[1], [2, 3], [4, 5]] >>> sc.parallelize([1, 2, 3, 4, 5], 3).coalesce(1).glom().collect() [[1, 2, 3, 4, 5]] """ if shuffle: # Decrease the batch size in order to distribute evenly the elements across output # partitions. Otherwise, repartition will possibly produce highly skewed partitions. batchSize = min(10, self.ctx._batchSize or 1024) ser = BatchedSerializer(PickleSerializer(), batchSize) selfCopy = self._reserialize(ser) jrdd_deserializer = selfCopy._jrdd_deserializer jrdd = selfCopy._jrdd.coalesce(numPartitions, shuffle) else: jrdd_deserializer = self._jrdd_deserializer jrdd = self._jrdd.coalesce(numPartitions, shuffle) return RDD(jrdd, self.ctx, jrdd_deserializer)
python
def coalesce(self, numPartitions, shuffle=False): """ Return a new RDD that is reduced into `numPartitions` partitions. >>> sc.parallelize([1, 2, 3, 4, 5], 3).glom().collect() [[1], [2, 3], [4, 5]] >>> sc.parallelize([1, 2, 3, 4, 5], 3).coalesce(1).glom().collect() [[1, 2, 3, 4, 5]] """ if shuffle: # Decrease the batch size in order to distribute evenly the elements across output # partitions. Otherwise, repartition will possibly produce highly skewed partitions. batchSize = min(10, self.ctx._batchSize or 1024) ser = BatchedSerializer(PickleSerializer(), batchSize) selfCopy = self._reserialize(ser) jrdd_deserializer = selfCopy._jrdd_deserializer jrdd = selfCopy._jrdd.coalesce(numPartitions, shuffle) else: jrdd_deserializer = self._jrdd_deserializer jrdd = self._jrdd.coalesce(numPartitions, shuffle) return RDD(jrdd, self.ctx, jrdd_deserializer)
[ "def", "coalesce", "(", "self", ",", "numPartitions", ",", "shuffle", "=", "False", ")", ":", "if", "shuffle", ":", "# Decrease the batch size in order to distribute evenly the elements across output", "# partitions. Otherwise, repartition will possibly produce highly skewed partitions.", "batchSize", "=", "min", "(", "10", ",", "self", ".", "ctx", ".", "_batchSize", "or", "1024", ")", "ser", "=", "BatchedSerializer", "(", "PickleSerializer", "(", ")", ",", "batchSize", ")", "selfCopy", "=", "self", ".", "_reserialize", "(", "ser", ")", "jrdd_deserializer", "=", "selfCopy", ".", "_jrdd_deserializer", "jrdd", "=", "selfCopy", ".", "_jrdd", ".", "coalesce", "(", "numPartitions", ",", "shuffle", ")", "else", ":", "jrdd_deserializer", "=", "self", ".", "_jrdd_deserializer", "jrdd", "=", "self", ".", "_jrdd", ".", "coalesce", "(", "numPartitions", ",", "shuffle", ")", "return", "RDD", "(", "jrdd", ",", "self", ".", "ctx", ",", "jrdd_deserializer", ")" ]
Return a new RDD that is reduced into `numPartitions` partitions. >>> sc.parallelize([1, 2, 3, 4, 5], 3).glom().collect() [[1], [2, 3], [4, 5]] >>> sc.parallelize([1, 2, 3, 4, 5], 3).coalesce(1).glom().collect() [[1, 2, 3, 4, 5]]
[ "Return", "a", "new", "RDD", "that", "is", "reduced", "into", "numPartitions", "partitions", "." ]
618d6bff71073c8c93501ab7392c3cc579730f0b
https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/rdd.py#L2095-L2115
19,031
apache/spark
python/pyspark/rdd.py
RDD.zipWithIndex
def zipWithIndex(self): """ Zips this RDD with its element indices. The ordering is first based on the partition index and then the ordering of items within each partition. So the first item in the first partition gets index 0, and the last item in the last partition receives the largest index. This method needs to trigger a spark job when this RDD contains more than one partitions. >>> sc.parallelize(["a", "b", "c", "d"], 3).zipWithIndex().collect() [('a', 0), ('b', 1), ('c', 2), ('d', 3)] """ starts = [0] if self.getNumPartitions() > 1: nums = self.mapPartitions(lambda it: [sum(1 for i in it)]).collect() for i in range(len(nums) - 1): starts.append(starts[-1] + nums[i]) def func(k, it): for i, v in enumerate(it, starts[k]): yield v, i return self.mapPartitionsWithIndex(func)
python
def zipWithIndex(self): """ Zips this RDD with its element indices. The ordering is first based on the partition index and then the ordering of items within each partition. So the first item in the first partition gets index 0, and the last item in the last partition receives the largest index. This method needs to trigger a spark job when this RDD contains more than one partitions. >>> sc.parallelize(["a", "b", "c", "d"], 3).zipWithIndex().collect() [('a', 0), ('b', 1), ('c', 2), ('d', 3)] """ starts = [0] if self.getNumPartitions() > 1: nums = self.mapPartitions(lambda it: [sum(1 for i in it)]).collect() for i in range(len(nums) - 1): starts.append(starts[-1] + nums[i]) def func(k, it): for i, v in enumerate(it, starts[k]): yield v, i return self.mapPartitionsWithIndex(func)
[ "def", "zipWithIndex", "(", "self", ")", ":", "starts", "=", "[", "0", "]", "if", "self", ".", "getNumPartitions", "(", ")", ">", "1", ":", "nums", "=", "self", ".", "mapPartitions", "(", "lambda", "it", ":", "[", "sum", "(", "1", "for", "i", "in", "it", ")", "]", ")", ".", "collect", "(", ")", "for", "i", "in", "range", "(", "len", "(", "nums", ")", "-", "1", ")", ":", "starts", ".", "append", "(", "starts", "[", "-", "1", "]", "+", "nums", "[", "i", "]", ")", "def", "func", "(", "k", ",", "it", ")", ":", "for", "i", ",", "v", "in", "enumerate", "(", "it", ",", "starts", "[", "k", "]", ")", ":", "yield", "v", ",", "i", "return", "self", ".", "mapPartitionsWithIndex", "(", "func", ")" ]
Zips this RDD with its element indices. The ordering is first based on the partition index and then the ordering of items within each partition. So the first item in the first partition gets index 0, and the last item in the last partition receives the largest index. This method needs to trigger a Spark job when this RDD contains more than one partition. >>> sc.parallelize(["a", "b", "c", "d"], 3).zipWithIndex().collect() [('a', 0), ('b', 1), ('c', 2), ('d', 3)]
[ "Zips", "this", "RDD", "with", "its", "element", "indices", "." ]
618d6bff71073c8c93501ab7392c3cc579730f0b
https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/rdd.py#L2159-L2184
19,032
apache/spark
python/pyspark/rdd.py
RDD.zipWithUniqueId
def zipWithUniqueId(self): """ Zips this RDD with generated unique Long ids. Items in the kth partition will get ids k, n+k, 2*n+k, ..., where n is the number of partitions. So there may exist gaps, but this method won't trigger a spark job, which is different from L{zipWithIndex} >>> sc.parallelize(["a", "b", "c", "d", "e"], 3).zipWithUniqueId().collect() [('a', 0), ('b', 1), ('c', 4), ('d', 2), ('e', 5)] """ n = self.getNumPartitions() def func(k, it): for i, v in enumerate(it): yield v, i * n + k return self.mapPartitionsWithIndex(func)
python
def zipWithUniqueId(self): """ Zips this RDD with generated unique Long ids. Items in the kth partition will get ids k, n+k, 2*n+k, ..., where n is the number of partitions. So there may exist gaps, but this method won't trigger a spark job, which is different from L{zipWithIndex} >>> sc.parallelize(["a", "b", "c", "d", "e"], 3).zipWithUniqueId().collect() [('a', 0), ('b', 1), ('c', 4), ('d', 2), ('e', 5)] """ n = self.getNumPartitions() def func(k, it): for i, v in enumerate(it): yield v, i * n + k return self.mapPartitionsWithIndex(func)
[ "def", "zipWithUniqueId", "(", "self", ")", ":", "n", "=", "self", ".", "getNumPartitions", "(", ")", "def", "func", "(", "k", ",", "it", ")", ":", "for", "i", ",", "v", "in", "enumerate", "(", "it", ")", ":", "yield", "v", ",", "i", "*", "n", "+", "k", "return", "self", ".", "mapPartitionsWithIndex", "(", "func", ")" ]
Zips this RDD with generated unique Long ids. Items in the kth partition will get ids k, n+k, 2*n+k, ..., where n is the number of partitions. So there may exist gaps, but this method won't trigger a spark job, which is different from L{zipWithIndex} >>> sc.parallelize(["a", "b", "c", "d", "e"], 3).zipWithUniqueId().collect() [('a', 0), ('b', 1), ('c', 4), ('d', 2), ('e', 5)]
[ "Zips", "this", "RDD", "with", "generated", "unique", "Long", "ids", "." ]
618d6bff71073c8c93501ab7392c3cc579730f0b
https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/rdd.py#L2186-L2204
19,033
apache/spark
python/pyspark/rdd.py
RDD.getStorageLevel
def getStorageLevel(self): """ Get the RDD's current storage level. >>> rdd1 = sc.parallelize([1,2]) >>> rdd1.getStorageLevel() StorageLevel(False, False, False, False, 1) >>> print(rdd1.getStorageLevel()) Serialized 1x Replicated """ java_storage_level = self._jrdd.getStorageLevel() storage_level = StorageLevel(java_storage_level.useDisk(), java_storage_level.useMemory(), java_storage_level.useOffHeap(), java_storage_level.deserialized(), java_storage_level.replication()) return storage_level
python
def getStorageLevel(self): """ Get the RDD's current storage level. >>> rdd1 = sc.parallelize([1,2]) >>> rdd1.getStorageLevel() StorageLevel(False, False, False, False, 1) >>> print(rdd1.getStorageLevel()) Serialized 1x Replicated """ java_storage_level = self._jrdd.getStorageLevel() storage_level = StorageLevel(java_storage_level.useDisk(), java_storage_level.useMemory(), java_storage_level.useOffHeap(), java_storage_level.deserialized(), java_storage_level.replication()) return storage_level
[ "def", "getStorageLevel", "(", "self", ")", ":", "java_storage_level", "=", "self", ".", "_jrdd", ".", "getStorageLevel", "(", ")", "storage_level", "=", "StorageLevel", "(", "java_storage_level", ".", "useDisk", "(", ")", ",", "java_storage_level", ".", "useMemory", "(", ")", ",", "java_storage_level", ".", "useOffHeap", "(", ")", ",", "java_storage_level", ".", "deserialized", "(", ")", ",", "java_storage_level", ".", "replication", "(", ")", ")", "return", "storage_level" ]
Get the RDD's current storage level. >>> rdd1 = sc.parallelize([1,2]) >>> rdd1.getStorageLevel() StorageLevel(False, False, False, False, 1) >>> print(rdd1.getStorageLevel()) Serialized 1x Replicated
[ "Get", "the", "RDD", "s", "current", "storage", "level", "." ]
618d6bff71073c8c93501ab7392c3cc579730f0b
https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/rdd.py#L2234-L2250
19,034
apache/spark
python/pyspark/rdd.py
RDD.lookup
def lookup(self, key): """ Return the list of values in the RDD for key `key`. This operation is done efficiently if the RDD has a known partitioner by only searching the partition that the key maps to. >>> l = range(1000) >>> rdd = sc.parallelize(zip(l, l), 10) >>> rdd.lookup(42) # slow [42] >>> sorted = rdd.sortByKey() >>> sorted.lookup(42) # fast [42] >>> sorted.lookup(1024) [] >>> rdd2 = sc.parallelize([(('a', 'b'), 'c')]).groupByKey() >>> list(rdd2.lookup(('a', 'b'))[0]) ['c'] """ values = self.filter(lambda kv: kv[0] == key).values() if self.partitioner is not None: return self.ctx.runJob(values, lambda x: x, [self.partitioner(key)]) return values.collect()
python
def lookup(self, key): """ Return the list of values in the RDD for key `key`. This operation is done efficiently if the RDD has a known partitioner by only searching the partition that the key maps to. >>> l = range(1000) >>> rdd = sc.parallelize(zip(l, l), 10) >>> rdd.lookup(42) # slow [42] >>> sorted = rdd.sortByKey() >>> sorted.lookup(42) # fast [42] >>> sorted.lookup(1024) [] >>> rdd2 = sc.parallelize([(('a', 'b'), 'c')]).groupByKey() >>> list(rdd2.lookup(('a', 'b'))[0]) ['c'] """ values = self.filter(lambda kv: kv[0] == key).values() if self.partitioner is not None: return self.ctx.runJob(values, lambda x: x, [self.partitioner(key)]) return values.collect()
[ "def", "lookup", "(", "self", ",", "key", ")", ":", "values", "=", "self", ".", "filter", "(", "lambda", "kv", ":", "kv", "[", "0", "]", "==", "key", ")", ".", "values", "(", ")", "if", "self", ".", "partitioner", "is", "not", "None", ":", "return", "self", ".", "ctx", ".", "runJob", "(", "values", ",", "lambda", "x", ":", "x", ",", "[", "self", ".", "partitioner", "(", "key", ")", "]", ")", "return", "values", ".", "collect", "(", ")" ]
Return the list of values in the RDD for key `key`. This operation is done efficiently if the RDD has a known partitioner by only searching the partition that the key maps to. >>> l = range(1000) >>> rdd = sc.parallelize(zip(l, l), 10) >>> rdd.lookup(42) # slow [42] >>> sorted = rdd.sortByKey() >>> sorted.lookup(42) # fast [42] >>> sorted.lookup(1024) [] >>> rdd2 = sc.parallelize([(('a', 'b'), 'c')]).groupByKey() >>> list(rdd2.lookup(('a', 'b'))[0]) ['c']
[ "Return", "the", "list", "of", "values", "in", "the", "RDD", "for", "key", "key", ".", "This", "operation", "is", "done", "efficiently", "if", "the", "RDD", "has", "a", "known", "partitioner", "by", "only", "searching", "the", "partition", "that", "the", "key", "maps", "to", "." ]
618d6bff71073c8c93501ab7392c3cc579730f0b
https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/rdd.py#L2267-L2291
19,035
apache/spark
python/pyspark/rdd.py
RDD.toLocalIterator
def toLocalIterator(self): """ Return an iterator that contains all of the elements in this RDD. The iterator will consume as much memory as the largest partition in this RDD. >>> rdd = sc.parallelize(range(10)) >>> [x for x in rdd.toLocalIterator()] [0, 1, 2, 3, 4, 5, 6, 7, 8, 9] """ with SCCallSiteSync(self.context) as css: sock_info = self.ctx._jvm.PythonRDD.toLocalIteratorAndServe(self._jrdd.rdd()) return _load_from_socket(sock_info, self._jrdd_deserializer)
python
def toLocalIterator(self): """ Return an iterator that contains all of the elements in this RDD. The iterator will consume as much memory as the largest partition in this RDD. >>> rdd = sc.parallelize(range(10)) >>> [x for x in rdd.toLocalIterator()] [0, 1, 2, 3, 4, 5, 6, 7, 8, 9] """ with SCCallSiteSync(self.context) as css: sock_info = self.ctx._jvm.PythonRDD.toLocalIteratorAndServe(self._jrdd.rdd()) return _load_from_socket(sock_info, self._jrdd_deserializer)
[ "def", "toLocalIterator", "(", "self", ")", ":", "with", "SCCallSiteSync", "(", "self", ".", "context", ")", "as", "css", ":", "sock_info", "=", "self", ".", "ctx", ".", "_jvm", ".", "PythonRDD", ".", "toLocalIteratorAndServe", "(", "self", ".", "_jrdd", ".", "rdd", "(", ")", ")", "return", "_load_from_socket", "(", "sock_info", ",", "self", ".", "_jrdd_deserializer", ")" ]
Return an iterator that contains all of the elements in this RDD. The iterator will consume as much memory as the largest partition in this RDD. >>> rdd = sc.parallelize(range(10)) >>> [x for x in rdd.toLocalIterator()] [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]
[ "Return", "an", "iterator", "that", "contains", "all", "of", "the", "elements", "in", "this", "RDD", ".", "The", "iterator", "will", "consume", "as", "much", "memory", "as", "the", "largest", "partition", "in", "this", "RDD", "." ]
618d6bff71073c8c93501ab7392c3cc579730f0b
https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/rdd.py#L2378-L2389
19,036
apache/spark
python/pyspark/sql/column.py
_unary_op
def _unary_op(name, doc="unary operator"):
    """ Create a method for given unary operator """
    def _(self):
        jc = getattr(self._jc, name)()
        return Column(jc)
    _.__doc__ = doc
    return _
python
def _unary_op(name, doc="unary operator"):
    """ Create a method for given unary operator """
    def _(self):
        jc = getattr(self._jc, name)()
        return Column(jc)
    _.__doc__ = doc
    return _
[ "def", "_unary_op", "(", "name", ",", "doc", "=", "\"unary operator\"", ")", ":", "def", "_", "(", "self", ")", ":", "jc", "=", "getattr", "(", "self", ".", "_jc", ",", "name", ")", "(", ")", "return", "Column", "(", "jc", ")", "_", ".", "__doc__", "=", "doc", "return", "_" ]
Create a method for given unary operator
[ "Create", "a", "method", "for", "given", "unary", "operator" ]
618d6bff71073c8c93501ab7392c3cc579730f0b
https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/sql/column.py#L81-L87
19,037
apache/spark
python/pyspark/sql/column.py
_bin_op
def _bin_op(name, doc="binary operator"):
    """ Create a method for given binary operator """
    def _(self, other):
        jc = other._jc if isinstance(other, Column) else other
        njc = getattr(self._jc, name)(jc)
        return Column(njc)
    _.__doc__ = doc
    return _
python
def _bin_op(name, doc="binary operator"):
    """ Create a method for given binary operator """
    def _(self, other):
        jc = other._jc if isinstance(other, Column) else other
        njc = getattr(self._jc, name)(jc)
        return Column(njc)
    _.__doc__ = doc
    return _
[ "def", "_bin_op", "(", "name", ",", "doc", "=", "\"binary operator\"", ")", ":", "def", "_", "(", "self", ",", "other", ")", ":", "jc", "=", "other", ".", "_jc", "if", "isinstance", "(", "other", ",", "Column", ")", "else", "other", "njc", "=", "getattr", "(", "self", ".", "_jc", ",", "name", ")", "(", "jc", ")", "return", "Column", "(", "njc", ")", "_", ".", "__doc__", "=", "doc", "return", "_" ]
Create a method for given binary operator
[ "Create", "a", "method", "for", "given", "binary", "operator" ]
618d6bff71073c8c93501ab7392c3cc579730f0b
https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/sql/column.py#L110-L118
19,038
apache/spark
python/pyspark/sql/column.py
Column.isin
def isin(self, *cols):
    """
    A boolean expression that is evaluated to true if the value of this
    expression is contained by the evaluated values of the arguments.

    >>> df[df.name.isin("Bob", "Mike")].collect()
    [Row(age=5, name=u'Bob')]
    >>> df[df.age.isin([1, 2, 3])].collect()
    [Row(age=2, name=u'Alice')]
    """
    if len(cols) == 1 and isinstance(cols[0], (list, set)):
        cols = cols[0]
    cols = [c._jc if isinstance(c, Column) else _create_column_from_literal(c)
            for c in cols]
    sc = SparkContext._active_spark_context
    jc = getattr(self._jc, "isin")(_to_seq(sc, cols))
    return Column(jc)
python
def isin(self, *cols):
    """
    A boolean expression that is evaluated to true if the value of this
    expression is contained by the evaluated values of the arguments.

    >>> df[df.name.isin("Bob", "Mike")].collect()
    [Row(age=5, name=u'Bob')]
    >>> df[df.age.isin([1, 2, 3])].collect()
    [Row(age=2, name=u'Alice')]
    """
    if len(cols) == 1 and isinstance(cols[0], (list, set)):
        cols = cols[0]
    cols = [c._jc if isinstance(c, Column) else _create_column_from_literal(c)
            for c in cols]
    sc = SparkContext._active_spark_context
    jc = getattr(self._jc, "isin")(_to_seq(sc, cols))
    return Column(jc)
[ "def", "isin", "(", "self", ",", "*", "cols", ")", ":", "if", "len", "(", "cols", ")", "==", "1", "and", "isinstance", "(", "cols", "[", "0", "]", ",", "(", "list", ",", "set", ")", ")", ":", "cols", "=", "cols", "[", "0", "]", "cols", "=", "[", "c", ".", "_jc", "if", "isinstance", "(", "c", ",", "Column", ")", "else", "_create_column_from_literal", "(", "c", ")", "for", "c", "in", "cols", "]", "sc", "=", "SparkContext", ".", "_active_spark_context", "jc", "=", "getattr", "(", "self", ".", "_jc", ",", "\"isin\"", ")", "(", "_to_seq", "(", "sc", ",", "cols", ")", ")", "return", "Column", "(", "jc", ")" ]
A boolean expression that is evaluated to true if the value of this expression is contained by the evaluated values of the arguments. >>> df[df.name.isin("Bob", "Mike")].collect() [Row(age=5, name=u'Bob')] >>> df[df.age.isin([1, 2, 3])].collect() [Row(age=2, name=u'Alice')]
[ "A", "boolean", "expression", "that", "is", "evaluated", "to", "true", "if", "the", "value", "of", "this", "expression", "is", "contained", "by", "the", "evaluated", "values", "of", "the", "arguments", "." ]
618d6bff71073c8c93501ab7392c3cc579730f0b
https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/sql/column.py#L431-L446
19,039
apache/spark
python/pyspark/sql/column.py
Column.cast
def cast(self, dataType):
    """ Convert the column into type ``dataType``.

    >>> df.select(df.age.cast("string").alias('ages')).collect()
    [Row(ages=u'2'), Row(ages=u'5')]
    >>> df.select(df.age.cast(StringType()).alias('ages')).collect()
    [Row(ages=u'2'), Row(ages=u'5')]
    """
    if isinstance(dataType, basestring):
        jc = self._jc.cast(dataType)
    elif isinstance(dataType, DataType):
        from pyspark.sql import SparkSession
        spark = SparkSession.builder.getOrCreate()
        jdt = spark._jsparkSession.parseDataType(dataType.json())
        jc = self._jc.cast(jdt)
    else:
        raise TypeError("unexpected type: %s" % type(dataType))
    return Column(jc)
python
def cast(self, dataType):
    """ Convert the column into type ``dataType``.

    >>> df.select(df.age.cast("string").alias('ages')).collect()
    [Row(ages=u'2'), Row(ages=u'5')]
    >>> df.select(df.age.cast(StringType()).alias('ages')).collect()
    [Row(ages=u'2'), Row(ages=u'5')]
    """
    if isinstance(dataType, basestring):
        jc = self._jc.cast(dataType)
    elif isinstance(dataType, DataType):
        from pyspark.sql import SparkSession
        spark = SparkSession.builder.getOrCreate()
        jdt = spark._jsparkSession.parseDataType(dataType.json())
        jc = self._jc.cast(jdt)
    else:
        raise TypeError("unexpected type: %s" % type(dataType))
    return Column(jc)
[ "def", "cast", "(", "self", ",", "dataType", ")", ":", "if", "isinstance", "(", "dataType", ",", "basestring", ")", ":", "jc", "=", "self", ".", "_jc", ".", "cast", "(", "dataType", ")", "elif", "isinstance", "(", "dataType", ",", "DataType", ")", ":", "from", "pyspark", ".", "sql", "import", "SparkSession", "spark", "=", "SparkSession", ".", "builder", ".", "getOrCreate", "(", ")", "jdt", "=", "spark", ".", "_jsparkSession", ".", "parseDataType", "(", "dataType", ".", "json", "(", ")", ")", "jc", "=", "self", ".", "_jc", ".", "cast", "(", "jdt", ")", "else", ":", "raise", "TypeError", "(", "\"unexpected type: %s\"", "%", "type", "(", "dataType", ")", ")", "return", "Column", "(", "jc", ")" ]
Convert the column into type ``dataType``. >>> df.select(df.age.cast("string").alias('ages')).collect() [Row(ages=u'2'), Row(ages=u'5')] >>> df.select(df.age.cast(StringType()).alias('ages')).collect() [Row(ages=u'2'), Row(ages=u'5')]
[ "Convert", "the", "column", "into", "type", "dataType", "." ]
618d6bff71073c8c93501ab7392c3cc579730f0b
https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/sql/column.py#L576-L593
19,040
apache/spark
python/pyspark/sql/column.py
Column.over
def over(self, window):
    """
    Define a windowing column.

    :param window: a :class:`WindowSpec`
    :return: a Column

    >>> from pyspark.sql import Window
    >>> window = Window.partitionBy("name").orderBy("age").rowsBetween(-1, 1)
    >>> from pyspark.sql.functions import rank, min
    >>> # df.select(rank().over(window), min('age').over(window))
    """
    from pyspark.sql.window import WindowSpec
    if not isinstance(window, WindowSpec):
        raise TypeError("window should be WindowSpec")
    jc = self._jc.over(window._jspec)
    return Column(jc)
python
def over(self, window):
    """
    Define a windowing column.

    :param window: a :class:`WindowSpec`
    :return: a Column

    >>> from pyspark.sql import Window
    >>> window = Window.partitionBy("name").orderBy("age").rowsBetween(-1, 1)
    >>> from pyspark.sql.functions import rank, min
    >>> # df.select(rank().over(window), min('age').over(window))
    """
    from pyspark.sql.window import WindowSpec
    if not isinstance(window, WindowSpec):
        raise TypeError("window should be WindowSpec")
    jc = self._jc.over(window._jspec)
    return Column(jc)
[ "def", "over", "(", "self", ",", "window", ")", ":", "from", "pyspark", ".", "sql", ".", "window", "import", "WindowSpec", "if", "not", "isinstance", "(", "window", ",", "WindowSpec", ")", ":", "raise", "TypeError", "(", "\"window should be WindowSpec\"", ")", "jc", "=", "self", ".", "_jc", ".", "over", "(", "window", ".", "_jspec", ")", "return", "Column", "(", "jc", ")" ]
Define a windowing column. :param window: a :class:`WindowSpec` :return: a Column >>> from pyspark.sql import Window >>> window = Window.partitionBy("name").orderBy("age").rowsBetween(-1, 1) >>> from pyspark.sql.functions import rank, min >>> # df.select(rank().over(window), min('age').over(window))
[ "Define", "a", "windowing", "column", "." ]
618d6bff71073c8c93501ab7392c3cc579730f0b
https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/sql/column.py#L663-L679
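Editor's note: the doctest in the record above leaves the final select commented out. The short sketch below is an illustrative, editor-added example (not part of the dataset record) of applying a WindowSpec; it assumes an existing DataFrame `df` with `name` and `age` columns, as in the record's doctests.

    from pyspark.sql import Window
    from pyspark.sql.functions import min, rank

    # `df` is assumed to exist, e.g. the age/name DataFrame used by the doctests above.
    w_order = Window.partitionBy("name").orderBy("age")      # ordering-only window for rank()
    w_frame = w_order.rowsBetween(-1, 1)                     # sliding frame for the aggregate
    df.select("name", "age",
              rank().over(w_order).alias("rank"),
              min("age").over(w_frame).alias("min_age_nearby")).show()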
19,041
apache/spark
python/pyspark/mllib/feature.py
StandardScaler.fit
def fit(self, dataset):
    """
    Computes the mean and variance and stores as a model to be used
    for later scaling.

    :param dataset: The data used to compute the mean and variance
                    to build the transformation model.
    :return: a StandardScalerModel
    """
    dataset = dataset.map(_convert_to_vector)
    jmodel = callMLlibFunc("fitStandardScaler", self.withMean, self.withStd, dataset)
    return StandardScalerModel(jmodel)
python
def fit(self, dataset):
    """
    Computes the mean and variance and stores as a model to be used
    for later scaling.

    :param dataset: The data used to compute the mean and variance
                    to build the transformation model.
    :return: a StandardScalerModel
    """
    dataset = dataset.map(_convert_to_vector)
    jmodel = callMLlibFunc("fitStandardScaler", self.withMean, self.withStd, dataset)
    return StandardScalerModel(jmodel)
[ "def", "fit", "(", "self", ",", "dataset", ")", ":", "dataset", "=", "dataset", ".", "map", "(", "_convert_to_vector", ")", "jmodel", "=", "callMLlibFunc", "(", "\"fitStandardScaler\"", ",", "self", ".", "withMean", ",", "self", ".", "withStd", ",", "dataset", ")", "return", "StandardScalerModel", "(", "jmodel", ")" ]
Computes the mean and variance and stores as a model to be used for later scaling. :param dataset: The data used to compute the mean and variance to build the transformation model. :return: a StandardScalerModel
[ "Computes", "the", "mean", "and", "variance", "and", "stores", "as", "a", "model", "to", "be", "used", "for", "later", "scaling", "." ]
618d6bff71073c8c93501ab7392c3cc579730f0b
https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/mllib/feature.py#L240-L251
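For context, a minimal editor-added usage sketch of the StandardScaler API documented above (not part of the dataset record); it assumes a running SparkContext `sc`, as in the doctests elsewhere in this corpus.

    from pyspark.mllib.feature import StandardScaler
    from pyspark.mllib.linalg import Vectors

    # `sc` is an assumed, already-running SparkContext.
    data = sc.parallelize([Vectors.dense([1.0, 2.0]), Vectors.dense([3.0, 4.0])])
    scaler = StandardScaler(withMean=True, withStd=True)
    model = scaler.fit(data)          # computes per-column mean and variance
    scaled = model.transform(data)    # RDD of standardized vectors
    print(scaled.collect())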
19,042
apache/spark
python/pyspark/mllib/feature.py
ChiSqSelector.fit
def fit(self, data):
    """
    Returns a ChiSquared feature selector.

    :param data: an `RDD[LabeledPoint]` containing the labeled dataset
                 with categorical features. Real-valued features will be
                 treated as categorical for each distinct value.
                 Apply feature discretizer before using this function.
    """
    jmodel = callMLlibFunc("fitChiSqSelector", self.selectorType, self.numTopFeatures,
                           self.percentile, self.fpr, self.fdr, self.fwe, data)
    return ChiSqSelectorModel(jmodel)
python
def fit(self, data):
    """
    Returns a ChiSquared feature selector.

    :param data: an `RDD[LabeledPoint]` containing the labeled dataset
                 with categorical features. Real-valued features will be
                 treated as categorical for each distinct value.
                 Apply feature discretizer before using this function.
    """
    jmodel = callMLlibFunc("fitChiSqSelector", self.selectorType, self.numTopFeatures,
                           self.percentile, self.fpr, self.fdr, self.fwe, data)
    return ChiSqSelectorModel(jmodel)
[ "def", "fit", "(", "self", ",", "data", ")", ":", "jmodel", "=", "callMLlibFunc", "(", "\"fitChiSqSelector\"", ",", "self", ".", "selectorType", ",", "self", ".", "numTopFeatures", ",", "self", ".", "percentile", ",", "self", ".", "fpr", ",", "self", ".", "fdr", ",", "self", ".", "fwe", ",", "data", ")", "return", "ChiSqSelectorModel", "(", "jmodel", ")" ]
Returns a ChiSquared feature selector. :param data: an `RDD[LabeledPoint]` containing the labeled dataset with categorical features. Real-valued features will be treated as categorical for each distinct value. Apply feature discretizer before using this function.
[ "Returns", "a", "ChiSquared", "feature", "selector", "." ]
618d6bff71073c8c93501ab7392c3cc579730f0b
https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/mllib/feature.py#L383-L394
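A brief editor-added illustration of the ChiSqSelector API documented above (not part of the dataset record), again assuming a running SparkContext `sc`; the tiny labeled dataset is invented for the example.

    from pyspark.mllib.feature import ChiSqSelector
    from pyspark.mllib.linalg import Vectors
    from pyspark.mllib.regression import LabeledPoint

    # `sc` is an assumed, already-running SparkContext; the data is illustrative only.
    data = sc.parallelize([
        LabeledPoint(0.0, Vectors.dense([0.0, 1.0, 2.0])),
        LabeledPoint(1.0, Vectors.dense([1.0, 1.0, 3.0])),
        LabeledPoint(1.0, Vectors.dense([1.0, 0.0, 4.0])),
    ])
    selector = ChiSqSelector(numTopFeatures=1)      # keep the single most predictive feature
    model = selector.fit(data)
    print(model.transform(Vectors.dense([1.0, 0.0, 4.0])))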
19,043
apache/spark
python/pyspark/mllib/feature.py
IDF.fit
def fit(self, dataset):
    """
    Computes the inverse document frequency.

    :param dataset: an RDD of term frequency vectors
    """
    if not isinstance(dataset, RDD):
        raise TypeError("dataset should be an RDD of term frequency vectors")
    jmodel = callMLlibFunc("fitIDF", self.minDocFreq, dataset.map(_convert_to_vector))
    return IDFModel(jmodel)
python
def fit(self, dataset):
    """
    Computes the inverse document frequency.

    :param dataset: an RDD of term frequency vectors
    """
    if not isinstance(dataset, RDD):
        raise TypeError("dataset should be an RDD of term frequency vectors")
    jmodel = callMLlibFunc("fitIDF", self.minDocFreq, dataset.map(_convert_to_vector))
    return IDFModel(jmodel)
[ "def", "fit", "(", "self", ",", "dataset", ")", ":", "if", "not", "isinstance", "(", "dataset", ",", "RDD", ")", ":", "raise", "TypeError", "(", "\"dataset should be an RDD of term frequency vectors\"", ")", "jmodel", "=", "callMLlibFunc", "(", "\"fitIDF\"", ",", "self", ".", "minDocFreq", ",", "dataset", ".", "map", "(", "_convert_to_vector", ")", ")", "return", "IDFModel", "(", "jmodel", ")" ]
Computes the inverse document frequency. :param dataset: an RDD of term frequency vectors
[ "Computes", "the", "inverse", "document", "frequency", "." ]
618d6bff71073c8c93501ab7392c3cc579730f0b
https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/mllib/feature.py#L577-L586
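An editor-added sketch (not part of the dataset record) of the typical HashingTF + IDF pipeline around the fit method above, assuming a running SparkContext `sc`; the documents are invented for the example.

    from pyspark.mllib.feature import HashingTF, IDF

    # `sc` is an assumed, already-running SparkContext; the documents are illustrative only.
    docs = sc.parallelize([["spark", "rdd", "python"], ["spark", "sql"]])
    tf = HashingTF(numFeatures=1 << 10).transform(docs)   # term-frequency vectors
    tf.cache()                                            # IDF.fit and transform both traverse tf
    idf_model = IDF(minDocFreq=1).fit(tf)
    tfidf = idf_model.transform(tf)
    print(tfidf.collect())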
19,044
apache/spark
python/pyspark/mllib/feature.py
Word2VecModel.findSynonyms
def findSynonyms(self, word, num):
    """
    Find synonyms of a word

    :param word: a word or a vector representation of word
    :param num: number of synonyms to find
    :return: array of (word, cosineSimilarity)

    .. note:: Local use only
    """
    if not isinstance(word, basestring):
        word = _convert_to_vector(word)
    words, similarity = self.call("findSynonyms", word, num)
    return zip(words, similarity)
python
def findSynonyms(self, word, num):
    """
    Find synonyms of a word

    :param word: a word or a vector representation of word
    :param num: number of synonyms to find
    :return: array of (word, cosineSimilarity)

    .. note:: Local use only
    """
    if not isinstance(word, basestring):
        word = _convert_to_vector(word)
    words, similarity = self.call("findSynonyms", word, num)
    return zip(words, similarity)
[ "def", "findSynonyms", "(", "self", ",", "word", ",", "num", ")", ":", "if", "not", "isinstance", "(", "word", ",", "basestring", ")", ":", "word", "=", "_convert_to_vector", "(", "word", ")", "words", ",", "similarity", "=", "self", ".", "call", "(", "\"findSynonyms\"", ",", "word", ",", "num", ")", "return", "zip", "(", "words", ",", "similarity", ")" ]
Find synonyms of a word :param word: a word or a vector representation of word :param num: number of synonyms to find :return: array of (word, cosineSimilarity) .. note:: Local use only
[ "Find", "synonyms", "of", "a", "word" ]
618d6bff71073c8c93501ab7392c3cc579730f0b
https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/mllib/feature.py#L611-L624
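An editor-added sketch (not part of the dataset record) showing how a Word2VecModel is obtained and findSynonyms is called, assuming a running SparkContext `sc`; the toy corpus is invented and repeated so words clear the default minCount.

    from pyspark.mllib.feature import Word2Vec

    # `sc` is an assumed, already-running SparkContext; the sentences are illustrative only.
    sentences = sc.parallelize([
        ["spark", "makes", "big", "data", "simple"],
        ["spark", "runs", "on", "clusters"],
    ] * 50)
    model = Word2Vec().setVectorSize(10).setSeed(42).fit(sentences)
    for word, similarity in model.findSynonyms("spark", 2):
        print(word, similarity)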
19,045
apache/spark
python/pyspark/mllib/feature.py
ElementwiseProduct.transform
def transform(self, vector):
    """
    Computes the Hadamard product of the vector.
    """
    if isinstance(vector, RDD):
        vector = vector.map(_convert_to_vector)
    else:
        vector = _convert_to_vector(vector)
    return callMLlibFunc("elementwiseProductVector", self.scalingVector, vector)
python
def transform(self, vector):
    """
    Computes the Hadamard product of the vector.
    """
    if isinstance(vector, RDD):
        vector = vector.map(_convert_to_vector)
    else:
        vector = _convert_to_vector(vector)
    return callMLlibFunc("elementwiseProductVector", self.scalingVector, vector)
[ "def", "transform", "(", "self", ",", "vector", ")", ":", "if", "isinstance", "(", "vector", ",", "RDD", ")", ":", "vector", "=", "vector", ".", "map", "(", "_convert_to_vector", ")", "else", ":", "vector", "=", "_convert_to_vector", "(", "vector", ")", "return", "callMLlibFunc", "(", "\"elementwiseProductVector\"", ",", "self", ".", "scalingVector", ",", "vector", ")" ]
Computes the Hadamard product of the vector.
[ "Computes", "the", "Hadamard", "product", "of", "the", "vector", "." ]
618d6bff71073c8c93501ab7392c3cc579730f0b
https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/mllib/feature.py#L810-L819
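An editor-added illustration (not part of the dataset record) of ElementwiseProduct.transform on both a single vector and an RDD, matching the two branches in the code above; it assumes a running SparkContext `sc` and uses invented values.

    from pyspark.mllib.feature import ElementwiseProduct
    from pyspark.mllib.linalg import Vectors

    # `sc` is an assumed, already-running SparkContext; the vectors are illustrative only.
    ep = ElementwiseProduct(Vectors.dense([2.0, 0.5, 1.0]))
    print(ep.transform(Vectors.dense([1.0, 4.0, 3.0])))       # single vector -> [2.0, 2.0, 3.0]
    rdd = sc.parallelize([Vectors.dense([1.0, 4.0, 3.0])])
    print(ep.transform(rdd).collect())                        # RDD branch of transform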
19,046
apache/spark
python/pyspark/mllib/tree.py
DecisionTree.trainClassifier
def trainClassifier(cls, data, numClasses, categoricalFeaturesInfo, impurity="gini", maxDepth=5, maxBins=32, minInstancesPerNode=1, minInfoGain=0.0): """ Train a decision tree model for classification. :param data: Training data: RDD of LabeledPoint. Labels should take values {0, 1, ..., numClasses-1}. :param numClasses: Number of classes for classification. :param categoricalFeaturesInfo: Map storing arity of categorical features. An entry (n -> k) indicates that feature n is categorical with k categories indexed from 0: {0, 1, ..., k-1}. :param impurity: Criterion used for information gain calculation. Supported values: "gini" or "entropy". (default: "gini") :param maxDepth: Maximum depth of tree (e.g. depth 0 means 1 leaf node, depth 1 means 1 internal node + 2 leaf nodes). (default: 5) :param maxBins: Number of bins used for finding splits at each node. (default: 32) :param minInstancesPerNode: Minimum number of instances required at child nodes to create the parent split. (default: 1) :param minInfoGain: Minimum info gain required to create a split. (default: 0.0) :return: DecisionTreeModel. Example usage: >>> from numpy import array >>> from pyspark.mllib.regression import LabeledPoint >>> from pyspark.mllib.tree import DecisionTree >>> >>> data = [ ... LabeledPoint(0.0, [0.0]), ... LabeledPoint(1.0, [1.0]), ... LabeledPoint(1.0, [2.0]), ... LabeledPoint(1.0, [3.0]) ... ] >>> model = DecisionTree.trainClassifier(sc.parallelize(data), 2, {}) >>> print(model) DecisionTreeModel classifier of depth 1 with 3 nodes >>> print(model.toDebugString()) DecisionTreeModel classifier of depth 1 with 3 nodes If (feature 0 <= 0.5) Predict: 0.0 Else (feature 0 > 0.5) Predict: 1.0 <BLANKLINE> >>> model.predict(array([1.0])) 1.0 >>> model.predict(array([0.0])) 0.0 >>> rdd = sc.parallelize([[1.0], [0.0]]) >>> model.predict(rdd).collect() [1.0, 0.0] """ return cls._train(data, "classification", numClasses, categoricalFeaturesInfo, impurity, maxDepth, maxBins, minInstancesPerNode, minInfoGain)
python
def trainClassifier(cls, data, numClasses, categoricalFeaturesInfo, impurity="gini", maxDepth=5, maxBins=32, minInstancesPerNode=1, minInfoGain=0.0): """ Train a decision tree model for classification. :param data: Training data: RDD of LabeledPoint. Labels should take values {0, 1, ..., numClasses-1}. :param numClasses: Number of classes for classification. :param categoricalFeaturesInfo: Map storing arity of categorical features. An entry (n -> k) indicates that feature n is categorical with k categories indexed from 0: {0, 1, ..., k-1}. :param impurity: Criterion used for information gain calculation. Supported values: "gini" or "entropy". (default: "gini") :param maxDepth: Maximum depth of tree (e.g. depth 0 means 1 leaf node, depth 1 means 1 internal node + 2 leaf nodes). (default: 5) :param maxBins: Number of bins used for finding splits at each node. (default: 32) :param minInstancesPerNode: Minimum number of instances required at child nodes to create the parent split. (default: 1) :param minInfoGain: Minimum info gain required to create a split. (default: 0.0) :return: DecisionTreeModel. Example usage: >>> from numpy import array >>> from pyspark.mllib.regression import LabeledPoint >>> from pyspark.mllib.tree import DecisionTree >>> >>> data = [ ... LabeledPoint(0.0, [0.0]), ... LabeledPoint(1.0, [1.0]), ... LabeledPoint(1.0, [2.0]), ... LabeledPoint(1.0, [3.0]) ... ] >>> model = DecisionTree.trainClassifier(sc.parallelize(data), 2, {}) >>> print(model) DecisionTreeModel classifier of depth 1 with 3 nodes >>> print(model.toDebugString()) DecisionTreeModel classifier of depth 1 with 3 nodes If (feature 0 <= 0.5) Predict: 0.0 Else (feature 0 > 0.5) Predict: 1.0 <BLANKLINE> >>> model.predict(array([1.0])) 1.0 >>> model.predict(array([0.0])) 0.0 >>> rdd = sc.parallelize([[1.0], [0.0]]) >>> model.predict(rdd).collect() [1.0, 0.0] """ return cls._train(data, "classification", numClasses, categoricalFeaturesInfo, impurity, maxDepth, maxBins, minInstancesPerNode, minInfoGain)
[ "def", "trainClassifier", "(", "cls", ",", "data", ",", "numClasses", ",", "categoricalFeaturesInfo", ",", "impurity", "=", "\"gini\"", ",", "maxDepth", "=", "5", ",", "maxBins", "=", "32", ",", "minInstancesPerNode", "=", "1", ",", "minInfoGain", "=", "0.0", ")", ":", "return", "cls", ".", "_train", "(", "data", ",", "\"classification\"", ",", "numClasses", ",", "categoricalFeaturesInfo", ",", "impurity", ",", "maxDepth", ",", "maxBins", ",", "minInstancesPerNode", ",", "minInfoGain", ")" ]
Train a decision tree model for classification. :param data: Training data: RDD of LabeledPoint. Labels should take values {0, 1, ..., numClasses-1}. :param numClasses: Number of classes for classification. :param categoricalFeaturesInfo: Map storing arity of categorical features. An entry (n -> k) indicates that feature n is categorical with k categories indexed from 0: {0, 1, ..., k-1}. :param impurity: Criterion used for information gain calculation. Supported values: "gini" or "entropy". (default: "gini") :param maxDepth: Maximum depth of tree (e.g. depth 0 means 1 leaf node, depth 1 means 1 internal node + 2 leaf nodes). (default: 5) :param maxBins: Number of bins used for finding splits at each node. (default: 32) :param minInstancesPerNode: Minimum number of instances required at child nodes to create the parent split. (default: 1) :param minInfoGain: Minimum info gain required to create a split. (default: 0.0) :return: DecisionTreeModel. Example usage: >>> from numpy import array >>> from pyspark.mllib.regression import LabeledPoint >>> from pyspark.mllib.tree import DecisionTree >>> >>> data = [ ... LabeledPoint(0.0, [0.0]), ... LabeledPoint(1.0, [1.0]), ... LabeledPoint(1.0, [2.0]), ... LabeledPoint(1.0, [3.0]) ... ] >>> model = DecisionTree.trainClassifier(sc.parallelize(data), 2, {}) >>> print(model) DecisionTreeModel classifier of depth 1 with 3 nodes >>> print(model.toDebugString()) DecisionTreeModel classifier of depth 1 with 3 nodes If (feature 0 <= 0.5) Predict: 0.0 Else (feature 0 > 0.5) Predict: 1.0 <BLANKLINE> >>> model.predict(array([1.0])) 1.0 >>> model.predict(array([0.0])) 0.0 >>> rdd = sc.parallelize([[1.0], [0.0]]) >>> model.predict(rdd).collect() [1.0, 0.0]
[ "Train", "a", "decision", "tree", "model", "for", "classification", "." ]
618d6bff71073c8c93501ab7392c3cc579730f0b
https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/mllib/tree.py#L149-L217
19,047
apache/spark
python/pyspark/mllib/tree.py
DecisionTree.trainRegressor
def trainRegressor(cls, data, categoricalFeaturesInfo, impurity="variance", maxDepth=5, maxBins=32, minInstancesPerNode=1, minInfoGain=0.0): """ Train a decision tree model for regression. :param data: Training data: RDD of LabeledPoint. Labels are real numbers. :param categoricalFeaturesInfo: Map storing arity of categorical features. An entry (n -> k) indicates that feature n is categorical with k categories indexed from 0: {0, 1, ..., k-1}. :param impurity: Criterion used for information gain calculation. The only supported value for regression is "variance". (default: "variance") :param maxDepth: Maximum depth of tree (e.g. depth 0 means 1 leaf node, depth 1 means 1 internal node + 2 leaf nodes). (default: 5) :param maxBins: Number of bins used for finding splits at each node. (default: 32) :param minInstancesPerNode: Minimum number of instances required at child nodes to create the parent split. (default: 1) :param minInfoGain: Minimum info gain required to create a split. (default: 0.0) :return: DecisionTreeModel. Example usage: >>> from pyspark.mllib.regression import LabeledPoint >>> from pyspark.mllib.tree import DecisionTree >>> from pyspark.mllib.linalg import SparseVector >>> >>> sparse_data = [ ... LabeledPoint(0.0, SparseVector(2, {0: 0.0})), ... LabeledPoint(1.0, SparseVector(2, {1: 1.0})), ... LabeledPoint(0.0, SparseVector(2, {0: 0.0})), ... LabeledPoint(1.0, SparseVector(2, {1: 2.0})) ... ] >>> >>> model = DecisionTree.trainRegressor(sc.parallelize(sparse_data), {}) >>> model.predict(SparseVector(2, {1: 1.0})) 1.0 >>> model.predict(SparseVector(2, {1: 0.0})) 0.0 >>> rdd = sc.parallelize([[0.0, 1.0], [0.0, 0.0]]) >>> model.predict(rdd).collect() [1.0, 0.0] """ return cls._train(data, "regression", 0, categoricalFeaturesInfo, impurity, maxDepth, maxBins, minInstancesPerNode, minInfoGain)
python
def trainRegressor(cls, data, categoricalFeaturesInfo, impurity="variance", maxDepth=5, maxBins=32, minInstancesPerNode=1, minInfoGain=0.0): """ Train a decision tree model for regression. :param data: Training data: RDD of LabeledPoint. Labels are real numbers. :param categoricalFeaturesInfo: Map storing arity of categorical features. An entry (n -> k) indicates that feature n is categorical with k categories indexed from 0: {0, 1, ..., k-1}. :param impurity: Criterion used for information gain calculation. The only supported value for regression is "variance". (default: "variance") :param maxDepth: Maximum depth of tree (e.g. depth 0 means 1 leaf node, depth 1 means 1 internal node + 2 leaf nodes). (default: 5) :param maxBins: Number of bins used for finding splits at each node. (default: 32) :param minInstancesPerNode: Minimum number of instances required at child nodes to create the parent split. (default: 1) :param minInfoGain: Minimum info gain required to create a split. (default: 0.0) :return: DecisionTreeModel. Example usage: >>> from pyspark.mllib.regression import LabeledPoint >>> from pyspark.mllib.tree import DecisionTree >>> from pyspark.mllib.linalg import SparseVector >>> >>> sparse_data = [ ... LabeledPoint(0.0, SparseVector(2, {0: 0.0})), ... LabeledPoint(1.0, SparseVector(2, {1: 1.0})), ... LabeledPoint(0.0, SparseVector(2, {0: 0.0})), ... LabeledPoint(1.0, SparseVector(2, {1: 2.0})) ... ] >>> >>> model = DecisionTree.trainRegressor(sc.parallelize(sparse_data), {}) >>> model.predict(SparseVector(2, {1: 1.0})) 1.0 >>> model.predict(SparseVector(2, {1: 0.0})) 0.0 >>> rdd = sc.parallelize([[0.0, 1.0], [0.0, 0.0]]) >>> model.predict(rdd).collect() [1.0, 0.0] """ return cls._train(data, "regression", 0, categoricalFeaturesInfo, impurity, maxDepth, maxBins, minInstancesPerNode, minInfoGain)
[ "def", "trainRegressor", "(", "cls", ",", "data", ",", "categoricalFeaturesInfo", ",", "impurity", "=", "\"variance\"", ",", "maxDepth", "=", "5", ",", "maxBins", "=", "32", ",", "minInstancesPerNode", "=", "1", ",", "minInfoGain", "=", "0.0", ")", ":", "return", "cls", ".", "_train", "(", "data", ",", "\"regression\"", ",", "0", ",", "categoricalFeaturesInfo", ",", "impurity", ",", "maxDepth", ",", "maxBins", ",", "minInstancesPerNode", ",", "minInfoGain", ")" ]
Train a decision tree model for regression. :param data: Training data: RDD of LabeledPoint. Labels are real numbers. :param categoricalFeaturesInfo: Map storing arity of categorical features. An entry (n -> k) indicates that feature n is categorical with k categories indexed from 0: {0, 1, ..., k-1}. :param impurity: Criterion used for information gain calculation. The only supported value for regression is "variance". (default: "variance") :param maxDepth: Maximum depth of tree (e.g. depth 0 means 1 leaf node, depth 1 means 1 internal node + 2 leaf nodes). (default: 5) :param maxBins: Number of bins used for finding splits at each node. (default: 32) :param minInstancesPerNode: Minimum number of instances required at child nodes to create the parent split. (default: 1) :param minInfoGain: Minimum info gain required to create a split. (default: 0.0) :return: DecisionTreeModel. Example usage: >>> from pyspark.mllib.regression import LabeledPoint >>> from pyspark.mllib.tree import DecisionTree >>> from pyspark.mllib.linalg import SparseVector >>> >>> sparse_data = [ ... LabeledPoint(0.0, SparseVector(2, {0: 0.0})), ... LabeledPoint(1.0, SparseVector(2, {1: 1.0})), ... LabeledPoint(0.0, SparseVector(2, {0: 0.0})), ... LabeledPoint(1.0, SparseVector(2, {1: 2.0})) ... ] >>> >>> model = DecisionTree.trainRegressor(sc.parallelize(sparse_data), {}) >>> model.predict(SparseVector(2, {1: 1.0})) 1.0 >>> model.predict(SparseVector(2, {1: 0.0})) 0.0 >>> rdd = sc.parallelize([[0.0, 1.0], [0.0, 0.0]]) >>> model.predict(rdd).collect() [1.0, 0.0]
[ "Train", "a", "decision", "tree", "model", "for", "regression", "." ]
618d6bff71073c8c93501ab7392c3cc579730f0b
https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/mllib/tree.py#L221-L277
19,048
apache/spark
python/pyspark/mllib/tree.py
RandomForest.trainClassifier
def trainClassifier(cls, data, numClasses, categoricalFeaturesInfo, numTrees, featureSubsetStrategy="auto", impurity="gini", maxDepth=4, maxBins=32, seed=None): """ Train a random forest model for binary or multiclass classification. :param data: Training dataset: RDD of LabeledPoint. Labels should take values {0, 1, ..., numClasses-1}. :param numClasses: Number of classes for classification. :param categoricalFeaturesInfo: Map storing arity of categorical features. An entry (n -> k) indicates that feature n is categorical with k categories indexed from 0: {0, 1, ..., k-1}. :param numTrees: Number of trees in the random forest. :param featureSubsetStrategy: Number of features to consider for splits at each node. Supported values: "auto", "all", "sqrt", "log2", "onethird". If "auto" is set, this parameter is set based on numTrees: if numTrees == 1, set to "all"; if numTrees > 1 (forest) set to "sqrt". (default: "auto") :param impurity: Criterion used for information gain calculation. Supported values: "gini" or "entropy". (default: "gini") :param maxDepth: Maximum depth of tree (e.g. depth 0 means 1 leaf node, depth 1 means 1 internal node + 2 leaf nodes). (default: 4) :param maxBins: Maximum number of bins used for splitting features. (default: 32) :param seed: Random seed for bootstrapping and choosing feature subsets. Set as None to generate seed based on system time. (default: None) :return: RandomForestModel that can be used for prediction. Example usage: >>> from pyspark.mllib.regression import LabeledPoint >>> from pyspark.mllib.tree import RandomForest >>> >>> data = [ ... LabeledPoint(0.0, [0.0]), ... LabeledPoint(0.0, [1.0]), ... LabeledPoint(1.0, [2.0]), ... LabeledPoint(1.0, [3.0]) ... ] >>> model = RandomForest.trainClassifier(sc.parallelize(data), 2, {}, 3, seed=42) >>> model.numTrees() 3 >>> model.totalNumNodes() 7 >>> print(model) TreeEnsembleModel classifier with 3 trees <BLANKLINE> >>> print(model.toDebugString()) TreeEnsembleModel classifier with 3 trees <BLANKLINE> Tree 0: Predict: 1.0 Tree 1: If (feature 0 <= 1.5) Predict: 0.0 Else (feature 0 > 1.5) Predict: 1.0 Tree 2: If (feature 0 <= 1.5) Predict: 0.0 Else (feature 0 > 1.5) Predict: 1.0 <BLANKLINE> >>> model.predict([2.0]) 1.0 >>> model.predict([0.0]) 0.0 >>> rdd = sc.parallelize([[3.0], [1.0]]) >>> model.predict(rdd).collect() [1.0, 0.0] """ return cls._train(data, "classification", numClasses, categoricalFeaturesInfo, numTrees, featureSubsetStrategy, impurity, maxDepth, maxBins, seed)
python
def trainClassifier(cls, data, numClasses, categoricalFeaturesInfo, numTrees, featureSubsetStrategy="auto", impurity="gini", maxDepth=4, maxBins=32, seed=None): """ Train a random forest model for binary or multiclass classification. :param data: Training dataset: RDD of LabeledPoint. Labels should take values {0, 1, ..., numClasses-1}. :param numClasses: Number of classes for classification. :param categoricalFeaturesInfo: Map storing arity of categorical features. An entry (n -> k) indicates that feature n is categorical with k categories indexed from 0: {0, 1, ..., k-1}. :param numTrees: Number of trees in the random forest. :param featureSubsetStrategy: Number of features to consider for splits at each node. Supported values: "auto", "all", "sqrt", "log2", "onethird". If "auto" is set, this parameter is set based on numTrees: if numTrees == 1, set to "all"; if numTrees > 1 (forest) set to "sqrt". (default: "auto") :param impurity: Criterion used for information gain calculation. Supported values: "gini" or "entropy". (default: "gini") :param maxDepth: Maximum depth of tree (e.g. depth 0 means 1 leaf node, depth 1 means 1 internal node + 2 leaf nodes). (default: 4) :param maxBins: Maximum number of bins used for splitting features. (default: 32) :param seed: Random seed for bootstrapping and choosing feature subsets. Set as None to generate seed based on system time. (default: None) :return: RandomForestModel that can be used for prediction. Example usage: >>> from pyspark.mllib.regression import LabeledPoint >>> from pyspark.mllib.tree import RandomForest >>> >>> data = [ ... LabeledPoint(0.0, [0.0]), ... LabeledPoint(0.0, [1.0]), ... LabeledPoint(1.0, [2.0]), ... LabeledPoint(1.0, [3.0]) ... ] >>> model = RandomForest.trainClassifier(sc.parallelize(data), 2, {}, 3, seed=42) >>> model.numTrees() 3 >>> model.totalNumNodes() 7 >>> print(model) TreeEnsembleModel classifier with 3 trees <BLANKLINE> >>> print(model.toDebugString()) TreeEnsembleModel classifier with 3 trees <BLANKLINE> Tree 0: Predict: 1.0 Tree 1: If (feature 0 <= 1.5) Predict: 0.0 Else (feature 0 > 1.5) Predict: 1.0 Tree 2: If (feature 0 <= 1.5) Predict: 0.0 Else (feature 0 > 1.5) Predict: 1.0 <BLANKLINE> >>> model.predict([2.0]) 1.0 >>> model.predict([0.0]) 0.0 >>> rdd = sc.parallelize([[3.0], [1.0]]) >>> model.predict(rdd).collect() [1.0, 0.0] """ return cls._train(data, "classification", numClasses, categoricalFeaturesInfo, numTrees, featureSubsetStrategy, impurity, maxDepth, maxBins, seed)
[ "def", "trainClassifier", "(", "cls", ",", "data", ",", "numClasses", ",", "categoricalFeaturesInfo", ",", "numTrees", ",", "featureSubsetStrategy", "=", "\"auto\"", ",", "impurity", "=", "\"gini\"", ",", "maxDepth", "=", "4", ",", "maxBins", "=", "32", ",", "seed", "=", "None", ")", ":", "return", "cls", ".", "_train", "(", "data", ",", "\"classification\"", ",", "numClasses", ",", "categoricalFeaturesInfo", ",", "numTrees", ",", "featureSubsetStrategy", ",", "impurity", ",", "maxDepth", ",", "maxBins", ",", "seed", ")" ]
Train a random forest model for binary or multiclass classification. :param data: Training dataset: RDD of LabeledPoint. Labels should take values {0, 1, ..., numClasses-1}. :param numClasses: Number of classes for classification. :param categoricalFeaturesInfo: Map storing arity of categorical features. An entry (n -> k) indicates that feature n is categorical with k categories indexed from 0: {0, 1, ..., k-1}. :param numTrees: Number of trees in the random forest. :param featureSubsetStrategy: Number of features to consider for splits at each node. Supported values: "auto", "all", "sqrt", "log2", "onethird". If "auto" is set, this parameter is set based on numTrees: if numTrees == 1, set to "all"; if numTrees > 1 (forest) set to "sqrt". (default: "auto") :param impurity: Criterion used for information gain calculation. Supported values: "gini" or "entropy". (default: "gini") :param maxDepth: Maximum depth of tree (e.g. depth 0 means 1 leaf node, depth 1 means 1 internal node + 2 leaf nodes). (default: 4) :param maxBins: Maximum number of bins used for splitting features. (default: 32) :param seed: Random seed for bootstrapping and choosing feature subsets. Set as None to generate seed based on system time. (default: None) :return: RandomForestModel that can be used for prediction. Example usage: >>> from pyspark.mllib.regression import LabeledPoint >>> from pyspark.mllib.tree import RandomForest >>> >>> data = [ ... LabeledPoint(0.0, [0.0]), ... LabeledPoint(0.0, [1.0]), ... LabeledPoint(1.0, [2.0]), ... LabeledPoint(1.0, [3.0]) ... ] >>> model = RandomForest.trainClassifier(sc.parallelize(data), 2, {}, 3, seed=42) >>> model.numTrees() 3 >>> model.totalNumNodes() 7 >>> print(model) TreeEnsembleModel classifier with 3 trees <BLANKLINE> >>> print(model.toDebugString()) TreeEnsembleModel classifier with 3 trees <BLANKLINE> Tree 0: Predict: 1.0 Tree 1: If (feature 0 <= 1.5) Predict: 0.0 Else (feature 0 > 1.5) Predict: 1.0 Tree 2: If (feature 0 <= 1.5) Predict: 0.0 Else (feature 0 > 1.5) Predict: 1.0 <BLANKLINE> >>> model.predict([2.0]) 1.0 >>> model.predict([0.0]) 0.0 >>> rdd = sc.parallelize([[3.0], [1.0]]) >>> model.predict(rdd).collect() [1.0, 0.0]
[ "Train", "a", "random", "forest", "model", "for", "binary", "or", "multiclass", "classification", "." ]
618d6bff71073c8c93501ab7392c3cc579730f0b
https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/mllib/tree.py#L319-L407
19,049
apache/spark
python/pyspark/mllib/tree.py
RandomForest.trainRegressor
def trainRegressor(cls, data, categoricalFeaturesInfo, numTrees, featureSubsetStrategy="auto", impurity="variance", maxDepth=4, maxBins=32, seed=None): """ Train a random forest model for regression. :param data: Training dataset: RDD of LabeledPoint. Labels are real numbers. :param categoricalFeaturesInfo: Map storing arity of categorical features. An entry (n -> k) indicates that feature n is categorical with k categories indexed from 0: {0, 1, ..., k-1}. :param numTrees: Number of trees in the random forest. :param featureSubsetStrategy: Number of features to consider for splits at each node. Supported values: "auto", "all", "sqrt", "log2", "onethird". If "auto" is set, this parameter is set based on numTrees: if numTrees == 1, set to "all"; if numTrees > 1 (forest) set to "onethird" for regression. (default: "auto") :param impurity: Criterion used for information gain calculation. The only supported value for regression is "variance". (default: "variance") :param maxDepth: Maximum depth of tree (e.g. depth 0 means 1 leaf node, depth 1 means 1 internal node + 2 leaf nodes). (default: 4) :param maxBins: Maximum number of bins used for splitting features. (default: 32) :param seed: Random seed for bootstrapping and choosing feature subsets. Set as None to generate seed based on system time. (default: None) :return: RandomForestModel that can be used for prediction. Example usage: >>> from pyspark.mllib.regression import LabeledPoint >>> from pyspark.mllib.tree import RandomForest >>> from pyspark.mllib.linalg import SparseVector >>> >>> sparse_data = [ ... LabeledPoint(0.0, SparseVector(2, {0: 1.0})), ... LabeledPoint(1.0, SparseVector(2, {1: 1.0})), ... LabeledPoint(0.0, SparseVector(2, {0: 1.0})), ... LabeledPoint(1.0, SparseVector(2, {1: 2.0})) ... ] >>> >>> model = RandomForest.trainRegressor(sc.parallelize(sparse_data), {}, 2, seed=42) >>> model.numTrees() 2 >>> model.totalNumNodes() 4 >>> model.predict(SparseVector(2, {1: 1.0})) 1.0 >>> model.predict(SparseVector(2, {0: 1.0})) 0.5 >>> rdd = sc.parallelize([[0.0, 1.0], [1.0, 0.0]]) >>> model.predict(rdd).collect() [1.0, 0.5] """ return cls._train(data, "regression", 0, categoricalFeaturesInfo, numTrees, featureSubsetStrategy, impurity, maxDepth, maxBins, seed)
python
def trainRegressor(cls, data, categoricalFeaturesInfo, numTrees, featureSubsetStrategy="auto", impurity="variance", maxDepth=4, maxBins=32, seed=None): """ Train a random forest model for regression. :param data: Training dataset: RDD of LabeledPoint. Labels are real numbers. :param categoricalFeaturesInfo: Map storing arity of categorical features. An entry (n -> k) indicates that feature n is categorical with k categories indexed from 0: {0, 1, ..., k-1}. :param numTrees: Number of trees in the random forest. :param featureSubsetStrategy: Number of features to consider for splits at each node. Supported values: "auto", "all", "sqrt", "log2", "onethird". If "auto" is set, this parameter is set based on numTrees: if numTrees == 1, set to "all"; if numTrees > 1 (forest) set to "onethird" for regression. (default: "auto") :param impurity: Criterion used for information gain calculation. The only supported value for regression is "variance". (default: "variance") :param maxDepth: Maximum depth of tree (e.g. depth 0 means 1 leaf node, depth 1 means 1 internal node + 2 leaf nodes). (default: 4) :param maxBins: Maximum number of bins used for splitting features. (default: 32) :param seed: Random seed for bootstrapping and choosing feature subsets. Set as None to generate seed based on system time. (default: None) :return: RandomForestModel that can be used for prediction. Example usage: >>> from pyspark.mllib.regression import LabeledPoint >>> from pyspark.mllib.tree import RandomForest >>> from pyspark.mllib.linalg import SparseVector >>> >>> sparse_data = [ ... LabeledPoint(0.0, SparseVector(2, {0: 1.0})), ... LabeledPoint(1.0, SparseVector(2, {1: 1.0})), ... LabeledPoint(0.0, SparseVector(2, {0: 1.0})), ... LabeledPoint(1.0, SparseVector(2, {1: 2.0})) ... ] >>> >>> model = RandomForest.trainRegressor(sc.parallelize(sparse_data), {}, 2, seed=42) >>> model.numTrees() 2 >>> model.totalNumNodes() 4 >>> model.predict(SparseVector(2, {1: 1.0})) 1.0 >>> model.predict(SparseVector(2, {0: 1.0})) 0.5 >>> rdd = sc.parallelize([[0.0, 1.0], [1.0, 0.0]]) >>> model.predict(rdd).collect() [1.0, 0.5] """ return cls._train(data, "regression", 0, categoricalFeaturesInfo, numTrees, featureSubsetStrategy, impurity, maxDepth, maxBins, seed)
[ "def", "trainRegressor", "(", "cls", ",", "data", ",", "categoricalFeaturesInfo", ",", "numTrees", ",", "featureSubsetStrategy", "=", "\"auto\"", ",", "impurity", "=", "\"variance\"", ",", "maxDepth", "=", "4", ",", "maxBins", "=", "32", ",", "seed", "=", "None", ")", ":", "return", "cls", ".", "_train", "(", "data", ",", "\"regression\"", ",", "0", ",", "categoricalFeaturesInfo", ",", "numTrees", ",", "featureSubsetStrategy", ",", "impurity", ",", "maxDepth", ",", "maxBins", ",", "seed", ")" ]
Train a random forest model for regression. :param data: Training dataset: RDD of LabeledPoint. Labels are real numbers. :param categoricalFeaturesInfo: Map storing arity of categorical features. An entry (n -> k) indicates that feature n is categorical with k categories indexed from 0: {0, 1, ..., k-1}. :param numTrees: Number of trees in the random forest. :param featureSubsetStrategy: Number of features to consider for splits at each node. Supported values: "auto", "all", "sqrt", "log2", "onethird". If "auto" is set, this parameter is set based on numTrees: if numTrees == 1, set to "all"; if numTrees > 1 (forest) set to "onethird" for regression. (default: "auto") :param impurity: Criterion used for information gain calculation. The only supported value for regression is "variance". (default: "variance") :param maxDepth: Maximum depth of tree (e.g. depth 0 means 1 leaf node, depth 1 means 1 internal node + 2 leaf nodes). (default: 4) :param maxBins: Maximum number of bins used for splitting features. (default: 32) :param seed: Random seed for bootstrapping and choosing feature subsets. Set as None to generate seed based on system time. (default: None) :return: RandomForestModel that can be used for prediction. Example usage: >>> from pyspark.mllib.regression import LabeledPoint >>> from pyspark.mllib.tree import RandomForest >>> from pyspark.mllib.linalg import SparseVector >>> >>> sparse_data = [ ... LabeledPoint(0.0, SparseVector(2, {0: 1.0})), ... LabeledPoint(1.0, SparseVector(2, {1: 1.0})), ... LabeledPoint(0.0, SparseVector(2, {0: 1.0})), ... LabeledPoint(1.0, SparseVector(2, {1: 2.0})) ... ] >>> >>> model = RandomForest.trainRegressor(sc.parallelize(sparse_data), {}, 2, seed=42) >>> model.numTrees() 2 >>> model.totalNumNodes() 4 >>> model.predict(SparseVector(2, {1: 1.0})) 1.0 >>> model.predict(SparseVector(2, {0: 1.0})) 0.5 >>> rdd = sc.parallelize([[0.0, 1.0], [1.0, 0.0]]) >>> model.predict(rdd).collect() [1.0, 0.5]
[ "Train", "a", "random", "forest", "model", "for", "regression", "." ]
618d6bff71073c8c93501ab7392c3cc579730f0b
https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/mllib/tree.py#L411-L476
19,050
apache/spark
python/pyspark/mllib/tree.py
GradientBoostedTrees.trainClassifier
def trainClassifier(cls, data, categoricalFeaturesInfo, loss="logLoss", numIterations=100, learningRate=0.1, maxDepth=3, maxBins=32): """ Train a gradient-boosted trees model for classification. :param data: Training dataset: RDD of LabeledPoint. Labels should take values {0, 1}. :param categoricalFeaturesInfo: Map storing arity of categorical features. An entry (n -> k) indicates that feature n is categorical with k categories indexed from 0: {0, 1, ..., k-1}. :param loss: Loss function used for minimization during gradient boosting. Supported values: "logLoss", "leastSquaresError", "leastAbsoluteError". (default: "logLoss") :param numIterations: Number of iterations of boosting. (default: 100) :param learningRate: Learning rate for shrinking the contribution of each estimator. The learning rate should be between in the interval (0, 1]. (default: 0.1) :param maxDepth: Maximum depth of tree (e.g. depth 0 means 1 leaf node, depth 1 means 1 internal node + 2 leaf nodes). (default: 3) :param maxBins: Maximum number of bins used for splitting features. DecisionTree requires maxBins >= max categories. (default: 32) :return: GradientBoostedTreesModel that can be used for prediction. Example usage: >>> from pyspark.mllib.regression import LabeledPoint >>> from pyspark.mllib.tree import GradientBoostedTrees >>> >>> data = [ ... LabeledPoint(0.0, [0.0]), ... LabeledPoint(0.0, [1.0]), ... LabeledPoint(1.0, [2.0]), ... LabeledPoint(1.0, [3.0]) ... ] >>> >>> model = GradientBoostedTrees.trainClassifier(sc.parallelize(data), {}, numIterations=10) >>> model.numTrees() 10 >>> model.totalNumNodes() 30 >>> print(model) # it already has newline TreeEnsembleModel classifier with 10 trees <BLANKLINE> >>> model.predict([2.0]) 1.0 >>> model.predict([0.0]) 0.0 >>> rdd = sc.parallelize([[2.0], [0.0]]) >>> model.predict(rdd).collect() [1.0, 0.0] """ return cls._train(data, "classification", categoricalFeaturesInfo, loss, numIterations, learningRate, maxDepth, maxBins)
python
def trainClassifier(cls, data, categoricalFeaturesInfo, loss="logLoss", numIterations=100, learningRate=0.1, maxDepth=3, maxBins=32): """ Train a gradient-boosted trees model for classification. :param data: Training dataset: RDD of LabeledPoint. Labels should take values {0, 1}. :param categoricalFeaturesInfo: Map storing arity of categorical features. An entry (n -> k) indicates that feature n is categorical with k categories indexed from 0: {0, 1, ..., k-1}. :param loss: Loss function used for minimization during gradient boosting. Supported values: "logLoss", "leastSquaresError", "leastAbsoluteError". (default: "logLoss") :param numIterations: Number of iterations of boosting. (default: 100) :param learningRate: Learning rate for shrinking the contribution of each estimator. The learning rate should be between in the interval (0, 1]. (default: 0.1) :param maxDepth: Maximum depth of tree (e.g. depth 0 means 1 leaf node, depth 1 means 1 internal node + 2 leaf nodes). (default: 3) :param maxBins: Maximum number of bins used for splitting features. DecisionTree requires maxBins >= max categories. (default: 32) :return: GradientBoostedTreesModel that can be used for prediction. Example usage: >>> from pyspark.mllib.regression import LabeledPoint >>> from pyspark.mllib.tree import GradientBoostedTrees >>> >>> data = [ ... LabeledPoint(0.0, [0.0]), ... LabeledPoint(0.0, [1.0]), ... LabeledPoint(1.0, [2.0]), ... LabeledPoint(1.0, [3.0]) ... ] >>> >>> model = GradientBoostedTrees.trainClassifier(sc.parallelize(data), {}, numIterations=10) >>> model.numTrees() 10 >>> model.totalNumNodes() 30 >>> print(model) # it already has newline TreeEnsembleModel classifier with 10 trees <BLANKLINE> >>> model.predict([2.0]) 1.0 >>> model.predict([0.0]) 0.0 >>> rdd = sc.parallelize([[2.0], [0.0]]) >>> model.predict(rdd).collect() [1.0, 0.0] """ return cls._train(data, "classification", categoricalFeaturesInfo, loss, numIterations, learningRate, maxDepth, maxBins)
[ "def", "trainClassifier", "(", "cls", ",", "data", ",", "categoricalFeaturesInfo", ",", "loss", "=", "\"logLoss\"", ",", "numIterations", "=", "100", ",", "learningRate", "=", "0.1", ",", "maxDepth", "=", "3", ",", "maxBins", "=", "32", ")", ":", "return", "cls", ".", "_train", "(", "data", ",", "\"classification\"", ",", "categoricalFeaturesInfo", ",", "loss", ",", "numIterations", ",", "learningRate", ",", "maxDepth", ",", "maxBins", ")" ]
Train a gradient-boosted trees model for classification. :param data: Training dataset: RDD of LabeledPoint. Labels should take values {0, 1}. :param categoricalFeaturesInfo: Map storing arity of categorical features. An entry (n -> k) indicates that feature n is categorical with k categories indexed from 0: {0, 1, ..., k-1}. :param loss: Loss function used for minimization during gradient boosting. Supported values: "logLoss", "leastSquaresError", "leastAbsoluteError". (default: "logLoss") :param numIterations: Number of iterations of boosting. (default: 100) :param learningRate: Learning rate for shrinking the contribution of each estimator. The learning rate should be between in the interval (0, 1]. (default: 0.1) :param maxDepth: Maximum depth of tree (e.g. depth 0 means 1 leaf node, depth 1 means 1 internal node + 2 leaf nodes). (default: 3) :param maxBins: Maximum number of bins used for splitting features. DecisionTree requires maxBins >= max categories. (default: 32) :return: GradientBoostedTreesModel that can be used for prediction. Example usage: >>> from pyspark.mllib.regression import LabeledPoint >>> from pyspark.mllib.tree import GradientBoostedTrees >>> >>> data = [ ... LabeledPoint(0.0, [0.0]), ... LabeledPoint(0.0, [1.0]), ... LabeledPoint(1.0, [2.0]), ... LabeledPoint(1.0, [3.0]) ... ] >>> >>> model = GradientBoostedTrees.trainClassifier(sc.parallelize(data), {}, numIterations=10) >>> model.numTrees() 10 >>> model.totalNumNodes() 30 >>> print(model) # it already has newline TreeEnsembleModel classifier with 10 trees <BLANKLINE> >>> model.predict([2.0]) 1.0 >>> model.predict([0.0]) 0.0 >>> rdd = sc.parallelize([[2.0], [0.0]]) >>> model.predict(rdd).collect() [1.0, 0.0]
[ "Train", "a", "gradient", "-", "boosted", "trees", "model", "for", "classification", "." ]
618d6bff71073c8c93501ab7392c3cc579730f0b
https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/mllib/tree.py#L511-L576
19,051
apache/spark
python/pyspark/conf.py
SparkConf.set
def set(self, key, value):
    """Set a configuration property."""
    # Try to set self._jconf first if JVM is created, set self._conf if JVM is not created yet.
    if self._jconf is not None:
        self._jconf.set(key, unicode(value))
    else:
        self._conf[key] = unicode(value)
    return self
python
def set(self, key, value):
    """Set a configuration property."""
    # Try to set self._jconf first if JVM is created, set self._conf if JVM is not created yet.
    if self._jconf is not None:
        self._jconf.set(key, unicode(value))
    else:
        self._conf[key] = unicode(value)
    return self
[ "def", "set", "(", "self", ",", "key", ",", "value", ")", ":", "# Try to set self._jconf first if JVM is created, set self._conf if JVM is not created yet.", "if", "self", ".", "_jconf", "is", "not", "None", ":", "self", ".", "_jconf", ".", "set", "(", "key", ",", "unicode", "(", "value", ")", ")", "else", ":", "self", ".", "_conf", "[", "key", "]", "=", "unicode", "(", "value", ")", "return", "self" ]
Set a configuration property.
[ "Set", "a", "configuration", "property", "." ]
618d6bff71073c8c93501ab7392c3cc579730f0b
https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/conf.py#L123-L130
19,052
apache/spark
python/pyspark/conf.py
SparkConf.setIfMissing
def setIfMissing(self, key, value):
    """Set a configuration property, if not already set."""
    if self.get(key) is None:
        self.set(key, value)
    return self
python
def setIfMissing(self, key, value):
    """Set a configuration property, if not already set."""
    if self.get(key) is None:
        self.set(key, value)
    return self
[ "def", "setIfMissing", "(", "self", ",", "key", ",", "value", ")", ":", "if", "self", ".", "get", "(", "key", ")", "is", "None", ":", "self", ".", "set", "(", "key", ",", "value", ")", "return", "self" ]
Set a configuration property, if not already set.
[ "Set", "a", "configuration", "property", "if", "not", "already", "set", "." ]
618d6bff71073c8c93501ab7392c3cc579730f0b
https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/conf.py#L132-L136
19,053
apache/spark
python/pyspark/conf.py
SparkConf.setExecutorEnv
def setExecutorEnv(self, key=None, value=None, pairs=None):
    """Set an environment variable to be passed to executors."""
    if (key is not None and pairs is not None) or (key is None and pairs is None):
        raise Exception("Either pass one key-value pair or a list of pairs")
    elif key is not None:
        self.set("spark.executorEnv." + key, value)
    elif pairs is not None:
        for (k, v) in pairs:
            self.set("spark.executorEnv." + k, v)
    return self
python
def setExecutorEnv(self, key=None, value=None, pairs=None):
    """Set an environment variable to be passed to executors."""
    if (key is not None and pairs is not None) or (key is None and pairs is None):
        raise Exception("Either pass one key-value pair or a list of pairs")
    elif key is not None:
        self.set("spark.executorEnv." + key, value)
    elif pairs is not None:
        for (k, v) in pairs:
            self.set("spark.executorEnv." + k, v)
    return self
[ "def", "setExecutorEnv", "(", "self", ",", "key", "=", "None", ",", "value", "=", "None", ",", "pairs", "=", "None", ")", ":", "if", "(", "key", "is", "not", "None", "and", "pairs", "is", "not", "None", ")", "or", "(", "key", "is", "None", "and", "pairs", "is", "None", ")", ":", "raise", "Exception", "(", "\"Either pass one key-value pair or a list of pairs\"", ")", "elif", "key", "is", "not", "None", ":", "self", ".", "set", "(", "\"spark.executorEnv.\"", "+", "key", ",", "value", ")", "elif", "pairs", "is", "not", "None", ":", "for", "(", "k", ",", "v", ")", "in", "pairs", ":", "self", ".", "set", "(", "\"spark.executorEnv.\"", "+", "k", ",", "v", ")", "return", "self" ]
Set an environment variable to be passed to executors.
[ "Set", "an", "environment", "variable", "to", "be", "passed", "to", "executors", "." ]
618d6bff71073c8c93501ab7392c3cc579730f0b
https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/conf.py#L153-L162
19,054
apache/spark
python/pyspark/conf.py
SparkConf.setAll
def setAll(self, pairs):
    """
    Set multiple parameters, passed as a list of key-value pairs.

    :param pairs: list of key-value pairs to set
    """
    for (k, v) in pairs:
        self.set(k, v)
    return self
python
def setAll(self, pairs):
    """
    Set multiple parameters, passed as a list of key-value pairs.

    :param pairs: list of key-value pairs to set
    """
    for (k, v) in pairs:
        self.set(k, v)
    return self
[ "def", "setAll", "(", "self", ",", "pairs", ")", ":", "for", "(", "k", ",", "v", ")", "in", "pairs", ":", "self", ".", "set", "(", "k", ",", "v", ")", "return", "self" ]
Set multiple parameters, passed as a list of key-value pairs. :param pairs: list of key-value pairs to set
[ "Set", "multiple", "parameters", "passed", "as", "a", "list", "of", "key", "-", "value", "pairs", "." ]
618d6bff71073c8c93501ab7392c3cc579730f0b
https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/conf.py#L164-L172
19,055
apache/spark
python/pyspark/conf.py
SparkConf.get
def get(self, key, defaultValue=None):
    """Get the configured value for some key, or return a default otherwise."""
    if defaultValue is None:   # Py4J doesn't call the right get() if we pass None
        if self._jconf is not None:
            if not self._jconf.contains(key):
                return None
            return self._jconf.get(key)
        else:
            if key not in self._conf:
                return None
            return self._conf[key]
    else:
        if self._jconf is not None:
            return self._jconf.get(key, defaultValue)
        else:
            return self._conf.get(key, defaultValue)
python
def get(self, key, defaultValue=None): """Get the configured value for some key, or return a default otherwise.""" if defaultValue is None: # Py4J doesn't call the right get() if we pass None if self._jconf is not None: if not self._jconf.contains(key): return None return self._jconf.get(key) else: if key not in self._conf: return None return self._conf[key] else: if self._jconf is not None: return self._jconf.get(key, defaultValue) else: return self._conf.get(key, defaultValue)
[ "def", "get", "(", "self", ",", "key", ",", "defaultValue", "=", "None", ")", ":", "if", "defaultValue", "is", "None", ":", "# Py4J doesn't call the right get() if we pass None", "if", "self", ".", "_jconf", "is", "not", "None", ":", "if", "not", "self", ".", "_jconf", ".", "contains", "(", "key", ")", ":", "return", "None", "return", "self", ".", "_jconf", ".", "get", "(", "key", ")", "else", ":", "if", "key", "not", "in", "self", ".", "_conf", ":", "return", "None", "return", "self", ".", "_conf", "[", "key", "]", "else", ":", "if", "self", ".", "_jconf", "is", "not", "None", ":", "return", "self", ".", "_jconf", ".", "get", "(", "key", ",", "defaultValue", ")", "else", ":", "return", "self", ".", "_conf", ".", "get", "(", "key", ",", "defaultValue", ")" ]
Get the configured value for some key, or return a default otherwise.
[ "Get", "the", "configured", "value", "for", "some", "key", "or", "return", "a", "default", "otherwise", "." ]
618d6bff71073c8c93501ab7392c3cc579730f0b
https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/conf.py#L174-L189
19,056
apache/spark
python/pyspark/conf.py
SparkConf.getAll
def getAll(self): """Get all values as a list of key-value pairs.""" if self._jconf is not None: return [(elem._1(), elem._2()) for elem in self._jconf.getAll()] else: return self._conf.items()
python
def getAll(self): """Get all values as a list of key-value pairs.""" if self._jconf is not None: return [(elem._1(), elem._2()) for elem in self._jconf.getAll()] else: return self._conf.items()
[ "def", "getAll", "(", "self", ")", ":", "if", "self", ".", "_jconf", "is", "not", "None", ":", "return", "[", "(", "elem", ".", "_1", "(", ")", ",", "elem", ".", "_2", "(", ")", ")", "for", "elem", "in", "self", ".", "_jconf", ".", "getAll", "(", ")", "]", "else", ":", "return", "self", ".", "_conf", ".", "items", "(", ")" ]
Get all values as a list of key-value pairs.
[ "Get", "all", "values", "as", "a", "list", "of", "key", "-", "value", "pairs", "." ]
618d6bff71073c8c93501ab7392c3cc579730f0b
https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/conf.py#L191-L196
19,057
apache/spark
python/pyspark/conf.py
SparkConf.contains
def contains(self, key): """Does this configuration contain a given key?""" if self._jconf is not None: return self._jconf.contains(key) else: return key in self._conf
python
def contains(self, key): """Does this configuration contain a given key?""" if self._jconf is not None: return self._jconf.contains(key) else: return key in self._conf
[ "def", "contains", "(", "self", ",", "key", ")", ":", "if", "self", ".", "_jconf", "is", "not", "None", ":", "return", "self", ".", "_jconf", ".", "contains", "(", "key", ")", "else", ":", "return", "key", "in", "self", ".", "_conf" ]
Does this configuration contain a given key?
[ "Does", "this", "configuration", "contain", "a", "given", "key?" ]
618d6bff71073c8c93501ab7392c3cc579730f0b
https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/conf.py#L198-L203
19,058
apache/spark
python/pyspark/conf.py
SparkConf.toDebugString
def toDebugString(self): """ Returns a printable version of the configuration, as a list of key=value pairs, one per line. """ if self._jconf is not None: return self._jconf.toDebugString() else: return '\n'.join('%s=%s' % (k, v) for k, v in self._conf.items())
python
def toDebugString(self): """ Returns a printable version of the configuration, as a list of key=value pairs, one per line. """ if self._jconf is not None: return self._jconf.toDebugString() else: return '\n'.join('%s=%s' % (k, v) for k, v in self._conf.items())
[ "def", "toDebugString", "(", "self", ")", ":", "if", "self", ".", "_jconf", "is", "not", "None", ":", "return", "self", ".", "_jconf", ".", "toDebugString", "(", ")", "else", ":", "return", "'\\n'", ".", "join", "(", "'%s=%s'", "%", "(", "k", ",", "v", ")", "for", "k", ",", "v", "in", "self", ".", "_conf", ".", "items", "(", ")", ")" ]
Returns a printable version of the configuration, as a list of key=value pairs, one per line.
[ "Returns", "a", "printable", "version", "of", "the", "configuration", "as", "a", "list", "of", "key", "=", "value", "pairs", "one", "per", "line", "." ]
618d6bff71073c8c93501ab7392c3cc579730f0b
https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/conf.py#L205-L213
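A short sketch exercising the SparkConf read-side records above (get, getAll, contains, toDebugString); the keys and values are illustrative. Note that get only returns the supplied default when defaultValue is not None; otherwise a missing key yields None.

from pyspark import SparkConf

conf = SparkConf().setAll([("spark.app.name", "conf-demo"),
                           ("spark.executor.memory", "2g")])

print(conf.get("spark.app.name"))                  # 'conf-demo'
print(conf.get("spark.missing.key"))               # None (no default given)
print(conf.get("spark.missing.key", "fallback"))   # 'fallback'
print(conf.contains("spark.executor.memory"))      # True
print(sorted(conf.getAll()))                       # [(key, value), ...]
print(conf.toDebugString())                        # one key=value per line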
19,059
apache/spark
python/pyspark/sql/catalog.py
Catalog.listDatabases
def listDatabases(self): """Returns a list of databases available across all sessions.""" iter = self._jcatalog.listDatabases().toLocalIterator() databases = [] while iter.hasNext(): jdb = iter.next() databases.append(Database( name=jdb.name(), description=jdb.description(), locationUri=jdb.locationUri())) return databases
python
def listDatabases(self): """Returns a list of databases available across all sessions.""" iter = self._jcatalog.listDatabases().toLocalIterator() databases = [] while iter.hasNext(): jdb = iter.next() databases.append(Database( name=jdb.name(), description=jdb.description(), locationUri=jdb.locationUri())) return databases
[ "def", "listDatabases", "(", "self", ")", ":", "iter", "=", "self", ".", "_jcatalog", ".", "listDatabases", "(", ")", ".", "toLocalIterator", "(", ")", "databases", "=", "[", "]", "while", "iter", ".", "hasNext", "(", ")", ":", "jdb", "=", "iter", ".", "next", "(", ")", "databases", ".", "append", "(", "Database", "(", "name", "=", "jdb", ".", "name", "(", ")", ",", "description", "=", "jdb", ".", "description", "(", ")", ",", "locationUri", "=", "jdb", ".", "locationUri", "(", ")", ")", ")", "return", "databases" ]
Returns a list of databases available across all sessions.
[ "Returns", "a", "list", "of", "databases", "available", "across", "all", "sessions", "." ]
618d6bff71073c8c93501ab7392c3cc579730f0b
https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/sql/catalog.py#L61-L71
19,060
apache/spark
python/pyspark/sql/catalog.py
Catalog.listFunctions
def listFunctions(self, dbName=None): """Returns a list of functions registered in the specified database. If no database is specified, the current database is used. This includes all temporary functions. """ if dbName is None: dbName = self.currentDatabase() iter = self._jcatalog.listFunctions(dbName).toLocalIterator() functions = [] while iter.hasNext(): jfunction = iter.next() functions.append(Function( name=jfunction.name(), description=jfunction.description(), className=jfunction.className(), isTemporary=jfunction.isTemporary())) return functions
python
def listFunctions(self, dbName=None): """Returns a list of functions registered in the specified database. If no database is specified, the current database is used. This includes all temporary functions. """ if dbName is None: dbName = self.currentDatabase() iter = self._jcatalog.listFunctions(dbName).toLocalIterator() functions = [] while iter.hasNext(): jfunction = iter.next() functions.append(Function( name=jfunction.name(), description=jfunction.description(), className=jfunction.className(), isTemporary=jfunction.isTemporary())) return functions
[ "def", "listFunctions", "(", "self", ",", "dbName", "=", "None", ")", ":", "if", "dbName", "is", "None", ":", "dbName", "=", "self", ".", "currentDatabase", "(", ")", "iter", "=", "self", ".", "_jcatalog", ".", "listFunctions", "(", "dbName", ")", ".", "toLocalIterator", "(", ")", "functions", "=", "[", "]", "while", "iter", ".", "hasNext", "(", ")", ":", "jfunction", "=", "iter", ".", "next", "(", ")", "functions", ".", "append", "(", "Function", "(", "name", "=", "jfunction", ".", "name", "(", ")", ",", "description", "=", "jfunction", ".", "description", "(", ")", ",", "className", "=", "jfunction", ".", "className", "(", ")", ",", "isTemporary", "=", "jfunction", ".", "isTemporary", "(", ")", ")", ")", "return", "functions" ]
Returns a list of functions registered in the specified database. If no database is specified, the current database is used. This includes all temporary functions.
[ "Returns", "a", "list", "of", "functions", "registered", "in", "the", "specified", "database", "." ]
618d6bff71073c8c93501ab7392c3cc579730f0b
https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/sql/catalog.py#L97-L114
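A hedged sketch for the Catalog.listDatabases and Catalog.listFunctions records above; the session settings are assumptions, and the fields printed are the Database/Function attributes constructed in those records.

from pyspark.sql import SparkSession

spark = (SparkSession.builder
         .master("local[2]")
         .appName("catalog-demo")
         .getOrCreate())

for db in spark.catalog.listDatabases():
    print(db.name, db.locationUri)

# With no dbName argument, the current database is used and temporary
# functions are included.
for fn in spark.catalog.listFunctions():
    print(fn.name, fn.className, fn.isTemporary)

spark.stop()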
19,061
apache/spark
python/pyspark/taskcontext.py
_load_from_socket
def _load_from_socket(port, auth_secret): """ Load data from a given socket, this is a blocking method thus only return when the socket connection has been closed. """ (sockfile, sock) = local_connect_and_auth(port, auth_secret) # The barrier() call may block forever, so no timeout sock.settimeout(None) # Make a barrier() function call. write_int(BARRIER_FUNCTION, sockfile) sockfile.flush() # Collect result. res = UTF8Deserializer().loads(sockfile) # Release resources. sockfile.close() sock.close() return res
python
def _load_from_socket(port, auth_secret): """ Load data from a given socket, this is a blocking method thus only return when the socket connection has been closed. """ (sockfile, sock) = local_connect_and_auth(port, auth_secret) # The barrier() call may block forever, so no timeout sock.settimeout(None) # Make a barrier() function call. write_int(BARRIER_FUNCTION, sockfile) sockfile.flush() # Collect result. res = UTF8Deserializer().loads(sockfile) # Release resources. sockfile.close() sock.close() return res
[ "def", "_load_from_socket", "(", "port", ",", "auth_secret", ")", ":", "(", "sockfile", ",", "sock", ")", "=", "local_connect_and_auth", "(", "port", ",", "auth_secret", ")", "# The barrier() call may block forever, so no timeout", "sock", ".", "settimeout", "(", "None", ")", "# Make a barrier() function call.", "write_int", "(", "BARRIER_FUNCTION", ",", "sockfile", ")", "sockfile", ".", "flush", "(", ")", "# Collect result.", "res", "=", "UTF8Deserializer", "(", ")", ".", "loads", "(", "sockfile", ")", "# Release resources.", "sockfile", ".", "close", "(", ")", "sock", ".", "close", "(", ")", "return", "res" ]
Load data from a given socket, this is a blocking method thus only return when the socket connection has been closed.
[ "Load", "data", "from", "a", "given", "socket", "this", "is", "a", "blocking", "method", "thus", "only", "return", "when", "the", "socket", "connection", "has", "been", "closed", "." ]
618d6bff71073c8c93501ab7392c3cc579730f0b
https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/taskcontext.py#L102-L121
19,062
apache/spark
python/pyspark/taskcontext.py
BarrierTaskContext._getOrCreate
def _getOrCreate(cls): """ Internal function to get or create global BarrierTaskContext. We need to make sure BarrierTaskContext is returned from here because it is needed in python worker reuse scenario, see SPARK-25921 for more details. """ if not isinstance(cls._taskContext, BarrierTaskContext): cls._taskContext = object.__new__(cls) return cls._taskContext
python
def _getOrCreate(cls): """ Internal function to get or create global BarrierTaskContext. We need to make sure BarrierTaskContext is returned from here because it is needed in python worker reuse scenario, see SPARK-25921 for more details. """ if not isinstance(cls._taskContext, BarrierTaskContext): cls._taskContext = object.__new__(cls) return cls._taskContext
[ "def", "_getOrCreate", "(", "cls", ")", ":", "if", "not", "isinstance", "(", "cls", ".", "_taskContext", ",", "BarrierTaskContext", ")", ":", "cls", ".", "_taskContext", "=", "object", ".", "__new__", "(", "cls", ")", "return", "cls", ".", "_taskContext" ]
Internal function to get or create global BarrierTaskContext. We need to make sure BarrierTaskContext is returned from here because it is needed in python worker reuse scenario, see SPARK-25921 for more details.
[ "Internal", "function", "to", "get", "or", "create", "global", "BarrierTaskContext", ".", "We", "need", "to", "make", "sure", "BarrierTaskContext", "is", "returned", "from", "here", "because", "it", "is", "needed", "in", "python", "worker", "reuse", "scenario", "see", "SPARK", "-", "25921", "for", "more", "details", "." ]
618d6bff71073c8c93501ab7392c3cc579730f0b
https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/taskcontext.py#L139-L147
19,063
apache/spark
python/pyspark/taskcontext.py
BarrierTaskContext._initialize
def _initialize(cls, port, secret): """ Initialize BarrierTaskContext, other methods within BarrierTaskContext can only be called after BarrierTaskContext is initialized. """ cls._port = port cls._secret = secret
python
def _initialize(cls, port, secret): """ Initialize BarrierTaskContext, other methods within BarrierTaskContext can only be called after BarrierTaskContext is initialized. """ cls._port = port cls._secret = secret
[ "def", "_initialize", "(", "cls", ",", "port", ",", "secret", ")", ":", "cls", ".", "_port", "=", "port", "cls", ".", "_secret", "=", "secret" ]
Initialize BarrierTaskContext, other methods within BarrierTaskContext can only be called after BarrierTaskContext is initialized.
[ "Initialize", "BarrierTaskContext", "other", "methods", "within", "BarrierTaskContext", "can", "only", "be", "called", "after", "BarrierTaskContext", "is", "initialized", "." ]
618d6bff71073c8c93501ab7392c3cc579730f0b
https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/taskcontext.py#L163-L169
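The three taskcontext records above are internal plumbing behind the public barrier API: _getOrCreate()/_initialize() set up the singleton context on the worker, and _load_from_socket() performs the blocking barrier() round trip. A hedged sketch of that public API, assuming Spark 2.4+ barrier scheduling and a local master with at least as many cores as partitions:

from pyspark import SparkContext
from pyspark.taskcontext import BarrierTaskContext

sc = SparkContext("local[4]", "barrier-demo")

def stage(iterator):
    ctx = BarrierTaskContext.get()
    ctx.barrier()                      # blocks until every task in the stage arrives
    yield (ctx.partitionId(), sum(iterator))

print(sc.parallelize(range(8), 4).barrier().mapPartitions(stage).collect())
sc.stop()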
19,064
apache/spark
python/pyspark/__init__.py
since
def since(version): """ A decorator that annotates a function to append the version of Spark the function was added. """ import re indent_p = re.compile(r'\n( +)') def deco(f): indents = indent_p.findall(f.__doc__) indent = ' ' * (min(len(m) for m in indents) if indents else 0) f.__doc__ = f.__doc__.rstrip() + "\n\n%s.. versionadded:: %s" % (indent, version) return f return deco
python
def since(version): """ A decorator that annotates a function to append the version of Spark the function was added. """ import re indent_p = re.compile(r'\n( +)') def deco(f): indents = indent_p.findall(f.__doc__) indent = ' ' * (min(len(m) for m in indents) if indents else 0) f.__doc__ = f.__doc__.rstrip() + "\n\n%s.. versionadded:: %s" % (indent, version) return f return deco
[ "def", "since", "(", "version", ")", ":", "import", "re", "indent_p", "=", "re", ".", "compile", "(", "r'\\n( +)'", ")", "def", "deco", "(", "f", ")", ":", "indents", "=", "indent_p", ".", "findall", "(", "f", ".", "__doc__", ")", "indent", "=", "' '", "*", "(", "min", "(", "len", "(", "m", ")", "for", "m", "in", "indents", ")", "if", "indents", "else", "0", ")", "f", ".", "__doc__", "=", "f", ".", "__doc__", ".", "rstrip", "(", ")", "+", "\"\\n\\n%s.. versionadded:: %s\"", "%", "(", "indent", ",", "version", ")", "return", "f", "return", "deco" ]
A decorator that annotates a function to append the version of Spark the function was added.
[ "A", "decorator", "that", "annotates", "a", "function", "to", "append", "the", "version", "of", "Spark", "the", "function", "was", "added", "." ]
618d6bff71073c8c93501ab7392c3cc579730f0b
https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/__init__.py#L65-L77
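A small illustration of the `since` decorator record above; the decorated function and version string are hypothetical.

from pyspark import since

@since("2.4.0")
def frobnicate(x):
    """
    Hypothetical helper used only to show the appended directive.
    """
    return x

print(frobnicate.__doc__)
# ...
#     .. versionadded:: 2.4.0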
19,065
apache/spark
python/pyspark/__init__.py
keyword_only
def keyword_only(func): """ A decorator that forces keyword arguments in the wrapped method and saves actual input keyword arguments in `_input_kwargs`. .. note:: Should only be used to wrap a method where first arg is `self` """ @wraps(func) def wrapper(self, *args, **kwargs): if len(args) > 0: raise TypeError("Method %s forces keyword arguments." % func.__name__) self._input_kwargs = kwargs return func(self, **kwargs) return wrapper
python
def keyword_only(func): """ A decorator that forces keyword arguments in the wrapped method and saves actual input keyword arguments in `_input_kwargs`. .. note:: Should only be used to wrap a method where first arg is `self` """ @wraps(func) def wrapper(self, *args, **kwargs): if len(args) > 0: raise TypeError("Method %s forces keyword arguments." % func.__name__) self._input_kwargs = kwargs return func(self, **kwargs) return wrapper
[ "def", "keyword_only", "(", "func", ")", ":", "@", "wraps", "(", "func", ")", "def", "wrapper", "(", "self", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "if", "len", "(", "args", ")", ">", "0", ":", "raise", "TypeError", "(", "\"Method %s forces keyword arguments.\"", "%", "func", ".", "__name__", ")", "self", ".", "_input_kwargs", "=", "kwargs", "return", "func", "(", "self", ",", "*", "*", "kwargs", ")", "return", "wrapper" ]
A decorator that forces keyword arguments in the wrapped method and saves actual input keyword arguments in `_input_kwargs`. .. note:: Should only be used to wrap a method where first arg is `self`
[ "A", "decorator", "that", "forces", "keyword", "arguments", "in", "the", "wrapped", "method", "and", "saves", "actual", "input", "keyword", "arguments", "in", "_input_kwargs", "." ]
618d6bff71073c8c93501ab7392c3cc579730f0b
https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/__init__.py#L98-L111
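An illustration of the keyword_only record above; the class is hypothetical and exists only to show the captured kwargs and the rejected positional call.

from pyspark import keyword_only

class Demo(object):
    @keyword_only
    def __init__(self, maxIter=10, regParam=0.0):
        print("captured:", self._input_kwargs)   # kwargs saved by the wrapper

Demo(maxIter=5)          # captured: {'maxIter': 5}
try:
    Demo(5)              # positional args are rejected
except TypeError as e:
    print(e)             # Method __init__ forces keyword arguments.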
19,066
apache/spark
python/pyspark/ml/param/_shared_params_code_gen.py
_gen_param_code
def _gen_param_code(name, doc, defaultValueStr): """ Generates Python code for a shared param class. :param name: param name :param doc: param doc :param defaultValueStr: string representation of the default value :return: code string """ # TODO: How to correctly inherit instance attributes? template = ''' def set$Name(self, value): """ Sets the value of :py:attr:`$name`. """ return self._set($name=value) def get$Name(self): """ Gets the value of $name or its default value. """ return self.getOrDefault(self.$name)''' Name = name[0].upper() + name[1:] return template \ .replace("$name", name) \ .replace("$Name", Name) \ .replace("$doc", doc) \ .replace("$defaultValueStr", str(defaultValueStr))
python
def _gen_param_code(name, doc, defaultValueStr): """ Generates Python code for a shared param class. :param name: param name :param doc: param doc :param defaultValueStr: string representation of the default value :return: code string """ # TODO: How to correctly inherit instance attributes? template = ''' def set$Name(self, value): """ Sets the value of :py:attr:`$name`. """ return self._set($name=value) def get$Name(self): """ Gets the value of $name or its default value. """ return self.getOrDefault(self.$name)''' Name = name[0].upper() + name[1:] return template \ .replace("$name", name) \ .replace("$Name", Name) \ .replace("$doc", doc) \ .replace("$defaultValueStr", str(defaultValueStr))
[ "def", "_gen_param_code", "(", "name", ",", "doc", ",", "defaultValueStr", ")", ":", "# TODO: How to correctly inherit instance attributes?", "template", "=", "'''\n def set$Name(self, value):\n \"\"\"\n Sets the value of :py:attr:`$name`.\n \"\"\"\n return self._set($name=value)\n\n def get$Name(self):\n \"\"\"\n Gets the value of $name or its default value.\n \"\"\"\n return self.getOrDefault(self.$name)'''", "Name", "=", "name", "[", "0", "]", ".", "upper", "(", ")", "+", "name", "[", "1", ":", "]", "return", "template", ".", "replace", "(", "\"$name\"", ",", "name", ")", ".", "replace", "(", "\"$Name\"", ",", "Name", ")", ".", "replace", "(", "\"$doc\"", ",", "doc", ")", ".", "replace", "(", "\"$defaultValueStr\"", ",", "str", "(", "defaultValueStr", ")", ")" ]
Generates Python code for a shared param class. :param name: param name :param doc: param doc :param defaultValueStr: string representation of the default value :return: code string
[ "Generates", "Python", "code", "for", "a", "shared", "param", "class", "." ]
618d6bff71073c8c93501ab7392c3cc579730f0b
https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/ml/param/_shared_params_code_gen.py#L73-L101
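A hedged look at what the generator above emits for one shared param. This assumes the codegen module can be imported directly (in Spark it is normally run as a script to regenerate pyspark/ml/param/shared.py), and the name/doc/default triple is only an example.

from pyspark.ml.param._shared_params_code_gen import _gen_param_code

print(_gen_param_code("maxIter", "max number of iterations (>= 0).", "10"))
# Emits, roughly:
#     def setMaxIter(self, value):
#         return self._set(maxIter=value)
#
#     def getMaxIter(self):
#         return self.getOrDefault(self.maxIter)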
19,067
apache/spark
python/pyspark/mllib/clustering.py
BisectingKMeans.train
def train(self, rdd, k=4, maxIterations=20, minDivisibleClusterSize=1.0, seed=-1888008604): """ Runs the bisecting k-means algorithm return the model. :param rdd: Training points as an `RDD` of `Vector` or convertible sequence types. :param k: The desired number of leaf clusters. The actual number could be smaller if there are no divisible leaf clusters. (default: 4) :param maxIterations: Maximum number of iterations allowed to split clusters. (default: 20) :param minDivisibleClusterSize: Minimum number of points (if >= 1.0) or the minimum proportion of points (if < 1.0) of a divisible cluster. (default: 1) :param seed: Random seed value for cluster initialization. (default: -1888008604 from classOf[BisectingKMeans].getName.##) """ java_model = callMLlibFunc( "trainBisectingKMeans", rdd.map(_convert_to_vector), k, maxIterations, minDivisibleClusterSize, seed) return BisectingKMeansModel(java_model)
python
def train(self, rdd, k=4, maxIterations=20, minDivisibleClusterSize=1.0, seed=-1888008604): """ Runs the bisecting k-means algorithm return the model. :param rdd: Training points as an `RDD` of `Vector` or convertible sequence types. :param k: The desired number of leaf clusters. The actual number could be smaller if there are no divisible leaf clusters. (default: 4) :param maxIterations: Maximum number of iterations allowed to split clusters. (default: 20) :param minDivisibleClusterSize: Minimum number of points (if >= 1.0) or the minimum proportion of points (if < 1.0) of a divisible cluster. (default: 1) :param seed: Random seed value for cluster initialization. (default: -1888008604 from classOf[BisectingKMeans].getName.##) """ java_model = callMLlibFunc( "trainBisectingKMeans", rdd.map(_convert_to_vector), k, maxIterations, minDivisibleClusterSize, seed) return BisectingKMeansModel(java_model)
[ "def", "train", "(", "self", ",", "rdd", ",", "k", "=", "4", ",", "maxIterations", "=", "20", ",", "minDivisibleClusterSize", "=", "1.0", ",", "seed", "=", "-", "1888008604", ")", ":", "java_model", "=", "callMLlibFunc", "(", "\"trainBisectingKMeans\"", ",", "rdd", ".", "map", "(", "_convert_to_vector", ")", ",", "k", ",", "maxIterations", ",", "minDivisibleClusterSize", ",", "seed", ")", "return", "BisectingKMeansModel", "(", "java_model", ")" ]
Runs the bisecting k-means algorithm return the model. :param rdd: Training points as an `RDD` of `Vector` or convertible sequence types. :param k: The desired number of leaf clusters. The actual number could be smaller if there are no divisible leaf clusters. (default: 4) :param maxIterations: Maximum number of iterations allowed to split clusters. (default: 20) :param minDivisibleClusterSize: Minimum number of points (if >= 1.0) or the minimum proportion of points (if < 1.0) of a divisible cluster. (default: 1) :param seed: Random seed value for cluster initialization. (default: -1888008604 from classOf[BisectingKMeans].getName.##)
[ "Runs", "the", "bisecting", "k", "-", "means", "algorithm", "return", "the", "model", "." ]
618d6bff71073c8c93501ab7392c3cc579730f0b
https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/mllib/clustering.py#L142-L167
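A minimal training sketch for the BisectingKMeans.train record above on a tiny in-memory dataset; the data, k, and the clusterCenters/predict accessors on the returned model are illustrative.

from pyspark import SparkContext
from pyspark.mllib.clustering import BisectingKMeans

sc = SparkContext("local[2]", "bisecting-kmeans-demo")
data = sc.parallelize([[0.0, 0.0], [0.1, 0.1], [9.0, 9.0], [9.1, 9.1]])

model = BisectingKMeans().train(data, k=2, maxIterations=5)
print(model.clusterCenters)
print(model.predict([0.05, 0.05]))
sc.stop()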
19,068
apache/spark
python/pyspark/mllib/clustering.py
KMeans.train
def train(cls, rdd, k, maxIterations=100, runs=1, initializationMode="k-means||", seed=None, initializationSteps=2, epsilon=1e-4, initialModel=None): """ Train a k-means clustering model. :param rdd: Training points as an `RDD` of `Vector` or convertible sequence types. :param k: Number of clusters to create. :param maxIterations: Maximum number of iterations allowed. (default: 100) :param runs: This param has no effect since Spark 2.0.0. :param initializationMode: The initialization algorithm. This can be either "random" or "k-means||". (default: "k-means||") :param seed: Random seed value for cluster initialization. Set as None to generate seed based on system time. (default: None) :param initializationSteps: Number of steps for the k-means|| initialization mode. This is an advanced setting -- the default of 2 is almost always enough. (default: 2) :param epsilon: Distance threshold within which a center will be considered to have converged. If all centers move less than this Euclidean distance, iterations are stopped. (default: 1e-4) :param initialModel: Initial cluster centers can be provided as a KMeansModel object rather than using the random or k-means|| initializationModel. (default: None) """ if runs != 1: warnings.warn("The param `runs` has no effect since Spark 2.0.0.") clusterInitialModel = [] if initialModel is not None: if not isinstance(initialModel, KMeansModel): raise Exception("initialModel is of "+str(type(initialModel))+". It needs " "to be of <type 'KMeansModel'>") clusterInitialModel = [_convert_to_vector(c) for c in initialModel.clusterCenters] model = callMLlibFunc("trainKMeansModel", rdd.map(_convert_to_vector), k, maxIterations, runs, initializationMode, seed, initializationSteps, epsilon, clusterInitialModel) centers = callJavaFunc(rdd.context, model.clusterCenters) return KMeansModel([c.toArray() for c in centers])
python
def train(cls, rdd, k, maxIterations=100, runs=1, initializationMode="k-means||", seed=None, initializationSteps=2, epsilon=1e-4, initialModel=None): """ Train a k-means clustering model. :param rdd: Training points as an `RDD` of `Vector` or convertible sequence types. :param k: Number of clusters to create. :param maxIterations: Maximum number of iterations allowed. (default: 100) :param runs: This param has no effect since Spark 2.0.0. :param initializationMode: The initialization algorithm. This can be either "random" or "k-means||". (default: "k-means||") :param seed: Random seed value for cluster initialization. Set as None to generate seed based on system time. (default: None) :param initializationSteps: Number of steps for the k-means|| initialization mode. This is an advanced setting -- the default of 2 is almost always enough. (default: 2) :param epsilon: Distance threshold within which a center will be considered to have converged. If all centers move less than this Euclidean distance, iterations are stopped. (default: 1e-4) :param initialModel: Initial cluster centers can be provided as a KMeansModel object rather than using the random or k-means|| initializationModel. (default: None) """ if runs != 1: warnings.warn("The param `runs` has no effect since Spark 2.0.0.") clusterInitialModel = [] if initialModel is not None: if not isinstance(initialModel, KMeansModel): raise Exception("initialModel is of "+str(type(initialModel))+". It needs " "to be of <type 'KMeansModel'>") clusterInitialModel = [_convert_to_vector(c) for c in initialModel.clusterCenters] model = callMLlibFunc("trainKMeansModel", rdd.map(_convert_to_vector), k, maxIterations, runs, initializationMode, seed, initializationSteps, epsilon, clusterInitialModel) centers = callJavaFunc(rdd.context, model.clusterCenters) return KMeansModel([c.toArray() for c in centers])
[ "def", "train", "(", "cls", ",", "rdd", ",", "k", ",", "maxIterations", "=", "100", ",", "runs", "=", "1", ",", "initializationMode", "=", "\"k-means||\"", ",", "seed", "=", "None", ",", "initializationSteps", "=", "2", ",", "epsilon", "=", "1e-4", ",", "initialModel", "=", "None", ")", ":", "if", "runs", "!=", "1", ":", "warnings", ".", "warn", "(", "\"The param `runs` has no effect since Spark 2.0.0.\"", ")", "clusterInitialModel", "=", "[", "]", "if", "initialModel", "is", "not", "None", ":", "if", "not", "isinstance", "(", "initialModel", ",", "KMeansModel", ")", ":", "raise", "Exception", "(", "\"initialModel is of \"", "+", "str", "(", "type", "(", "initialModel", ")", ")", "+", "\". It needs \"", "\"to be of <type 'KMeansModel'>\"", ")", "clusterInitialModel", "=", "[", "_convert_to_vector", "(", "c", ")", "for", "c", "in", "initialModel", ".", "clusterCenters", "]", "model", "=", "callMLlibFunc", "(", "\"trainKMeansModel\"", ",", "rdd", ".", "map", "(", "_convert_to_vector", ")", ",", "k", ",", "maxIterations", ",", "runs", ",", "initializationMode", ",", "seed", ",", "initializationSteps", ",", "epsilon", ",", "clusterInitialModel", ")", "centers", "=", "callJavaFunc", "(", "rdd", ".", "context", ",", "model", ".", "clusterCenters", ")", "return", "KMeansModel", "(", "[", "c", ".", "toArray", "(", ")", "for", "c", "in", "centers", "]", ")" ]
Train a k-means clustering model. :param rdd: Training points as an `RDD` of `Vector` or convertible sequence types. :param k: Number of clusters to create. :param maxIterations: Maximum number of iterations allowed. (default: 100) :param runs: This param has no effect since Spark 2.0.0. :param initializationMode: The initialization algorithm. This can be either "random" or "k-means||". (default: "k-means||") :param seed: Random seed value for cluster initialization. Set as None to generate seed based on system time. (default: None) :param initializationSteps: Number of steps for the k-means|| initialization mode. This is an advanced setting -- the default of 2 is almost always enough. (default: 2) :param epsilon: Distance threshold within which a center will be considered to have converged. If all centers move less than this Euclidean distance, iterations are stopped. (default: 1e-4) :param initialModel: Initial cluster centers can be provided as a KMeansModel object rather than using the random or k-means|| initializationModel. (default: None)
[ "Train", "a", "k", "-", "means", "clustering", "model", "." ]
618d6bff71073c8c93501ab7392c3cc579730f0b
https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/mllib/clustering.py#L307-L357
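The same pattern for the KMeans.train record above; the `runs` argument is left at its default because, as the code notes, it has no effect since Spark 2.0.0.

from pyspark import SparkContext
from pyspark.mllib.clustering import KMeans

sc = SparkContext("local[2]", "kmeans-demo")
data = sc.parallelize([[0.0, 0.0], [0.1, 0.1], [9.0, 9.0], [9.1, 9.1]])

model = KMeans.train(data, k=2, maxIterations=10,
                     initializationMode="k-means||", seed=50)
print(model.clusterCenters)
print(model.predict([9.05, 9.05]))
sc.stop()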
19,069
apache/spark
python/pyspark/mllib/clustering.py
GaussianMixture.train
def train(cls, rdd, k, convergenceTol=1e-3, maxIterations=100, seed=None, initialModel=None): """ Train a Gaussian Mixture clustering model. :param rdd: Training points as an `RDD` of `Vector` or convertible sequence types. :param k: Number of independent Gaussians in the mixture model. :param convergenceTol: Maximum change in log-likelihood at which convergence is considered to have occurred. (default: 1e-3) :param maxIterations: Maximum number of iterations allowed. (default: 100) :param seed: Random seed for initial Gaussian distribution. Set as None to generate seed based on system time. (default: None) :param initialModel: Initial GMM starting point, bypassing the random initialization. (default: None) """ initialModelWeights = None initialModelMu = None initialModelSigma = None if initialModel is not None: if initialModel.k != k: raise Exception("Mismatched cluster count, initialModel.k = %s, however k = %s" % (initialModel.k, k)) initialModelWeights = list(initialModel.weights) initialModelMu = [initialModel.gaussians[i].mu for i in range(initialModel.k)] initialModelSigma = [initialModel.gaussians[i].sigma for i in range(initialModel.k)] java_model = callMLlibFunc("trainGaussianMixtureModel", rdd.map(_convert_to_vector), k, convergenceTol, maxIterations, seed, initialModelWeights, initialModelMu, initialModelSigma) return GaussianMixtureModel(java_model)
python
def train(cls, rdd, k, convergenceTol=1e-3, maxIterations=100, seed=None, initialModel=None): """ Train a Gaussian Mixture clustering model. :param rdd: Training points as an `RDD` of `Vector` or convertible sequence types. :param k: Number of independent Gaussians in the mixture model. :param convergenceTol: Maximum change in log-likelihood at which convergence is considered to have occurred. (default: 1e-3) :param maxIterations: Maximum number of iterations allowed. (default: 100) :param seed: Random seed for initial Gaussian distribution. Set as None to generate seed based on system time. (default: None) :param initialModel: Initial GMM starting point, bypassing the random initialization. (default: None) """ initialModelWeights = None initialModelMu = None initialModelSigma = None if initialModel is not None: if initialModel.k != k: raise Exception("Mismatched cluster count, initialModel.k = %s, however k = %s" % (initialModel.k, k)) initialModelWeights = list(initialModel.weights) initialModelMu = [initialModel.gaussians[i].mu for i in range(initialModel.k)] initialModelSigma = [initialModel.gaussians[i].sigma for i in range(initialModel.k)] java_model = callMLlibFunc("trainGaussianMixtureModel", rdd.map(_convert_to_vector), k, convergenceTol, maxIterations, seed, initialModelWeights, initialModelMu, initialModelSigma) return GaussianMixtureModel(java_model)
[ "def", "train", "(", "cls", ",", "rdd", ",", "k", ",", "convergenceTol", "=", "1e-3", ",", "maxIterations", "=", "100", ",", "seed", "=", "None", ",", "initialModel", "=", "None", ")", ":", "initialModelWeights", "=", "None", "initialModelMu", "=", "None", "initialModelSigma", "=", "None", "if", "initialModel", "is", "not", "None", ":", "if", "initialModel", ".", "k", "!=", "k", ":", "raise", "Exception", "(", "\"Mismatched cluster count, initialModel.k = %s, however k = %s\"", "%", "(", "initialModel", ".", "k", ",", "k", ")", ")", "initialModelWeights", "=", "list", "(", "initialModel", ".", "weights", ")", "initialModelMu", "=", "[", "initialModel", ".", "gaussians", "[", "i", "]", ".", "mu", "for", "i", "in", "range", "(", "initialModel", ".", "k", ")", "]", "initialModelSigma", "=", "[", "initialModel", ".", "gaussians", "[", "i", "]", ".", "sigma", "for", "i", "in", "range", "(", "initialModel", ".", "k", ")", "]", "java_model", "=", "callMLlibFunc", "(", "\"trainGaussianMixtureModel\"", ",", "rdd", ".", "map", "(", "_convert_to_vector", ")", ",", "k", ",", "convergenceTol", ",", "maxIterations", ",", "seed", ",", "initialModelWeights", ",", "initialModelMu", ",", "initialModelSigma", ")", "return", "GaussianMixtureModel", "(", "java_model", ")" ]
Train a Gaussian Mixture clustering model. :param rdd: Training points as an `RDD` of `Vector` or convertible sequence types. :param k: Number of independent Gaussians in the mixture model. :param convergenceTol: Maximum change in log-likelihood at which convergence is considered to have occurred. (default: 1e-3) :param maxIterations: Maximum number of iterations allowed. (default: 100) :param seed: Random seed for initial Gaussian distribution. Set as None to generate seed based on system time. (default: None) :param initialModel: Initial GMM starting point, bypassing the random initialization. (default: None)
[ "Train", "a", "Gaussian", "Mixture", "clustering", "model", "." ]
618d6bff71073c8c93501ab7392c3cc579730f0b
https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/mllib/clustering.py#L515-L553
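A minimal sketch for the GaussianMixture.train record above; the two well-separated clusters are illustrative, and weights/predict are standard GaussianMixtureModel accessors.

from numpy import array
from pyspark import SparkContext
from pyspark.mllib.clustering import GaussianMixture

sc = SparkContext("local[2]", "gmm-demo")
data = sc.parallelize([array([-5.0, -5.0]), array([-4.9, -5.1]),
                       array([5.0, 5.0]), array([5.1, 4.9])])

model = GaussianMixture.train(data, k=2, convergenceTol=1e-3, seed=10)
print(model.weights)
print(model.predict([5.0, 5.0]))
sc.stop()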
19,070
apache/spark
python/pyspark/mllib/clustering.py
StreamingKMeansModel.update
def update(self, data, decayFactor, timeUnit): """Update the centroids, according to data :param data: RDD with new data for the model update. :param decayFactor: Forgetfulness of the previous centroids. :param timeUnit: Can be "batches" or "points". If points, then the decay factor is raised to the power of number of new points and if batches, then decay factor will be used as is. """ if not isinstance(data, RDD): raise TypeError("Data should be of an RDD, got %s." % type(data)) data = data.map(_convert_to_vector) decayFactor = float(decayFactor) if timeUnit not in ["batches", "points"]: raise ValueError( "timeUnit should be 'batches' or 'points', got %s." % timeUnit) vectorCenters = [_convert_to_vector(center) for center in self.centers] updatedModel = callMLlibFunc( "updateStreamingKMeansModel", vectorCenters, self._clusterWeights, data, decayFactor, timeUnit) self.centers = array(updatedModel[0]) self._clusterWeights = list(updatedModel[1]) return self
python
def update(self, data, decayFactor, timeUnit): """Update the centroids, according to data :param data: RDD with new data for the model update. :param decayFactor: Forgetfulness of the previous centroids. :param timeUnit: Can be "batches" or "points". If points, then the decay factor is raised to the power of number of new points and if batches, then decay factor will be used as is. """ if not isinstance(data, RDD): raise TypeError("Data should be of an RDD, got %s." % type(data)) data = data.map(_convert_to_vector) decayFactor = float(decayFactor) if timeUnit not in ["batches", "points"]: raise ValueError( "timeUnit should be 'batches' or 'points', got %s." % timeUnit) vectorCenters = [_convert_to_vector(center) for center in self.centers] updatedModel = callMLlibFunc( "updateStreamingKMeansModel", vectorCenters, self._clusterWeights, data, decayFactor, timeUnit) self.centers = array(updatedModel[0]) self._clusterWeights = list(updatedModel[1]) return self
[ "def", "update", "(", "self", ",", "data", ",", "decayFactor", ",", "timeUnit", ")", ":", "if", "not", "isinstance", "(", "data", ",", "RDD", ")", ":", "raise", "TypeError", "(", "\"Data should be of an RDD, got %s.\"", "%", "type", "(", "data", ")", ")", "data", "=", "data", ".", "map", "(", "_convert_to_vector", ")", "decayFactor", "=", "float", "(", "decayFactor", ")", "if", "timeUnit", "not", "in", "[", "\"batches\"", ",", "\"points\"", "]", ":", "raise", "ValueError", "(", "\"timeUnit should be 'batches' or 'points', got %s.\"", "%", "timeUnit", ")", "vectorCenters", "=", "[", "_convert_to_vector", "(", "center", ")", "for", "center", "in", "self", ".", "centers", "]", "updatedModel", "=", "callMLlibFunc", "(", "\"updateStreamingKMeansModel\"", ",", "vectorCenters", ",", "self", ".", "_clusterWeights", ",", "data", ",", "decayFactor", ",", "timeUnit", ")", "self", ".", "centers", "=", "array", "(", "updatedModel", "[", "0", "]", ")", "self", ".", "_clusterWeights", "=", "list", "(", "updatedModel", "[", "1", "]", ")", "return", "self" ]
Update the centroids, according to data :param data: RDD with new data for the model update. :param decayFactor: Forgetfulness of the previous centroids. :param timeUnit: Can be "batches" or "points". If points, then the decay factor is raised to the power of number of new points and if batches, then decay factor will be used as is.
[ "Update", "the", "centroids", "according", "to", "data" ]
618d6bff71073c8c93501ab7392c3cc579730f0b
https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/mllib/clustering.py#L752-L777
19,071
apache/spark
python/pyspark/mllib/clustering.py
StreamingKMeans.setHalfLife
def setHalfLife(self, halfLife, timeUnit): """ Set number of batches after which the centroids of that particular batch has half the weightage. """ self._timeUnit = timeUnit self._decayFactor = exp(log(0.5) / halfLife) return self
python
def setHalfLife(self, halfLife, timeUnit): """ Set number of batches after which the centroids of that particular batch has half the weightage. """ self._timeUnit = timeUnit self._decayFactor = exp(log(0.5) / halfLife) return self
[ "def", "setHalfLife", "(", "self", ",", "halfLife", ",", "timeUnit", ")", ":", "self", ".", "_timeUnit", "=", "timeUnit", "self", ".", "_decayFactor", "=", "exp", "(", "log", "(", "0.5", ")", "/", "halfLife", ")", "return", "self" ]
Set number of batches after which the centroids of that particular batch has half the weightage.
[ "Set", "number", "of", "batches", "after", "which", "the", "centroids", "of", "that", "particular", "batch", "has", "half", "the", "weightage", "." ]
618d6bff71073c8c93501ab7392c3cc579730f0b
https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/mllib/clustering.py#L838-L845
19,072
apache/spark
python/pyspark/mllib/clustering.py
StreamingKMeans.setInitialCenters
def setInitialCenters(self, centers, weights): """ Set initial centers. Should be set before calling trainOn. """ self._model = StreamingKMeansModel(centers, weights) return self
python
def setInitialCenters(self, centers, weights): """ Set initial centers. Should be set before calling trainOn. """ self._model = StreamingKMeansModel(centers, weights) return self
[ "def", "setInitialCenters", "(", "self", ",", "centers", ",", "weights", ")", ":", "self", ".", "_model", "=", "StreamingKMeansModel", "(", "centers", ",", "weights", ")", "return", "self" ]
Set initial centers. Should be set before calling trainOn.
[ "Set", "initial", "centers", ".", "Should", "be", "set", "before", "calling", "trainOn", "." ]
618d6bff71073c8c93501ab7392c3cc579730f0b
https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/mllib/clustering.py#L848-L853
19,073
apache/spark
python/pyspark/mllib/clustering.py
StreamingKMeans.setRandomCenters
def setRandomCenters(self, dim, weight, seed): """ Set the initial centres to be random samples from a gaussian population with constant weights. """ rng = random.RandomState(seed) clusterCenters = rng.randn(self._k, dim) clusterWeights = tile(weight, self._k) self._model = StreamingKMeansModel(clusterCenters, clusterWeights) return self
python
def setRandomCenters(self, dim, weight, seed): """ Set the initial centres to be random samples from a gaussian population with constant weights. """ rng = random.RandomState(seed) clusterCenters = rng.randn(self._k, dim) clusterWeights = tile(weight, self._k) self._model = StreamingKMeansModel(clusterCenters, clusterWeights) return self
[ "def", "setRandomCenters", "(", "self", ",", "dim", ",", "weight", ",", "seed", ")", ":", "rng", "=", "random", ".", "RandomState", "(", "seed", ")", "clusterCenters", "=", "rng", ".", "randn", "(", "self", ".", "_k", ",", "dim", ")", "clusterWeights", "=", "tile", "(", "weight", ",", "self", ".", "_k", ")", "self", ".", "_model", "=", "StreamingKMeansModel", "(", "clusterCenters", ",", "clusterWeights", ")", "return", "self" ]
Set the initial centres to be random samples from a gaussian population with constant weights.
[ "Set", "the", "initial", "centres", "to", "be", "random", "samples", "from", "a", "gaussian", "population", "with", "constant", "weights", "." ]
618d6bff71073c8c93501ab7392c3cc579730f0b
https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/mllib/clustering.py#L856-L865
19,074
apache/spark
python/pyspark/mllib/clustering.py
StreamingKMeans.trainOn
def trainOn(self, dstream): """Train the model on the incoming dstream.""" self._validate(dstream) def update(rdd): self._model.update(rdd, self._decayFactor, self._timeUnit) dstream.foreachRDD(update)
python
def trainOn(self, dstream): """Train the model on the incoming dstream.""" self._validate(dstream) def update(rdd): self._model.update(rdd, self._decayFactor, self._timeUnit) dstream.foreachRDD(update)
[ "def", "trainOn", "(", "self", ",", "dstream", ")", ":", "self", ".", "_validate", "(", "dstream", ")", "def", "update", "(", "rdd", ")", ":", "self", ".", "_model", ".", "update", "(", "rdd", ",", "self", ".", "_decayFactor", ",", "self", ".", "_timeUnit", ")", "dstream", ".", "foreachRDD", "(", "update", ")" ]
Train the model on the incoming dstream.
[ "Train", "the", "model", "on", "the", "incoming", "dstream", "." ]
618d6bff71073c8c93501ab7392c3cc579730f0b
https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/mllib/clustering.py#L868-L875
19,075
apache/spark
python/pyspark/mllib/clustering.py
StreamingKMeans.predictOn
def predictOn(self, dstream): """ Make predictions on a dstream. Returns a transformed dstream object """ self._validate(dstream) return dstream.map(lambda x: self._model.predict(x))
python
def predictOn(self, dstream): """ Make predictions on a dstream. Returns a transformed dstream object """ self._validate(dstream) return dstream.map(lambda x: self._model.predict(x))
[ "def", "predictOn", "(", "self", ",", "dstream", ")", ":", "self", ".", "_validate", "(", "dstream", ")", "return", "dstream", ".", "map", "(", "lambda", "x", ":", "self", ".", "_model", ".", "predict", "(", "x", ")", ")" ]
Make predictions on a dstream. Returns a transformed dstream object
[ "Make", "predictions", "on", "a", "dstream", ".", "Returns", "a", "transformed", "dstream", "object" ]
618d6bff71073c8c93501ab7392c3cc579730f0b
https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/mllib/clustering.py#L878-L884
19,076
apache/spark
python/pyspark/mllib/clustering.py
StreamingKMeans.predictOnValues
def predictOnValues(self, dstream): """ Make predictions on a keyed dstream. Returns a transformed dstream object. """ self._validate(dstream) return dstream.mapValues(lambda x: self._model.predict(x))
python
def predictOnValues(self, dstream): """ Make predictions on a keyed dstream. Returns a transformed dstream object. """ self._validate(dstream) return dstream.mapValues(lambda x: self._model.predict(x))
[ "def", "predictOnValues", "(", "self", ",", "dstream", ")", ":", "self", ".", "_validate", "(", "dstream", ")", "return", "dstream", ".", "mapValues", "(", "lambda", "x", ":", "self", ".", "_model", ".", "predict", "(", "x", ")", ")" ]
Make predictions on a keyed dstream. Returns a transformed dstream object.
[ "Make", "predictions", "on", "a", "keyed", "dstream", ".", "Returns", "a", "transformed", "dstream", "object", "." ]
618d6bff71073c8c93501ab7392c3cc579730f0b
https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/mllib/clustering.py#L887-L893
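An end-to-end sketch tying together the StreamingKMeans records above (setRandomCenters, trainOn, predictOnValues; setInitialCenters and setHalfLife are the alternative ways to seed the centers and configure decay). The queue-based streams, batch interval, and data are illustrative assumptions.

from pyspark import SparkContext
from pyspark.streaming import StreamingContext
from pyspark.mllib.clustering import StreamingKMeans

sc = SparkContext("local[2]", "streaming-kmeans-demo")
ssc = StreamingContext(sc, 1)   # 1-second batches

# Two random centers in 2 dimensions, unit weight each.
model = StreamingKMeans(k=2, decayFactor=1.0).setRandomCenters(2, 1.0, seed=42)

training = ssc.queueStream([sc.parallelize([[0.0, 0.0], [9.0, 9.0]]),
                            sc.parallelize([[0.1, -0.1], [9.1, 8.9]])])
testing = ssc.queueStream([sc.parallelize([("a", [0.2, 0.2]),
                                           ("b", [8.8, 9.2])])])

model.trainOn(training)                    # drives StreamingKMeansModel.update per batch
model.predictOnValues(testing).pprint()    # (key, predicted cluster index)

ssc.start()
ssc.awaitTerminationOrTimeout(10)
ssc.stop(stopSparkContext=True)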
19,077
apache/spark
python/pyspark/mllib/clustering.py
LDAModel.describeTopics
def describeTopics(self, maxTermsPerTopic=None): """Return the topics described by weighted terms. WARNING: If vocabSize and k are large, this can return a large object! :param maxTermsPerTopic: Maximum number of terms to collect for each topic. (default: vocabulary size) :return: Array over topics. Each topic is represented as a pair of matching arrays: (term indices, term weights in topic). Each topic's terms are sorted in order of decreasing weight. """ if maxTermsPerTopic is None: topics = self.call("describeTopics") else: topics = self.call("describeTopics", maxTermsPerTopic) return topics
python
def describeTopics(self, maxTermsPerTopic=None): """Return the topics described by weighted terms. WARNING: If vocabSize and k are large, this can return a large object! :param maxTermsPerTopic: Maximum number of terms to collect for each topic. (default: vocabulary size) :return: Array over topics. Each topic is represented as a pair of matching arrays: (term indices, term weights in topic). Each topic's terms are sorted in order of decreasing weight. """ if maxTermsPerTopic is None: topics = self.call("describeTopics") else: topics = self.call("describeTopics", maxTermsPerTopic) return topics
[ "def", "describeTopics", "(", "self", ",", "maxTermsPerTopic", "=", "None", ")", ":", "if", "maxTermsPerTopic", "is", "None", ":", "topics", "=", "self", ".", "call", "(", "\"describeTopics\"", ")", "else", ":", "topics", "=", "self", ".", "call", "(", "\"describeTopics\"", ",", "maxTermsPerTopic", ")", "return", "topics" ]
Return the topics described by weighted terms. WARNING: If vocabSize and k are large, this can return a large object! :param maxTermsPerTopic: Maximum number of terms to collect for each topic. (default: vocabulary size) :return: Array over topics. Each topic is represented as a pair of matching arrays: (term indices, term weights in topic). Each topic's terms are sorted in order of decreasing weight.
[ "Return", "the", "topics", "described", "by", "weighted", "terms", "." ]
618d6bff71073c8c93501ab7392c3cc579730f0b
https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/mllib/clustering.py#L955-L972
19,078
apache/spark
python/pyspark/mllib/clustering.py
LDAModel.load
def load(cls, sc, path): """Load the LDAModel from disk. :param sc: SparkContext. :param path: Path to where the model is stored. """ if not isinstance(sc, SparkContext): raise TypeError("sc should be a SparkContext, got type %s" % type(sc)) if not isinstance(path, basestring): raise TypeError("path should be a basestring, got type %s" % type(path)) model = callMLlibFunc("loadLDAModel", sc, path) return LDAModel(model)
python
def load(cls, sc, path): """Load the LDAModel from disk. :param sc: SparkContext. :param path: Path to where the model is stored. """ if not isinstance(sc, SparkContext): raise TypeError("sc should be a SparkContext, got type %s" % type(sc)) if not isinstance(path, basestring): raise TypeError("path should be a basestring, got type %s" % type(path)) model = callMLlibFunc("loadLDAModel", sc, path) return LDAModel(model)
[ "def", "load", "(", "cls", ",", "sc", ",", "path", ")", ":", "if", "not", "isinstance", "(", "sc", ",", "SparkContext", ")", ":", "raise", "TypeError", "(", "\"sc should be a SparkContext, got type %s\"", "%", "type", "(", "sc", ")", ")", "if", "not", "isinstance", "(", "path", ",", "basestring", ")", ":", "raise", "TypeError", "(", "\"path should be a basestring, got type %s\"", "%", "type", "(", "path", ")", ")", "model", "=", "callMLlibFunc", "(", "\"loadLDAModel\"", ",", "sc", ",", "path", ")", "return", "LDAModel", "(", "model", ")" ]
Load the LDAModel from disk. :param sc: SparkContext. :param path: Path to where the model is stored.
[ "Load", "the", "LDAModel", "from", "disk", "." ]
618d6bff71073c8c93501ab7392c3cc579730f0b
https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/mllib/clustering.py#L976-L989
19,079
apache/spark
python/pyspark/mllib/clustering.py
LDA.train
def train(cls, rdd, k=10, maxIterations=20, docConcentration=-1.0, topicConcentration=-1.0, seed=None, checkpointInterval=10, optimizer="em"): """Train a LDA model. :param rdd: RDD of documents, which are tuples of document IDs and term (word) count vectors. The term count vectors are "bags of words" with a fixed-size vocabulary (where the vocabulary size is the length of the vector). Document IDs must be unique and >= 0. :param k: Number of topics to infer, i.e., the number of soft cluster centers. (default: 10) :param maxIterations: Maximum number of iterations allowed. (default: 20) :param docConcentration: Concentration parameter (commonly named "alpha") for the prior placed on documents' distributions over topics ("theta"). (default: -1.0) :param topicConcentration: Concentration parameter (commonly named "beta" or "eta") for the prior placed on topics' distributions over terms. (default: -1.0) :param seed: Random seed for cluster initialization. Set as None to generate seed based on system time. (default: None) :param checkpointInterval: Period (in iterations) between checkpoints. (default: 10) :param optimizer: LDAOptimizer used to perform the actual calculation. Currently "em", "online" are supported. (default: "em") """ model = callMLlibFunc("trainLDAModel", rdd, k, maxIterations, docConcentration, topicConcentration, seed, checkpointInterval, optimizer) return LDAModel(model)
python
def train(cls, rdd, k=10, maxIterations=20, docConcentration=-1.0, topicConcentration=-1.0, seed=None, checkpointInterval=10, optimizer="em"): """Train a LDA model. :param rdd: RDD of documents, which are tuples of document IDs and term (word) count vectors. The term count vectors are "bags of words" with a fixed-size vocabulary (where the vocabulary size is the length of the vector). Document IDs must be unique and >= 0. :param k: Number of topics to infer, i.e., the number of soft cluster centers. (default: 10) :param maxIterations: Maximum number of iterations allowed. (default: 20) :param docConcentration: Concentration parameter (commonly named "alpha") for the prior placed on documents' distributions over topics ("theta"). (default: -1.0) :param topicConcentration: Concentration parameter (commonly named "beta" or "eta") for the prior placed on topics' distributions over terms. (default: -1.0) :param seed: Random seed for cluster initialization. Set as None to generate seed based on system time. (default: None) :param checkpointInterval: Period (in iterations) between checkpoints. (default: 10) :param optimizer: LDAOptimizer used to perform the actual calculation. Currently "em", "online" are supported. (default: "em") """ model = callMLlibFunc("trainLDAModel", rdd, k, maxIterations, docConcentration, topicConcentration, seed, checkpointInterval, optimizer) return LDAModel(model)
[ "def", "train", "(", "cls", ",", "rdd", ",", "k", "=", "10", ",", "maxIterations", "=", "20", ",", "docConcentration", "=", "-", "1.0", ",", "topicConcentration", "=", "-", "1.0", ",", "seed", "=", "None", ",", "checkpointInterval", "=", "10", ",", "optimizer", "=", "\"em\"", ")", ":", "model", "=", "callMLlibFunc", "(", "\"trainLDAModel\"", ",", "rdd", ",", "k", ",", "maxIterations", ",", "docConcentration", ",", "topicConcentration", ",", "seed", ",", "checkpointInterval", ",", "optimizer", ")", "return", "LDAModel", "(", "model", ")" ]
Train a LDA model. :param rdd: RDD of documents, which are tuples of document IDs and term (word) count vectors. The term count vectors are "bags of words" with a fixed-size vocabulary (where the vocabulary size is the length of the vector). Document IDs must be unique and >= 0. :param k: Number of topics to infer, i.e., the number of soft cluster centers. (default: 10) :param maxIterations: Maximum number of iterations allowed. (default: 20) :param docConcentration: Concentration parameter (commonly named "alpha") for the prior placed on documents' distributions over topics ("theta"). (default: -1.0) :param topicConcentration: Concentration parameter (commonly named "beta" or "eta") for the prior placed on topics' distributions over terms. (default: -1.0) :param seed: Random seed for cluster initialization. Set as None to generate seed based on system time. (default: None) :param checkpointInterval: Period (in iterations) between checkpoints. (default: 10) :param optimizer: LDAOptimizer used to perform the actual calculation. Currently "em", "online" are supported. (default: "em")
[ "Train", "a", "LDA", "model", "." ]
618d6bff71073c8c93501ab7392c3cc579730f0b
https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/mllib/clustering.py#L999-L1039
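A small sketch covering the three LDA records above: LDA.train on (docId, term-count vector) pairs, LDAModel.describeTopics, and LDAModel.load after a save. The corpus and the temporary save path are illustrative, and save() is assumed to come from the model's Saveable mixin rather than from the records shown here.

import tempfile

from pyspark import SparkContext
from pyspark.mllib.clustering import LDA, LDAModel
from pyspark.mllib.linalg import Vectors

sc = SparkContext("local[2]", "lda-demo")
corpus = sc.parallelize([
    [1, Vectors.dense([0.0, 1.0, 2.0])],
    [2, Vectors.dense([3.0, 0.0, 1.0])],
])

model = LDA.train(corpus, k=2, maxIterations=20, seed=1)
for term_indices, term_weights in model.describeTopics(maxTermsPerTopic=2):
    print(term_indices, term_weights)

path = tempfile.mkdtemp()
model.save(sc, path)                       # assumed Saveable API, as in the MLlib doctests
same_model = LDAModel.load(sc, path)
sc.stop()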
19,080
apache/spark
python/pyspark/mllib/common.py
_py2java
def _py2java(sc, obj): """ Convert Python object into Java """ if isinstance(obj, RDD): obj = _to_java_object_rdd(obj) elif isinstance(obj, DataFrame): obj = obj._jdf elif isinstance(obj, SparkContext): obj = obj._jsc elif isinstance(obj, list): obj = [_py2java(sc, x) for x in obj] elif isinstance(obj, JavaObject): pass elif isinstance(obj, (int, long, float, bool, bytes, unicode)): pass else: data = bytearray(PickleSerializer().dumps(obj)) obj = sc._jvm.org.apache.spark.mllib.api.python.SerDe.loads(data) return obj
python
def _py2java(sc, obj): """ Convert Python object into Java """ if isinstance(obj, RDD): obj = _to_java_object_rdd(obj) elif isinstance(obj, DataFrame): obj = obj._jdf elif isinstance(obj, SparkContext): obj = obj._jsc elif isinstance(obj, list): obj = [_py2java(sc, x) for x in obj] elif isinstance(obj, JavaObject): pass elif isinstance(obj, (int, long, float, bool, bytes, unicode)): pass else: data = bytearray(PickleSerializer().dumps(obj)) obj = sc._jvm.org.apache.spark.mllib.api.python.SerDe.loads(data) return obj
[ "def", "_py2java", "(", "sc", ",", "obj", ")", ":", "if", "isinstance", "(", "obj", ",", "RDD", ")", ":", "obj", "=", "_to_java_object_rdd", "(", "obj", ")", "elif", "isinstance", "(", "obj", ",", "DataFrame", ")", ":", "obj", "=", "obj", ".", "_jdf", "elif", "isinstance", "(", "obj", ",", "SparkContext", ")", ":", "obj", "=", "obj", ".", "_jsc", "elif", "isinstance", "(", "obj", ",", "list", ")", ":", "obj", "=", "[", "_py2java", "(", "sc", ",", "x", ")", "for", "x", "in", "obj", "]", "elif", "isinstance", "(", "obj", ",", "JavaObject", ")", ":", "pass", "elif", "isinstance", "(", "obj", ",", "(", "int", ",", "long", ",", "float", ",", "bool", ",", "bytes", ",", "unicode", ")", ")", ":", "pass", "else", ":", "data", "=", "bytearray", "(", "PickleSerializer", "(", ")", ".", "dumps", "(", "obj", ")", ")", "obj", "=", "sc", ".", "_jvm", ".", "org", ".", "apache", ".", "spark", ".", "mllib", ".", "api", ".", "python", ".", "SerDe", ".", "loads", "(", "data", ")", "return", "obj" ]
Convert Python object into Java
[ "Convert", "Python", "object", "into", "Java" ]
618d6bff71073c8c93501ab7392c3cc579730f0b
https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/mllib/common.py#L72-L89
19,081
apache/spark
python/pyspark/mllib/common.py
callJavaFunc
def callJavaFunc(sc, func, *args):
    """ Call Java Function """
    args = [_py2java(sc, a) for a in args]
    return _java2py(sc, func(*args))
python
def callJavaFunc(sc, func, *args):
    """ Call Java Function """
    args = [_py2java(sc, a) for a in args]
    return _java2py(sc, func(*args))
[ "def", "callJavaFunc", "(", "sc", ",", "func", ",", "*", "args", ")", ":", "args", "=", "[", "_py2java", "(", "sc", ",", "a", ")", "for", "a", "in", "args", "]", "return", "_java2py", "(", "sc", ",", "func", "(", "*", "args", ")", ")" ]
Call Java Function
[ "Call", "Java", "Function" ]
618d6bff71073c8c93501ab7392c3cc579730f0b
https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/mllib/common.py#L120-L123
19,082
apache/spark
python/pyspark/mllib/common.py
callMLlibFunc
def callMLlibFunc(name, *args):
    """ Call API in PythonMLLibAPI """
    sc = SparkContext.getOrCreate()
    api = getattr(sc._jvm.PythonMLLibAPI(), name)
    return callJavaFunc(sc, api, *args)
python
def callMLlibFunc(name, *args):
    """ Call API in PythonMLLibAPI """
    sc = SparkContext.getOrCreate()
    api = getattr(sc._jvm.PythonMLLibAPI(), name)
    return callJavaFunc(sc, api, *args)
[ "def", "callMLlibFunc", "(", "name", ",", "*", "args", ")", ":", "sc", "=", "SparkContext", ".", "getOrCreate", "(", ")", "api", "=", "getattr", "(", "sc", ".", "_jvm", ".", "PythonMLLibAPI", "(", ")", ",", "name", ")", "return", "callJavaFunc", "(", "sc", ",", "api", ",", "*", "args", ")" ]
Call API in PythonMLLibAPI
[ "Call", "API", "in", "PythonMLLibAPI" ]
618d6bff71073c8c93501ab7392c3cc579730f0b
https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/mllib/common.py#L126-L130
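A hedged sketch of how callMLlibFunc above is exercised in practice: public wrappers such as Statistics.colStats dispatch by name into the JVM-side PythonMLLibAPI (per the pyspark.mllib.stat source at this sha). It assumes a local Spark installation; the data and app name are invented.
from pyspark import SparkContext
from pyspark.mllib.stat import Statistics

sc = SparkContext("local[2]", "callmllibfunc-sketch")

# colStats internally calls callMLlibFunc("colStats", ...), which resolves the
# named method on sc._jvm.PythonMLLibAPI() and converts arguments and results.
rdd = sc.parallelize([[1.0, 2.0], [3.0, 6.0]])
summary = Statistics.colStats(rdd)
print(summary.mean())     # column means: 2.0 and 4.0
print(summary.count())    # 2

sc.stop()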
19,083
apache/spark
python/pyspark/mllib/common.py
inherit_doc
def inherit_doc(cls):
    """
    A decorator that makes a class inherit documentation from its parents.
    """
    for name, func in vars(cls).items():
        # only inherit docstring for public functions
        if name.startswith("_"):
            continue
        if not func.__doc__:
            for parent in cls.__bases__:
                parent_func = getattr(parent, name, None)
                if parent_func and getattr(parent_func, "__doc__", None):
                    func.__doc__ = parent_func.__doc__
                    break
    return cls
python
def inherit_doc(cls):
    """
    A decorator that makes a class inherit documentation from its parents.
    """
    for name, func in vars(cls).items():
        # only inherit docstring for public functions
        if name.startswith("_"):
            continue
        if not func.__doc__:
            for parent in cls.__bases__:
                parent_func = getattr(parent, name, None)
                if parent_func and getattr(parent_func, "__doc__", None):
                    func.__doc__ = parent_func.__doc__
                    break
    return cls
[ "def", "inherit_doc", "(", "cls", ")", ":", "for", "name", ",", "func", "in", "vars", "(", "cls", ")", ".", "items", "(", ")", ":", "# only inherit docstring for public functions", "if", "name", ".", "startswith", "(", "\"_\"", ")", ":", "continue", "if", "not", "func", ".", "__doc__", ":", "for", "parent", "in", "cls", ".", "__bases__", ":", "parent_func", "=", "getattr", "(", "parent", ",", "name", ",", "None", ")", "if", "parent_func", "and", "getattr", "(", "parent_func", ",", "\"__doc__\"", ",", "None", ")", ":", "func", ".", "__doc__", "=", "parent_func", ".", "__doc__", "break", "return", "cls" ]
A decorator that makes a class inherit documentation from its parents.
[ "A", "decorator", "that", "makes", "a", "class", "inherit", "documentation", "from", "its", "parents", "." ]
618d6bff71073c8c93501ab7392c3cc579730f0b
https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/mllib/common.py#L149-L163
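A small, self-contained sketch of the inherit_doc decorator above. The Base/Child classes are illustrative, not from Spark; the only assumption is that pyspark is installed and its (internal) common module still exposes the helper.
from pyspark.mllib.common import inherit_doc  # internal helper

class Base(object):
    def transform(self, x):
        """Apply the model to a single input and return the result."""
        raise NotImplementedError

@inherit_doc
class Child(Base):
    def transform(self, x):      # no docstring of its own
        return x * 2

print(Child.transform.__doc__)   # docstring copied from Base.transform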
19,084
apache/spark
python/pyspark/mllib/common.py
JavaModelWrapper.call
def call(self, name, *a):
    """Call method of java_model"""
    return callJavaFunc(self._sc, getattr(self._java_model, name), *a)
python
def call(self, name, *a):
    """Call method of java_model"""
    return callJavaFunc(self._sc, getattr(self._java_model, name), *a)
[ "def", "call", "(", "self", ",", "name", ",", "*", "a", ")", ":", "return", "callJavaFunc", "(", "self", ".", "_sc", ",", "getattr", "(", "self", ".", "_java_model", ",", "name", ")", ",", "*", "a", ")" ]
Call method of java_model
[ "Call", "method", "of", "java_model" ]
618d6bff71073c8c93501ab7392c3cc579730f0b
https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/mllib/common.py#L144-L146
19,085
apache/spark
python/pyspark/streaming/dstream.py
DStream.count
def count(self):
    """
    Return a new DStream in which each RDD has a single element
    generated by counting each RDD of this DStream.
    """
    return self.mapPartitions(lambda i: [sum(1 for _ in i)]).reduce(operator.add)
python
def count(self):
    """
    Return a new DStream in which each RDD has a single element
    generated by counting each RDD of this DStream.
    """
    return self.mapPartitions(lambda i: [sum(1 for _ in i)]).reduce(operator.add)
[ "def", "count", "(", "self", ")", ":", "return", "self", ".", "mapPartitions", "(", "lambda", "i", ":", "[", "sum", "(", "1", "for", "_", "in", "i", ")", "]", ")", ".", "reduce", "(", "operator", ".", "add", ")" ]
Return a new DStream in which each RDD has a single element generated by counting each RDD of this DStream.
[ "Return", "a", "new", "DStream", "in", "which", "each", "RDD", "has", "a", "single", "element", "generated", "by", "counting", "each", "RDD", "of", "this", "DStream", "." ]
618d6bff71073c8c93501ab7392c3cc579730f0b
https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/streaming/dstream.py#L73-L78
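A runnable local-mode sketch of DStream.count, fed from queueStream so no external source is needed; the batch interval, queue contents, and app name are arbitrary choices.
from pyspark import SparkContext
from pyspark.streaming import StreamingContext

sc = SparkContext("local[2]", "dstream-count-sketch")
ssc = StreamingContext(sc, 1)                       # 1-second micro-batches

stream = ssc.queueStream([[1, 2, 3, 4, 5], [6, 7, 8]])
stream.count().pprint()                             # one count per batch: 5, then 3

ssc.start()
ssc.awaitTerminationOrTimeout(5)                    # let a few batches run
ssc.stop()                                          # also stops the SparkContext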
19,086
apache/spark
python/pyspark/streaming/dstream.py
DStream.filter
def filter(self, f):
    """
    Return a new DStream containing only the elements that satisfy predicate.
    """
    def func(iterator):
        return filter(f, iterator)
    return self.mapPartitions(func, True)
python
def filter(self, f):
    """
    Return a new DStream containing only the elements that satisfy predicate.
    """
    def func(iterator):
        return filter(f, iterator)
    return self.mapPartitions(func, True)
[ "def", "filter", "(", "self", ",", "f", ")", ":", "def", "func", "(", "iterator", ")", ":", "return", "filter", "(", "f", ",", "iterator", ")", "return", "self", ".", "mapPartitions", "(", "func", ",", "True", ")" ]
Return a new DStream containing only the elements that satisfy predicate.
[ "Return", "a", "new", "DStream", "containing", "only", "the", "elements", "that", "satisfy", "predicate", "." ]
618d6bff71073c8c93501ab7392c3cc579730f0b
https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/streaming/dstream.py#L80-L86
19,087
apache/spark
python/pyspark/streaming/dstream.py
DStream.map
def map(self, f, preservesPartitioning=False):
    """
    Return a new DStream by applying a function to each element of DStream.
    """
    def func(iterator):
        return map(f, iterator)
    return self.mapPartitions(func, preservesPartitioning)
python
def map(self, f, preservesPartitioning=False):
    """
    Return a new DStream by applying a function to each element of DStream.
    """
    def func(iterator):
        return map(f, iterator)
    return self.mapPartitions(func, preservesPartitioning)
[ "def", "map", "(", "self", ",", "f", ",", "preservesPartitioning", "=", "False", ")", ":", "def", "func", "(", "iterator", ")", ":", "return", "map", "(", "f", ",", "iterator", ")", "return", "self", ".", "mapPartitions", "(", "func", ",", "preservesPartitioning", ")" ]
Return a new DStream by applying a function to each element of DStream.
[ "Return", "a", "new", "DStream", "by", "applying", "a", "function", "to", "each", "element", "of", "DStream", "." ]
618d6bff71073c8c93501ab7392c3cc579730f0b
https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/streaming/dstream.py#L97-L103
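A local-mode sketch covering this map entry and the filter entry just above it: keep even numbers, then square them. queueStream and the app name are arbitrary scaffolding, not part of either record.
from pyspark import SparkContext
from pyspark.streaming import StreamingContext

sc = SparkContext("local[2]", "map-filter-sketch")
ssc = StreamingContext(sc, 1)

nums = ssc.queueStream([[1, 2, 3, 4, 5, 6]])
evens = nums.filter(lambda x: x % 2 == 0)   # per-element predicate
squares = evens.map(lambda x: x * x)        # per-element transform
squares.pprint()                            # 4, 16, 36

ssc.start()
ssc.awaitTerminationOrTimeout(5)
ssc.stop()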
19,088
apache/spark
python/pyspark/streaming/dstream.py
DStream.reduce
def reduce(self, func):
    """
    Return a new DStream in which each RDD has a single element
    generated by reducing each RDD of this DStream.
    """
    return self.map(lambda x: (None, x)).reduceByKey(func, 1).map(lambda x: x[1])
python
def reduce(self, func):
    """
    Return a new DStream in which each RDD has a single element
    generated by reducing each RDD of this DStream.
    """
    return self.map(lambda x: (None, x)).reduceByKey(func, 1).map(lambda x: x[1])
[ "def", "reduce", "(", "self", ",", "func", ")", ":", "return", "self", ".", "map", "(", "lambda", "x", ":", "(", "None", ",", "x", ")", ")", ".", "reduceByKey", "(", "func", ",", "1", ")", ".", "map", "(", "lambda", "x", ":", "x", "[", "1", "]", ")" ]
Return a new DStream in which each RDD has a single element generated by reducing each RDD of this DStream.
[ "Return", "a", "new", "DStream", "in", "which", "each", "RDD", "has", "a", "single", "element", "generated", "by", "reducing", "each", "RDD", "of", "this", "DStream", "." ]
618d6bff71073c8c93501ab7392c3cc579730f0b
https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/streaming/dstream.py#L121-L126
19,089
apache/spark
python/pyspark/streaming/dstream.py
DStream.reduceByKey
def reduceByKey(self, func, numPartitions=None):
    """
    Return a new DStream by applying reduceByKey to each RDD.
    """
    if numPartitions is None:
        numPartitions = self._sc.defaultParallelism
    return self.combineByKey(lambda x: x, func, func, numPartitions)
python
def reduceByKey(self, func, numPartitions=None):
    """
    Return a new DStream by applying reduceByKey to each RDD.
    """
    if numPartitions is None:
        numPartitions = self._sc.defaultParallelism
    return self.combineByKey(lambda x: x, func, func, numPartitions)
[ "def", "reduceByKey", "(", "self", ",", "func", ",", "numPartitions", "=", "None", ")", ":", "if", "numPartitions", "is", "None", ":", "numPartitions", "=", "self", ".", "_sc", ".", "defaultParallelism", "return", "self", ".", "combineByKey", "(", "lambda", "x", ":", "x", ",", "func", ",", "func", ",", "numPartitions", ")" ]
Return a new DStream by applying reduceByKey to each RDD.
[ "Return", "a", "new", "DStream", "by", "applying", "reduceByKey", "to", "each", "RDD", "." ]
618d6bff71073c8c93501ab7392c3cc579730f0b
https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/streaming/dstream.py#L128-L134
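A classic word-count sketch exercising reduceByKey above (and, indirectly, the reduce and combineByKey entries it builds on); runnable in local mode with queueStream as a stand-in source.
from pyspark import SparkContext
from pyspark.streaming import StreamingContext

sc = SparkContext("local[2]", "reducebykey-sketch")
ssc = StreamingContext(sc, 1)

lines = ssc.queueStream([["to be or not to be"]])
counts = (lines.flatMap(lambda line: line.split(" "))
               .map(lambda word: (word, 1))
               .reduceByKey(lambda a, b: a + b))     # per-batch word counts
counts.pprint()                                      # e.g. ('to', 2), ('be', 2), ...

ssc.start()
ssc.awaitTerminationOrTimeout(5)
ssc.stop()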
19,090
apache/spark
python/pyspark/streaming/dstream.py
DStream.combineByKey
def combineByKey(self, createCombiner, mergeValue, mergeCombiners,
                 numPartitions=None):
    """
    Return a new DStream by applying combineByKey to each RDD.
    """
    if numPartitions is None:
        numPartitions = self._sc.defaultParallelism

    def func(rdd):
        return rdd.combineByKey(createCombiner, mergeValue, mergeCombiners, numPartitions)
    return self.transform(func)
python
def combineByKey(self, createCombiner, mergeValue, mergeCombiners,
                 numPartitions=None):
    """
    Return a new DStream by applying combineByKey to each RDD.
    """
    if numPartitions is None:
        numPartitions = self._sc.defaultParallelism

    def func(rdd):
        return rdd.combineByKey(createCombiner, mergeValue, mergeCombiners, numPartitions)
    return self.transform(func)
[ "def", "combineByKey", "(", "self", ",", "createCombiner", ",", "mergeValue", ",", "mergeCombiners", ",", "numPartitions", "=", "None", ")", ":", "if", "numPartitions", "is", "None", ":", "numPartitions", "=", "self", ".", "_sc", ".", "defaultParallelism", "def", "func", "(", "rdd", ")", ":", "return", "rdd", ".", "combineByKey", "(", "createCombiner", ",", "mergeValue", ",", "mergeCombiners", ",", "numPartitions", ")", "return", "self", ".", "transform", "(", "func", ")" ]
Return a new DStream by applying combineByKey to each RDD.
[ "Return", "a", "new", "DStream", "by", "applying", "combineByKey", "to", "each", "RDD", "." ]
618d6bff71073c8c93501ab7392c3cc579730f0b
https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/streaming/dstream.py#L136-L146
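A sketch of combineByKey above computing a per-key mean within each batch; the (sum, count) accumulator shape is the usual pattern, and the input pairs are made up.
from pyspark import SparkContext
from pyspark.streaming import StreamingContext

sc = SparkContext("local[2]", "combinebykey-sketch")
ssc = StreamingContext(sc, 1)

pairs = ssc.queueStream([[("a", 1.0), ("a", 3.0), ("b", 4.0)]])
sums = pairs.combineByKey(
    lambda v: (v, 1),                                # createCombiner
    lambda acc, v: (acc[0] + v, acc[1] + 1),         # mergeValue
    lambda x, y: (x[0] + y[0], x[1] + y[1]))         # mergeCombiners
sums.map(lambda kv: (kv[0], kv[1][0] / kv[1][1])).pprint()   # ('a', 2.0), ('b', 4.0)

ssc.start()
ssc.awaitTerminationOrTimeout(5)
ssc.stop()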
19,091
apache/spark
python/pyspark/streaming/dstream.py
DStream.partitionBy
def partitionBy(self, numPartitions, partitionFunc=portable_hash):
    """
    Return a copy of the DStream in which each RDD are partitioned
    using the specified partitioner.
    """
    return self.transform(lambda rdd: rdd.partitionBy(numPartitions, partitionFunc))
python
def partitionBy(self, numPartitions, partitionFunc=portable_hash):
    """
    Return a copy of the DStream in which each RDD are partitioned
    using the specified partitioner.
    """
    return self.transform(lambda rdd: rdd.partitionBy(numPartitions, partitionFunc))
[ "def", "partitionBy", "(", "self", ",", "numPartitions", ",", "partitionFunc", "=", "portable_hash", ")", ":", "return", "self", ".", "transform", "(", "lambda", "rdd", ":", "rdd", ".", "partitionBy", "(", "numPartitions", ",", "partitionFunc", ")", ")" ]
Return a copy of the DStream in which each RDD are partitioned using the specified partitioner.
[ "Return", "a", "copy", "of", "the", "DStream", "in", "which", "each", "RDD", "are", "partitioned", "using", "the", "specified", "partitioner", "." ]
618d6bff71073c8c93501ab7392c3cc579730f0b
https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/streaming/dstream.py#L148-L153
19,092
apache/spark
python/pyspark/streaming/dstream.py
DStream.foreachRDD
def foreachRDD(self, func):
    """
    Apply a function to each RDD in this DStream.
    """
    if func.__code__.co_argcount == 1:
        old_func = func
        func = lambda t, rdd: old_func(rdd)
    jfunc = TransformFunction(self._sc, func, self._jrdd_deserializer)
    api = self._ssc._jvm.PythonDStream
    api.callForeachRDD(self._jdstream, jfunc)
python
def foreachRDD(self, func):
    """
    Apply a function to each RDD in this DStream.
    """
    if func.__code__.co_argcount == 1:
        old_func = func
        func = lambda t, rdd: old_func(rdd)
    jfunc = TransformFunction(self._sc, func, self._jrdd_deserializer)
    api = self._ssc._jvm.PythonDStream
    api.callForeachRDD(self._jdstream, jfunc)
[ "def", "foreachRDD", "(", "self", ",", "func", ")", ":", "if", "func", ".", "__code__", ".", "co_argcount", "==", "1", ":", "old_func", "=", "func", "func", "=", "lambda", "t", ",", "rdd", ":", "old_func", "(", "rdd", ")", "jfunc", "=", "TransformFunction", "(", "self", ".", "_sc", ",", "func", ",", "self", ".", "_jrdd_deserializer", ")", "api", "=", "self", ".", "_ssc", ".", "_jvm", ".", "PythonDStream", "api", ".", "callForeachRDD", "(", "self", ".", "_jdstream", ",", "jfunc", ")" ]
Apply a function to each RDD in this DStream.
[ "Apply", "a", "function", "to", "each", "RDD", "in", "this", "DStream", "." ]
618d6bff71073c8c93501ab7392c3cc579730f0b
https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/streaming/dstream.py#L155-L164
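A sketch of foreachRDD above, using the two-argument (time, rdd) form to collect each batch into a driver-side list; appending to a local list like this is only reliable in local mode, since the function body runs on the driver.
from pyspark import SparkContext
from pyspark.streaming import StreamingContext

sc = SparkContext("local[2]", "foreachrdd-sketch")
ssc = StreamingContext(sc, 1)

results = []                                   # driver-side buffer

def collect_batch(time, rdd):                  # two-arg form; a one-arg (rdd) function also works
    results.append((str(time), sorted(rdd.collect())))

ssc.queueStream([[3, 1, 2]]).foreachRDD(collect_batch)

ssc.start()
ssc.awaitTerminationOrTimeout(5)
ssc.stop()
print(results)                                 # [('<batch time>', [1, 2, 3])]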
19,093
apache/spark
python/pyspark/streaming/dstream.py
DStream.pprint
def pprint(self, num=10):
    """
    Print the first num elements of each RDD generated in this DStream.

    @param num: the number of elements from the first will be printed.
    """
    def takeAndPrint(time, rdd):
        taken = rdd.take(num + 1)
        print("-------------------------------------------")
        print("Time: %s" % time)
        print("-------------------------------------------")
        for record in taken[:num]:
            print(record)
        if len(taken) > num:
            print("...")
        print("")

    self.foreachRDD(takeAndPrint)
python
def pprint(self, num=10):
    """
    Print the first num elements of each RDD generated in this DStream.

    @param num: the number of elements from the first will be printed.
    """
    def takeAndPrint(time, rdd):
        taken = rdd.take(num + 1)
        print("-------------------------------------------")
        print("Time: %s" % time)
        print("-------------------------------------------")
        for record in taken[:num]:
            print(record)
        if len(taken) > num:
            print("...")
        print("")

    self.foreachRDD(takeAndPrint)
[ "def", "pprint", "(", "self", ",", "num", "=", "10", ")", ":", "def", "takeAndPrint", "(", "time", ",", "rdd", ")", ":", "taken", "=", "rdd", ".", "take", "(", "num", "+", "1", ")", "print", "(", "\"-------------------------------------------\"", ")", "print", "(", "\"Time: %s\"", "%", "time", ")", "print", "(", "\"-------------------------------------------\"", ")", "for", "record", "in", "taken", "[", ":", "num", "]", ":", "print", "(", "record", ")", "if", "len", "(", "taken", ")", ">", "num", ":", "print", "(", "\"...\"", ")", "print", "(", "\"\"", ")", "self", ".", "foreachRDD", "(", "takeAndPrint", ")" ]
Print the first num elements of each RDD generated in this DStream. @param num: the number of elements from the first will be printed.
[ "Print", "the", "first", "num", "elements", "of", "each", "RDD", "generated", "in", "this", "DStream", "." ]
618d6bff71073c8c93501ab7392c3cc579730f0b
https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/streaming/dstream.py#L166-L183
19,094
apache/spark
python/pyspark/streaming/dstream.py
DStream.persist
def persist(self, storageLevel):
    """
    Persist the RDDs of this DStream with the given storage level
    """
    self.is_cached = True
    javaStorageLevel = self._sc._getJavaStorageLevel(storageLevel)
    self._jdstream.persist(javaStorageLevel)
    return self
python
def persist(self, storageLevel):
    """
    Persist the RDDs of this DStream with the given storage level
    """
    self.is_cached = True
    javaStorageLevel = self._sc._getJavaStorageLevel(storageLevel)
    self._jdstream.persist(javaStorageLevel)
    return self
[ "def", "persist", "(", "self", ",", "storageLevel", ")", ":", "self", ".", "is_cached", "=", "True", "javaStorageLevel", "=", "self", ".", "_sc", ".", "_getJavaStorageLevel", "(", "storageLevel", ")", "self", ".", "_jdstream", ".", "persist", "(", "javaStorageLevel", ")", "return", "self" ]
Persist the RDDs of this DStream with the given storage level
[ "Persist", "the", "RDDs", "of", "this", "DStream", "with", "the", "given", "storage", "level" ]
618d6bff71073c8c93501ab7392c3cc579730f0b
https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/streaming/dstream.py#L219-L226
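A sketch motivating persist above: the same derived DStream feeds two downstream outputs, so caching its RDDs avoids recomputing the map for each consumer. The storage level and data are arbitrary choices.
from pyspark import SparkContext, StorageLevel
from pyspark.streaming import StreamingContext

sc = SparkContext("local[2]", "persist-sketch")
ssc = StreamingContext(sc, 1)

squares = ssc.queueStream([[1, 2, 3, 4]]).map(lambda x: x * x)
squares.persist(StorageLevel.MEMORY_ONLY)      # cache each batch's RDD

squares.count().pprint()                       # first consumer
squares.reduce(lambda a, b: a + b).pprint()    # second consumer reuses the cached RDD

ssc.start()
ssc.awaitTerminationOrTimeout(5)
ssc.stop()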
19,095
apache/spark
python/pyspark/streaming/dstream.py
DStream.checkpoint
def checkpoint(self, interval):
    """
    Enable periodic checkpointing of RDDs of this DStream

    @param interval: time in seconds, after each period of that, generated
                     RDD will be checkpointed
    """
    self.is_checkpointed = True
    self._jdstream.checkpoint(self._ssc._jduration(interval))
    return self
python
def checkpoint(self, interval):
    """
    Enable periodic checkpointing of RDDs of this DStream

    @param interval: time in seconds, after each period of that, generated
                     RDD will be checkpointed
    """
    self.is_checkpointed = True
    self._jdstream.checkpoint(self._ssc._jduration(interval))
    return self
[ "def", "checkpoint", "(", "self", ",", "interval", ")", ":", "self", ".", "is_checkpointed", "=", "True", "self", ".", "_jdstream", ".", "checkpoint", "(", "self", ".", "_ssc", ".", "_jduration", "(", "interval", ")", ")", "return", "self" ]
Enable periodic checkpointing of RDDs of this DStream @param interval: time in seconds, after each period of that, generated RDD will be checkpointed
[ "Enable", "periodic", "checkpointing", "of", "RDDs", "of", "this", "DStream" ]
618d6bff71073c8c93501ab7392c3cc579730f0b
https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/streaming/dstream.py#L228-L237
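A sketch of DStream.checkpoint above. It assumes a checkpoint directory must first be set on the StreamingContext, and the 2-second interval (a multiple of the 1-second batch) and temp directory are arbitrary choices.
import tempfile

from pyspark import SparkContext
from pyspark.streaming import StreamingContext

sc = SparkContext("local[2]", "checkpoint-sketch")
ssc = StreamingContext(sc, 1)
ssc.checkpoint(tempfile.mkdtemp())             # directory for checkpoint data

stream = ssc.queueStream([[1, 2], [3, 4]]).map(lambda x: x + 1)
stream.checkpoint(2)                           # checkpoint generated RDDs every 2 seconds
stream.pprint()

ssc.start()
ssc.awaitTerminationOrTimeout(5)
ssc.stop()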
19,096
apache/spark
python/pyspark/streaming/dstream.py
DStream.groupByKey
def groupByKey(self, numPartitions=None):
    """
    Return a new DStream by applying groupByKey on each RDD.
    """
    if numPartitions is None:
        numPartitions = self._sc.defaultParallelism
    return self.transform(lambda rdd: rdd.groupByKey(numPartitions))
python
def groupByKey(self, numPartitions=None):
    """
    Return a new DStream by applying groupByKey on each RDD.
    """
    if numPartitions is None:
        numPartitions = self._sc.defaultParallelism
    return self.transform(lambda rdd: rdd.groupByKey(numPartitions))
[ "def", "groupByKey", "(", "self", ",", "numPartitions", "=", "None", ")", ":", "if", "numPartitions", "is", "None", ":", "numPartitions", "=", "self", ".", "_sc", ".", "defaultParallelism", "return", "self", ".", "transform", "(", "lambda", "rdd", ":", "rdd", ".", "groupByKey", "(", "numPartitions", ")", ")" ]
Return a new DStream by applying groupByKey on each RDD.
[ "Return", "a", "new", "DStream", "by", "applying", "groupByKey", "on", "each", "RDD", "." ]
618d6bff71073c8c93501ab7392c3cc579730f0b
https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/streaming/dstream.py#L239-L245
19,097
apache/spark
python/pyspark/streaming/dstream.py
DStream.countByValue
def countByValue(self):
    """
    Return a new DStream in which each RDD contains the counts of each
    distinct value in each RDD of this DStream.
    """
    return self.map(lambda x: (x, 1)).reduceByKey(lambda x, y: x+y)
python
def countByValue(self):
    """
    Return a new DStream in which each RDD contains the counts of each
    distinct value in each RDD of this DStream.
    """
    return self.map(lambda x: (x, 1)).reduceByKey(lambda x, y: x+y)
[ "def", "countByValue", "(", "self", ")", ":", "return", "self", ".", "map", "(", "lambda", "x", ":", "(", "x", ",", "1", ")", ")", ".", "reduceByKey", "(", "lambda", "x", ",", "y", ":", "x", "+", "y", ")" ]
Return a new DStream in which each RDD contains the counts of each distinct value in each RDD of this DStream.
[ "Return", "a", "new", "DStream", "in", "which", "each", "RDD", "contains", "the", "counts", "of", "each", "distinct", "value", "in", "each", "RDD", "of", "this", "DStream", "." ]
618d6bff71073c8c93501ab7392c3cc579730f0b
https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/streaming/dstream.py#L247-L252
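A sketch of countByValue above, counting distinct strings within each batch; the input words are invented and output ordering may vary.
from pyspark import SparkContext
from pyspark.streaming import StreamingContext

sc = SparkContext("local[2]", "countbyvalue-sketch")
ssc = StreamingContext(sc, 1)

words = ssc.queueStream([["spark", "spark", "flume"], ["spark"]])
words.countByValue().pprint()    # batch 1: ('spark', 2), ('flume', 1); batch 2: ('spark', 1)

ssc.start()
ssc.awaitTerminationOrTimeout(5)
ssc.stop()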
19,098
apache/spark
python/pyspark/streaming/dstream.py
DStream.saveAsTextFiles
def saveAsTextFiles(self, prefix, suffix=None):
    """
    Save each RDD in this DStream as at text file, using string
    representation of elements.
    """
    def saveAsTextFile(t, rdd):
        path = rddToFileName(prefix, suffix, t)
        try:
            rdd.saveAsTextFile(path)
        except Py4JJavaError as e:
            # after recovered from checkpointing, the foreachRDD may
            # be called twice
            if 'FileAlreadyExistsException' not in str(e):
                raise
    return self.foreachRDD(saveAsTextFile)
python
def saveAsTextFiles(self, prefix, suffix=None):
    """
    Save each RDD in this DStream as at text file, using string
    representation of elements.
    """
    def saveAsTextFile(t, rdd):
        path = rddToFileName(prefix, suffix, t)
        try:
            rdd.saveAsTextFile(path)
        except Py4JJavaError as e:
            # after recovered from checkpointing, the foreachRDD may
            # be called twice
            if 'FileAlreadyExistsException' not in str(e):
                raise
    return self.foreachRDD(saveAsTextFile)
[ "def", "saveAsTextFiles", "(", "self", ",", "prefix", ",", "suffix", "=", "None", ")", ":", "def", "saveAsTextFile", "(", "t", ",", "rdd", ")", ":", "path", "=", "rddToFileName", "(", "prefix", ",", "suffix", ",", "t", ")", "try", ":", "rdd", ".", "saveAsTextFile", "(", "path", ")", "except", "Py4JJavaError", "as", "e", ":", "# after recovered from checkpointing, the foreachRDD may", "# be called twice", "if", "'FileAlreadyExistsException'", "not", "in", "str", "(", "e", ")", ":", "raise", "return", "self", ".", "foreachRDD", "(", "saveAsTextFile", ")" ]
Save each RDD in this DStream as at text file, using string representation of elements.
[ "Save", "each", "RDD", "in", "this", "DStream", "as", "at", "text", "file", "using", "string", "representation", "of", "elements", "." ]
618d6bff71073c8c93501ab7392c3cc579730f0b
https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/streaming/dstream.py#L254-L268
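A sketch of saveAsTextFiles above writing each batch under a temporary prefix; one output directory per batch is expected, named from the prefix, batch time, and suffix. Paths, timing, and the exact directory names are illustrative assumptions.
import glob
import os
import tempfile

from pyspark import SparkContext
from pyspark.streaming import StreamingContext

sc = SparkContext("local[2]", "saveastextfiles-sketch")
ssc = StreamingContext(sc, 1)

prefix = os.path.join(tempfile.mkdtemp(), "batches")
ssc.queueStream([["a", "b"], ["c"]]).saveAsTextFiles(prefix, "txt")

ssc.start()
ssc.awaitTerminationOrTimeout(5)
ssc.stop()
print(glob.glob(prefix + "-*"))   # one output directory per processed batch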
19,099
apache/spark
python/pyspark/streaming/dstream.py
DStream.transform
def transform(self, func):
    """
    Return a new DStream in which each RDD is generated by applying a function
    on each RDD of this DStream.

    `func` can have one argument of `rdd`, or have two arguments of
    (`time`, `rdd`)
    """
    if func.__code__.co_argcount == 1:
        oldfunc = func
        func = lambda t, rdd: oldfunc(rdd)
    assert func.__code__.co_argcount == 2, "func should take one or two arguments"
    return TransformedDStream(self, func)
python
def transform(self, func):
    """
    Return a new DStream in which each RDD is generated by applying a function
    on each RDD of this DStream.

    `func` can have one argument of `rdd`, or have two arguments of
    (`time`, `rdd`)
    """
    if func.__code__.co_argcount == 1:
        oldfunc = func
        func = lambda t, rdd: oldfunc(rdd)
    assert func.__code__.co_argcount == 2, "func should take one or two arguments"
    return TransformedDStream(self, func)
[ "def", "transform", "(", "self", ",", "func", ")", ":", "if", "func", ".", "__code__", ".", "co_argcount", "==", "1", ":", "oldfunc", "=", "func", "func", "=", "lambda", "t", ",", "rdd", ":", "oldfunc", "(", "rdd", ")", "assert", "func", ".", "__code__", ".", "co_argcount", "==", "2", ",", "\"func should take one or two arguments\"", "return", "TransformedDStream", "(", "self", ",", "func", ")" ]
Return a new DStream in which each RDD is generated by applying a function on each RDD of this DStream. `func` can have one argument of `rdd`, or have two arguments of (`time`, `rdd`)
[ "Return", "a", "new", "DStream", "in", "which", "each", "RDD", "is", "generated", "by", "applying", "a", "function", "on", "each", "RDD", "of", "this", "DStream", "." ]
618d6bff71073c8c93501ab7392c3cc579730f0b
https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/streaming/dstream.py#L287-L299
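A sketch of transform above, using the single-argument form to join each batch against a static lookup RDD, something the element-wise operators cannot express directly. The data is invented, and a two-argument (time, rdd) function would work as well.
from pyspark import SparkContext
from pyspark.streaming import StreamingContext

sc = SparkContext("local[2]", "transform-sketch")
ssc = StreamingContext(sc, 1)

lookup = sc.parallelize([(1, "a"), (2, "b")])            # static side input
events = ssc.queueStream([[(1, 10), (2, 20)]])

joined = events.transform(lambda rdd: rdd.join(lookup))  # arbitrary RDD-to-RDD function
joined.pprint()                                          # (1, (10, 'a')), (2, (20, 'b'))

ssc.start()
ssc.awaitTerminationOrTimeout(5)
ssc.stop()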