Parse a line of text into an MLlib LabeledPoint object.
def parsePoint(line): """ Parse a line of text into an MLlib LabeledPoint object. """ values = [float(s) for s in line.split(' ')] if values[0] == -1: # Convert -1 labels to 0 for MLlib values[0] = 0 return LabeledPoint(values[0], values[1:])
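A minimal usage sketch for the parser above, assuming an active SparkContext `sc` and a hypothetical whitespace-separated file where each line is "label f1 f2 ..." with labels in {-1, 1}:

from pyspark.mllib.regression import LabeledPoint

# Hypothetical input path; adjust to your data layout.
raw = sc.textFile("data/sample_svm_data.txt")
points = raw.map(parsePoint)  # RDD of LabeledPoint with labels mapped to {0, 1}
first = points.first()
print(first.label, first.features)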
Returns f-measure.
def fMeasure(self, label, beta=None): """ Returns f-measure. """ if beta is None: return self.call("fMeasure", label) else: return self.call("fMeasure", label, beta)
Returns precision, or precision for a given label (category) if specified.
def precision(self, label=None): """ Returns precision or precision for a given label (category) if specified. """ if label is None: return self.call("precision") else: return self.call("precision", float(label))
Returns recall, or recall for a given label (category) if specified.
def recall(self, label=None): """ Returns recall or recall for a given label (category) if specified. """ if label is None: return self.call("recall") else: return self.call("recall", float(label))
Returns f1Measure, or f1Measure for a given label (category) if specified.
def f1Measure(self, label=None): """ Returns f1Measure or f1Measure for a given label (category) if specified. """ if label is None: return self.call("f1Measure") else: return self.call("f1Measure", float(label))
When converting Spark SQL records to a Pandas DataFrame, the inferred data type may be wrong. This method returns the corrected data type for Pandas if that type may be inferred incorrectly.
def _to_corrected_pandas_type(dt): """ When converting Spark SQL records to Pandas DataFrame, the inferred data type may be wrong. This method gets the corrected data type for Pandas if that type may be inferred incorrectly. """ import numpy as np if type(dt) == ByteType: return np.int8 elif type(dt) == ShortType: return np.int16 elif type(dt) == IntegerType: return np.int32 elif type(dt) == FloatType: return np.float32 else: return None
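For illustration only, a sketch of how such a helper might be applied to narrow Pandas column dtypes; the schema, column names, and `pdf` here are hypothetical and merely mirror the real toPandas code path:

import pandas as pd
from pyspark.sql.types import StructType, StructField, ByteType, IntegerType

schema = StructType([StructField("flag", ByteType()), StructField("count", IntegerType())])
pdf = pd.DataFrame.from_records([(1, 10), (0, 20)], columns=["flag", "count"])

# Apply the corrected dtype wherever the helper suggests one.
for field in schema:
    corrected = _to_corrected_pandas_type(field.dataType)
    if corrected is not None:
        pdf[field.name] = pdf[field.name].astype(corrected, copy=False)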
Returns the content as an :class:`pyspark.RDD` of :class:`Row`.
def rdd(self): """Returns the content as an :class:`pyspark.RDD` of :class:`Row`. """ if self._lazy_rdd is None: jrdd = self._jdf.javaToPython() self._lazy_rdd = RDD(jrdd, self.sql_ctx._sc, BatchedSerializer(PickleSerializer())) return self._lazy_rdd
Converts a :class:`DataFrame` into a :class:`RDD` of string.
def toJSON(self, use_unicode=True): """Converts a :class:`DataFrame` into a :class:`RDD` of string. Each row is turned into a JSON document as one element in the returned RDD. >>> df.toJSON().first() u'{"age":2,"name":"Alice"}' """ rdd = self._jdf.toJSON() return RDD(rdd.toJavaRDD(), self._sc, UTF8Deserializer(use_unicode))
Returns the schema of this :class:`DataFrame` as a :class:`pyspark.sql.types.StructType`.
def schema(self): """Returns the schema of this :class:`DataFrame` as a :class:`pyspark.sql.types.StructType`. >>> df.schema StructType(List(StructField(age,IntegerType,true),StructField(name,StringType,true))) """ if self._schema is None: try: self._schema = _parse_datatype_json_string(self._jdf.schema().json()) except AttributeError as e: raise Exception( "Unable to parse datatype from schema. %s" % e) return self._schema
Prints the (logical and physical) plans to the console for debugging purposes.
def explain(self, extended=False): """Prints the (logical and physical) plans to the console for debugging purpose. :param extended: boolean, default ``False``. If ``False``, prints only the physical plan. >>> df.explain() == Physical Plan == *(1) Scan ExistingRDD[age#0,name#1] >>> df.explain(True) == Parsed Logical Plan == ... == Analyzed Logical Plan == ... == Optimized Logical Plan == ... == Physical Plan == ... """ if extended: print(self._jdf.queryExecution().toString()) else: print(self._jdf.queryExecution().simpleString())
Return a new :class:`DataFrame` containing rows in this :class:`DataFrame` but not in another :class:`DataFrame` while preserving duplicates.
def exceptAll(self, other): """Return a new :class:`DataFrame` containing rows in this :class:`DataFrame` but not in another :class:`DataFrame` while preserving duplicates. This is equivalent to `EXCEPT ALL` in SQL. >>> df1 = spark.createDataFrame( ... [("a", 1), ("a", 1), ("a", 1), ("a", 2), ("b", 3), ("c", 4)], ["C1", "C2"]) >>> df2 = spark.createDataFrame([("a", 1), ("b", 3)], ["C1", "C2"]) >>> df1.exceptAll(df2).show() +---+---+ | C1| C2| +---+---+ | a| 1| | a| 1| | a| 2| | c| 4| +---+---+ Also as standard in SQL, this function resolves columns by position (not by name). """ return DataFrame(self._jdf.exceptAll(other._jdf), self.sql_ctx)
Prints the first n rows to the console.
def show(self, n=20, truncate=True, vertical=False): """Prints the first ``n`` rows to the console. :param n: Number of rows to show. :param truncate: If set to True, truncate strings longer than 20 chars by default. If set to a number greater than one, truncates long strings to length ``truncate`` and align cells right. :param vertical: If set to True, print output rows vertically (one line per column value). >>> df DataFrame[age: int, name: string] >>> df.show() +---+-----+ |age| name| +---+-----+ | 2|Alice| | 5| Bob| +---+-----+ >>> df.show(truncate=3) +---+----+ |age|name| +---+----+ | 2| Ali| | 5| Bob| +---+----+ >>> df.show(vertical=True) -RECORD 0----- age | 2 name | Alice -RECORD 1----- age | 5 name | Bob """ if isinstance(truncate, bool) and truncate: print(self._jdf.showString(n, 20, vertical)) else: print(self._jdf.showString(n, int(truncate), vertical))
Returns the DataFrame rendered as HTML when eager evaluation is enabled via 'spark.sql.repl.eagerEval.enabled'. This is only called by REPLs that support eager evaluation with HTML.
def _repr_html_(self): """Returns a dataframe with html code when you enabled eager evaluation by 'spark.sql.repl.eagerEval.enabled', this only called by REPL you are using support eager evaluation with HTML. """ import cgi if not self._support_repr_html: self._support_repr_html = True if self.sql_ctx._conf.isReplEagerEvalEnabled(): max_num_rows = max(self.sql_ctx._conf.replEagerEvalMaxNumRows(), 0) sock_info = self._jdf.getRowsToPython( max_num_rows, self.sql_ctx._conf.replEagerEvalTruncate()) rows = list(_load_from_socket(sock_info, BatchedSerializer(PickleSerializer()))) head = rows[0] row_data = rows[1:] has_more_data = len(row_data) > max_num_rows row_data = row_data[:max_num_rows] html = "<table border='1'>\n" # generate table head html += "<tr><th>%s</th></tr>\n" % "</th><th>".join(map(lambda x: cgi.escape(x), head)) # generate table rows for row in row_data: html += "<tr><td>%s</td></tr>\n" % "</td><td>".join( map(lambda x: cgi.escape(x), row)) html += "</table>\n" if has_more_data: html += "only showing top %d %s\n" % ( max_num_rows, "row" if max_num_rows == 1 else "rows") return html else: return None
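A sketch of how eager HTML evaluation is typically switched on from user code (these config keys exist in Spark 2.4+; the table only renders in notebook front-ends that honor _repr_html_):

spark.conf.set("spark.sql.repl.eagerEval.enabled", "true")
spark.conf.set("spark.sql.repl.eagerEval.maxNumRows", "20")  # optional: rows to show
spark.conf.set("spark.sql.repl.eagerEval.truncate", "20")    # optional: cell truncation

df = spark.range(3)
df  # in Jupyter, the DataFrame now renders as an HTML table instead of the schema string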
Returns a checkpointed version of this Dataset. Checkpointing can be used to truncate the logical plan of this DataFrame, which is especially useful in iterative algorithms where the plan may grow exponentially. It will be saved to files inside the checkpoint directory set with L{SparkContext.setCheckpointDir()}.
def checkpoint(self, eager=True): """Returns a checkpointed version of this Dataset. Checkpointing can be used to truncate the logical plan of this DataFrame, which is especially useful in iterative algorithms where the plan may grow exponentially. It will be saved to files inside the checkpoint directory set with L{SparkContext.setCheckpointDir()}. :param eager: Whether to checkpoint this DataFrame immediately .. note:: Experimental """ jdf = self._jdf.checkpoint(eager) return DataFrame(jdf, self.sql_ctx)
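A hedged usage sketch: set a writable checkpoint directory (the path below is hypothetical), build up a plan iteratively, then checkpoint to cut the lineage.

spark.sparkContext.setCheckpointDir("/tmp/spark-checkpoints")  # hypothetical path

df = spark.range(1000)
for i in range(10):
    df = df.withColumn("step_%d" % i, df["id"] + i)
# Materialize and truncate the logical plan so it does not keep growing.
df = df.checkpoint(eager=True)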
Returns a locally checkpointed version of this Dataset. Checkpointing can be used to truncate the logical plan of this DataFrame, which is especially useful in iterative algorithms where the plan may grow exponentially. Local checkpoints are stored in the executors using the caching subsystem and therefore they are not reliable.
def localCheckpoint(self, eager=True): """Returns a locally checkpointed version of this Dataset. Checkpointing can be used to truncate the logical plan of this DataFrame, which is especially useful in iterative algorithms where the plan may grow exponentially. Local checkpoints are stored in the executors using the caching subsystem and therefore they are not reliable. :param eager: Whether to checkpoint this DataFrame immediately .. note:: Experimental """ jdf = self._jdf.localCheckpoint(eager) return DataFrame(jdf, self.sql_ctx)
Defines an event time watermark for this :class:`DataFrame`. A watermark tracks a point in time before which we assume no more late data is going to arrive.
def withWatermark(self, eventTime, delayThreshold): """Defines an event time watermark for this :class:`DataFrame`. A watermark tracks a point in time before which we assume no more late data is going to arrive. Spark will use this watermark for several purposes: - To know when a given time window aggregation can be finalized and thus can be emitted when using output modes that do not allow updates. - To minimize the amount of state that we need to keep for on-going aggregations. The current watermark is computed by looking at the `MAX(eventTime)` seen across all of the partitions in the query minus a user specified `delayThreshold`. Due to the cost of coordinating this value across partitions, the actual watermark used is only guaranteed to be at least `delayThreshold` behind the actual event time. In some cases we may still process records that arrive more than `delayThreshold` late. :param eventTime: the name of the column that contains the event time of the row. :param delayThreshold: the minimum delay to wait to data to arrive late, relative to the latest record that has been processed in the form of an interval (e.g. "1 minute" or "5 hours"). .. note:: Evolving >>> sdf.select('name', sdf.time.cast('timestamp')).withWatermark('time', '10 minutes') DataFrame[name: string, time: timestamp] """ if not eventTime or type(eventTime) is not str: raise TypeError("eventTime should be provided as a string") if not delayThreshold or type(delayThreshold) is not str: raise TypeError("delayThreshold should be provided as a string interval") jdf = self._jdf.withWatermark(eventTime, delayThreshold) return DataFrame(jdf, self.sql_ctx)
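A sketch of the typical streaming use, where the watermark bounds the state kept for a windowed aggregation; the socket source, the derived "word" and "time" columns, and the thresholds are placeholders, not part of the method's contract:

from pyspark.sql.functions import window

# Hypothetical streaming source with an event-time column named "time".
events = (spark.readStream.format("socket")
          .option("host", "localhost").option("port", 9999).load()
          .selectExpr("CAST(value AS STRING) AS word", "current_timestamp() AS time"))

counts = (events
          .withWatermark("time", "10 minutes")
          .groupBy(window("time", "5 minutes"), "word")
          .count())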
Specifies some hint on the current DataFrame.
def hint(self, name, *parameters): """Specifies some hint on the current DataFrame. :param name: A name of the hint. :param parameters: Optional parameters. :return: :class:`DataFrame` >>> df.join(df2.hint("broadcast"), "name").show() +----+---+------+ |name|age|height| +----+---+------+ | Bob| 5| 85| +----+---+------+ """ if len(parameters) == 1 and isinstance(parameters[0], list): parameters = parameters[0] if not isinstance(name, str): raise TypeError("name should be provided as str, got {0}".format(type(name))) allowed_types = (basestring, list, float, int) for p in parameters: if not isinstance(p, allowed_types): raise TypeError( "all parameters should be in {0}, got {1} of type {2}".format( allowed_types, p, type(p))) jdf = self._jdf.hint(name, self._jseq(parameters)) return DataFrame(jdf, self.sql_ctx)
Returns all the records as a list of :class:`Row`.
def collect(self): """Returns all the records as a list of :class:`Row`. >>> df.collect() [Row(age=2, name=u'Alice'), Row(age=5, name=u'Bob')] """ with SCCallSiteSync(self._sc) as css: sock_info = self._jdf.collectToPython() return list(_load_from_socket(sock_info, BatchedSerializer(PickleSerializer())))
Returns an iterator that contains all of the rows in this :class:`DataFrame`. The iterator will consume as much memory as the largest partition in this DataFrame.
def toLocalIterator(self): """ Returns an iterator that contains all of the rows in this :class:`DataFrame`. The iterator will consume as much memory as the largest partition in this DataFrame. >>> list(df.toLocalIterator()) [Row(age=2, name=u'Alice'), Row(age=5, name=u'Bob')] """ with SCCallSiteSync(self._sc) as css: sock_info = self._jdf.toPythonIterator() return _load_from_socket(sock_info, BatchedSerializer(PickleSerializer()))
Limits the result count to the number specified.
def limit(self, num): """Limits the result count to the number specified. >>> df.limit(1).collect() [Row(age=2, name=u'Alice')] >>> df.limit(0).collect() [] """ jdf = self._jdf.limit(num) return DataFrame(jdf, self.sql_ctx)
Sets the storage level to persist the contents of the :class:`DataFrame` across operations after the first time it is computed. This can only be used to assign a new storage level if the :class:`DataFrame` does not have a storage level set yet. If no storage level is specified, defaults to (C{MEMORY_AND_DISK}).
def persist(self, storageLevel=StorageLevel.MEMORY_AND_DISK): """Sets the storage level to persist the contents of the :class:`DataFrame` across operations after the first time it is computed. This can only be used to assign a new storage level if the :class:`DataFrame` does not have a storage level set yet. If no storage level is specified defaults to (C{MEMORY_AND_DISK}). .. note:: The default storage level has changed to C{MEMORY_AND_DISK} to match Scala in 2.0. """ self.is_cached = True javaStorageLevel = self._sc._getJavaStorageLevel(storageLevel) self._jdf.persist(javaStorageLevel) return self
Get the :class:`DataFrame`'s current storage level.
def storageLevel(self): """Get the :class:`DataFrame`'s current storage level. >>> df.storageLevel StorageLevel(False, False, False, False, 1) >>> df.cache().storageLevel StorageLevel(True, True, False, True, 1) >>> df2.persist(StorageLevel.DISK_ONLY_2).storageLevel StorageLevel(True, False, False, False, 2) """ java_storage_level = self._jdf.storageLevel() storage_level = StorageLevel(java_storage_level.useDisk(), java_storage_level.useMemory(), java_storage_level.useOffHeap(), java_storage_level.deserialized(), java_storage_level.replication()) return storage_level
Marks the :class:`DataFrame` as non-persistent, and remove all blocks for it from memory and disk.
def unpersist(self, blocking=False): """Marks the :class:`DataFrame` as non-persistent, and remove all blocks for it from memory and disk. .. note:: `blocking` default has changed to False to match Scala in 2.0. """ self.is_cached = False self._jdf.unpersist(blocking) return self
Returns a new :class:`DataFrame` that has exactly `numPartitions` partitions.
def coalesce(self, numPartitions): """ Returns a new :class:`DataFrame` that has exactly `numPartitions` partitions. :param numPartitions: int, to specify the target number of partitions Similar to coalesce defined on an :class:`RDD`, this operation results in a narrow dependency, e.g. if you go from 1000 partitions to 100 partitions, there will not be a shuffle, instead each of the 100 new partitions will claim 10 of the current partitions. If a larger number of partitions is requested, it will stay at the current number of partitions. However, if you're doing a drastic coalesce, e.g. to numPartitions = 1, this may result in your computation taking place on fewer nodes than you like (e.g. one node in the case of numPartitions = 1). To avoid this, you can call repartition(). This will add a shuffle step, but means the current upstream partitions will be executed in parallel (per whatever the current partitioning is). >>> df.coalesce(1).rdd.getNumPartitions() 1 """ return DataFrame(self._jdf.coalesce(numPartitions), self.sql_ctx)
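To illustrate the trade-off described above, a small sketch: coalesce avoids a shuffle (so a drastic coalesce can serialize the job), while repartition adds a shuffle but keeps upstream stages parallel.

df = spark.range(1000).repartition(100)

narrow = df.coalesce(1)       # no shuffle; the whole job may end up on a single task
shuffled = df.repartition(1)  # adds a shuffle step, but upstream work stays parallel

print(narrow.rdd.getNumPartitions(), shuffled.rdd.getNumPartitions())  # 1 1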
Returns a new :class:`DataFrame` partitioned by the given partitioning expressions. The resulting DataFrame is hash partitioned.
def repartition(self, numPartitions, *cols): """ Returns a new :class:`DataFrame` partitioned by the given partitioning expressions. The resulting DataFrame is hash partitioned. :param numPartitions: can be an int to specify the target number of partitions or a Column. If it is a Column, it will be used as the first partitioning column. If not specified, the default number of partitions is used. .. versionchanged:: 1.6 Added optional arguments to specify the partitioning columns. Also made numPartitions optional if partitioning columns are specified. >>> df.repartition(10).rdd.getNumPartitions() 10 >>> data = df.union(df).repartition("age") >>> data.show() +---+-----+ |age| name| +---+-----+ | 5| Bob| | 5| Bob| | 2|Alice| | 2|Alice| +---+-----+ >>> data = data.repartition(7, "age") >>> data.show() +---+-----+ |age| name| +---+-----+ | 2|Alice| | 5| Bob| | 2|Alice| | 5| Bob| +---+-----+ >>> data.rdd.getNumPartitions() 7 >>> data = data.repartition("name", "age") >>> data.show() +---+-----+ |age| name| +---+-----+ | 5| Bob| | 5| Bob| | 2|Alice| | 2|Alice| +---+-----+ """ if isinstance(numPartitions, int): if len(cols) == 0: return DataFrame(self._jdf.repartition(numPartitions), self.sql_ctx) else: return DataFrame( self._jdf.repartition(numPartitions, self._jcols(*cols)), self.sql_ctx) elif isinstance(numPartitions, (basestring, Column)): cols = (numPartitions, ) + cols return DataFrame(self._jdf.repartition(self._jcols(*cols)), self.sql_ctx) else: raise TypeError("numPartitions should be an int or Column")
Returns a sampled subset of this :class:`DataFrame`.
def sample(self, withReplacement=None, fraction=None, seed=None): """Returns a sampled subset of this :class:`DataFrame`. :param withReplacement: Sample with replacement or not (default False). :param fraction: Fraction of rows to generate, range [0.0, 1.0]. :param seed: Seed for sampling (default a random seed). .. note:: This is not guaranteed to provide exactly the fraction specified of the total count of the given :class:`DataFrame`. .. note:: `fraction` is required and, `withReplacement` and `seed` are optional. >>> df = spark.range(10) >>> df.sample(0.5, 3).count() 7 >>> df.sample(fraction=0.5, seed=3).count() 7 >>> df.sample(withReplacement=True, fraction=0.5, seed=3).count() 1 >>> df.sample(1.0).count() 10 >>> df.sample(fraction=1.0).count() 10 >>> df.sample(False, fraction=1.0).count() 10 """ # For the cases below: # sample(True, 0.5 [, seed]) # sample(True, fraction=0.5 [, seed]) # sample(withReplacement=False, fraction=0.5 [, seed]) is_withReplacement_set = \ type(withReplacement) == bool and isinstance(fraction, float) # For the case below: # sample(faction=0.5 [, seed]) is_withReplacement_omitted_kwargs = \ withReplacement is None and isinstance(fraction, float) # For the case below: # sample(0.5 [, seed]) is_withReplacement_omitted_args = isinstance(withReplacement, float) if not (is_withReplacement_set or is_withReplacement_omitted_kwargs or is_withReplacement_omitted_args): argtypes = [ str(type(arg)) for arg in [withReplacement, fraction, seed] if arg is not None] raise TypeError( "withReplacement (optional), fraction (required) and seed (optional)" " should be a bool, float and number; however, " "got [%s]." % ", ".join(argtypes)) if is_withReplacement_omitted_args: if fraction is not None: seed = fraction fraction = withReplacement withReplacement = None seed = long(seed) if seed is not None else None args = [arg for arg in [withReplacement, fraction, seed] if arg is not None] jdf = self._jdf.sample(*args) return DataFrame(jdf, self.sql_ctx)
Returns a stratified sample without replacement based on the fraction given on each stratum.
def sampleBy(self, col, fractions, seed=None): """ Returns a stratified sample without replacement based on the fraction given on each stratum. :param col: column that defines strata :param fractions: sampling fraction for each stratum. If a stratum is not specified, we treat its fraction as zero. :param seed: random seed :return: a new DataFrame that represents the stratified sample >>> from pyspark.sql.functions import col >>> dataset = sqlContext.range(0, 100).select((col("id") % 3).alias("key")) >>> sampled = dataset.sampleBy("key", fractions={0: 0.1, 1: 0.2}, seed=0) >>> sampled.groupBy("key").count().orderBy("key").show() +---+-----+ |key|count| +---+-----+ | 0| 3| | 1| 6| +---+-----+ >>> dataset.sampleBy(col("key"), fractions={2: 1.0}, seed=0).count() 33 .. versionchanged:: 3.0 Added sampling by a column of :class:`Column` """ if isinstance(col, basestring): col = Column(col) elif not isinstance(col, Column): raise ValueError("col must be a string or a column, but got %r" % type(col)) if not isinstance(fractions, dict): raise ValueError("fractions must be a dict but got %r" % type(fractions)) for k, v in fractions.items(): if not isinstance(k, (float, int, long, basestring)): raise ValueError("key must be float, int, long, or string, but got %r" % type(k)) fractions[k] = float(v) col = col._jc seed = seed if seed is not None else random.randint(0, sys.maxsize) return DataFrame(self._jdf.stat().sampleBy(col, self._jmap(fractions), seed), self.sql_ctx)
Randomly splits this :class:`DataFrame` with the provided weights.
def randomSplit(self, weights, seed=None): """Randomly splits this :class:`DataFrame` with the provided weights. :param weights: list of doubles as weights with which to split the DataFrame. Weights will be normalized if they don't sum up to 1.0. :param seed: The seed for sampling. >>> splits = df4.randomSplit([1.0, 2.0], 24) >>> splits[0].count() 2 >>> splits[1].count() 2 """ for w in weights: if w < 0.0: raise ValueError("Weights must be positive. Found weight value: %s" % w) seed = seed if seed is not None else random.randint(0, sys.maxsize) rdd_array = self._jdf.randomSplit(_to_list(self.sql_ctx._sc, weights), long(seed)) return [DataFrame(rdd, self.sql_ctx) for rdd in rdd_array]
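A common train/test split built on this method, as a sketch (weights are normalized, and the resulting sizes are approximate for any given seed):

df = spark.range(100)
train, test = df.randomSplit([0.8, 0.2], seed=42)
print(train.count(), test.count())  # roughly 80 / 20, not exact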
Returns all column names and their data types as a list.
def dtypes(self): """Returns all column names and their data types as a list. >>> df.dtypes [('age', 'int'), ('name', 'string')] """ return [(str(f.name), f.dataType.simpleString()) for f in self.schema.fields]
Selects column based on the column name specified as a regex and returns it as :class:`Column`.
def colRegex(self, colName): """ Selects column based on the column name specified as a regex and returns it as :class:`Column`. :param colName: string, column name specified as a regex. >>> df = spark.createDataFrame([("a", 1), ("b", 2), ("c", 3)], ["Col1", "Col2"]) >>> df.select(df.colRegex("`(Col1)?+.+`")).show() +----+ |Col2| +----+ | 1| | 2| | 3| +----+ """ if not isinstance(colName, basestring): raise ValueError("colName should be provided as string") jc = self._jdf.colRegex(colName) return Column(jc)
Returns a new :class:`DataFrame` with an alias set.
def alias(self, alias): """Returns a new :class:`DataFrame` with an alias set. :param alias: string, an alias name to be set for the DataFrame. >>> from pyspark.sql.functions import * >>> df_as1 = df.alias("df_as1") >>> df_as2 = df.alias("df_as2") >>> joined_df = df_as1.join(df_as2, col("df_as1.name") == col("df_as2.name"), 'inner') >>> joined_df.select("df_as1.name", "df_as2.name", "df_as2.age").collect() [Row(name=u'Bob', name=u'Bob', age=5), Row(name=u'Alice', name=u'Alice', age=2)] """ assert isinstance(alias, basestring), "alias should be a string" return DataFrame(getattr(self._jdf, "as")(alias), self.sql_ctx)
Returns the cartesian product with another :class:`DataFrame`.
def crossJoin(self, other): """Returns the cartesian product with another :class:`DataFrame`. :param other: Right side of the cartesian product. >>> df.select("age", "name").collect() [Row(age=2, name=u'Alice'), Row(age=5, name=u'Bob')] >>> df2.select("name", "height").collect() [Row(name=u'Tom', height=80), Row(name=u'Bob', height=85)] >>> df.crossJoin(df2.select("height")).select("age", "name", "height").collect() [Row(age=2, name=u'Alice', height=80), Row(age=2, name=u'Alice', height=85), Row(age=5, name=u'Bob', height=80), Row(age=5, name=u'Bob', height=85)] """ jdf = self._jdf.crossJoin(other._jdf) return DataFrame(jdf, self.sql_ctx)
Joins with another :class:`DataFrame`, using the given join expression.
def join(self, other, on=None, how=None): """Joins with another :class:`DataFrame`, using the given join expression. :param other: Right side of the join :param on: a string for the join column name, a list of column names, a join expression (Column), or a list of Columns. If `on` is a string or a list of strings indicating the name of the join column(s), the column(s) must exist on both sides, and this performs an equi-join. :param how: str, default ``inner``. Must be one of: ``inner``, ``cross``, ``outer``, ``full``, ``fullouter``, ``full_outer``, ``left``, ``leftouter``, ``left_outer``, ``right``, ``rightouter``, ``right_outer``, ``semi``, ``leftsemi``, ``left_semi``, ``anti``, ``leftanti`` and ``left_anti``. The following performs a full outer join between ``df1`` and ``df2``. >>> df.join(df2, df.name == df2.name, 'outer').select(df.name, df2.height).collect() [Row(name=None, height=80), Row(name=u'Bob', height=85), Row(name=u'Alice', height=None)] >>> df.join(df2, 'name', 'outer').select('name', 'height').collect() [Row(name=u'Tom', height=80), Row(name=u'Bob', height=85), Row(name=u'Alice', height=None)] >>> cond = [df.name == df3.name, df.age == df3.age] >>> df.join(df3, cond, 'outer').select(df.name, df3.age).collect() [Row(name=u'Alice', age=2), Row(name=u'Bob', age=5)] >>> df.join(df2, 'name').select(df.name, df2.height).collect() [Row(name=u'Bob', height=85)] >>> df.join(df4, ['name', 'age']).select(df.name, df.age).collect() [Row(name=u'Bob', age=5)] """ if on is not None and not isinstance(on, list): on = [on] if on is not None: if isinstance(on[0], basestring): on = self._jseq(on) else: assert isinstance(on[0], Column), "on should be Column or list of Column" on = reduce(lambda x, y: x.__and__(y), on) on = on._jc if on is None and how is None: jdf = self._jdf.join(other._jdf) else: if how is None: how = "inner" if on is None: on = self._jseq([]) assert isinstance(how, basestring), "how should be basestring" jdf = self._jdf.join(other._jdf, on, how) return DataFrame(jdf, self.sql_ctx)
Returns a new :class:`DataFrame` with each partition sorted by the specified column(s).
def sortWithinPartitions(self, *cols, **kwargs): """Returns a new :class:`DataFrame` with each partition sorted by the specified column(s). :param cols: list of :class:`Column` or column names to sort by. :param ascending: boolean or list of boolean (default True). Sort ascending vs. descending. Specify list for multiple sort orders. If a list is specified, length of the list must equal length of the `cols`. >>> df.sortWithinPartitions("age", ascending=False).show() +---+-----+ |age| name| +---+-----+ | 2|Alice| | 5| Bob| +---+-----+ """ jdf = self._jdf.sortWithinPartitions(self._sort_cols(cols, kwargs)) return DataFrame(jdf, self.sql_ctx)
Return a JVM Seq of Columns from a list of Column or names
def _jseq(self, cols, converter=None): """Return a JVM Seq of Columns from a list of Column or names""" return _to_seq(self.sql_ctx._sc, cols, converter)
Return a JVM Seq of Columns from a list of Column or column names
def _jcols(self, *cols): """Return a JVM Seq of Columns from a list of Column or column names If `cols` has only one list in it, cols[0] will be used as the list. """ if len(cols) == 1 and isinstance(cols[0], list): cols = cols[0] return self._jseq(cols, _to_java_column)
Return a JVM Seq of Columns that describes the sort order
def _sort_cols(self, cols, kwargs): """ Return a JVM Seq of Columns that describes the sort order """ if not cols: raise ValueError("should sort by at least one column") if len(cols) == 1 and isinstance(cols[0], list): cols = cols[0] jcols = [_to_java_column(c) for c in cols] ascending = kwargs.get('ascending', True) if isinstance(ascending, (bool, int)): if not ascending: jcols = [jc.desc() for jc in jcols] elif isinstance(ascending, list): jcols = [jc if asc else jc.desc() for asc, jc in zip(ascending, jcols)] else: raise TypeError("ascending can only be boolean or list, but got %s" % type(ascending)) return self._jseq(jcols)
Computes basic statistics for numeric and string columns.
def describe(self, *cols): """Computes basic statistics for numeric and string columns. This include count, mean, stddev, min, and max. If no columns are given, this function computes statistics for all numerical or string columns. .. note:: This function is meant for exploratory data analysis, as we make no guarantee about the backward compatibility of the schema of the resulting DataFrame. >>> df.describe(['age']).show() +-------+------------------+ |summary| age| +-------+------------------+ | count| 2| | mean| 3.5| | stddev|2.1213203435596424| | min| 2| | max| 5| +-------+------------------+ >>> df.describe().show() +-------+------------------+-----+ |summary| age| name| +-------+------------------+-----+ | count| 2| 2| | mean| 3.5| null| | stddev|2.1213203435596424| null| | min| 2|Alice| | max| 5| Bob| +-------+------------------+-----+ Use summary for expanded statistics and control over which statistics to compute. """ if len(cols) == 1 and isinstance(cols[0], list): cols = cols[0] jdf = self._jdf.describe(self._jseq(cols)) return DataFrame(jdf, self.sql_ctx)
Computes specified statistics for numeric and string columns. Available statistics are: count, mean, stddev, min, max, and arbitrary approximate percentiles specified as a percentage (e.g., 75%).
def summary(self, *statistics): """Computes specified statistics for numeric and string columns. Available statistics are: - count - mean - stddev - min - max - arbitrary approximate percentiles specified as a percentage (eg, 75%) If no statistics are given, this function computes count, mean, stddev, min, approximate quartiles (percentiles at 25%, 50%, and 75%), and max. .. note:: This function is meant for exploratory data analysis, as we make no guarantee about the backward compatibility of the schema of the resulting DataFrame. >>> df.summary().show() +-------+------------------+-----+ |summary| age| name| +-------+------------------+-----+ | count| 2| 2| | mean| 3.5| null| | stddev|2.1213203435596424| null| | min| 2|Alice| | 25%| 2| null| | 50%| 2| null| | 75%| 5| null| | max| 5| Bob| +-------+------------------+-----+ >>> df.summary("count", "min", "25%", "75%", "max").show() +-------+---+-----+ |summary|age| name| +-------+---+-----+ | count| 2| 2| | min| 2|Alice| | 25%| 2| null| | 75%| 5| null| | max| 5| Bob| +-------+---+-----+ To do a summary for specific columns first select them: >>> df.select("age", "name").summary("count").show() +-------+---+----+ |summary|age|name| +-------+---+----+ | count| 2| 2| +-------+---+----+ See also describe for basic statistics. """ if len(statistics) == 1 and isinstance(statistics[0], list): statistics = statistics[0] jdf = self._jdf.summary(self._jseq(statistics)) return DataFrame(jdf, self.sql_ctx)
Returns the first n rows.
def head(self, n=None): """Returns the first ``n`` rows. .. note:: This method should only be used if the resulting array is expected to be small, as all the data is loaded into the driver's memory. :param n: int, default 1. Number of rows to return. :return: If n is greater than 1, return a list of :class:`Row`. If n is 1, return a single Row. >>> df.head() Row(age=2, name=u'Alice') >>> df.head(1) [Row(age=2, name=u'Alice')] """ if n is None: rs = self.head(1) return rs[0] if rs else None return self.take(n)
Projects a set of expressions and returns a new :class:`DataFrame`.
def select(self, *cols): """Projects a set of expressions and returns a new :class:`DataFrame`. :param cols: list of column names (string) or expressions (:class:`Column`). If one of the column names is '*', that column is expanded to include all columns in the current DataFrame. >>> df.select('*').collect() [Row(age=2, name=u'Alice'), Row(age=5, name=u'Bob')] >>> df.select('name', 'age').collect() [Row(name=u'Alice', age=2), Row(name=u'Bob', age=5)] >>> df.select(df.name, (df.age + 10).alias('age')).collect() [Row(name=u'Alice', age=12), Row(name=u'Bob', age=15)] """ jdf = self._jdf.select(self._jcols(*cols)) return DataFrame(jdf, self.sql_ctx)
Projects a set of SQL expressions and returns a new :class:`DataFrame`.
def selectExpr(self, *expr): """Projects a set of SQL expressions and returns a new :class:`DataFrame`. This is a variant of :func:`select` that accepts SQL expressions. >>> df.selectExpr("age * 2", "abs(age)").collect() [Row((age * 2)=4, abs(age)=2), Row((age * 2)=10, abs(age)=5)] """ if len(expr) == 1 and isinstance(expr[0], list): expr = expr[0] jdf = self._jdf.selectExpr(self._jseq(expr)) return DataFrame(jdf, self.sql_ctx)
Filters rows using the given condition.
def filter(self, condition): """Filters rows using the given condition. :func:`where` is an alias for :func:`filter`. :param condition: a :class:`Column` of :class:`types.BooleanType` or a string of SQL expression. >>> df.filter(df.age > 3).collect() [Row(age=5, name=u'Bob')] >>> df.where(df.age == 2).collect() [Row(age=2, name=u'Alice')] >>> df.filter("age > 3").collect() [Row(age=5, name=u'Bob')] >>> df.where("age = 2").collect() [Row(age=2, name=u'Alice')] """ if isinstance(condition, basestring): jdf = self._jdf.filter(condition) elif isinstance(condition, Column): jdf = self._jdf.filter(condition._jc) else: raise TypeError("condition should be string or Column") return DataFrame(jdf, self.sql_ctx)
Groups the :class:`DataFrame` using the specified columns, so we can run aggregation on them. See :class:`GroupedData` for all the available aggregate functions.
def groupBy(self, *cols): """Groups the :class:`DataFrame` using the specified columns, so we can run aggregation on them. See :class:`GroupedData` for all the available aggregate functions. :func:`groupby` is an alias for :func:`groupBy`. :param cols: list of columns to group by. Each element should be a column name (string) or an expression (:class:`Column`). >>> df.groupBy().avg().collect() [Row(avg(age)=3.5)] >>> sorted(df.groupBy('name').agg({'age': 'mean'}).collect()) [Row(name=u'Alice', avg(age)=2.0), Row(name=u'Bob', avg(age)=5.0)] >>> sorted(df.groupBy(df.name).avg().collect()) [Row(name=u'Alice', avg(age)=2.0), Row(name=u'Bob', avg(age)=5.0)] >>> sorted(df.groupBy(['name', df.age]).count().collect()) [Row(name=u'Alice', age=2, count=1), Row(name=u'Bob', age=5, count=1)] """ jgd = self._jdf.groupBy(self._jcols(*cols)) from pyspark.sql.group import GroupedData return GroupedData(jgd, self)
Return a new :class:`DataFrame` containing union of rows in this and another frame.
def union(self, other): """ Return a new :class:`DataFrame` containing union of rows in this and another frame. This is equivalent to `UNION ALL` in SQL. To do a SQL-style set union (that does deduplication of elements), use this function followed by :func:`distinct`. Also as standard in SQL, this function resolves columns by position (not by name). """ return DataFrame(self._jdf.union(other._jdf), self.sql_ctx)
Returns a new :class:`DataFrame` containing union of rows in this and another frame.
def unionByName(self, other): """ Returns a new :class:`DataFrame` containing union of rows in this and another frame. This is different from both `UNION ALL` and `UNION DISTINCT` in SQL. To do a SQL-style set union (that does deduplication of elements), use this function followed by :func:`distinct`. The difference between this function and :func:`union` is that this function resolves columns by name (not by position): >>> df1 = spark.createDataFrame([[1, 2, 3]], ["col0", "col1", "col2"]) >>> df2 = spark.createDataFrame([[4, 5, 6]], ["col1", "col2", "col0"]) >>> df1.unionByName(df2).show() +----+----+----+ |col0|col1|col2| +----+----+----+ | 1| 2| 3| | 6| 4| 5| +----+----+----+ """ return DataFrame(self._jdf.unionByName(other._jdf), self.sql_ctx)
Return a new :class:`DataFrame` containing rows only in both this frame and another frame.
def intersect(self, other): """ Return a new :class:`DataFrame` containing rows only in both this frame and another frame. This is equivalent to `INTERSECT` in SQL. """ return DataFrame(self._jdf.intersect(other._jdf), self.sql_ctx)
Return a new :class:`DataFrame` containing rows in both this dataframe and other dataframe while preserving duplicates.
def intersectAll(self, other): """ Return a new :class:`DataFrame` containing rows in both this dataframe and other dataframe while preserving duplicates. This is equivalent to `INTERSECT ALL` in SQL. >>> df1 = spark.createDataFrame([("a", 1), ("a", 1), ("b", 3), ("c", 4)], ["C1", "C2"]) >>> df2 = spark.createDataFrame([("a", 1), ("a", 1), ("b", 3)], ["C1", "C2"]) >>> df1.intersectAll(df2).sort("C1", "C2").show() +---+---+ | C1| C2| +---+---+ | a| 1| | a| 1| | b| 3| +---+---+ Also as standard in SQL, this function resolves columns by position (not by name). """ return DataFrame(self._jdf.intersectAll(other._jdf), self.sql_ctx)
Return a new :class:`DataFrame` containing rows in this frame but not in another frame.
def subtract(self, other): """ Return a new :class:`DataFrame` containing rows in this frame but not in another frame. This is equivalent to `EXCEPT DISTINCT` in SQL. """ return DataFrame(getattr(self._jdf, "except")(other._jdf), self.sql_ctx)
Return a new :class:`DataFrame` with duplicate rows removed, optionally only considering certain columns.
def dropDuplicates(self, subset=None): """Return a new :class:`DataFrame` with duplicate rows removed, optionally only considering certain columns. For a static batch :class:`DataFrame`, it just drops duplicate rows. For a streaming :class:`DataFrame`, it will keep all data across triggers as intermediate state to drop duplicates rows. You can use :func:`withWatermark` to limit how late the duplicate data can be and system will accordingly limit the state. In addition, too late data older than watermark will be dropped to avoid any possibility of duplicates. :func:`drop_duplicates` is an alias for :func:`dropDuplicates`. >>> from pyspark.sql import Row >>> df = sc.parallelize([ \\ ... Row(name='Alice', age=5, height=80), \\ ... Row(name='Alice', age=5, height=80), \\ ... Row(name='Alice', age=10, height=80)]).toDF() >>> df.dropDuplicates().show() +---+------+-----+ |age|height| name| +---+------+-----+ | 5| 80|Alice| | 10| 80|Alice| +---+------+-----+ >>> df.dropDuplicates(['name', 'height']).show() +---+------+-----+ |age|height| name| +---+------+-----+ | 5| 80|Alice| +---+------+-----+ """ if subset is None: jdf = self._jdf.dropDuplicates() else: jdf = self._jdf.dropDuplicates(self._jseq(subset)) return DataFrame(jdf, self.sql_ctx)
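For the streaming case mentioned in the docstring, deduplication is usually combined with a watermark so old state can be discarded; a sketch with a placeholder streaming DataFrame and hypothetical "eventId"/"eventTime" columns:

# streaming_df: a hypothetical streaming DataFrame with "eventId" and "eventTime" columns.
deduped = (streaming_df
           .withWatermark("eventTime", "1 hour")
           .dropDuplicates(["eventId", "eventTime"]))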
Returns a new :class:`DataFrame` omitting rows with null values. :func:`DataFrame.dropna` and :func:`DataFrameNaFunctions.drop` are aliases of each other.
def dropna(self, how='any', thresh=None, subset=None): """Returns a new :class:`DataFrame` omitting rows with null values. :func:`DataFrame.dropna` and :func:`DataFrameNaFunctions.drop` are aliases of each other. :param how: 'any' or 'all'. If 'any', drop a row if it contains any nulls. If 'all', drop a row only if all its values are null. :param thresh: int, default None If specified, drop rows that have less than `thresh` non-null values. This overwrites the `how` parameter. :param subset: optional list of column names to consider. >>> df4.na.drop().show() +---+------+-----+ |age|height| name| +---+------+-----+ | 10| 80|Alice| +---+------+-----+ """ if how is not None and how not in ['any', 'all']: raise ValueError("how ('" + how + "') should be 'any' or 'all'") if subset is None: subset = self.columns elif isinstance(subset, basestring): subset = [subset] elif not isinstance(subset, (list, tuple)): raise ValueError("subset should be a list or tuple of column names") if thresh is None: thresh = len(subset) if how == 'any' else 1 return DataFrame(self._jdf.na().drop(thresh, self._jseq(subset)), self.sql_ctx)
Replace null values, alias for ``na.fill()``. :func:`DataFrame.fillna` and :func:`DataFrameNaFunctions.fill` are aliases of each other.
def fillna(self, value, subset=None): """Replace null values, alias for ``na.fill()``. :func:`DataFrame.fillna` and :func:`DataFrameNaFunctions.fill` are aliases of each other. :param value: int, long, float, string, bool or dict. Value to replace null values with. If the value is a dict, then `subset` is ignored and `value` must be a mapping from column name (string) to replacement value. The replacement value must be an int, long, float, boolean, or string. :param subset: optional list of column names to consider. Columns specified in subset that do not have matching data type are ignored. For example, if `value` is a string, and subset contains a non-string column, then the non-string column is simply ignored. >>> df4.na.fill(50).show() +---+------+-----+ |age|height| name| +---+------+-----+ | 10| 80|Alice| | 5| 50| Bob| | 50| 50| Tom| | 50| 50| null| +---+------+-----+ >>> df5.na.fill(False).show() +----+-------+-----+ | age| name| spy| +----+-------+-----+ | 10| Alice|false| | 5| Bob|false| |null|Mallory| true| +----+-------+-----+ >>> df4.na.fill({'age': 50, 'name': 'unknown'}).show() +---+------+-------+ |age|height| name| +---+------+-------+ | 10| 80| Alice| | 5| null| Bob| | 50| null| Tom| | 50| null|unknown| +---+------+-------+ """ if not isinstance(value, (float, int, long, basestring, bool, dict)): raise ValueError("value should be a float, int, long, string, bool or dict") # Note that bool validates isinstance(int), but we don't want to # convert bools to floats if not isinstance(value, bool) and isinstance(value, (int, long)): value = float(value) if isinstance(value, dict): return DataFrame(self._jdf.na().fill(value), self.sql_ctx) elif subset is None: return DataFrame(self._jdf.na().fill(value), self.sql_ctx) else: if isinstance(subset, basestring): subset = [subset] elif not isinstance(subset, (list, tuple)): raise ValueError("subset should be a list or tuple of column names") return DataFrame(self._jdf.na().fill(value, self._jseq(subset)), self.sql_ctx)
Returns a new :class:`DataFrame` replacing a value with another value. :func:`DataFrame.replace` and :func:`DataFrameNaFunctions.replace` are aliases of each other. Values to_replace and value must have the same type and can only be numerics, booleans, or strings. Value can have None. When replacing, the new value will be cast to the type of the existing column. For numeric replacements all values to be replaced should have unique floating point representation. In case of conflicts (for example with `{42: -1, 42.0: 1}`) an arbitrary replacement will be used.
def replace(self, to_replace, value=_NoValue, subset=None): """Returns a new :class:`DataFrame` replacing a value with another value. :func:`DataFrame.replace` and :func:`DataFrameNaFunctions.replace` are aliases of each other. Values to_replace and value must have the same type and can only be numerics, booleans, or strings. Value can have None. When replacing, the new value will be cast to the type of the existing column. For numeric replacements all values to be replaced should have unique floating point representation. In case of conflicts (for example with `{42: -1, 42.0: 1}`) and arbitrary replacement will be used. :param to_replace: bool, int, long, float, string, list or dict. Value to be replaced. If the value is a dict, then `value` is ignored or can be omitted, and `to_replace` must be a mapping between a value and a replacement. :param value: bool, int, long, float, string, list or None. The replacement value must be a bool, int, long, float, string or None. If `value` is a list, `value` should be of the same length and type as `to_replace`. If `value` is a scalar and `to_replace` is a sequence, then `value` is used as a replacement for each item in `to_replace`. :param subset: optional list of column names to consider. Columns specified in subset that do not have matching data type are ignored. For example, if `value` is a string, and subset contains a non-string column, then the non-string column is simply ignored. >>> df4.na.replace(10, 20).show() +----+------+-----+ | age|height| name| +----+------+-----+ | 20| 80|Alice| | 5| null| Bob| |null| null| Tom| |null| null| null| +----+------+-----+ >>> df4.na.replace('Alice', None).show() +----+------+----+ | age|height|name| +----+------+----+ | 10| 80|null| | 5| null| Bob| |null| null| Tom| |null| null|null| +----+------+----+ >>> df4.na.replace({'Alice': None}).show() +----+------+----+ | age|height|name| +----+------+----+ | 10| 80|null| | 5| null| Bob| |null| null| Tom| |null| null|null| +----+------+----+ >>> df4.na.replace(['Alice', 'Bob'], ['A', 'B'], 'name').show() +----+------+----+ | age|height|name| +----+------+----+ | 10| 80| A| | 5| null| B| |null| null| Tom| |null| null|null| +----+------+----+ """ if value is _NoValue: if isinstance(to_replace, dict): value = None else: raise TypeError("value argument is required when to_replace is not a dictionary.") # Helper functions def all_of(types): """Given a type or tuple of types and a sequence of xs check if each x is instance of type(s) >>> all_of(bool)([True, False]) True >>> all_of(basestring)(["a", 1]) False """ def all_of_(xs): return all(isinstance(x, types) for x in xs) return all_of_ all_of_bool = all_of(bool) all_of_str = all_of(basestring) all_of_numeric = all_of((float, int, long)) # Validate input types valid_types = (bool, float, int, long, basestring, list, tuple) if not isinstance(to_replace, valid_types + (dict, )): raise ValueError( "to_replace should be a bool, float, int, long, string, list, tuple, or dict. " "Got {0}".format(type(to_replace))) if not isinstance(value, valid_types) and value is not None \ and not isinstance(to_replace, dict): raise ValueError("If to_replace is not a dict, value should be " "a bool, float, int, long, string, list, tuple or None. " "Got {0}".format(type(value))) if isinstance(to_replace, (list, tuple)) and isinstance(value, (list, tuple)): if len(to_replace) != len(value): raise ValueError("to_replace and value lists should be of the same length. 
" "Got {0} and {1}".format(len(to_replace), len(value))) if not (subset is None or isinstance(subset, (list, tuple, basestring))): raise ValueError("subset should be a list or tuple of column names, " "column name or None. Got {0}".format(type(subset))) # Reshape input arguments if necessary if isinstance(to_replace, (float, int, long, basestring)): to_replace = [to_replace] if isinstance(to_replace, dict): rep_dict = to_replace if value is not None: warnings.warn("to_replace is a dict and value is not None. value will be ignored.") else: if isinstance(value, (float, int, long, basestring)) or value is None: value = [value for _ in range(len(to_replace))] rep_dict = dict(zip(to_replace, value)) if isinstance(subset, basestring): subset = [subset] # Verify we were not passed in mixed type generics. if not any(all_of_type(rep_dict.keys()) and all_of_type(x for x in rep_dict.values() if x is not None) for all_of_type in [all_of_bool, all_of_str, all_of_numeric]): raise ValueError("Mixed type replacements are not supported") if subset is None: return DataFrame(self._jdf.na().replace('*', rep_dict), self.sql_ctx) else: return DataFrame( self._jdf.na().replace(self._jseq(subset), self._jmap(rep_dict)), self.sql_ctx)
Calculates the approximate quantiles of numerical columns of a DataFrame.
def approxQuantile(self, col, probabilities, relativeError): """ Calculates the approximate quantiles of numerical columns of a DataFrame. The result of this algorithm has the following deterministic bound: If the DataFrame has N elements and if we request the quantile at probability `p` up to error `err`, then the algorithm will return a sample `x` from the DataFrame so that the *exact* rank of `x` is close to (p * N). More precisely, floor((p - err) * N) <= rank(x) <= ceil((p + err) * N). This method implements a variation of the Greenwald-Khanna algorithm (with some speed optimizations). The algorithm was first present in [[https://doi.org/10.1145/375663.375670 Space-efficient Online Computation of Quantile Summaries]] by Greenwald and Khanna. Note that null values will be ignored in numerical columns before calculation. For columns only containing null values, an empty list is returned. :param col: str, list. Can be a single column name, or a list of names for multiple columns. :param probabilities: a list of quantile probabilities Each number must belong to [0, 1]. For example 0 is the minimum, 0.5 is the median, 1 is the maximum. :param relativeError: The relative target precision to achieve (>= 0). If set to zero, the exact quantiles are computed, which could be very expensive. Note that values greater than 1 are accepted but give the same result as 1. :return: the approximate quantiles at the given probabilities. If the input `col` is a string, the output is a list of floats. If the input `col` is a list or tuple of strings, the output is also a list, but each element in it is a list of floats, i.e., the output is a list of list of floats. .. versionchanged:: 2.2 Added support for multiple columns. """ if not isinstance(col, (basestring, list, tuple)): raise ValueError("col should be a string, list or tuple, but got %r" % type(col)) isStr = isinstance(col, basestring) if isinstance(col, tuple): col = list(col) elif isStr: col = [col] for c in col: if not isinstance(c, basestring): raise ValueError("columns should be strings, but got %r" % type(c)) col = _to_list(self._sc, col) if not isinstance(probabilities, (list, tuple)): raise ValueError("probabilities should be a list or tuple") if isinstance(probabilities, tuple): probabilities = list(probabilities) for p in probabilities: if not isinstance(p, (float, int, long)) or p < 0 or p > 1: raise ValueError("probabilities should be numerical (float, int, long) in [0,1].") probabilities = _to_list(self._sc, probabilities) if not isinstance(relativeError, (float, int, long)) or relativeError < 0: raise ValueError("relativeError should be numerical (float, int, long) >= 0.") relativeError = float(relativeError) jaq = self._jdf.stat().approxQuantile(col, probabilities, relativeError) jaq_list = [list(j) for j in jaq] return jaq_list[0] if isStr else jaq_list
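For example, a hedged sketch computing an approximate median and quartiles with a 1% relative error bound (the column name is illustrative):

df = spark.range(1000).withColumnRenamed("id", "x")

median = df.approxQuantile("x", [0.5], 0.01)              # single-column input -> list of floats
quartiles = df.approxQuantile("x", [0.25, 0.5, 0.75], 0.01)
# relativeError=0.0 would give exact quantiles, at potentially much higher cost.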
Calculates the correlation of two columns of a DataFrame as a double value. Currently only supports the Pearson Correlation Coefficient. :func:`DataFrame.corr` and :func:`DataFrameStatFunctions.corr` are aliases of each other.
def corr(self, col1, col2, method=None): """ Calculates the correlation of two columns of a DataFrame as a double value. Currently only supports the Pearson Correlation Coefficient. :func:`DataFrame.corr` and :func:`DataFrameStatFunctions.corr` are aliases of each other. :param col1: The name of the first column :param col2: The name of the second column :param method: The correlation method. Currently only supports "pearson" """ if not isinstance(col1, basestring): raise ValueError("col1 should be a string.") if not isinstance(col2, basestring): raise ValueError("col2 should be a string.") if not method: method = "pearson" if not method == "pearson": raise ValueError("Currently only the calculation of the Pearson Correlation " + "coefficient is supported.") return self._jdf.stat().corr(col1, col2, method)
Calculate the sample covariance for the given columns, specified by their names, as a double value. :func:`DataFrame.cov` and :func:`DataFrameStatFunctions.cov` are aliases.
def cov(self, col1, col2): """ Calculate the sample covariance for the given columns, specified by their names, as a double value. :func:`DataFrame.cov` and :func:`DataFrameStatFunctions.cov` are aliases. :param col1: The name of the first column :param col2: The name of the second column """ if not isinstance(col1, basestring): raise ValueError("col1 should be a string.") if not isinstance(col2, basestring): raise ValueError("col2 should be a string.") return self._jdf.stat().cov(col1, col2)
Computes a pair-wise frequency table of the given columns. Also known as a contingency table. The number of distinct values for each column should be less than 1e4. At most 1e6 non-zero pair frequencies will be returned. The first column of each row will be the distinct values of `col1` and the column names will be the distinct values of `col2`. The name of the first column will be `$col1_$col2`. Pairs that have no occurrences will have zero as their counts. :func:`DataFrame.crosstab` and :func:`DataFrameStatFunctions.crosstab` are aliases.
def crosstab(self, col1, col2): """ Computes a pair-wise frequency table of the given columns. Also known as a contingency table. The number of distinct values for each column should be less than 1e4. At most 1e6 non-zero pair frequencies will be returned. The first column of each row will be the distinct values of `col1` and the column names will be the distinct values of `col2`. The name of the first column will be `$col1_$col2`. Pairs that have no occurrences will have zero as their counts. :func:`DataFrame.crosstab` and :func:`DataFrameStatFunctions.crosstab` are aliases. :param col1: The name of the first column. Distinct items will make the first item of each row. :param col2: The name of the second column. Distinct items will make the column names of the DataFrame. """ if not isinstance(col1, basestring): raise ValueError("col1 should be a string.") if not isinstance(col2, basestring): raise ValueError("col2 should be a string.") return DataFrame(self._jdf.stat().crosstab(col1, col2), self.sql_ctx)
Finding frequent items for columns, possibly with false positives. Using the frequent element count algorithm described in https://doi.org/10.1145/762471.762473, proposed by Karp, Schenker, and Papadimitriou. :func:`DataFrame.freqItems` and :func:`DataFrameStatFunctions.freqItems` are aliases.
def freqItems(self, cols, support=None): """ Finding frequent items for columns, possibly with false positives. Using the frequent element count algorithm described in "https://doi.org/10.1145/762471.762473, proposed by Karp, Schenker, and Papadimitriou". :func:`DataFrame.freqItems` and :func:`DataFrameStatFunctions.freqItems` are aliases. .. note:: This function is meant for exploratory data analysis, as we make no guarantee about the backward compatibility of the schema of the resulting DataFrame. :param cols: Names of the columns to calculate frequent items for as a list or tuple of strings. :param support: The frequency with which to consider an item 'frequent'. Default is 1%. The support must be greater than 1e-4. """ if isinstance(cols, tuple): cols = list(cols) if not isinstance(cols, list): raise ValueError("cols must be a list or tuple of column names as strings.") if not support: support = 0.01 return DataFrame(self._jdf.stat().freqItems(_to_seq(self._sc, cols), support), self.sql_ctx)
Returns a new :class:`DataFrame` by adding a column or replacing the existing column that has the same name.
def withColumn(self, colName, col): """ Returns a new :class:`DataFrame` by adding a column or replacing the existing column that has the same name. The column expression must be an expression over this DataFrame; attempting to add a column from some other dataframe will raise an error. :param colName: string, name of the new column. :param col: a :class:`Column` expression for the new column. .. note:: This method introduces a projection internally. Therefore, calling it multiple times, for instance, via loops in order to add multiple columns can generate big plans which can cause performance issues and even `StackOverflowException`. To avoid this, use :func:`select` with the multiple columns at once. >>> df.withColumn('age2', df.age + 2).collect() [Row(age=2, name=u'Alice', age2=4), Row(age=5, name=u'Bob', age2=7)] """ assert isinstance(col, Column), "col should be Column" return DataFrame(self._jdf.withColumn(colName, col._jc), self.sql_ctx)
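Illustrating the note above about chained projections, a sketch that replaces a loop of withColumn calls with a single select (the derived column names here are hypothetical):

from pyspark.sql.functions import col

# Instead of: for c in numeric_cols: df = df.withColumn(c + "_double", col(c) * 2)
numeric_cols = ["age"]  # hypothetical numeric columns of df
df2 = df.select("*", *[(col(c) * 2).alias(c + "_double") for c in numeric_cols])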
Returns a new :class:`DataFrame` by renaming an existing column. This is a no-op if schema doesn't contain the given column name.
def withColumnRenamed(self, existing, new): """Returns a new :class:`DataFrame` by renaming an existing column. This is a no-op if schema doesn't contain the given column name. :param existing: string, name of the existing column to rename. :param new: string, new name of the column. >>> df.withColumnRenamed('age', 'age2').collect() [Row(age2=2, name=u'Alice'), Row(age2=5, name=u'Bob')] """ return DataFrame(self._jdf.withColumnRenamed(existing, new), self.sql_ctx)
Returns a new :class:`DataFrame` that drops the specified column. This is a no-op if schema doesn't contain the given column name(s).
def drop(self, *cols): """Returns a new :class:`DataFrame` that drops the specified column. This is a no-op if schema doesn't contain the given column name(s). :param cols: a string name of the column to drop, or a :class:`Column` to drop, or a list of string name of the columns to drop. >>> df.drop('age').collect() [Row(name=u'Alice'), Row(name=u'Bob')] >>> df.drop(df.age).collect() [Row(name=u'Alice'), Row(name=u'Bob')] >>> df.join(df2, df.name == df2.name, 'inner').drop(df.name).collect() [Row(age=5, height=85, name=u'Bob')] >>> df.join(df2, df.name == df2.name, 'inner').drop(df2.name).collect() [Row(age=5, name=u'Bob', height=85)] >>> df.join(df2, 'name', 'inner').drop('age', 'height').collect() [Row(name=u'Bob')] """ if len(cols) == 1: col = cols[0] if isinstance(col, basestring): jdf = self._jdf.drop(col) elif isinstance(col, Column): jdf = self._jdf.drop(col._jc) else: raise TypeError("col should be a string or a Column") else: for col in cols: if not isinstance(col, basestring): raise TypeError("each col in the param list should be a string") jdf = self._jdf.drop(self._jseq(cols)) return DataFrame(jdf, self.sql_ctx)
Returns a new :class:`DataFrame` with the specified new column names.
def toDF(self, *cols): """Returns a new :class:`DataFrame` with the specified new column names :param cols: list of new column names (string) >>> df.toDF('f1', 'f2').collect() [Row(f1=2, f2=u'Alice'), Row(f1=5, f2=u'Bob')] """ jdf = self._jdf.toDF(self._jseq(cols)) return DataFrame(jdf, self.sql_ctx)
Returns a new :class:`DataFrame`. Concise syntax for chaining custom transformations.
def transform(self, func): """Returns a new class:`DataFrame`. Concise syntax for chaining custom transformations. :param func: a function that takes and returns a class:`DataFrame`. >>> from pyspark.sql.functions import col >>> df = spark.createDataFrame([(1, 1.0), (2, 2.0)], ["int", "float"]) >>> def cast_all_to_int(input_df): ... return input_df.select([col(col_name).cast("int") for col_name in input_df.columns]) >>> def sort_columns_asc(input_df): ... return input_df.select(*sorted(input_df.columns)) >>> df.transform(cast_all_to_int).transform(sort_columns_asc).show() +-----+---+ |float|int| +-----+---+ | 1| 1| | 2| 2| +-----+---+ """ result = func(self) assert isinstance(result, DataFrame), "Func returned an instance of type [%s], " \ "should have been DataFrame." % type(result) return result
Returns the contents of this :class:`DataFrame` as Pandas ``pandas.DataFrame``.
def toPandas(self): """ Returns the contents of this :class:`DataFrame` as Pandas ``pandas.DataFrame``. This is only available if Pandas is installed and available. .. note:: This method should only be used if the resulting Pandas's DataFrame is expected to be small, as all the data is loaded into the driver's memory. .. note:: Usage with spark.sql.execution.arrow.enabled=True is experimental. >>> df.toPandas() # doctest: +SKIP age name 0 2 Alice 1 5 Bob """ from pyspark.sql.utils import require_minimum_pandas_version require_minimum_pandas_version() import pandas as pd if self.sql_ctx._conf.pandasRespectSessionTimeZone(): timezone = self.sql_ctx._conf.sessionLocalTimeZone() else: timezone = None if self.sql_ctx._conf.arrowEnabled(): use_arrow = True try: from pyspark.sql.types import to_arrow_schema from pyspark.sql.utils import require_minimum_pyarrow_version require_minimum_pyarrow_version() to_arrow_schema(self.schema) except Exception as e: if self.sql_ctx._conf.arrowFallbackEnabled(): msg = ( "toPandas attempted Arrow optimization because " "'spark.sql.execution.arrow.enabled' is set to true; however, " "failed by the reason below:\n %s\n" "Attempting non-optimization as " "'spark.sql.execution.arrow.fallback.enabled' is set to " "true." % _exception_message(e)) warnings.warn(msg) use_arrow = False else: msg = ( "toPandas attempted Arrow optimization because " "'spark.sql.execution.arrow.enabled' is set to true, but has reached " "the error below and will not continue because automatic fallback " "with 'spark.sql.execution.arrow.fallback.enabled' has been set to " "false.\n %s" % _exception_message(e)) warnings.warn(msg) raise # Try to use Arrow optimization when the schema is supported and the required version # of PyArrow is found, if 'spark.sql.execution.arrow.enabled' is enabled. if use_arrow: try: from pyspark.sql.types import _check_dataframe_localize_timestamps import pyarrow batches = self._collectAsArrow() if len(batches) > 0: table = pyarrow.Table.from_batches(batches) # Pandas DataFrame created from PyArrow uses datetime64[ns] for date type # values, but we should use datetime.date to match the behavior with when # Arrow optimization is disabled. pdf = table.to_pandas(date_as_object=True) return _check_dataframe_localize_timestamps(pdf, timezone) else: return pd.DataFrame.from_records([], columns=self.columns) except Exception as e: # We might have to allow fallback here as well but multiple Spark jobs can # be executed. So, simply fail in this case for now. msg = ( "toPandas attempted Arrow optimization because " "'spark.sql.execution.arrow.enabled' is set to true, but has reached " "the error below and can not continue. Note that " "'spark.sql.execution.arrow.fallback.enabled' does not have an effect " "on failures in the middle of computation.\n %s" % _exception_message(e)) warnings.warn(msg) raise # Below is toPandas without Arrow optimization. pdf = pd.DataFrame.from_records(self.collect(), columns=self.columns) dtype = {} for field in self.schema: pandas_type = _to_corrected_pandas_type(field.dataType) # SPARK-21766: if an integer field is nullable and has null values, it can be # inferred by pandas as float column. Once we convert the column with NaN back # to integer type e.g., np.int16, we will hit exception. So we use the inferred # float type, not the corrected type from the schema in this case. 
if pandas_type is not None and \ not(isinstance(field.dataType, IntegralType) and field.nullable and pdf[field.name].isnull().any()): dtype[field.name] = pandas_type for f, t in dtype.items(): pdf[f] = pdf[f].astype(t, copy=False) if timezone is None: return pdf else: from pyspark.sql.types import _check_series_convert_timestamps_local_tz for field in self.schema: # TODO: handle nested timestamps, such as ArrayType(TimestampType())? if isinstance(field.dataType, TimestampType): pdf[field.name] = \ _check_series_convert_timestamps_local_tz(pdf[field.name], timezone) return pdf
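The conversion above is driven by session configuration; a short hedged sketch of toggling the Arrow path, assuming pyarrow and pandas are installed (the sample data is illustrative):

# Hedged sketch: enabling the Arrow-backed toPandas path (assumes pyarrow and pandas installed).
from pyspark.sql import SparkSession

spark = SparkSession.builder.getOrCreate()
# These are the configuration keys consulted in the code above.
spark.conf.set("spark.sql.execution.arrow.enabled", "true")
spark.conf.set("spark.sql.execution.arrow.fallback.enabled", "true")

df = spark.createDataFrame([(2, "Alice"), (5, "Bob")], ["age", "name"])
pdf = df.toPandas()        # uses Arrow when the schema is supported, otherwise falls back
print(pdf.dtypes)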
Returns all records as a list of ArrowRecordBatches; pyarrow must be installed and available on driver and worker Python environments.
def _collectAsArrow(self): """ Returns all records as a list of ArrowRecordBatches, pyarrow must be installed and available on driver and worker Python environments. .. note:: Experimental. """ with SCCallSiteSync(self._sc) as css: sock_info = self._jdf.collectAsArrowToPython() # Collect list of un-ordered batches where last element is a list of correct order indices results = list(_load_from_socket(sock_info, ArrowCollectSerializer())) batches = results[:-1] batch_order = results[-1] # Re-order the batch list using the correct order return [batches[i] for i in batch_order]
Returns the :class:`StatCounter` members as a ``dict``.
def asDict(self, sample=False): """Returns the :class:`StatCounter` members as a ``dict``. >>> sc.parallelize([1., 2., 3., 4.]).stats().asDict() {'count': 4L, 'max': 4.0, 'mean': 2.5, 'min': 1.0, 'stdev': 1.2909944487358056, 'sum': 10.0, 'variance': 1.6666666666666667} """ return { 'count': self.count(), 'mean': self.mean(), 'sum': self.sum(), 'min': self.min(), 'max': self.max(), 'stdev': self.stdev() if sample else self.sampleStdev(), 'variance': self.variance() if sample else self.sampleVariance() }
Returns a list of function information via JVM. Sorts wrapped expression infos by name and returns them.
def _list_function_infos(jvm): """ Returns a list of function information via JVM. Sorts wrapped expression infos by name and returns them. """ jinfos = jvm.org.apache.spark.sql.api.python.PythonSQLUtils.listBuiltinFunctionInfos() infos = [] for jinfo in jinfos: name = jinfo.getName() usage = jinfo.getUsage() usage = usage.replace("_FUNC_", name) if usage is not None else usage infos.append(ExpressionInfo( className=jinfo.getClassName(), name=name, usage=usage, arguments=jinfo.getArguments().replace("_FUNC_", name), examples=jinfo.getExamples().replace("_FUNC_", name), note=jinfo.getNote(), since=jinfo.getSince(), deprecated=jinfo.getDeprecated())) return sorted(infos, key=lambda i: i.name)
Makes the usage description pretty and returns a formatted string if usage is not an empty string. Otherwise returns None.
def _make_pretty_usage(usage): """ Makes the usage description pretty and returns a formatted string if `usage` is not an empty string. Otherwise, returns None. """ if usage is not None and usage.strip() != "": usage = "\n".join(map(lambda u: u.strip(), usage.split("\n"))) return "%s\n\n" % usage
Makes the arguments description pretty and returns a formatted string if arguments starts with the argument prefix. Otherwise returns None.
def _make_pretty_arguments(arguments): """ Makes the arguments description pretty and returns a formatted string if `arguments` starts with the argument prefix. Otherwise, returns None. Expected input: Arguments: * arg0 - ... ... * arg0 - ... ... Expected output: **Arguments:** * arg0 - ... ... * arg0 - ... ... """ if arguments.startswith("\n Arguments:"): arguments = "\n".join(map(lambda u: u[6:], arguments.strip().split("\n")[1:])) return "**Arguments:**\n\n%s\n\n" % arguments
Makes the examples description pretty and returns a formatted string if examples starts with the example prefix. Otherwise returns None.
def _make_pretty_examples(examples): """ Makes the examples description pretty and returns a formatted string if `examples` starts with the example prefix. Otherwise, returns None. Expected input: Examples: > SELECT ...; ... > SELECT ...; ... Expected output: **Examples:** ``` > SELECT ...; ... > SELECT ...; ... ``` """ if examples.startswith("\n Examples:"): examples = "\n".join(map(lambda u: u[6:], examples.strip().split("\n")[1:])) return "**Examples:**\n\n```\n%s\n```\n\n" % examples
Makes the note description pretty and returns a formatted string if note is not an empty string. Otherwise returns None.
def _make_pretty_note(note): """ Makes the note description pretty and returns a formatted string if `note` is not an empty string. Otherwise, returns None. Expected input: ... Expected output: **Note:** ... """ if note != "": note = "\n".join(map(lambda n: n[4:], note.split("\n"))) return "**Note:**\n%s\n" % note
Makes the deprecated description pretty and returns a formatted string if deprecated is not an empty string. Otherwise returns None.
def _make_pretty_deprecated(deprecated): """ Makes the deprecated description pretty and returns a formatted string if `deprecated` is not an empty string. Otherwise, returns None. Expected input: ... Expected output: **Deprecated:** ... """ if deprecated != "": deprecated = "\n".join(map(lambda n: n[4:], deprecated.split("\n"))) return "**Deprecated:**\n%s\n" % deprecated
Generates a markdown file after listing the function information. The output file is created in path.
def generate_sql_markdown(jvm, path): """ Generates a markdown file after listing the function information. The output file is created in `path`. Expected output: ### NAME USAGE **Arguments:** ARGUMENTS **Examples:** ``` EXAMPLES ``` **Note:** NOTE **Since:** SINCE **Deprecated:** DEPRECATED <br/> """ with open(path, 'w') as mdfile: for info in _list_function_infos(jvm): name = info.name usage = _make_pretty_usage(info.usage) arguments = _make_pretty_arguments(info.arguments) examples = _make_pretty_examples(info.examples) note = _make_pretty_note(info.note) since = info.since deprecated = _make_pretty_deprecated(info.deprecated) mdfile.write("### %s\n\n" % name) if usage is not None: mdfile.write("%s\n\n" % usage.strip()) if arguments is not None: mdfile.write(arguments) if examples is not None: mdfile.write(examples) if note is not None: mdfile.write(note) if since is not None and since != "": mdfile.write("**Since:** %s\n\n" % since.strip()) if deprecated is not None: mdfile.write(deprecated) mdfile.write("<br/>\n\n")
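A hedged sketch of how the generator above could be invoked; the gateway handle obtained through an active SparkSession and the output path are placeholders, not the script's actual entry point:

# Hedged sketch: driving generate_sql_markdown through a live py4j gateway (path is a placeholder).
from pyspark.sql import SparkSession

spark = SparkSession.builder.getOrCreate()
jvm = spark.sparkContext._jvm          # py4j view of the JVM, exposing PythonSQLUtils
generate_sql_markdown(jvm, "/tmp/sql-functions.md")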
Predict values for a single data point or an RDD of points using the model trained.
def predict(self, x): """ Predict values for a single data point or an RDD of points using the model trained. """ if isinstance(x, RDD): return x.map(lambda v: self.predict(v)) x = _convert_to_vector(x) if self.numClasses == 2: margin = self.weights.dot(x) + self._intercept if margin > 0: prob = 1 / (1 + exp(-margin)) else: exp_margin = exp(margin) prob = exp_margin / (1 + exp_margin) if self._threshold is None: return prob else: return 1 if prob > self._threshold else 0 else: best_class = 0 max_margin = 0.0 if x.size + 1 == self._dataWithBiasSize: for i in range(0, self._numClasses - 1): margin = x.dot(self._weightsMatrix[i][0:x.size]) + \ self._weightsMatrix[i][x.size] if margin > max_margin: max_margin = margin best_class = i + 1 else: for i in range(0, self._numClasses - 1): margin = x.dot(self._weightsMatrix[i]) if margin > max_margin: max_margin = margin best_class = i + 1 return best_class
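The binary branch above switches between two algebraically equivalent forms of the sigmoid to avoid overflow in exp(); a standalone sketch of the same trick, with illustrative weights:

# Hedged sketch: the numerically stable sigmoid used by the binary-classification branch above.
from math import exp
import numpy as np

def stable_sigmoid(margin):
    # exp(-margin) can overflow for very negative margins, so pick the form by sign.
    if margin > 0:
        return 1.0 / (1.0 + exp(-margin))
    exp_margin = exp(margin)
    return exp_margin / (1.0 + exp_margin)

weights = np.array([0.5, -1.2])        # illustrative values only
intercept = 0.1
x = np.array([1.0, 0.0])
margin = float(weights.dot(x)) + intercept
prob = stable_sigmoid(margin)
print(prob, 1 if prob > 0.5 else 0)    # probability, then the 0.5-thresholded class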
Save this model to the given path.
def save(self, sc, path): """ Save this model to the given path. """ java_model = sc._jvm.org.apache.spark.mllib.classification.LogisticRegressionModel( _py2java(sc, self._coeff), self.intercept, self.numFeatures, self.numClasses) java_model.save(sc._jsc.sc(), path)
Train a logistic regression model on the given data.
def train(cls, data, iterations=100, initialWeights=None, regParam=0.0, regType="l2", intercept=False, corrections=10, tolerance=1e-6, validateData=True, numClasses=2): """ Train a logistic regression model on the given data. :param data: The training data, an RDD of LabeledPoint. :param iterations: The number of iterations. (default: 100) :param initialWeights: The initial weights. (default: None) :param regParam: The regularizer parameter. (default: 0.0) :param regType: The type of regularizer used for training our model. Supported values: - "l1" for using L1 regularization - "l2" for using L2 regularization (default) - None for no regularization :param intercept: Boolean parameter which indicates the use or not of the augmented representation for training data (i.e., whether bias features are activated or not). (default: False) :param corrections: The number of corrections used in the LBFGS update. If a known updater is used for binary classification, it calls the ml implementation and this parameter will have no effect. (default: 10) :param tolerance: The convergence tolerance of iterations for L-BFGS. (default: 1e-6) :param validateData: Boolean parameter which indicates if the algorithm should validate data before training. (default: True) :param numClasses: The number of classes (i.e., outcomes) a label can take in Multinomial Logistic Regression. (default: 2) >>> data = [ ... LabeledPoint(0.0, [0.0, 1.0]), ... LabeledPoint(1.0, [1.0, 0.0]), ... ] >>> lrm = LogisticRegressionWithLBFGS.train(sc.parallelize(data), iterations=10) >>> lrm.predict([1.0, 0.0]) 1 >>> lrm.predict([0.0, 1.0]) 0 """ def train(rdd, i): return callMLlibFunc("trainLogisticRegressionModelWithLBFGS", rdd, int(iterations), i, float(regParam), regType, bool(intercept), int(corrections), float(tolerance), bool(validateData), int(numClasses)) if initialWeights is None: if numClasses == 2: initialWeights = [0.0] * len(data.first().features) else: if intercept: initialWeights = [0.0] * (len(data.first().features) + 1) * (numClasses - 1) else: initialWeights = [0.0] * len(data.first().features) * (numClasses - 1) return _regression_train_wrapper(train, LogisticRegressionModel, data, initialWeights)
Predict values for a single data point or an RDD of points using the model trained.
def predict(self, x): """ Predict values for a single data point or an RDD of points using the model trained. """ if isinstance(x, RDD): return x.map(lambda v: self.predict(v)) x = _convert_to_vector(x) margin = self.weights.dot(x) + self.intercept if self._threshold is None: return margin else: return 1 if margin > self._threshold else 0
Save this model to the given path.
def save(self, sc, path): """ Save this model to the given path. """ java_model = sc._jvm.org.apache.spark.mllib.classification.SVMModel( _py2java(sc, self._coeff), self.intercept) java_model.save(sc._jsc.sc(), path)
Load a model from the given path.
def load(cls, sc, path): """ Load a model from the given path. """ java_model = sc._jvm.org.apache.spark.mllib.classification.SVMModel.load( sc._jsc.sc(), path) weights = _java2py(sc, java_model.weights()) intercept = java_model.intercept() threshold = java_model.getThreshold().get() model = SVMModel(weights, intercept) model.setThreshold(threshold) return model
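A small save/load roundtrip sketch for the two methods above, assuming an active SparkContext `sc`; the training call, toy data, and temporary path are illustrative:

# Hedged sketch: SVMModel save/load roundtrip (assumes a live SparkContext `sc`).
import shutil
import tempfile
from pyspark.mllib.classification import SVMModel, SVMWithSGD
from pyspark.mllib.regression import LabeledPoint

data = sc.parallelize([LabeledPoint(0.0, [0.0]), LabeledPoint(1.0, [1.0])])
model = SVMWithSGD.train(data, iterations=10)

path = tempfile.mkdtemp()
model.save(sc, path)                   # delegates to the JVM SVMModel, as above
same_model = SVMModel.load(sc, path)   # restores weights, intercept and threshold
print(same_model.predict([1.0]))
shutil.rmtree(path, ignore_errors=True)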
Train a Naive Bayes model given an RDD of (label, features) vectors.
def train(cls, data, lambda_=1.0): """ Train a Naive Bayes model given an RDD of (label, features) vectors. This is the Multinomial NB (U{http://tinyurl.com/lsdw6p}) which can handle all kinds of discrete data. For example, by converting documents into TF-IDF vectors, it can be used for document classification. By making every vector a 0-1 vector, it can also be used as Bernoulli NB (U{http://tinyurl.com/p7c96j6}). The input feature values must be nonnegative. :param data: RDD of LabeledPoint. :param lambda_: The smoothing parameter. (default: 1.0) """ first = data.first() if not isinstance(first, LabeledPoint): raise ValueError("`data` should be an RDD of LabeledPoint") labels, pi, theta = callMLlibFunc("trainNaiveBayesModel", data, lambda_) return NaiveBayesModel(labels.toArray(), pi.toArray(), numpy.array(theta))
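A minimal training sketch for the classmethod above, assuming an active SparkContext `sc`; the toy points are illustrative:

# Hedged usage sketch: NaiveBayes.train on a tiny RDD of LabeledPoint (assumes SparkContext `sc`).
from pyspark.mllib.classification import NaiveBayes
from pyspark.mllib.linalg import Vectors
from pyspark.mllib.regression import LabeledPoint

data = sc.parallelize([
    LabeledPoint(0.0, Vectors.dense([1.0, 0.0])),
    LabeledPoint(0.0, Vectors.dense([2.0, 0.0])),
    LabeledPoint(1.0, Vectors.dense([0.0, 1.0])),
])
model = NaiveBayes.train(data, lambda_=1.0)
print(model.predict(Vectors.dense([0.0, 1.0])))   # expected to favour label 1.0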
Push item onto heap, maintaining the heap invariant.
def heappush(heap, item): """Push item onto heap, maintaining the heap invariant.""" heap.append(item) _siftdown(heap, 0, len(heap)-1)
Pop the smallest item off the heap, maintaining the heap invariant.
def heappop(heap): """Pop the smallest item off the heap, maintaining the heap invariant.""" lastelt = heap.pop() # raises appropriate IndexError if heap is empty if heap: returnitem = heap[0] heap[0] = lastelt _siftup(heap, 0) return returnitem return lastelt
Pop and return the current smallest value and add the new item.
def heapreplace(heap, item): """Pop and return the current smallest value, and add the new item. This is more efficient than heappop() followed by heappush(), and can be more appropriate when using a fixed-size heap. Note that the value returned may be larger than item! That constrains reasonable uses of this routine unless written as part of a conditional replacement: if item > heap[0]: item = heapreplace(heap, item) """ returnitem = heap[0] # raises appropriate IndexError if heap is empty heap[0] = item _siftup(heap, 0) return returnitem
Fast version of a heappush followed by a heappop.
def heappushpop(heap, item): """Fast version of a heappush followed by a heappop.""" if heap and heap[0] < item: item, heap[0] = heap[0], item _siftup(heap, 0) return item
Transform list into a heap, in-place, in O(len(x)) time.
def heapify(x): """Transform list into a heap, in-place, in O(len(x)) time.""" n = len(x) # Transform bottom-up. The largest index there's any point to looking at # is the largest with a child index in-range, so must have 2*i + 1 < n, # or i < (n-1)/2. If n is even = 2*j, this is (2*j-1)/2 = j-1/2 so # j-1 is the largest, which is n//2 - 1. If n is odd = 2*j+1, this is # (2*j+1-1)/2 = j so j-1 is the largest, and that's again n//2-1. for i in reversed(range(n//2)): _siftup(x, i)
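The push/pop/heapify helpers above mirror the standard-library heapq module, so a quick self-contained check can be run against the stdlib versions:

# Hedged sketch: same call sequence against the stdlib heapq, which these helpers mirror.
import heapq

data = [5, 1, 4, 1, 5, 9, 2, 6]
heapq.heapify(data)                       # O(len(data)) bottom-up transform
heapq.heappush(data, 0)                   # push preserves the heap invariant
ordered = [heapq.heappop(data) for _ in range(len(data))]
print(ordered)                            # [0, 1, 1, 2, 4, 5, 5, 6, 9]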
Maxheap version of a heappop.
def _heappop_max(heap): """Maxheap version of a heappop.""" lastelt = heap.pop() # raises appropriate IndexError if heap is empty if heap: returnitem = heap[0] heap[0] = lastelt _siftup_max(heap, 0) return returnitem return lastelt
Maxheap version of a heappop followed by a heappush.
def _heapreplace_max(heap, item): """Maxheap version of a heappop followed by a heappush.""" returnitem = heap[0] # raises appropriate IndexError if heap is empty heap[0] = item _siftup_max(heap, 0) return returnitem
Transform list into a maxheap, in-place, in O(len(x)) time.
def _heapify_max(x): """Transform list into a maxheap, in-place, in O(len(x)) time.""" n = len(x) for i in reversed(range(n//2)): _siftup_max(x, i)
Maxheap variant of _siftdown
def _siftdown_max(heap, startpos, pos): 'Maxheap variant of _siftdown' newitem = heap[pos] # Follow the path to the root, moving parents down until finding a place # newitem fits. while pos > startpos: parentpos = (pos - 1) >> 1 parent = heap[parentpos] if parent < newitem: heap[pos] = parent pos = parentpos continue break heap[pos] = newitem
Maxheap variant of _siftup
def _siftup_max(heap, pos): 'Maxheap variant of _siftup' endpos = len(heap) startpos = pos newitem = heap[pos] # Bubble up the larger child until hitting a leaf. childpos = 2*pos + 1 # leftmost child position while childpos < endpos: # Set childpos to index of larger child. rightpos = childpos + 1 if rightpos < endpos and not heap[rightpos] < heap[childpos]: childpos = rightpos # Move the larger child up. heap[pos] = heap[childpos] pos = childpos childpos = 2*pos + 1 # The leaf at pos is empty now. Put newitem there, and bubble it up # to its final resting place (by sifting its parents down). heap[pos] = newitem _siftdown_max(heap, startpos, pos)
Merge multiple sorted inputs into a single sorted output.
def merge(iterables, key=None, reverse=False): '''Merge multiple sorted inputs into a single sorted output. Similar to sorted(itertools.chain(*iterables)) but returns a generator, does not pull the data into memory all at once, and assumes that each of the input streams is already sorted (smallest to largest). >>> list(merge([1,3,5,7], [0,2,4,8], [5,10,15,20], [], [25])) [0, 1, 2, 3, 4, 5, 5, 7, 8, 10, 15, 20, 25] If *key* is not None, applies a key function to each element to determine its sort order. >>> list(merge(['dog', 'horse'], ['cat', 'fish', 'kangaroo'], key=len)) ['dog', 'cat', 'fish', 'horse', 'kangaroo'] ''' h = [] h_append = h.append if reverse: _heapify = _heapify_max _heappop = _heappop_max _heapreplace = _heapreplace_max direction = -1 else: _heapify = heapify _heappop = heappop _heapreplace = heapreplace direction = 1 if key is None: for order, it in enumerate(map(iter, iterables)): try: h_append([next(it), order * direction, it]) except StopIteration: pass _heapify(h) while len(h) > 1: try: while True: value, order, it = s = h[0] yield value s[0] = next(it) # raises StopIteration when exhausted _heapreplace(h, s) # restore heap condition except StopIteration: _heappop(h) # remove empty iterator if h: # fast case when only a single iterator remains value, order, it = h[0] yield value for value in it: yield value return for order, it in enumerate(map(iter, iterables)): try: value = next(it) h_append([key(value), order * direction, value, it]) except StopIteration: pass _heapify(h) while len(h) > 1: try: while True: key_value, order, value, it = s = h[0] yield value value = next(it) s[0] = key(value) s[2] = value _heapreplace(h, s) except StopIteration: _heappop(h) if h: key_value, order, value, it = h[0] yield value for value in it: yield value
Find the n smallest elements in a dataset.
def nsmallest(n, iterable, key=None): """Find the n smallest elements in a dataset. Equivalent to: sorted(iterable, key=key)[:n] """ # Short-cut for n==1 is to use min() if n == 1: it = iter(iterable) sentinel = object() if key is None: result = min(it, default=sentinel) else: result = min(it, default=sentinel, key=key) return [] if result is sentinel else [result] # When n>=size, it's faster to use sorted() try: size = len(iterable) except (TypeError, AttributeError): pass else: if n >= size: return sorted(iterable, key=key)[:n] # When key is none, use simpler decoration if key is None: it = iter(iterable) # put the range(n) first so that zip() doesn't # consume one too many elements from the iterator result = [(elem, i) for i, elem in zip(range(n), it)] if not result: return result _heapify_max(result) top = result[0][0] order = n _heapreplace = _heapreplace_max for elem in it: if elem < top: _heapreplace(result, (elem, order)) top = result[0][0] order += 1 result.sort() return [r[0] for r in result] # General case, slowest method it = iter(iterable) result = [(key(elem), i, elem) for i, elem in zip(range(n), it)] if not result: return result _heapify_max(result) top = result[0][0] order = n _heapreplace = _heapreplace_max for elem in it: k = key(elem) if k < top: _heapreplace(result, (k, order, elem)) top = result[0][0] order += 1 result.sort() return [r[2] for r in result]
Find the n largest elements in a dataset.
def nlargest(n, iterable, key=None): """Find the n largest elements in a dataset. Equivalent to: sorted(iterable, key=key, reverse=True)[:n] """ # Short-cut for n==1 is to use max() if n == 1: it = iter(iterable) sentinel = object() if key is None: result = max(it, default=sentinel) else: result = max(it, default=sentinel, key=key) return [] if result is sentinel else [result] # When n>=size, it's faster to use sorted() try: size = len(iterable) except (TypeError, AttributeError): pass else: if n >= size: return sorted(iterable, key=key, reverse=True)[:n] # When key is none, use simpler decoration if key is None: it = iter(iterable) result = [(elem, i) for i, elem in zip(range(0, -n, -1), it)] if not result: return result heapify(result) top = result[0][0] order = -n _heapreplace = heapreplace for elem in it: if top < elem: _heapreplace(result, (elem, order)) top = result[0][0] order -= 1 result.sort(reverse=True) return [r[0] for r in result] # General case, slowest method it = iter(iterable) result = [(key(elem), i, elem) for i, elem in zip(range(0, -n, -1), it)] if not result: return result heapify(result) top = result[0][0] order = -n _heapreplace = heapreplace for elem in it: k = key(elem) if top < k: _heapreplace(result, (k, order, elem)) top = result[0][0] order -= 1 result.sort(reverse=True) return [r[2] for r in result]
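A short illustrative call of the two selection helpers above with a key function; the stdlib heapq versions behave identically, so the sketch uses those to stay self-contained:

# Hedged sketch: nlargest/nsmallest with a key, shown via the equivalent stdlib functions.
import heapq

scores = [("ann", 91), ("bob", 78), ("cat", 84), ("dan", 99)]
print(heapq.nlargest(2, scores, key=lambda t: t[1]))    # [('dan', 99), ('ann', 91)]
print(heapq.nsmallest(2, scores, key=lambda t: t[1]))   # [('bob', 78), ('cat', 84)]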
Compute the correlation matrix with specified method using dataset.
def corr(dataset, column, method="pearson"): """ Compute the correlation matrix with specified method using dataset. :param dataset: A Dataset or a DataFrame. :param column: The name of the column of vectors for which the correlation coefficient needs to be computed. This must be a column of the dataset, and it must contain Vector objects. :param method: String specifying the method to use for computing correlation. Supported: `pearson` (default), `spearman`. :return: A DataFrame that contains the correlation matrix of the column of vectors. This DataFrame contains a single row and a single column of name '$METHODNAME($COLUMN)'. >>> from pyspark.ml.linalg import Vectors >>> from pyspark.ml.stat import Correlation >>> dataset = [[Vectors.dense([1, 0, 0, -2])], ... [Vectors.dense([4, 5, 0, 3])], ... [Vectors.dense([6, 7, 0, 8])], ... [Vectors.dense([9, 0, 0, 1])]] >>> dataset = spark.createDataFrame(dataset, ['features']) >>> pearsonCorr = Correlation.corr(dataset, 'features', 'pearson').collect()[0][0] >>> print(str(pearsonCorr).replace('nan', 'NaN')) DenseMatrix([[ 1. , 0.0556..., NaN, 0.4004...], [ 0.0556..., 1. , NaN, 0.9135...], [ NaN, NaN, 1. , NaN], [ 0.4004..., 0.9135..., NaN, 1. ]]) >>> spearmanCorr = Correlation.corr(dataset, 'features', method='spearman').collect()[0][0] >>> print(str(spearmanCorr).replace('nan', 'NaN')) DenseMatrix([[ 1. , 0.1054..., NaN, 0.4 ], [ 0.1054..., 1. , NaN, 0.9486... ], [ NaN, NaN, 1. , NaN], [ 0.4 , 0.9486... , NaN, 1. ]]) """ sc = SparkContext._active_spark_context javaCorrObj = _jvm().org.apache.spark.ml.stat.Correlation args = [_py2java(sc, arg) for arg in (dataset, column, method)] return _java2py(sc, javaCorrObj.corr(*args))
Given a list of metrics, provides a builder that in turn computes metrics from a column.
def metrics(*metrics): """ Given a list of metrics, provides a builder that in turn computes metrics from a column. See the documentation of [[Summarizer]] for an example. The following metrics are accepted (case sensitive): - mean: a vector that contains the coefficient-wise mean. - variance: a vector that contains the coefficient-wise variance. - count: the count of all vectors seen. - numNonzeros: a vector with the number of non-zeros for each coefficient. - max: the maximum for each coefficient. - min: the minimum for each coefficient. - normL2: the Euclidean norm for each coefficient. - normL1: the L1 norm of each coefficient (sum of the absolute values). :param metrics: metrics that can be provided. :return: an object of :py:class:`pyspark.ml.stat.SummaryBuilder` Note: Currently, the performance of this interface is about 2x~3x slower than using the RDD interface. """ sc = SparkContext._active_spark_context js = JavaWrapper._new_java_obj("org.apache.spark.ml.stat.Summarizer.metrics", _to_seq(sc, metrics)) return SummaryBuilder(js)
Returns an aggregate object that contains the summary of the column with the requested metrics.
def summary(self, featuresCol, weightCol=None): """ Returns an aggregate object that contains the summary of the column with the requested metrics. :param featuresCol: a column that contains features Vector object. :param weightCol: a column that contains weight value. Default weight is 1.0. :return: an aggregate column that contains the statistics. The exact content of this structure is determined during the creation of the builder. """ featuresCol, weightCol = Summarizer._check_param(featuresCol, weightCol) return Column(self._java_obj.summary(featuresCol._jc, weightCol._jc))
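A minimal sketch combining metrics() and summary() above, assuming an active SparkSession `spark`; the column name and vectors are illustrative:

# Hedged usage sketch: Summarizer.metrics(...).summary(...) (assumes SparkSession `spark`).
from pyspark.ml.linalg import Vectors
from pyspark.ml.stat import Summarizer

df = spark.createDataFrame(
    [(Vectors.dense([1.0, 2.0]),), (Vectors.dense([3.0, 4.0]),)], ["features"])
summarizer = Summarizer.metrics("mean", "count")
# One aggregate column holding a struct with the requested statistics.
df.select(summarizer.summary(df.features).alias("stats")).show(truncate=False)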
Compute the correlation (matrix) for the input RDD(s) using the specified method. Methods currently supported: I{pearson (default), spearman}.
def corr(x, y=None, method=None): """ Compute the correlation (matrix) for the input RDD(s) using the specified method. Methods currently supported: I{pearson (default), spearman}. If a single RDD of Vectors is passed in, a correlation matrix comparing the columns in the input RDD is returned. Use C{method=} to specify the method to be used for single RDD inout. If two RDDs of floats are passed in, a single float is returned. :param x: an RDD of vector for which the correlation matrix is to be computed, or an RDD of float of the same cardinality as y when y is specified. :param y: an RDD of float of the same cardinality as x. :param method: String specifying the method to use for computing correlation. Supported: `pearson` (default), `spearman` :return: Correlation matrix comparing columns in x. >>> x = sc.parallelize([1.0, 0.0, -2.0], 2) >>> y = sc.parallelize([4.0, 5.0, 3.0], 2) >>> zeros = sc.parallelize([0.0, 0.0, 0.0], 2) >>> abs(Statistics.corr(x, y) - 0.6546537) < 1e-7 True >>> Statistics.corr(x, y) == Statistics.corr(x, y, "pearson") True >>> Statistics.corr(x, y, "spearman") 0.5 >>> from math import isnan >>> isnan(Statistics.corr(x, zeros)) True >>> from pyspark.mllib.linalg import Vectors >>> rdd = sc.parallelize([Vectors.dense([1, 0, 0, -2]), Vectors.dense([4, 5, 0, 3]), ... Vectors.dense([6, 7, 0, 8]), Vectors.dense([9, 0, 0, 1])]) >>> pearsonCorr = Statistics.corr(rdd) >>> print(str(pearsonCorr).replace('nan', 'NaN')) [[ 1. 0.05564149 NaN 0.40047142] [ 0.05564149 1. NaN 0.91359586] [ NaN NaN 1. NaN] [ 0.40047142 0.91359586 NaN 1. ]] >>> spearmanCorr = Statistics.corr(rdd, method="spearman") >>> print(str(spearmanCorr).replace('nan', 'NaN')) [[ 1. 0.10540926 NaN 0.4 ] [ 0.10540926 1. NaN 0.9486833 ] [ NaN NaN 1. NaN] [ 0.4 0.9486833 NaN 1. ]] >>> try: ... Statistics.corr(rdd, "spearman") ... print("Method name as second argument without 'method=' shouldn't be allowed.") ... except TypeError: ... pass """ # Check inputs to determine whether a single value or a matrix is needed for output. # Since it's legal for users to use the method name as the second argument, we need to # check if y is used to specify the method name instead. if type(y) == str: raise TypeError("Use 'method=' to specify method name.") if not y: return callMLlibFunc("corr", x.map(_convert_to_vector), method).toArray() else: return callMLlibFunc("corr", x.map(float), y.map(float), method)
Creates a list of callables which can be called from different threads to fit and evaluate an estimator in parallel. Each callable returns an `(index, metric)` pair.
def _parallelFitTasks(est, train, eva, validation, epm, collectSubModel): """ Creates a list of callables which can be called from different threads to fit and evaluate an estimator in parallel. Each callable returns an `(index, metric)` pair. :param est: Estimator, the estimator to be fit. :param train: DataFrame, training data set, used for fitting. :param eva: Evaluator, used to compute `metric` :param validation: DataFrame, validation data set, used for evaluation. :param epm: Sequence of ParamMap, params maps to be used during fitting & evaluation. :param collectSubModel: Whether to collect sub model. :return: (int, float, subModel), an index into `epm` and the associated metric value. """ modelIter = est.fitMultiple(train, epm) def singleTask(): index, model = next(modelIter) metric = eva.evaluate(model.transform(validation, epm[index])) return index, metric, model if collectSubModel else None return [singleTask] * len(epm)
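A hedged sketch of how the callables returned above could be driven from a thread pool, in the spirit of the tuning code that consumes them; the estimator, evaluator, DataFrames, and param maps (est, eva, train_df, valid_df, epm) are placeholders you would supply:

# Hedged sketch: executing the single-shot tasks on a thread pool (placeholders assumed for
# est/eva/train_df/valid_df/epm -- any Estimator, Evaluator, DataFrames and list of ParamMaps).
from multiprocessing.pool import ThreadPool

def run_parallel_fit(est, eva, train_df, valid_df, epm, parallelism=2):
    tasks = _parallelFitTasks(est, train_df, eva, valid_df, epm, collectSubModel=False)
    pool = ThreadPool(processes=parallelism)
    metrics = [0.0] * len(epm)
    for index, metric, _ in pool.imap_unordered(lambda task: task(), tasks):
        metrics[index] = metric    # each task reports the metric for epm[index]
    pool.terminate()
    return metrics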
Sets the given parameters in this grid to fixed values. Accepts either a parameter dictionary or a list of (parameter, value) pairs.
def baseOn(self, *args): """ Sets the given parameters in this grid to fixed values. Accepts either a parameter dictionary or a list of (parameter, value) pairs. """ if isinstance(args[0], dict): self.baseOn(*args[0].items()) else: for (param, value) in args: self.addGrid(param, [value]) return self
Builds and returns all combinations of parameters specified by the param grid.
def build(self): """ Builds and returns all combinations of parameters specified by the param grid. """ keys = self._param_grid.keys() grid_values = self._param_grid.values() def to_key_value_pairs(keys, values): return [(key, key.typeConverter(value)) for key, value in zip(keys, values)] return [dict(to_key_value_pairs(keys, prod)) for prod in itertools.product(*grid_values)]
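A short usage sketch tying baseOn() and build() together, assuming the ParamGridBuilder from pyspark.ml.tuning and a LogisticRegression estimator; the parameter values are illustrative:

# Hedged usage sketch: fixing one parameter with baseOn, gridding two others, then building.
from pyspark.ml.classification import LogisticRegression
from pyspark.ml.tuning import ParamGridBuilder

lr = LogisticRegression()
grid = (ParamGridBuilder()
        .baseOn({lr.labelCol: "label"})        # held fixed across every combination
        .addGrid(lr.regParam, [0.01, 0.1])
        .addGrid(lr.maxIter, [10, 50])
        .build())
print(len(grid))                               # 2 x 2 = 4 parameter maps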