column                   type    length / values
body                     string  26 to 98.2k characters
body_hash                int64   -9,222,864,604,528,158,000 to 9,221,803,474B
docstring                string  1 to 16.8k characters
path                     string  5 to 230 characters
name                     string  1 to 96 characters
repository_name          string  7 to 89 characters
lang                     string  1 distinct value ("python")
body_without_docstring   string  20 to 98.2k characters
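Each record that follows lists these eight fields in the same order — body, body_hash, docstring, path, name, repository_name, lang, body_without_docstring — with long code and docstring values truncated by the viewer. A rough sketch of the row layout, assuming the types shown above (the class name CodeDocstringRow and the comments are illustrative, not part of the dataset):

```python
from typing import TypedDict

class CodeDocstringRow(TypedDict):
    """Illustrative layout of one record in the dump above (name is hypothetical)."""
    body: str                    # full function source, including its docstring
    body_hash: int               # int64 hash of the body
    docstring: str               # the extracted docstring text
    path: str                    # file path inside the repository
    name: str                    # function or property name
    repository_name: str         # e.g. "markusschmitt/QuSpin" or "Flyangz/spark"
    lang: str                    # a single value in this dump: "python"
    body_without_docstring: str  # body with the docstring text removed
```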
@property def A(self): 'scipy.sparse.csr_matrix: csr_matrix to be exponentiated.' return self._A
5,594,488,028,696,724,000
scipy.sparse.csr_matrix: csr_matrix to be exponentiated.
quspin/tools/expm_multiply_parallel_core/expm_multiply_parallel_core.py
A
markusschmitt/QuSpin
python
@property def A(self): return self._A
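Comparing the first record's body and body_without_docstring shows that the latter is simply the source with the docstring dropped (in some later rows the string literal is blanked rather than removed). A minimal sketch of one way such a column could be derived from the multi-line source with the standard ast module — remove_docstring is a hypothetical helper, not the pipeline actually used for this dataset:

```python
import ast

def remove_docstring(source: str) -> str:
    """Drop a function's leading docstring and re-emit the source (Python 3.9+)."""
    tree = ast.parse(source)
    func = tree.body[0]  # assumes `source` contains a single function definition
    if (func.body
            and isinstance(func.body[0], ast.Expr)
            and isinstance(func.body[0].value, ast.Constant)
            and isinstance(func.body[0].value.value, str)):
        func.body = func.body[1:] or [ast.Pass()]  # keep the body non-empty
    return ast.unparse(tree)

print(remove_docstring(
    "@property\ndef A(self):\n"
    "    'scipy.sparse.csr_matrix: csr_matrix to be exponentiated.'\n"
    "    return self._A\n"
))
```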
def set_a(self, a, dtype=None): 'Sets the value of the property `a`.\n\n Parameters\n ----------\n a : scalar\n new value of `a`.\n dtype : numpy.dtype, optional\n dtype specified for this operator. Default is: result_type(A.dtype,min_scalar_type(a),float64)\n\n ...
1,647,312,176,468,744,400
Sets the value of the property `a`. Parameters ---------- a : scalar new value of `a`. dtype : numpy.dtype, optional dtype specified for this operator. Default is: result_type(A.dtype,min_scalar_type(a),float64) Examples -------- .. literalinclude:: ../../doc_examples/expm_multiply_parallel-example.py :l...
quspin/tools/expm_multiply_parallel_core/expm_multiply_parallel_core.py
set_a
markusschmitt/QuSpin
python
def set_a(self, a, dtype=None): 'Sets the value of the property `a`.\n\n Parameters\n ----------\n a : scalar\n new value of `a`.\n dtype : numpy.dtype, optional\n dtype specified for this operator. Default is: result_type(A.dtype,min_scalar_type(a),float64)\n\n ...
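The set_a docstring above gives the default dtype as result_type(A.dtype, min_scalar_type(a), float64). A minimal NumPy sketch of that promotion rule, with a placeholder sparse matrix and scalar standing in for QuSpin's A and a:

```python
import numpy as np
from scipy.sparse import identity

A = identity(4, format="csr")   # placeholder csr_matrix, dtype float64
a = -0.5j                       # placeholder scalar prefactor

# Default dtype quoted in the docstring: result_type(A.dtype, min_scalar_type(a), float64)
dtype = np.result_type(A.dtype, np.min_scalar_type(a), np.float64)
print(dtype)                    # complex128 for a complex-valued a
```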
def dot(self, v, work_array=None, overwrite_v=False): 'Calculates the action of :math:`\\mathrm{e}^{aA}` on a vector :math:`v`. \n\n Examples\n --------\n\n .. literalinclude:: ../../doc_examples/expm_multiply_parallel-example.py\n :linenos:\n :language: python\n ...
3,259,529,137,262,955,000
Calculates the action of :math:`\mathrm{e}^{aA}` on a vector :math:`v`. Examples -------- .. literalinclude:: ../../doc_examples/expm_multiply_parallel-example.py :linenos: :language: python :lines: 37- Parameters ----------- v : contiguous numpy.ndarray array to apply :math:`\mathrm{e}^{aA}` on. wo...
quspin/tools/expm_multiply_parallel_core/expm_multiply_parallel_core.py
dot
markusschmitt/QuSpin
python
def dot(self, v, work_array=None, overwrite_v=False): 'Calculates the action of :math:`\\mathrm{e}^{aA}` on a vector :math:`v`. \n\n Examples\n --------\n\n .. literalinclude:: ../../doc_examples/expm_multiply_parallel-example.py\n :linenos:\n :language: python\n ...
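The dot docstring above describes applying exp(aA) to a vector v. SciPy's serial expm_multiply computes the same quantity and can serve as a rough reference sketch; it is not QuSpin's parallel implementation, and the matrix, vector, and prefactor below are placeholders:

```python
import numpy as np
from scipy.sparse import random as sparse_random
from scipy.sparse.linalg import expm_multiply

A = sparse_random(50, 50, density=0.1, format="csr", random_state=0)  # placeholder matrix
v = np.ones(A.shape[0])                                               # placeholder vector
a = -1.0j                                                             # placeholder prefactor

w = expm_multiply(a * A, v)   # w = exp(a*A) @ v, the action the `dot` method computes
print(w.shape)                # (50,)
```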
def __init__(self, A, A_1_norm, a, mu, dtype, ell=2): '\n Provide the operator and some norm-related information.\n\n Parameters\n -----------\n A : linear operator\n The operator of interest.\n A_1_norm : float\n The exact 1-norm of A.\n ell : int, op...
-3,342,848,802,935,478,300
Provide the operator and some norm-related information. Parameters ----------- A : linear operator The operator of interest. A_1_norm : float The exact 1-norm of A. ell : int, optional A technical parameter controlling norm estimation quality.
quspin/tools/expm_multiply_parallel_core/expm_multiply_parallel_core.py
__init__
markusschmitt/QuSpin
python
def __init__(self, A, A_1_norm, a, mu, dtype, ell=2): '\n Provide the operator and some norm-related information.\n\n Parameters\n -----------\n A : linear operator\n The operator of interest.\n A_1_norm : float\n The exact 1-norm of A.\n ell : int, op...
def onenorm(self): '\n Compute the exact 1-norm.\n ' return (_np.abs(self._a) * self._A_1_norm)
3,180,204,854,046,551,600
Compute the exact 1-norm.
quspin/tools/expm_multiply_parallel_core/expm_multiply_parallel_core.py
onenorm
markusschmitt/QuSpin
python
def onenorm(self): '\n \n ' return (_np.abs(self._a) * self._A_1_norm)
def d(self, p): '\n Lazily estimate d_p(A) ~= || A^p ||^(1/p) where ||.|| is the 1-norm.\n ' if (p not in self._d): matvec = (lambda v: (self._a * (self._A.dot(v) - (self._mu * v)))) rmatvec = (lambda v: (_np.conj(self._a) * (self._A.H.dot(v) - (_np.conj(self._mu) * v)))) L...
3,573,397,589,313,746,400
Lazily estimate d_p(A) ~= || A^p ||^(1/p) where ||.|| is the 1-norm.
quspin/tools/expm_multiply_parallel_core/expm_multiply_parallel_core.py
d
markusschmitt/QuSpin
python
def d(self, p): '\n \n ' if (p not in self._d): matvec = (lambda v: (self._a * (self._A.dot(v) - (self._mu * v)))) rmatvec = (lambda v: (_np.conj(self._a) * (self._A.H.dot(v) - (_np.conj(self._mu) * v)))) LO = LinearOperator(self._A.shape, dtype=self._dtype, matvec=matvec, ...
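The d method above lazily estimates d_p(A) ~= ||A^p||_1^(1/p) by running a 1-norm estimator on a LinearOperator. A rough sketch of the same idea with SciPy's public onenormest, omitting the mu shift and the a prefactor for brevity; d_estimate is a hypothetical stand-in for QuSpin's internal helper, written for a real-valued matrix:

```python
import numpy as np
from scipy.sparse import random as sparse_random
from scipy.sparse.linalg import LinearOperator, onenormest

def d_estimate(A, p, ell=2):
    """Estimate ||A^p||_1 ** (1/p) without forming A^p explicitly (real A only)."""
    n = A.shape[0]

    def matvec(v):
        for _ in range(p):      # apply A repeatedly instead of building A^p
            v = A.dot(v)
        return v

    def rmatvec(v):
        for _ in range(p):      # transpose action; use the adjoint for complex A
            v = A.T.dot(v)
        return v

    Ap = LinearOperator((n, n), matvec=matvec, rmatvec=rmatvec, dtype=A.dtype)
    return onenormest(Ap, t=ell) ** (1.0 / p)

A = sparse_random(100, 100, density=0.05, format="csr", random_state=1)
print(d_estimate(A, 2))
```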
def alpha(self, p): '\n Lazily compute max(d(p), d(p+1)).\n ' return max(self.d(p), self.d((p + 1)))
-7,602,815,063,169,623,000
Lazily compute max(d(p), d(p+1)).
quspin/tools/expm_multiply_parallel_core/expm_multiply_parallel_core.py
alpha
markusschmitt/QuSpin
python
def alpha(self, p): '\n \n ' return max(self.d(p), self.d((p + 1)))
def _reduce_spark_multi(sdf: SparkDataFrame, aggs: List[Column]) -> Any: '\n Performs a reduction on a spark DataFrame, the functions being known sql aggregate functions.\n ' assert isinstance(sdf, SparkDataFrame) sdf0 = sdf.agg(*aggs) lst = sdf0.limit(2).toPandas() assert (len(lst) == 1), (sd...
7,922,910,722,984,314,000
Performs a reduction on a spark DataFrame, the functions being known sql aggregate functions.
python/pyspark/pandas/frame.py
_reduce_spark_multi
Flyangz/spark
python
def _reduce_spark_multi(sdf: SparkDataFrame, aggs: List[Column]) -> Any: '\n \n ' assert isinstance(sdf, SparkDataFrame) sdf0 = sdf.agg(*aggs) lst = sdf0.limit(2).toPandas() assert (len(lst) == 1), (sdf, lst) row = lst.iloc[0] lst2 = list(row) assert (len(lst2) == len(aggs)), (row,...
@property def _pssers(self) -> Dict[(Label, 'Series')]: 'Return a dict of column label -> Series which anchors `self`.' from pyspark.pandas.series import Series if (not hasattr(self, '_psseries')): object.__setattr__(self, '_psseries', {label: Series(data=self, index=label) for label in self._intern...
1,271,166,069,773,295,400
Return a dict of column label -> Series which anchors `self`.
python/pyspark/pandas/frame.py
_pssers
Flyangz/spark
python
@property def _pssers(self) -> Dict[(Label, 'Series')]: from pyspark.pandas.series import Series if (not hasattr(self, '_psseries')): object.__setattr__(self, '_psseries', {label: Series(data=self, index=label) for label in self._internal.column_labels}) else: psseries = cast(Dict[(Labe...
def _update_internal_frame(self, internal: InternalFrame, requires_same_anchor: bool=True) -> None: '\n Update InternalFrame with the given one.\n\n If the column_label is changed or the new InternalFrame is not the same `anchor`,\n disconnect the link to the Series and create a new one.\n\n ...
694,187,229,648,033,900
Update InternalFrame with the given one. If the column_label is changed or the new InternalFrame is not the same `anchor`, disconnect the link to the Series and create a new one. If `requires_same_anchor` is `False`, checking whether or not the same anchor is ignored and force to update the InternalFrame, e.g., repla...
python/pyspark/pandas/frame.py
_update_internal_frame
Flyangz/spark
python
def _update_internal_frame(self, internal: InternalFrame, requires_same_anchor: bool=True) -> None: '\n Update InternalFrame with the given one.\n\n If the column_label is changed or the new InternalFrame is not the same `anchor`,\n disconnect the link to the Series and create a new one.\n\n ...
@property def ndim(self) -> int: "\n Return an int representing the number of array dimensions.\n\n return 2 for DataFrame.\n\n Examples\n --------\n\n >>> df = ps.DataFrame([[1, 2], [4, 5], [7, 8]],\n ... index=['cobra', 'viper', None],\n ... ...
-5,889,926,738,711,093,000
Return an int representing the number of array dimensions. return 2 for DataFrame. Examples -------- >>> df = ps.DataFrame([[1, 2], [4, 5], [7, 8]], ... index=['cobra', 'viper', None], ... columns=['max_speed', 'shield']) >>> df max_speed shield cobra 1 2 vi...
python/pyspark/pandas/frame.py
ndim
Flyangz/spark
python
@property def ndim(self) -> int: "\n Return an int representing the number of array dimensions.\n\n return 2 for DataFrame.\n\n Examples\n --------\n\n >>> df = ps.DataFrame([[1, 2], [4, 5], [7, 8]],\n ... index=['cobra', 'viper', None],\n ... ...
@property def axes(self) -> List: "\n Return a list representing the axes of the DataFrame.\n\n It has the row axis labels and column axis labels as the only members.\n They are returned in that order.\n\n Examples\n --------\n\n >>> df = ps.DataFrame({'col1': [1, 2], 'col2...
3,087,804,529,254,136,300
Return a list representing the axes of the DataFrame. It has the row axis labels and column axis labels as the only members. They are returned in that order. Examples -------- >>> df = ps.DataFrame({'col1': [1, 2], 'col2': [3, 4]}) >>> df.axes [Int64Index([0, 1], dtype='int64'), Index(['col1', 'col2'], dtype='object...
python/pyspark/pandas/frame.py
axes
Flyangz/spark
python
@property def axes(self) -> List: "\n Return a list representing the axes of the DataFrame.\n\n It has the row axis labels and column axis labels as the only members.\n They are returned in that order.\n\n Examples\n --------\n\n >>> df = ps.DataFrame({'col1': [1, 2], 'col2...
def _reduce_for_stat_function(self, sfun: Callable[(['Series'], Column)], name: str, axis: Optional[Axis]=None, numeric_only: bool=True, **kwargs: Any) -> 'Series': "\n Applies sfun to each column and returns a pd.Series where the number of rows equal the\n number of columns.\n\n Parameters\n ...
-5,742,163,017,799,007,000
Applies sfun to each column and returns a pd.Series where the number of rows equal the number of columns. Parameters ---------- sfun : either an 1-arg function that takes a Column and returns a Column, or a 2-arg function that takes a Column and its DataType and returns a Column. axis: used only for sanity che...
python/pyspark/pandas/frame.py
_reduce_for_stat_function
Flyangz/spark
python
def _reduce_for_stat_function(self, sfun: Callable[(['Series'], Column)], name: str, axis: Optional[Axis]=None, numeric_only: bool=True, **kwargs: Any) -> 'Series': "\n Applies sfun to each column and returns a pd.Series where the number of rows equal the\n number of columns.\n\n Parameters\n ...
def _psser_for(self, label: Label) -> 'Series': "\n Create Series with a proper column label.\n\n The given label must be verified to exist in `InternalFrame.column_labels`.\n\n For example, in some method, self is like:\n\n >>> self = ps.range(3)\n\n `self._psser_for(label)` can ...
1,867,862,387,058,158,300
Create Series with a proper column label. The given label must be verified to exist in `InternalFrame.column_labels`. For example, in some method, self is like: >>> self = ps.range(3) `self._psser_for(label)` can be used with `InternalFrame.column_labels`: >>> self._psser_for(self._internal.column_labels[0]) 0 ...
python/pyspark/pandas/frame.py
_psser_for
Flyangz/spark
python
def _psser_for(self, label: Label) -> 'Series': "\n Create Series with a proper column label.\n\n The given label must be verified to exist in `InternalFrame.column_labels`.\n\n For example, in some method, self is like:\n\n >>> self = ps.range(3)\n\n `self._psser_for(label)` can ...
def eq(self, other: Any) -> 'DataFrame': "\n Compare if the current value is equal to the other.\n\n >>> df = ps.DataFrame({'a': [1, 2, 3, 4],\n ... 'b': [1, np.nan, 1, np.nan]},\n ... index=['a', 'b', 'c', 'd'], columns=['a', 'b'])\n\n >>> df....
-2,790,439,695,348,336,600
Compare if the current value is equal to the other. >>> df = ps.DataFrame({'a': [1, 2, 3, 4], ... 'b': [1, np.nan, 1, np.nan]}, ... index=['a', 'b', 'c', 'd'], columns=['a', 'b']) >>> df.eq(1) a b a True True b False False c False True d False False
python/pyspark/pandas/frame.py
eq
Flyangz/spark
python
def eq(self, other: Any) -> 'DataFrame': "\n Compare if the current value is equal to the other.\n\n >>> df = ps.DataFrame({'a': [1, 2, 3, 4],\n ... 'b': [1, np.nan, 1, np.nan]},\n ... index=['a', 'b', 'c', 'd'], columns=['a', 'b'])\n\n >>> df....
def gt(self, other: Any) -> 'DataFrame': "\n Compare if the current value is greater than the other.\n\n >>> df = ps.DataFrame({'a': [1, 2, 3, 4],\n ... 'b': [1, np.nan, 1, np.nan]},\n ... index=['a', 'b', 'c', 'd'], columns=['a', 'b'])\n\n >>>...
-7,195,802,182,166,305,000
Compare if the current value is greater than the other. >>> df = ps.DataFrame({'a': [1, 2, 3, 4], ... 'b': [1, np.nan, 1, np.nan]}, ... index=['a', 'b', 'c', 'd'], columns=['a', 'b']) >>> df.gt(2) a b a False False b False False c True False d True False
python/pyspark/pandas/frame.py
gt
Flyangz/spark
python
def gt(self, other: Any) -> 'DataFrame': "\n Compare if the current value is greater than the other.\n\n >>> df = ps.DataFrame({'a': [1, 2, 3, 4],\n ... 'b': [1, np.nan, 1, np.nan]},\n ... index=['a', 'b', 'c', 'd'], columns=['a', 'b'])\n\n >>>...
def ge(self, other: Any) -> 'DataFrame': "\n Compare if the current value is greater than or equal to the other.\n\n >>> df = ps.DataFrame({'a': [1, 2, 3, 4],\n ... 'b': [1, np.nan, 1, np.nan]},\n ... index=['a', 'b', 'c', 'd'], columns=['a', 'b'])\n\...
-5,542,816,325,634,264,000
Compare if the current value is greater than or equal to the other. >>> df = ps.DataFrame({'a': [1, 2, 3, 4], ... 'b': [1, np.nan, 1, np.nan]}, ... index=['a', 'b', 'c', 'd'], columns=['a', 'b']) >>> df.ge(1) a b a True True b True False c True True d True Fal...
python/pyspark/pandas/frame.py
ge
Flyangz/spark
python
def ge(self, other: Any) -> 'DataFrame': "\n Compare if the current value is greater than or equal to the other.\n\n >>> df = ps.DataFrame({'a': [1, 2, 3, 4],\n ... 'b': [1, np.nan, 1, np.nan]},\n ... index=['a', 'b', 'c', 'd'], columns=['a', 'b'])\n\...
def lt(self, other: Any) -> 'DataFrame': "\n Compare if the current value is less than the other.\n\n >>> df = ps.DataFrame({'a': [1, 2, 3, 4],\n ... 'b': [1, np.nan, 1, np.nan]},\n ... index=['a', 'b', 'c', 'd'], columns=['a', 'b'])\n\n >>> df...
-6,288,953,088,293,913,000
Compare if the current value is less than the other. >>> df = ps.DataFrame({'a': [1, 2, 3, 4], ... 'b': [1, np.nan, 1, np.nan]}, ... index=['a', 'b', 'c', 'd'], columns=['a', 'b']) >>> df.lt(1) a b a False False b False False c False False d False False
python/pyspark/pandas/frame.py
lt
Flyangz/spark
python
def lt(self, other: Any) -> 'DataFrame': "\n Compare if the current value is less than the other.\n\n >>> df = ps.DataFrame({'a': [1, 2, 3, 4],\n ... 'b': [1, np.nan, 1, np.nan]},\n ... index=['a', 'b', 'c', 'd'], columns=['a', 'b'])\n\n >>> df...
def le(self, other: Any) -> 'DataFrame': "\n Compare if the current value is less than or equal to the other.\n\n >>> df = ps.DataFrame({'a': [1, 2, 3, 4],\n ... 'b': [1, np.nan, 1, np.nan]},\n ... index=['a', 'b', 'c', 'd'], columns=['a', 'b'])\n\n ...
-6,213,523,783,659,392,000
Compare if the current value is less than or equal to the other. >>> df = ps.DataFrame({'a': [1, 2, 3, 4], ... 'b': [1, np.nan, 1, np.nan]}, ... index=['a', 'b', 'c', 'd'], columns=['a', 'b']) >>> df.le(2) a b a True True b True False c False True d False F...
python/pyspark/pandas/frame.py
le
Flyangz/spark
python
def le(self, other: Any) -> 'DataFrame': "\n Compare if the current value is less than or equal to the other.\n\n >>> df = ps.DataFrame({'a': [1, 2, 3, 4],\n ... 'b': [1, np.nan, 1, np.nan]},\n ... index=['a', 'b', 'c', 'd'], columns=['a', 'b'])\n\n ...
def ne(self, other: Any) -> 'DataFrame': "\n Compare if the current value is not equal to the other.\n\n >>> df = ps.DataFrame({'a': [1, 2, 3, 4],\n ... 'b': [1, np.nan, 1, np.nan]},\n ... index=['a', 'b', 'c', 'd'], columns=['a', 'b'])\n\n >>>...
8,309,935,139,510,682,000
Compare if the current value is not equal to the other. >>> df = ps.DataFrame({'a': [1, 2, 3, 4], ... 'b': [1, np.nan, 1, np.nan]}, ... index=['a', 'b', 'c', 'd'], columns=['a', 'b']) >>> df.ne(1) a b a False False b True True c True False d True True
python/pyspark/pandas/frame.py
ne
Flyangz/spark
python
def ne(self, other: Any) -> 'DataFrame': "\n Compare if the current value is not equal to the other.\n\n >>> df = ps.DataFrame({'a': [1, 2, 3, 4],\n ... 'b': [1, np.nan, 1, np.nan]},\n ... index=['a', 'b', 'c', 'd'], columns=['a', 'b'])\n\n >>>...
def applymap(self, func: Callable[([Any], Any)]) -> 'DataFrame': '\n Apply a function to a Dataframe elementwise.\n\n This method applies a function that accepts and returns a scalar\n to every element of a DataFrame.\n\n .. note:: this API executes the function once to infer the type wh...
2,872,845,046,706,062,000
Apply a function to a Dataframe elementwise. This method applies a function that accepts and returns a scalar to every element of a DataFrame. .. note:: this API executes the function once to infer the type which is potentially expensive, for instance, when the dataset is created after aggregations or sorti...
python/pyspark/pandas/frame.py
applymap
Flyangz/spark
python
def applymap(self, func: Callable[([Any], Any)]) -> 'DataFrame': '\n Apply a function to a Dataframe elementwise.\n\n This method applies a function that accepts and returns a scalar\n to every element of a DataFrame.\n\n .. note:: this API executes the function once to infer the type wh...
def aggregate(self, func: Union[(List[str], Dict[(Name, List[str])])]) -> 'DataFrame': 'Aggregate using one or more operations over the specified axis.\n\n Parameters\n ----------\n func : dict or a list\n a dict mapping from column name (string) to\n aggregate functions...
-858,337,314,020,279,400
Aggregate using one or more operations over the specified axis. Parameters ---------- func : dict or a list a dict mapping from column name (string) to aggregate functions (list of strings). If a list is given, the aggregation is performed against all columns. Returns ------- DataFrame Notes ----...
python/pyspark/pandas/frame.py
aggregate
Flyangz/spark
python
def aggregate(self, func: Union[(List[str], Dict[(Name, List[str])])]) -> 'DataFrame': 'Aggregate using one or more operations over the specified axis.\n\n Parameters\n ----------\n func : dict or a list\n a dict mapping from column name (string) to\n aggregate functions...
def corr(self, method: str='pearson') -> 'DataFrame': "\n Compute pairwise correlation of columns, excluding NA/null values.\n\n Parameters\n ----------\n method : {'pearson', 'spearman'}\n * pearson : standard correlation coefficient\n * spearman : Spearman rank co...
8,847,163,846,708,294,000
Compute pairwise correlation of columns, excluding NA/null values. Parameters ---------- method : {'pearson', 'spearman'} * pearson : standard correlation coefficient * spearman : Spearman rank correlation Returns ------- y : DataFrame See Also -------- Series.corr Examples -------- >>> df = ps.DataFrame([(...
python/pyspark/pandas/frame.py
corr
Flyangz/spark
python
def corr(self, method: str='pearson') -> 'DataFrame': "\n Compute pairwise correlation of columns, excluding NA/null values.\n\n Parameters\n ----------\n method : {'pearson', 'spearman'}\n * pearson : standard correlation coefficient\n * spearman : Spearman rank co...
def iteritems(self) -> Iterator[Tuple[(Name, 'Series')]]: "\n Iterator over (column name, Series) pairs.\n\n Iterates over the DataFrame columns, returning a tuple with\n the column name and the content as a Series.\n\n Returns\n -------\n label : object\n The co...
5,349,945,611,935,761,000
Iterator over (column name, Series) pairs. Iterates over the DataFrame columns, returning a tuple with the column name and the content as a Series. Returns ------- label : object The column names for the DataFrame being iterated over. content : Series The column entries belonging to each label, as a Series. ...
python/pyspark/pandas/frame.py
iteritems
Flyangz/spark
python
def iteritems(self) -> Iterator[Tuple[(Name, 'Series')]]: "\n Iterator over (column name, Series) pairs.\n\n Iterates over the DataFrame columns, returning a tuple with\n the column name and the content as a Series.\n\n Returns\n -------\n label : object\n The co...
def iterrows(self) -> Iterator[Tuple[(Name, pd.Series)]]: "\n Iterate over DataFrame rows as (index, Series) pairs.\n\n Yields\n ------\n index : label or tuple of label\n The index of the row. A tuple for a `MultiIndex`.\n data : pandas.Series\n The data of ...
1,428,964,907,911,900,200
Iterate over DataFrame rows as (index, Series) pairs. Yields ------ index : label or tuple of label The index of the row. A tuple for a `MultiIndex`. data : pandas.Series The data of the row as a Series. it : generator A generator that iterates over the rows of the frame. Notes ----- 1. Because ``iterro...
python/pyspark/pandas/frame.py
iterrows
Flyangz/spark
python
def iterrows(self) -> Iterator[Tuple[(Name, pd.Series)]]: "\n Iterate over DataFrame rows as (index, Series) pairs.\n\n Yields\n ------\n index : label or tuple of label\n The index of the row. A tuple for a `MultiIndex`.\n data : pandas.Series\n The data of ...
def itertuples(self, index: bool=True, name: Optional[str]='PandasOnSpark') -> Iterator[Tuple]: '\n Iterate over DataFrame rows as namedtuples.\n\n Parameters\n ----------\n index : bool, default True\n If True, return the index as the first element of the tuple.\n name...
-2,867,164,090,168,643,600
Iterate over DataFrame rows as namedtuples. Parameters ---------- index : bool, default True If True, return the index as the first element of the tuple. name : str or None, default "PandasOnSpark" The name of the returned namedtuples or None to return regular tuples. Returns ------- iterator An objec...
python/pyspark/pandas/frame.py
itertuples
Flyangz/spark
python
def itertuples(self, index: bool=True, name: Optional[str]='PandasOnSpark') -> Iterator[Tuple]: '\n Iterate over DataFrame rows as namedtuples.\n\n Parameters\n ----------\n index : bool, default True\n If True, return the index as the first element of the tuple.\n name...
def items(self) -> Iterator[Tuple[(Name, 'Series')]]: 'This is an alias of ``iteritems``.' return self.iteritems()
6,259,565,771,771,294,000
This is an alias of ``iteritems``.
python/pyspark/pandas/frame.py
items
Flyangz/spark
python
def items(self) -> Iterator[Tuple[(Name, 'Series')]]: return self.iteritems()
def to_clipboard(self, excel: bool=True, sep: Optional[str]=None, **kwargs: Any) -> None: "\n Copy object to the system clipboard.\n\n Write a text representation of object to the system clipboard.\n This can be pasted into Excel, for example.\n\n .. note:: This method should only be use...
5,270,269,083,777,499,000
Copy object to the system clipboard. Write a text representation of object to the system clipboard. This can be pasted into Excel, for example. .. note:: This method should only be used if the resulting DataFrame is expected to be small, as all the data is loaded into the driver's memory. Parameters ---------- e...
python/pyspark/pandas/frame.py
to_clipboard
Flyangz/spark
python
def to_clipboard(self, excel: bool=True, sep: Optional[str]=None, **kwargs: Any) -> None: "\n Copy object to the system clipboard.\n\n Write a text representation of object to the system clipboard.\n This can be pasted into Excel, for example.\n\n .. note:: This method should only be use...
def to_html(self, buf: Optional[IO[str]]=None, columns: Optional[Sequence[Name]]=None, col_space: Optional[Union[(str, int, Dict[(Name, Union[(str, int)])])]]=None, header: bool=True, index: bool=True, na_rep: str='NaN', formatters: Optional[Union[(List[Callable[([Any], str)]], Dict[(Name, Callable[([Any], str)])])]]=N...
7,113,171,304,014,801,000
Render a DataFrame as an HTML table. .. note:: This method should only be used if the resulting pandas object is expected to be small, as all the data is loaded into the driver's memory. If the input is large, set max_rows parameter. Parameters ---------- buf : StringIO-like, optional Buffer t...
python/pyspark/pandas/frame.py
to_html
Flyangz/spark
python
def to_html(self, buf: Optional[IO[str]]=None, columns: Optional[Sequence[Name]]=None, col_space: Optional[Union[(str, int, Dict[(Name, Union[(str, int)])])]]=None, header: bool=True, index: bool=True, na_rep: str='NaN', formatters: Optional[Union[(List[Callable[([Any], str)]], Dict[(Name, Callable[([Any], str)])])]]=N...
def to_string(self, buf: Optional[IO[str]]=None, columns: Optional[Sequence[Name]]=None, col_space: Optional[Union[(str, int, Dict[(Name, Union[(str, int)])])]]=None, header: bool=True, index: bool=True, na_rep: str='NaN', formatters: Optional[Union[(List[Callable[([Any], str)]], Dict[(Name, Callable[([Any], str)])])]]...
-5,788,305,338,753,440,000
Render a DataFrame to a console-friendly tabular output. .. note:: This method should only be used if the resulting pandas object is expected to be small, as all the data is loaded into the driver's memory. If the input is large, set max_rows parameter. Parameters ---------- buf : StringIO-like, o...
python/pyspark/pandas/frame.py
to_string
Flyangz/spark
python
def to_string(self, buf: Optional[IO[str]]=None, columns: Optional[Sequence[Name]]=None, col_space: Optional[Union[(str, int, Dict[(Name, Union[(str, int)])])]]=None, header: bool=True, index: bool=True, na_rep: str='NaN', formatters: Optional[Union[(List[Callable[([Any], str)]], Dict[(Name, Callable[([Any], str)])])]]...
def to_dict(self, orient: str='dict', into: Type=dict) -> Union[(List, Mapping)]: "\n Convert the DataFrame to a dictionary.\n\n The type of the key-value pairs can be customized with the parameters\n (see below).\n\n .. note:: This method should only be used if the resulting pandas Data...
1,214,464,030,283,405,000
Convert the DataFrame to a dictionary. The type of the key-value pairs can be customized with the parameters (see below). .. note:: This method should only be used if the resulting pandas DataFrame is expected to be small, as all the data is loaded into the driver's memory. Parameters ---------- orient : str {'d...
python/pyspark/pandas/frame.py
to_dict
Flyangz/spark
python
def to_dict(self, orient: str='dict', into: Type=dict) -> Union[(List, Mapping)]: "\n Convert the DataFrame to a dictionary.\n\n The type of the key-value pairs can be customized with the parameters\n (see below).\n\n .. note:: This method should only be used if the resulting pandas Data...
def to_latex(self, buf: Optional[IO[str]]=None, columns: Optional[List[Name]]=None, col_space: Optional[int]=None, header: bool=True, index: bool=True, na_rep: str='NaN', formatters: Optional[Union[(List[Callable[([Any], str)]], Dict[(Name, Callable[([Any], str)])])]]=None, float_format: Optional[Callable[([float], str...
1,324,847,439,956,079,400
Render an object to a LaTeX tabular environment table. Render an object to a tabular environment table. You can splice this into a LaTeX document. Requires usepackage{booktabs}. .. note:: This method should only be used if the resulting pandas object is expected to be small, as all the data is loaded into t...
python/pyspark/pandas/frame.py
to_latex
Flyangz/spark
python
def to_latex(self, buf: Optional[IO[str]]=None, columns: Optional[List[Name]]=None, col_space: Optional[int]=None, header: bool=True, index: bool=True, na_rep: str='NaN', formatters: Optional[Union[(List[Callable[([Any], str)]], Dict[(Name, Callable[([Any], str)])])]]=None, float_format: Optional[Callable[([float], str...
def transpose(self) -> 'DataFrame': "\n Transpose index and columns.\n\n Reflect the DataFrame over its main diagonal by writing rows as columns\n and vice-versa. The property :attr:`.T` is an accessor to the method\n :meth:`transpose`.\n\n .. note:: This method is based on an exp...
5,282,941,633,021,687,000
Transpose index and columns. Reflect the DataFrame over its main diagonal by writing rows as columns and vice-versa. The property :attr:`.T` is an accessor to the method :meth:`transpose`. .. note:: This method is based on an expensive operation due to the nature of big data. Internally it needs to generate each ...
python/pyspark/pandas/frame.py
transpose
Flyangz/spark
python
def transpose(self) -> 'DataFrame': "\n Transpose index and columns.\n\n Reflect the DataFrame over its main diagonal by writing rows as columns\n and vice-versa. The property :attr:`.T` is an accessor to the method\n :meth:`transpose`.\n\n .. note:: This method is based on an exp...
def apply(self, func: Callable, axis: Axis=0, args: Sequence[Any]=(), **kwds: Any) -> Union[('Series', 'DataFrame', 'Index')]: '\n Apply a function along an axis of the DataFrame.\n\n Objects passed to the function are Series objects whose index is\n either the DataFrame\'s index (``axis=0``) o...
2,361,285,583,190,661,600
Apply a function along an axis of the DataFrame. Objects passed to the function are Series objects whose index is either the DataFrame's index (``axis=0``) or the DataFrame's columns (``axis=1``). See also `Transform and apply a function <https://koalas.readthedocs.io/en/latest/user_guide/transform_apply.html>`_. .....
python/pyspark/pandas/frame.py
apply
Flyangz/spark
python
def apply(self, func: Callable, axis: Axis=0, args: Sequence[Any]=(), **kwds: Any) -> Union[('Series', 'DataFrame', 'Index')]: '\n Apply a function along an axis of the DataFrame.\n\n Objects passed to the function are Series objects whose index is\n either the DataFrame\'s index (``axis=0``) o...
def transform(self, func: Callable[(..., 'Series')], axis: Axis=0, *args: Any, **kwargs: Any) -> 'DataFrame': "\n Call ``func`` on self producing a Series with transformed values\n and that has the same length as its input.\n\n See also `Transform and apply a function\n <https://koalas.r...
1,455,522,935,585,915,100
Call ``func`` on self producing a Series with transformed values and that has the same length as its input. See also `Transform and apply a function <https://koalas.readthedocs.io/en/latest/user_guide/transform_apply.html>`_. .. note:: this API executes the function once to infer the type which is potentially ex...
python/pyspark/pandas/frame.py
transform
Flyangz/spark
python
def transform(self, func: Callable[(..., 'Series')], axis: Axis=0, *args: Any, **kwargs: Any) -> 'DataFrame': "\n Call ``func`` on self producing a Series with transformed values\n and that has the same length as its input.\n\n See also `Transform and apply a function\n <https://koalas.r...
def pop(self, item: Name) -> 'DataFrame': "\n Return item and drop from frame. Raise KeyError if not found.\n\n Parameters\n ----------\n item : str\n Label of column to be popped.\n\n Returns\n -------\n Series\n\n Examples\n --------\n ...
-7,348,325,957,458,014,000
Return item and drop from frame. Raise KeyError if not found. Parameters ---------- item : str Label of column to be popped. Returns ------- Series Examples -------- >>> df = ps.DataFrame([('falcon', 'bird', 389.0), ... ('parrot', 'bird', 24.0), ... ('lion', 'mammal', 80.5),...
python/pyspark/pandas/frame.py
pop
Flyangz/spark
python
def pop(self, item: Name) -> 'DataFrame': "\n Return item and drop from frame. Raise KeyError if not found.\n\n Parameters\n ----------\n item : str\n Label of column to be popped.\n\n Returns\n -------\n Series\n\n Examples\n --------\n ...
def xs(self, key: Name, axis: Axis=0, level: Optional[int]=None) -> DataFrameOrSeries: "\n Return cross-section from the DataFrame.\n\n This method takes a `key` argument to select data at a particular\n level of a MultiIndex.\n\n Parameters\n ----------\n key : label or tu...
8,990,384,505,172,742,000
Return cross-section from the DataFrame. This method takes a `key` argument to select data at a particular level of a MultiIndex. Parameters ---------- key : label or tuple of label Label contained in the index, or partially in a MultiIndex. axis : 0 or 'index', default 0 Axis to retrieve cross-section on. ...
python/pyspark/pandas/frame.py
xs
Flyangz/spark
python
def xs(self, key: Name, axis: Axis=0, level: Optional[int]=None) -> DataFrameOrSeries: "\n Return cross-section from the DataFrame.\n\n This method takes a `key` argument to select data at a particular\n level of a MultiIndex.\n\n Parameters\n ----------\n key : label or tu...
def between_time(self, start_time: Union[(datetime.time, str)], end_time: Union[(datetime.time, str)], include_start: bool=True, include_end: bool=True, axis: Axis=0) -> 'DataFrame': "\n Select values between particular times of the day (example: 9:00-9:30 AM).\n\n By setting ``start_time`` to be late...
-4,282,171,030,924,375,000
Select values between particular times of the day (example: 9:00-9:30 AM). By setting ``start_time`` to be later than ``end_time``, you can get the times that are *not* between the two times. Parameters ---------- start_time : datetime.time or str Initial time as a time filter limit. end_time : datetime.time or s...
python/pyspark/pandas/frame.py
between_time
Flyangz/spark
python
def between_time(self, start_time: Union[(datetime.time, str)], end_time: Union[(datetime.time, str)], include_start: bool=True, include_end: bool=True, axis: Axis=0) -> 'DataFrame': "\n Select values between particular times of the day (example: 9:00-9:30 AM).\n\n By setting ``start_time`` to be late...
def at_time(self, time: Union[(datetime.time, str)], asof: bool=False, axis: Axis=0) -> 'DataFrame': "\n Select values at particular time of day (example: 9:30AM).\n\n Parameters\n ----------\n time : datetime.time or str\n axis : {0 or 'index', 1 or 'columns'}, default 0\n\n ...
4,169,738,953,941,191,000
Select values at particular time of day (example: 9:30AM). Parameters ---------- time : datetime.time or str axis : {0 or 'index', 1 or 'columns'}, default 0 Returns ------- DataFrame Raises ------ TypeError If the index is not a :class:`DatetimeIndex` See Also -------- between_time : Select values between par...
python/pyspark/pandas/frame.py
at_time
Flyangz/spark
python
def at_time(self, time: Union[(datetime.time, str)], asof: bool=False, axis: Axis=0) -> 'DataFrame': "\n Select values at particular time of day (example: 9:30AM).\n\n Parameters\n ----------\n time : datetime.time or str\n axis : {0 or 'index', 1 or 'columns'}, default 0\n\n ...
def where(self, cond: DataFrameOrSeries, other: Union[(DataFrameOrSeries, Any)]=np.nan, axis: Axis=None) -> 'DataFrame': '\n Replace values where the condition is False.\n\n Parameters\n ----------\n cond : boolean DataFrame\n Where cond is True, keep the original value. Where...
-4,389,328,793,971,374,600
Replace values where the condition is False. Parameters ---------- cond : boolean DataFrame Where cond is True, keep the original value. Where False, replace with corresponding value from other. other : scalar, DataFrame Entries where cond is False are replaced with corresponding value from other. axis : i...
python/pyspark/pandas/frame.py
where
Flyangz/spark
python
def where(self, cond: DataFrameOrSeries, other: Union[(DataFrameOrSeries, Any)]=np.nan, axis: Axis=None) -> 'DataFrame': '\n Replace values where the condition is False.\n\n Parameters\n ----------\n cond : boolean DataFrame\n Where cond is True, keep the original value. Where...
def mask(self, cond: DataFrameOrSeries, other: Union[(DataFrameOrSeries, Any)]=np.nan) -> 'DataFrame': '\n Replace values where the condition is True.\n\n Parameters\n ----------\n cond : boolean DataFrame\n Where cond is False, keep the original value. Where True,\n ...
2,378,262,501,612,776,400
Replace values where the condition is True. Parameters ---------- cond : boolean DataFrame Where cond is False, keep the original value. Where True, replace with corresponding value from other. other : scalar, DataFrame Entries where cond is True are replaced with corresponding value from other. Returns -...
python/pyspark/pandas/frame.py
mask
Flyangz/spark
python
def mask(self, cond: DataFrameOrSeries, other: Union[(DataFrameOrSeries, Any)]=np.nan) -> 'DataFrame': '\n Replace values where the condition is True.\n\n Parameters\n ----------\n cond : boolean DataFrame\n Where cond is False, keep the original value. Where True,\n ...
@property def index(self) -> 'Index': 'The index (row labels) Column of the DataFrame.\n\n Currently not supported when the DataFrame has no index.\n\n See Also\n --------\n Index\n ' from pyspark.pandas.indexes.base import Index return Index._new_instance(self)
7,843,778,894,878,117,000
The index (row labels) Column of the DataFrame. Currently not supported when the DataFrame has no index. See Also -------- Index
python/pyspark/pandas/frame.py
index
Flyangz/spark
python
@property def index(self) -> 'Index': 'The index (row labels) Column of the DataFrame.\n\n Currently not supported when the DataFrame has no index.\n\n See Also\n --------\n Index\n ' from pyspark.pandas.indexes.base import Index return Index._new_instance(self)
@property def empty(self) -> bool: "\n Returns true if the current DataFrame is empty. Otherwise, returns false.\n\n Examples\n --------\n >>> ps.range(10).empty\n False\n\n >>> ps.range(0).empty\n True\n\n >>> ps.DataFrame({}, index=list('abc')).empty\n ...
661,226,960,287,139,000
Returns true if the current DataFrame is empty. Otherwise, returns false. Examples -------- >>> ps.range(10).empty False >>> ps.range(0).empty True >>> ps.DataFrame({}, index=list('abc')).empty True
python/pyspark/pandas/frame.py
empty
Flyangz/spark
python
@property def empty(self) -> bool: "\n Returns true if the current DataFrame is empty. Otherwise, returns false.\n\n Examples\n --------\n >>> ps.range(10).empty\n False\n\n >>> ps.range(0).empty\n True\n\n >>> ps.DataFrame({}, index=list('abc')).empty\n ...
@property def style(self) -> 'Styler': '\n Property returning a Styler object containing methods for\n building a styled HTML representation for the DataFrame.\n\n .. note:: currently it collects top 1000 rows and return its\n pandas `pandas.io.formats.style.Styler` instance.\n\n ...
-856,584,549,295,894,000
Property returning a Styler object containing methods for building a styled HTML representation for the DataFrame. .. note:: currently it collects top 1000 rows and return its pandas `pandas.io.formats.style.Styler` instance. Examples -------- >>> ps.range(1001).style # doctest: +SKIP <pandas.io.formats.style.St...
python/pyspark/pandas/frame.py
style
Flyangz/spark
python
@property def style(self) -> 'Styler': '\n Property returning a Styler object containing methods for\n building a styled HTML representation for the DataFrame.\n\n .. note:: currently it collects top 1000 rows and return its\n pandas `pandas.io.formats.style.Styler` instance.\n\n ...
def set_index(self, keys: Union[(Name, List[Name])], drop: bool=True, append: bool=False, inplace: bool=False) -> Optional['DataFrame']: 'Set the DataFrame index (row labels) using one or more existing columns.\n\n Set the DataFrame index (row labels) using one or more existing\n columns or arrays (of...
6,711,258,925,545,952,000
Set the DataFrame index (row labels) using one or more existing columns. Set the DataFrame index (row labels) using one or more existing columns or arrays (of the correct length). The index can replace the existing index or expand on it. Parameters ---------- keys : label or array-like or list of labels/arrays Th...
python/pyspark/pandas/frame.py
set_index
Flyangz/spark
python
def set_index(self, keys: Union[(Name, List[Name])], drop: bool=True, append: bool=False, inplace: bool=False) -> Optional['DataFrame']: 'Set the DataFrame index (row labels) using one or more existing columns.\n\n Set the DataFrame index (row labels) using one or more existing\n columns or arrays (of...
def reset_index(self, level: Optional[Union[(int, Name, Sequence[Union[(int, Name)]])]]=None, drop: bool=False, inplace: bool=False, col_level: int=0, col_fill: str='') -> Optional['DataFrame']: "Reset the index, or a level of it.\n\n For DataFrame with multi-level index, return new DataFrame with labeling i...
5,446,117,962,936,664,000
Reset the index, or a level of it. For DataFrame with multi-level index, return new DataFrame with labeling information in the columns under the index names, defaulting to 'level_0', 'level_1', etc. if any are None. For a standard index, the index name will be used (if set), otherwise a default 'index' or 'level_0' (i...
python/pyspark/pandas/frame.py
reset_index
Flyangz/spark
python
def reset_index(self, level: Optional[Union[(int, Name, Sequence[Union[(int, Name)]])]]=None, drop: bool=False, inplace: bool=False, col_level: int=0, col_fill: str='') -> Optional['DataFrame']: "Reset the index, or a level of it.\n\n        For DataFrame with multi-level index, return new DataFrame with labeling inf...
def isnull(self) -> 'DataFrame': "\n Detects missing values for items in the current Dataframe.\n\n Return a boolean same-sized Dataframe indicating if the values are NA.\n NA values, such as None or numpy.NaN, gets mapped to True values.\n Everything else gets mapped to False values.\n\...
9,060,744,655,024,991,000
Detects missing values for items in the current Dataframe. Return a boolean same-sized Dataframe indicating if the values are NA. NA values, such as None or numpy.NaN, gets mapped to True values. Everything else gets mapped to False values. See Also -------- DataFrame.notnull Examples -------- >>> df = ps.DataFrame(...
python/pyspark/pandas/frame.py
isnull
Flyangz/spark
python
def isnull(self) -> 'DataFrame': "\n Detects missing values for items in the current Dataframe.\n\n Return a boolean same-sized Dataframe indicating if the values are NA.\n NA values, such as None or numpy.NaN, gets mapped to True values.\n Everything else gets mapped to False values.\n\...
def notnull(self) -> 'DataFrame': "\n Detects non-missing values for items in the current Dataframe.\n\n This function takes a dataframe and indicates whether it's\n values are valid (not missing, which is ``NaN`` in numeric\n datatypes, ``None`` or ``NaN`` in objects and ``NaT`` in date...
-3,189,777,233,647,179,300
Detects non-missing values for items in the current Dataframe. This function takes a dataframe and indicates whether it's values are valid (not missing, which is ``NaN`` in numeric datatypes, ``None`` or ``NaN`` in objects and ``NaT`` in datetimelike). See Also -------- DataFrame.isnull Examples -------- >>> df = ps...
python/pyspark/pandas/frame.py
notnull
Flyangz/spark
python
def notnull(self) -> 'DataFrame': "\n Detects non-missing values for items in the current Dataframe.\n\n This function takes a dataframe and indicates whether it's\n values are valid (not missing, which is ``NaN`` in numeric\n datatypes, ``None`` or ``NaN`` in objects and ``NaT`` in date...
def insert(self, loc: int, column: Name, value: Union[(Scalar, 'Series', Iterable)], allow_duplicates: bool=False) -> None: '\n Insert column into DataFrame at specified location.\n\n Raises a ValueError if `column` is already contained in the DataFrame,\n unless `allow_duplicates` is set to Tr...
-821,030,439,595,169,900
Insert column into DataFrame at specified location. Raises a ValueError if `column` is already contained in the DataFrame, unless `allow_duplicates` is set to True. Parameters ---------- loc : int Insertion index. Must verify 0 <= loc <= len(columns). column : str, number, or hashable object Label of the inse...
python/pyspark/pandas/frame.py
insert
Flyangz/spark
python
def insert(self, loc: int, column: Name, value: Union[(Scalar, 'Series', Iterable)], allow_duplicates: bool=False) -> None: '\n Insert column into DataFrame at specified location.\n\n Raises a ValueError if `column` is already contained in the DataFrame,\n unless `allow_duplicates` is set to Tr...
def shift(self, periods: int=1, fill_value: Optional[Any]=None) -> 'DataFrame': "\n Shift DataFrame by desired number of periods.\n\n .. note:: the current implementation of shift uses Spark's Window without\n specifying partition specification. This leads to move all data into\n ...
1,002,297,955,590,259,000
Shift DataFrame by desired number of periods. .. note:: the current implementation of shift uses Spark's Window without specifying partition specification. This leads to move all data into single partition in single machine and could cause serious performance degradation. Avoid this method against very lar...
python/pyspark/pandas/frame.py
shift
Flyangz/spark
python
def shift(self, periods: int=1, fill_value: Optional[Any]=None) -> 'DataFrame': "\n Shift DataFrame by desired number of periods.\n\n .. note:: the current implementation of shift uses Spark's Window without\n specifying partition specification. This leads to move all data into\n ...
def diff(self, periods: int=1, axis: Axis=0) -> 'DataFrame': "\n First discrete difference of element.\n\n Calculates the difference of a DataFrame element compared with another element in the\n DataFrame (default is the element in the same column of the previous row).\n\n .. note:: the ...
-2,800,644,616,705,503,700
First discrete difference of element. Calculates the difference of a DataFrame element compared with another element in the DataFrame (default is the element in the same column of the previous row). .. note:: the current implementation of diff uses Spark's Window without specifying partition specification. This l...
python/pyspark/pandas/frame.py
diff
Flyangz/spark
python
def diff(self, periods: int=1, axis: Axis=0) -> 'DataFrame': "\n First discrete difference of element.\n\n Calculates the difference of a DataFrame element compared with another element in the\n DataFrame (default is the element in the same column of the previous row).\n\n .. note:: the ...
def nunique(self, axis: Axis=0, dropna: bool=True, approx: bool=False, rsd: float=0.05) -> 'Series': "\n Return number of unique elements in the object.\n\n Excludes NA values by default.\n\n Parameters\n ----------\n axis : int, default 0 or 'index'\n Can only be set t...
1,471,450,883,314,847,200
Return number of unique elements in the object. Excludes NA values by default. Parameters ---------- axis : int, default 0 or 'index' Can only be set to 0 at the moment. dropna : bool, default True Don’t include NaN in the count. approx: bool, default False If False, will use the exact algorithm and retur...
python/pyspark/pandas/frame.py
nunique
Flyangz/spark
python
def nunique(self, axis: Axis=0, dropna: bool=True, approx: bool=False, rsd: float=0.05) -> 'Series': "\n Return number of unique elements in the object.\n\n Excludes NA values by default.\n\n Parameters\n ----------\n axis : int, default 0 or 'index'\n Can only be set t...
def round(self, decimals: Union[(int, Dict[(Name, int)], 'Series')]=0) -> 'DataFrame': "\n Round a DataFrame to a variable number of decimal places.\n\n Parameters\n ----------\n decimals : int, dict, Series\n Number of decimal places to round each column to. If an int is\n ...
5,604,764,155,469,943,000
Round a DataFrame to a variable number of decimal places. Parameters ---------- decimals : int, dict, Series Number of decimal places to round each column to. If an int is given, round each column to the same number of places. Otherwise dict and Series round to variable numbers of places. Column names ...
python/pyspark/pandas/frame.py
round
Flyangz/spark
python
def round(self, decimals: Union[(int, Dict[(Name, int)], 'Series')]=0) -> 'DataFrame': "\n Round a DataFrame to a variable number of decimal places.\n\n Parameters\n ----------\n decimals : int, dict, Series\n Number of decimal places to round each column to. If an int is\n ...
def duplicated(self, subset: Optional[Union[(Name, List[Name])]]=None, keep: Union[(bool, str)]='first') -> 'Series': "\n Return boolean Series denoting duplicate rows, optionally only considering certain columns.\n\n Parameters\n ----------\n subset : column label or sequence of labels,...
912,777,601,891,668,600
Return boolean Series denoting duplicate rows, optionally only considering certain columns. Parameters ---------- subset : column label or sequence of labels, optional Only consider certain columns for identifying duplicates, by default use all of the columns keep : {'first', 'last', False}, default 'first' ...
python/pyspark/pandas/frame.py
duplicated
Flyangz/spark
python
def duplicated(self, subset: Optional[Union[(Name, List[Name])]]=None, keep: Union[(bool, str)]='first') -> 'Series': "\n Return boolean Series denoting duplicate rows, optionally only considering certain columns.\n\n Parameters\n ----------\n subset : column label or sequence of labels,...
def dot(self, other: 'Series') -> 'Series': '\n Compute the matrix multiplication between the DataFrame and other.\n\n This method computes the matrix product between the DataFrame and the\n values of an other Series\n\n It can also be called using ``self @ other`` in Python >= 3.5.\n\n ...
-1,559,024,477,338,388,500
Compute the matrix multiplication between the DataFrame and other. This method computes the matrix product between the DataFrame and the values of an other Series It can also be called using ``self @ other`` in Python >= 3.5. .. note:: This method is based on an expensive operation due to the nature of big data....
python/pyspark/pandas/frame.py
dot
Flyangz/spark
python
def dot(self, other: 'Series') -> 'Series': '\n Compute the matrix multiplication between the DataFrame and other.\n\n This method computes the matrix product between the DataFrame and the\n values of an other Series\n\n It can also be called using ``self @ other`` in Python >= 3.5.\n\n ...
def __matmul__(self, other: 'Series') -> 'Series': '\n Matrix multiplication using binary `@` operator in Python>=3.5.\n ' return self.dot(other)
1,389,403,817,055,163,000
Matrix multiplication using binary `@` operator in Python>=3.5.
python/pyspark/pandas/frame.py
__matmul__
Flyangz/spark
python
def __matmul__(self, other: 'Series') -> 'Series': '\n \n ' return self.dot(other)
def to_delta(self, path: str, mode: str='w', partition_cols: Optional[Union[(str, List[str])]]=None, index_col: Optional[Union[(str, List[str])]]=None, **options: 'OptionalPrimitiveType') -> None: '\n Write the DataFrame out as a Delta Lake table.\n\n Parameters\n ----------\n path : str...
-1,846,093,383,728,173,800
Write the DataFrame out as a Delta Lake table. Parameters ---------- path : str, required Path to write to. mode : str Python write mode, default 'w'. .. note:: mode can accept the strings for Spark writing mode. Such as 'append', 'overwrite', 'ignore', 'error', 'errorifexists'. - 'append...
python/pyspark/pandas/frame.py
to_delta
Flyangz/spark
python
def to_delta(self, path: str, mode: str='w', partition_cols: Optional[Union[(str, List[str])]]=None, index_col: Optional[Union[(str, List[str])]]=None, **options: 'OptionalPrimitiveType') -> None: '\n Write the DataFrame out as a Delta Lake table.\n\n Parameters\n ----------\n path : str...
def to_parquet(self, path: str, mode: str='w', partition_cols: Optional[Union[(str, List[str])]]=None, compression: Optional[str]=None, index_col: Optional[Union[(str, List[str])]]=None, **options: Any) -> None: "\n Write the DataFrame out as a Parquet file or directory.\n\n Parameters\n ------...
7,255,174,748,984,033,000
Write the DataFrame out as a Parquet file or directory. Parameters ---------- path : str, required Path to write to. mode : str Python write mode, default 'w'. .. note:: mode can accept the strings for Spark writing mode. Such as 'append', 'overwrite', 'ignore', 'error', 'errorifexists'. ...
python/pyspark/pandas/frame.py
to_parquet
Flyangz/spark
python
def to_parquet(self, path: str, mode: str='w', partition_cols: Optional[Union[(str, List[str])]]=None, compression: Optional[str]=None, index_col: Optional[Union[(str, List[str])]]=None, **options: Any) -> None: "\n Write the DataFrame out as a Parquet file or directory.\n\n Parameters\n ------...
def to_orc(self, path: str, mode: str='w', partition_cols: Optional[Union[(str, List[str])]]=None, index_col: Optional[Union[(str, List[str])]]=None, **options: 'OptionalPrimitiveType') -> None: "\n Write the DataFrame out as a ORC file or directory.\n\n Parameters\n ----------\n path : ...
-2,983,613,375,367,457,300
Write the DataFrame out as a ORC file or directory. Parameters ---------- path : str, required Path to write to. mode : str Python write mode, default 'w'. .. note:: mode can accept the strings for Spark writing mode. Such as 'append', 'overwrite', 'ignore', 'error', 'errorifexists'. - 'a...
python/pyspark/pandas/frame.py
to_orc
Flyangz/spark
python
def to_orc(self, path: str, mode: str='w', partition_cols: Optional[Union[(str, List[str])]]=None, index_col: Optional[Union[(str, List[str])]]=None, **options: 'OptionalPrimitiveType') -> None: "\n Write the DataFrame out as a ORC file or directory.\n\n Parameters\n ----------\n path : ...
def to_spark_io(self, path: Optional[str]=None, format: Optional[str]=None, mode: str='overwrite', partition_cols: Optional[Union[(str, List[str])]]=None, index_col: Optional[Union[(str, List[str])]]=None, **options: 'OptionalPrimitiveType') -> None: 'An alias for :func:`DataFrame.spark.to_spark_io`.\n See :...
-9,186,847,900,837,489,000
An alias for :func:`DataFrame.spark.to_spark_io`. See :meth:`pyspark.pandas.spark.accessors.SparkFrameMethods.to_spark_io`. .. deprecated:: 3.2.0 Use :func:`DataFrame.spark.to_spark_io` instead.
python/pyspark/pandas/frame.py
to_spark_io
Flyangz/spark
python
def to_spark_io(self, path: Optional[str]=None, format: Optional[str]=None, mode: str='overwrite', partition_cols: Optional[Union[(str, List[str])]]=None, index_col: Optional[Union[(str, List[str])]]=None, **options: 'OptionalPrimitiveType') -> None: 'An alias for :func:`DataFrame.spark.to_spark_io`.\n See :...
def _to_spark(self, index_col: Optional[Union[(str, List[str])]]=None) -> SparkDataFrame: '\n Same as `to_spark()`, without issueing the advice log when `index_col` is not specified\n for internal usage.\n ' return self.spark.frame(index_col)
3,150,529,469,738,035,000
Same as `to_spark()`, without issueing the advice log when `index_col` is not specified for internal usage.
python/pyspark/pandas/frame.py
_to_spark
Flyangz/spark
python
def _to_spark(self, index_col: Optional[Union[(str, List[str])]]=None) -> SparkDataFrame: '\n Same as `to_spark()`, without issueing the advice log when `index_col` is not specified\n for internal usage.\n ' return self.spark.frame(index_col)
def to_pandas(self) -> pd.DataFrame: "\n Return a pandas DataFrame.\n\n .. note:: This method should only be used if the resulting pandas DataFrame is expected\n to be small, as all the data is loaded into the driver's memory.\n\n Examples\n --------\n >>> df = ps.DataF...
7,510,011,846,533,468,000
Return a pandas DataFrame. .. note:: This method should only be used if the resulting pandas DataFrame is expected to be small, as all the data is loaded into the driver's memory. Examples -------- >>> df = ps.DataFrame([(.2, .3), (.0, .6), (.6, .0), (.2, .1)], ... columns=['dogs', 'cats']) >>> ...
python/pyspark/pandas/frame.py
to_pandas
Flyangz/spark
python
def to_pandas(self) -> pd.DataFrame: "\n Return a pandas DataFrame.\n\n .. note:: This method should only be used if the resulting pandas DataFrame is expected\n to be small, as all the data is loaded into the driver's memory.\n\n Examples\n --------\n >>> df = ps.DataF...
def _to_pandas(self) -> pd.DataFrame: '\n Same as `to_pandas()`, without issueing the advice log for internal usage.\n ' return self._internal.to_pandas_frame.copy()
1,384,124,970,409,361
Same as `to_pandas()`, without issueing the advice log for internal usage.
python/pyspark/pandas/frame.py
_to_pandas
Flyangz/spark
python
def _to_pandas(self) -> pd.DataFrame: '\n \n ' return self._internal.to_pandas_frame.copy()
def assign(self, **kwargs: Any) -> 'DataFrame': "\n Assign new columns to a DataFrame.\n\n Returns a new object with all original columns in addition to new ones.\n Existing columns that are re-assigned will be overwritten.\n\n Parameters\n ----------\n **kwargs : dict of {...
4,465,799,761,529,371,000
Assign new columns to a DataFrame. Returns a new object with all original columns in addition to new ones. Existing columns that are re-assigned will be overwritten. Parameters ---------- **kwargs : dict of {str: callable, Series or Index} The column names are keywords. If the values are callable, they are co...
python/pyspark/pandas/frame.py
assign
Flyangz/spark
python
def assign(self, **kwargs: Any) -> 'DataFrame': "\n Assign new columns to a DataFrame.\n\n Returns a new object with all original columns in addition to new ones.\n Existing columns that are re-assigned will be overwritten.\n\n Parameters\n ----------\n **kwargs : dict of {...
@staticmethod def from_records(data: Union[(np.ndarray, List[tuple], dict, pd.DataFrame)], index: Union[(str, list, np.ndarray)]=None, exclude: list=None, columns: list=None, coerce_float: bool=False, nrows: int=None) -> 'DataFrame': "\n Convert structured or record ndarray to DataFrame.\n\n Parameter...
8,813,867,570,616,891,000
Convert structured or record ndarray to DataFrame. Parameters ---------- data : ndarray (structured dtype), list of tuples, dict, or DataFrame index : string, list of fields, array-like Field of array to use as the index, alternately a specific set of input labels to use exclude : sequence, default None Column...
python/pyspark/pandas/frame.py
from_records
Flyangz/spark
python
@staticmethod def from_records(data: Union[(np.ndarray, List[tuple], dict, pd.DataFrame)], index: Union[(str, list, np.ndarray)]=None, exclude: list=None, columns: list=None, coerce_float: bool=False, nrows: int=None) -> 'DataFrame': "\n Convert structured or record ndarray to DataFrame.\n\n Parameter...
def to_records(self, index: bool=True, column_dtypes: Optional[Union[(str, Dtype, Dict[(Name, Union[(str, Dtype)])])]]=None, index_dtypes: Optional[Union[(str, Dtype, Dict[(Name, Union[(str, Dtype)])])]]=None) -> np.recarray: '\n Convert DataFrame to a NumPy record array.\n\n Index will be included as...
-2,026,692,314,611,704,600
Convert DataFrame to a NumPy record array. Index will be included as the first field of the record array if requested. .. note:: This method should only be used if the resulting NumPy ndarray is expected to be small, as all the data is loaded into the driver's memory. Parameters ---------- index : bool, default ...
python/pyspark/pandas/frame.py
to_records
Flyangz/spark
python
def to_records(self, index: bool=True, column_dtypes: Optional[Union[(str, Dtype, Dict[(Name, Union[(str, Dtype)])])]]=None, index_dtypes: Optional[Union[(str, Dtype, Dict[(Name, Union[(str, Dtype)])])]]=None) -> np.recarray: '\n Convert DataFrame to a NumPy record array.\n\n Index will be included as...
def copy(self, deep: bool=True) -> 'DataFrame': "\n Make a copy of this object's indices and data.\n\n Parameters\n ----------\n deep : bool, default True\n this parameter is not supported but just dummy parameter to match pandas.\n\n Returns\n -------\n c...
-6,249,444,629,079,683,000
Make a copy of this object's indices and data. Parameters ---------- deep : bool, default True this parameter is not supported but just dummy parameter to match pandas. Returns ------- copy : DataFrame Examples -------- >>> df = ps.DataFrame({'x': [1, 2], 'y': [3, 4], 'z': [5, 6], 'w': [7, 8]}, ... ...
python/pyspark/pandas/frame.py
copy
Flyangz/spark
python
def copy(self, deep: bool=True) -> 'DataFrame': "\n Make a copy of this object's indices and data.\n\n Parameters\n ----------\n deep : bool, default True\n this parameter is not supported but just dummy parameter to match pandas.\n\n Returns\n -------\n c...
def dropna(self, axis: Axis=0, how: str='any', thresh: Optional[int]=None, subset: Optional[Union[(Name, List[Name])]]=None, inplace: bool=False) -> Optional['DataFrame']: '\n Remove missing values.\n\n Parameters\n ----------\n axis : {0 or \'index\'}, default 0\n Determine i...
1,784,172,941,946,276,000
Remove missing values. Parameters ---------- axis : {0 or 'index'}, default 0 Determine if rows or columns which contain missing values are removed. * 0, or 'index' : Drop rows which contain missing values. how : {'any', 'all'}, default 'any' Determine if row or column is removed from DataFrame, when ...
python/pyspark/pandas/frame.py
dropna
Flyangz/spark
python
def dropna(self, axis: Axis=0, how: str='any', thresh: Optional[int]=None, subset: Optional[Union[(Name, List[Name])]]=None, inplace: bool=False) -> Optional['DataFrame']: '\n Remove missing values.\n\n Parameters\n ----------\n axis : {0 or \'index\'}, default 0\n Determine i...
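A short, assumed example of `dropna` on a toy frame (column names are hypothetical; assumes `ps` is `pyspark.pandas`):
>>> psdf = ps.DataFrame({'name': ['Alfred', 'Batman'], 'toy': [None, 'Batmobile']})
>>> psdf.dropna()                  # drop rows containing any missing value
>>> psdf.dropna(subset=['toy'])    # only look for missing values in the 'toy' column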
def fillna(self, value: Optional[Union[(Any, Dict[(Name, Any)])]]=None, method: Optional[str]=None, axis: Optional[Axis]=None, inplace: bool=False, limit: Optional[int]=None) -> Optional['DataFrame']: "Fill NA/NaN values.\n\n .. note:: the current implementation of 'method' parameter in fillna uses Spark's W...
1,108,271,819,624,094,200
Fill NA/NaN values. .. note:: the current implementation of 'method' parameter in fillna uses Spark's Window without specifying partition specification. This leads to move all data into single partition in single machine and could cause serious performance degradation. Avoid this method against very large ...
python/pyspark/pandas/frame.py
fillna
Flyangz/spark
python
def fillna(self, value: Optional[Union[(Any, Dict[(Name, Any)])]]=None, method: Optional[str]=None, axis: Optional[Axis]=None, inplace: bool=False, limit: Optional[int]=None) -> Optional['DataFrame']: "Fill NA/NaN values.\n\n .. note:: the current implementation of 'method' parameter in fillna uses Spark's W...
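An illustrative `fillna` sketch (frame and replacement values are made up for the example):
>>> psdf = ps.DataFrame({'A': [None, 3.0], 'B': [2.0, None]})
>>> psdf.fillna(0)                   # one scalar applied to every column
>>> psdf.fillna({'A': 0, 'B': 99})   # per-column replacement values via a dict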
def replace(self, to_replace: Optional[Union[(Any, List, Tuple, Dict)]]=None, value: Optional[Any]=None, inplace: bool=False, limit: Optional[int]=None, regex: bool=False, method: str='pad') -> Optional['DataFrame']: '\n Returns a new DataFrame replacing a value with another value.\n\n Parameters\n ...
-3,898,269,011,529,222,000
Returns a new DataFrame replacing a value with another value. Parameters ---------- to_replace : int, float, string, list, tuple or dict Value to be replaced. value : int, float, string, list or tuple Value to use to replace holes. The replacement value must be an int, float, or string. If value is a l...
python/pyspark/pandas/frame.py
replace
Flyangz/spark
python
def replace(self, to_replace: Optional[Union[(Any, List, Tuple, Dict)]]=None, value: Optional[Any]=None, inplace: bool=False, limit: Optional[int]=None, regex: bool=False, method: str='pad') -> Optional['DataFrame']: '\n Returns a new DataFrame replacing a value with another value.\n\n Parameters\n ...
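A hypothetical `replace` sketch showing the scalar and list forms (data invented for illustration):
>>> psdf = ps.DataFrame({'name': ['Ironman', 'Captain America'], 'weapon': ['Mark-45', 'Shield']})
>>> psdf.replace('Ironman', 'War-Machine')                                  # replace one value wherever it appears
>>> psdf.replace(['Ironman', 'Captain America'], ['Rescue', 'Hawkeye'])     # positional list-to-list replacement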
def clip(self, lower: Union[(float, int)]=None, upper: Union[(float, int)]=None) -> 'DataFrame': '\n Trim values at input threshold(s).\n\n Assigns values outside boundary to boundary values.\n\n Parameters\n ----------\n lower : float or int, default None\n Minimum thr...
2,908,171,955,569,142,000
Trim values at input threshold(s). Assigns values outside boundary to boundary values. Parameters ---------- lower : float or int, default None Minimum threshold value. All values below this threshold will be set to it. upper : float or int, default None Maximum threshold value. All values above this threshol...
python/pyspark/pandas/frame.py
clip
Flyangz/spark
python
def clip(self, lower: Union[(float, int)]=None, upper: Union[(float, int)]=None) -> 'DataFrame': '\n Trim values at input threshold(s).\n\n Assigns values outside boundary to boundary values.\n\n Parameters\n ----------\n lower : float or int, default None\n Minimum thr...
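A minimal `clip` sketch under assumed data:
>>> psdf = ps.DataFrame({'A': [-3, 0, 7]})
>>> psdf.clip(lower=0, upper=5)   # values below 0 become 0, values above 5 become 5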
def head(self, n: int=5) -> 'DataFrame': "\n Return the first `n` rows.\n\n This function returns the first `n` rows for the object based\n on position. It is useful for quickly testing if your object\n has the right type of data in it.\n\n Parameters\n ----------\n ...
-6,046,103,581,036,672,000
Return the first `n` rows. This function returns the first `n` rows for the object based on position. It is useful for quickly testing if your object has the right type of data in it. Parameters ---------- n : int, default 5 Number of rows to select. Returns ------- obj_head : same type as caller The first `...
python/pyspark/pandas/frame.py
head
Flyangz/spark
python
def head(self, n: int=5) -> 'DataFrame': "\n Return the first `n` rows.\n\n This function returns the first `n` rows for the object based\n on position. It is useful for quickly testing if your object\n has the right type of data in it.\n\n Parameters\n ----------\n ...
def last(self, offset: Union[(str, DateOffset)]) -> 'DataFrame': "\n Select final periods of time series data based on a date offset.\n\n When having a DataFrame with dates as index, this function can\n select the last few rows based on a date offset.\n\n Parameters\n ----------\n...
4,773,362,931,813,385,000
Select final periods of time series data based on a date offset. When having a DataFrame with dates as index, this function can select the last few rows based on a date offset. Parameters ---------- offset : str or DateOffset The offset length of the data that will be selected. For instance, '3D' will display...
python/pyspark/pandas/frame.py
last
Flyangz/spark
python
def last(self, offset: Union[(str, DateOffset)]) -> 'DataFrame': "\n Select final periods of time series data based on a date offset.\n\n When having a DataFrame with dates as index, this function can\n select the last few rows based on a date offset.\n\n Parameters\n ----------\n...
def first(self, offset: Union[(str, DateOffset)]) -> 'DataFrame': "\n Select first periods of time series data based on a date offset.\n\n When having a DataFrame with dates as index, this function can\n select the first few rows based on a date offset.\n\n Parameters\n ----------...
-1,184,452,550,105,267,200
Select first periods of time series data based on a date offset. When having a DataFrame with dates as index, this function can select the first few rows based on a date offset. Parameters ---------- offset : str or DateOffset The offset length of the data that will be selected. For instance, '3D' will displa...
python/pyspark/pandas/frame.py
first
Flyangz/spark
python
def first(self, offset: Union[(str, DateOffset)]) -> 'DataFrame': "\n Select first periods of time series data based on a date offset.\n\n When having a DataFrame with dates as index, this function can\n select the first few rows based on a date offset.\n\n Parameters\n ----------...
def pivot_table(self, values: Optional[Union[(Name, List[Name])]]=None, index: Optional[List[Name]]=None, columns: Optional[Name]=None, aggfunc: Union[(str, Dict[(Name, str)])]='mean', fill_value: Optional[Any]=None) -> 'DataFrame': '\n Create a spreadsheet-style pivot table as a DataFrame. The levels in\n ...
9,017,028,467,064,092,000
Create a spreadsheet-style pivot table as a DataFrame. The levels in the pivot table will be stored in MultiIndex objects (hierarchical indexes) on the index and columns of the result DataFrame. Parameters ---------- values : column to aggregate. They should be either a list less than three or a string. index : co...
python/pyspark/pandas/frame.py
pivot_table
Flyangz/spark
python
def pivot_table(self, values: Optional[Union[(Name, List[Name])]]=None, index: Optional[List[Name]]=None, columns: Optional[Name]=None, aggfunc: Union[(str, Dict[(Name, str)])]='mean', fill_value: Optional[Any]=None) -> 'DataFrame': '\n Create a spreadsheet-style pivot table as a DataFrame. The levels in\n ...
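An assumed `pivot_table` example (column names and the aggregation choice are illustrative):
>>> psdf = ps.DataFrame({'A': ['foo', 'foo', 'bar'], 'B': ['one', 'two', 'one'], 'C': [1, 2, 3]})
>>> psdf.pivot_table(values='C', index=['A'], columns='B', aggfunc='sum', fill_value=0)   # note: index is given as a list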
def pivot(self, index: Optional[Name]=None, columns: Optional[Name]=None, values: Optional[Name]=None) -> 'DataFrame': '\n Return reshaped DataFrame organized by given index / column values.\n\n Reshape data (produce a "pivot" table) based on column values. Uses\n unique values from specified `...
-4,641,697,249,891,161,000
Return reshaped DataFrame organized by given index / column values. Reshape data (produce a "pivot" table) based on column values. Uses unique values from specified `index` / `columns` to form axes of the resulting DataFrame. This function does not support data aggregation. Parameters ---------- index : string, optio...
python/pyspark/pandas/frame.py
pivot
Flyangz/spark
python
def pivot(self, index: Optional[Name]=None, columns: Optional[Name]=None, values: Optional[Name]=None) -> 'DataFrame': '\n Return reshaped DataFrame organized by given index / column values.\n\n Reshape data (produce a "pivot" table) based on column values. Uses\n unique values from specified `...
@property def columns(self) -> pd.Index: 'The column labels of the DataFrame.' names = [(name if ((name is None) or (len(name) > 1)) else name[0]) for name in self._internal.column_label_names] if (self._internal.column_labels_level > 1): columns = pd.MultiIndex.from_tuples(self._internal.column_lab...
-4,037,255,681,569,786,400
The column labels of the DataFrame.
python/pyspark/pandas/frame.py
columns
Flyangz/spark
python
@property def columns(self) -> pd.Index: names = [(name if ((name is None) or (len(name) > 1)) else name[0]) for name in self._internal.column_label_names] if (self._internal.column_labels_level > 1): columns = pd.MultiIndex.from_tuples(self._internal.column_labels, names=names) else: c...
@property def dtypes(self) -> pd.Series: "Return the dtypes in the DataFrame.\n\n This returns a Series with the data type of each column. The result's index is the original\n DataFrame's columns. Columns with mixed types are stored with the object dtype.\n\n Returns\n -------\n p...
-7,807,592,717,761,942,000
Return the dtypes in the DataFrame. This returns a Series with the data type of each column. The result's index is the original DataFrame's columns. Columns with mixed types are stored with the object dtype. Returns ------- pd.Series The data type of each column. Examples -------- >>> df = ps.DataFrame({'a': lis...
python/pyspark/pandas/frame.py
dtypes
Flyangz/spark
python
@property def dtypes(self) -> pd.Series: "Return the dtypes in the DataFrame.\n\n This returns a Series with the data type of each column. The result's index is the original\n DataFrame's columns. Columns with mixed types are stored with the object dtype.\n\n Returns\n -------\n p...
def select_dtypes(self, include: Optional[Union[(str, List[str])]]=None, exclude: Optional[Union[(str, List[str])]]=None) -> 'DataFrame': "\n Return a subset of the DataFrame's columns based on the column dtypes.\n\n Parameters\n ----------\n include, exclude : scalar or list-like\n ...
2,451,480,011,340,020,700
Return a subset of the DataFrame's columns based on the column dtypes. Parameters ---------- include, exclude : scalar or list-like A selection of dtypes or strings to be included/excluded. At least one of these parameters must be supplied. It also takes Spark SQL DDL type strings, for instance, 'string' a...
python/pyspark/pandas/frame.py
select_dtypes
Flyangz/spark
python
def select_dtypes(self, include: Optional[Union[(str, List[str])]]=None, exclude: Optional[Union[(str, List[str])]]=None) -> 'DataFrame': "\n Return a subset of the DataFrame's columns based on the column dtypes.\n\n Parameters\n ----------\n include, exclude : scalar or list-like\n ...
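A small `select_dtypes` sketch with invented columns:
>>> psdf = ps.DataFrame({'a': [1, 2], 'b': [1.0, 2.0], 'c': ['x', 'y']})
>>> psdf.select_dtypes(include='float64')   # keep only float columns
>>> psdf.select_dtypes(exclude=['int'])     # drop integer columns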
def droplevel(self, level: Union[(int, Name, List[Union[(int, Name)]])], axis: Axis=0) -> 'DataFrame': '\n Return DataFrame with requested index / column level(s) removed.\n\n Parameters\n ----------\n level: int, str, or list-like\n If a string is given, must be the name of a...
6,361,529,296,815,516,000
Return DataFrame with requested index / column level(s) removed. Parameters ---------- level: int, str, or list-like If a string is given, must be the name of a level If list-like, elements must be names or positional indexes of levels. axis: {0 or ‘index’, 1 or ‘columns’}, default 0 Returns ------- DataFram...
python/pyspark/pandas/frame.py
droplevel
Flyangz/spark
python
def droplevel(self, level: Union[(int, Name, List[Union[(int, Name)]])], axis: Axis=0) -> 'DataFrame': '\n Return DataFrame with requested index / column level(s) removed.\n\n Parameters\n ----------\n level: int, str, or list-like\n If a string is given, must be the name of a...
def drop(self, labels: Optional[Union[(Name, List[Name])]]=None, axis: Optional[Axis]=0, index: Union[(Name, List[Name])]=None, columns: Union[(Name, List[Name])]=None) -> 'DataFrame': "\n Drop specified labels from columns.\n\n Remove rows and/or columns by specifying label names and corresponding ax...
4,914,197,836,815,870,000
Drop specified labels from columns. Remove rows and/or columns by specifying label names and corresponding axis, or by specifying directly index and/or column names. Drop rows of a MultiIndex DataFrame is not supported yet. Parameters ---------- labels : single label or list-like Column labels to drop. axis : {0 ...
python/pyspark/pandas/frame.py
drop
Flyangz/spark
python
def drop(self, labels: Optional[Union[(Name, List[Name])]]=None, axis: Optional[Axis]=0, index: Union[(Name, List[Name])]=None, columns: Union[(Name, List[Name])]=None) -> 'DataFrame': "\n Drop specified labels from columns.\n\n Remove rows and/or columns by specifying label names and corresponding ax...
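An illustrative `drop` sketch (labels are assumptions):
>>> psdf = ps.DataFrame({'x': [1, 2], 'y': [3, 4], 'z': [5, 6]})
>>> psdf.drop(columns=['y', 'z'])    # drop by column label
>>> psdf.drop(labels='y', axis=1)    # equivalent spelling using labels/axis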
def sort_values(self, by: Union[(Name, List[Name])], ascending: Union[(bool, List[bool])]=True, inplace: bool=False, na_position: str='last', ignore_index: bool=False) -> Optional['DataFrame']: "\n Sort by the values along either axis.\n\n Parameters\n ----------\n by : str or list of st...
7,202,430,524,788,683,000
Sort by the values along either axis. Parameters ---------- by : str or list of str ascending : bool or list of bool, default True Sort ascending vs. descending. Specify list for multiple sort orders. If this is a list of bools, must match the length of the by. inplace : bool, default False if Tru...
python/pyspark/pandas/frame.py
sort_values
Flyangz/spark
python
def sort_values(self, by: Union[(Name, List[Name])], ascending: Union[(bool, List[bool])]=True, inplace: bool=False, na_position: str='last', ignore_index: bool=False) -> Optional['DataFrame']: "\n Sort by the values along either axis.\n\n Parameters\n ----------\n by : str or list of st...
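A hypothetical `sort_values` sketch (assumes `ps` is `pyspark.pandas`; data is made up):
>>> psdf = ps.DataFrame({'col1': ['A', 'B', None], 'col2': [2, 9, 8]})
>>> psdf.sort_values(by='col2', ascending=False)                 # single key, descending
>>> psdf.sort_values(by=['col1', 'col2'], na_position='first')   # multiple keys, missing labels first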
def sort_index(self, axis: Axis=0, level: Optional[Union[(int, List[int])]]=None, ascending: bool=True, inplace: bool=False, kind: str=None, na_position: str='last') -> Optional['DataFrame']: "\n Sort object by labels (along an axis)\n\n Parameters\n ----------\n axis : index, columns to...
591,558,806,448,782,100
Sort object by labels (along an axis) Parameters ---------- axis : index, columns to direct sorting. Currently, only axis = 0 is supported. level : int or level name or list of ints or list of level names if not None, sort on values in specified index level(s) ascending : boolean, default True Sort ascending v...
python/pyspark/pandas/frame.py
sort_index
Flyangz/spark
python
def sort_index(self, axis: Axis=0, level: Optional[Union[(int, List[int])]]=None, ascending: bool=True, inplace: bool=False, kind: str=None, na_position: str='last') -> Optional['DataFrame']: "\n Sort object by labels (along an axis)\n\n Parameters\n ----------\n axis : index, columns to...
def swaplevel(self, i: Union[(int, Name)]=(- 2), j: Union[(int, Name)]=(- 1), axis: Axis=0) -> 'DataFrame': "\n Swap levels i and j in a MultiIndex on a particular axis.\n\n Parameters\n ----------\n i, j : int or str\n Levels of the indices to be swapped. Can pass level name ...
-51,800,121,682,277,130
Swap levels i and j in a MultiIndex on a particular axis. Parameters ---------- i, j : int or str Levels of the indices to be swapped. Can pass level name as string. axis : {0 or 'index', 1 or 'columns'}, default 0 The axis to swap levels on. 0 or 'index' for row-wise, 1 or 'columns' for column-wise. Retu...
python/pyspark/pandas/frame.py
swaplevel
Flyangz/spark
python
def swaplevel(self, i: Union[(int, Name)]=(- 2), j: Union[(int, Name)]=(- 1), axis: Axis=0) -> 'DataFrame': "\n Swap levels i and j in a MultiIndex on a particular axis.\n\n Parameters\n ----------\n i, j : int or str\n Levels of the indices to be swapped. Can pass level name ...
def swapaxes(self, i: Axis, j: Axis, copy: bool=True) -> 'DataFrame': "\n Interchange axes and swap values axes appropriately.\n\n .. note:: This method is based on an expensive operation due to the nature\n of big data. Internally it needs to generate each row for each value, and\n ...
6,023,020,263,360,316,000
Interchange axes and swap values axes appropriately. .. note:: This method is based on an expensive operation due to the nature of big data. Internally it needs to generate each row for each value, and then group twice - it is a huge operation. To prevent misusage, this method has the 'compute.max_rows' de...
python/pyspark/pandas/frame.py
swapaxes
Flyangz/spark
python
def swapaxes(self, i: Axis, j: Axis, copy: bool=True) -> 'DataFrame': "\n Interchange axes and swap values axes appropriately.\n\n .. note:: This method is based on an expensive operation due to the nature\n of big data. Internally it needs to generate each row for each value, and\n ...
def nlargest(self, n: int, columns: Union[(Name, List[Name])], keep: str='first') -> 'DataFrame': '\n Return the first `n` rows ordered by `columns` in descending order.\n\n Return the first `n` rows with the largest values in `columns`, in\n descending order. The columns that are not specified...
-1,321,435,559,041,933,600
Return the first `n` rows ordered by `columns` in descending order. Return the first `n` rows with the largest values in `columns`, in descending order. The columns that are not specified are returned as well, but not used for ordering. This method is equivalent to ``df.sort_values(columns, ascending=False).head(n)``...
python/pyspark/pandas/frame.py
nlargest
Flyangz/spark
python
def nlargest(self, n: int, columns: Union[(Name, List[Name])], keep: str='first') -> 'DataFrame': '\n Return the first `n` rows ordered by `columns` in descending order.\n\n Return the first `n` rows with the largest values in `columns`, in\n descending order. The columns that are not specified...
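A minimal `nlargest` sketch over made-up numbers:
>>> psdf = ps.DataFrame({'population': [5900, 6500, 434], 'GDP': [1937, 2583, 12]})
>>> psdf.nlargest(2, 'population')   # the two rows with the largest population; other columns come along unsorted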
def nsmallest(self, n: int, columns: Union[(Name, List[Name])], keep: str='first') -> 'DataFrame': '\n Return the first `n` rows ordered by `columns` in ascending order.\n\n Return the first `n` rows with the smallest values in `columns`, in\n ascending order. The columns that are not specified...
-4,372,649,593,661,523,500
Return the first `n` rows ordered by `columns` in ascending order. Return the first `n` rows with the smallest values in `columns`, in ascending order. The columns that are not specified are returned as well, but not used for ordering. This method is equivalent to ``df.sort_values(columns, ascending=True).head(n)``, ...
python/pyspark/pandas/frame.py
nsmallest
Flyangz/spark
python
def nsmallest(self, n: int, columns: Union[(Name, List[Name])], keep: str='first') -> 'DataFrame': '\n Return the first `n` rows ordered by `columns` in ascending order.\n\n Return the first `n` rows with the smallest values in `columns`, in\n ascending order. The columns that are not specified...
def isin(self, values: Union[(List, Dict)]) -> 'DataFrame': "\n Whether each element in the DataFrame is contained in values.\n\n Parameters\n ----------\n values : iterable or dict\n The sequence of values to test. If values is a dict,\n the keys must be the column n...
8,959,033,474,880,497,000
Whether each element in the DataFrame is contained in values. Parameters ---------- values : iterable or dict The sequence of values to test. If values is a dict, the keys must be the column names, which must match. Series and DataFrame are not supported. Returns ------- DataFrame DataFrame of booleans s...
python/pyspark/pandas/frame.py
isin
Flyangz/spark
python
def isin(self, values: Union[(List, Dict)]) -> 'DataFrame': "\n Whether each element in the DataFrame is contained in values.\n\n Parameters\n ----------\n values : iterable or dict\n The sequence of values to test. If values is a dict,\n the keys must be the column n...
@property def shape(self) -> Tuple[(int, int)]: "\n Return a tuple representing the dimensionality of the DataFrame.\n\n Examples\n --------\n >>> df = ps.DataFrame({'col1': [1, 2], 'col2': [3, 4]})\n >>> df.shape\n (2, 2)\n\n >>> df = ps.DataFrame({'col1': [1, 2], '...
7,635,316,900,167,971,000
Return a tuple representing the dimensionality of the DataFrame. Examples -------- >>> df = ps.DataFrame({'col1': [1, 2], 'col2': [3, 4]}) >>> df.shape (2, 2) >>> df = ps.DataFrame({'col1': [1, 2], 'col2': [3, 4], ... 'col3': [5, 6]}) >>> df.shape (2, 3)
python/pyspark/pandas/frame.py
shape
Flyangz/spark
python
@property def shape(self) -> Tuple[(int, int)]: "\n Return a tuple representing the dimensionality of the DataFrame.\n\n Examples\n --------\n >>> df = ps.DataFrame({'col1': [1, 2], 'col2': [3, 4]})\n >>> df.shape\n (2, 2)\n\n >>> df = ps.DataFrame({'col1': [1, 2], '...
def merge(self, right: 'DataFrame', how: str='inner', on: Optional[Union[(Name, List[Name])]]=None, left_on: Optional[Union[(Name, List[Name])]]=None, right_on: Optional[Union[(Name, List[Name])]]=None, left_index: bool=False, right_index: bool=False, suffixes: Tuple[(str, str)]=('_x', '_y')) -> 'DataFrame': "\n ...
-4,141,360,236,492,709,400
Merge DataFrame objects with a database-style join. The index of the resulting DataFrame will be one of the following: - 0...n if no index is used for merging - Index of the left DataFrame if merged only on the index of the right DataFrame - Index of the right DataFrame if merged only on the index of the l...
python/pyspark/pandas/frame.py
merge
Flyangz/spark
python
def merge(self, right: 'DataFrame', how: str='inner', on: Optional[Union[(Name, List[Name])]]=None, left_on: Optional[Union[(Name, List[Name])]]=None, right_on: Optional[Union[(Name, List[Name])]]=None, left_index: bool=False, right_index: bool=False, suffixes: Tuple[(str, str)]=('_x', '_y')) -> 'DataFrame': "\n ...
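An assumed `merge` example with two tiny frames named `left` and `right` (all names invented):
>>> left = ps.DataFrame({'key': ['k1', 'k2'], 'lval': [1, 2]})
>>> right = ps.DataFrame({'key': ['k1', 'k3'], 'rval': [3, 4]})
>>> left.merge(right, on='key', how='inner')   # keep only matching keys
>>> left.merge(right, on='key', how='left')    # keep every row from the left frame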
def join(self, right: 'DataFrame', on: Optional[Union[(Name, List[Name])]]=None, how: str='left', lsuffix: str='', rsuffix: str='') -> 'DataFrame': "\n Join columns of another DataFrame.\n\n Join columns with `right` DataFrame either on index or on a key column. Efficiently join\n multiple Data...
3,580,601,826,865,726,500
Join columns of another DataFrame. Join columns with `right` DataFrame either on index or on a key column. Efficiently join multiple DataFrame objects by index at once by passing a list. Parameters ---------- right: DataFrame, Series on: str, list of str, or array-like, optional Column or index level name(s) in t...
python/pyspark/pandas/frame.py
join
Flyangz/spark
python
def join(self, right: 'DataFrame', on: Optional[Union[(Name, List[Name])]]=None, how: str='left', lsuffix: str='', rsuffix: str='') -> 'DataFrame': "\n Join columns of another DataFrame.\n\n Join columns with `right` DataFrame either on index or on a key column. Efficiently join\n multiple DataFram...
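A sketch of `join` on overlapping column names (the frames are illustrative); suffixes disambiguate the shared column:
>>> left = ps.DataFrame({'A': ['A0', 'A1'], 'B': ['B0', 'B1']})
>>> right = ps.DataFrame({'B': ['B2', 'B3']})
>>> left.join(right, lsuffix='_left', rsuffix='_right')   # index-aligned left join; 'B' becomes B_left / B_right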
def combine_first(self, other: 'DataFrame') -> 'DataFrame': '\n Update null elements with value in the same location in `other`.\n\n Combine two DataFrame objects by filling null values in one DataFrame\n with non-null values from other DataFrame. The row and column indexes\n of the resu...
-5,688,754,360,827,215,000
Update null elements with value in the same location in `other`. Combine two DataFrame objects by filling null values in one DataFrame with non-null values from other DataFrame. The row and column indexes of the resulting DataFrame will be the union of the two. .. versionadded:: 3.3.0 Parameters ---------- other : D...
python/pyspark/pandas/frame.py
combine_first
Flyangz/spark
python
def combine_first(self, other: 'DataFrame') -> 'DataFrame': '\n Update null elements with value in the same location in `other`.\n\n Combine two DataFrame objects by filling null values in one DataFrame\n with non-null values from other DataFrame. The row and column indexes\n of the resu...
def append(self, other: 'DataFrame', ignore_index: bool=False, verify_integrity: bool=False, sort: bool=False) -> 'DataFrame': "\n Append rows of other to the end of caller, returning a new object.\n\n Columns in other that are not in the caller are added as new columns.\n\n Parameters\n ...
-4,467,937,677,897,888,000
Append rows of other to the end of caller, returning a new object. Columns in other that are not in the caller are added as new columns. Parameters ---------- other : DataFrame or Series/dict-like object, or list of these The data to append. ignore_index : boolean, default False If True, do not use the index...
python/pyspark/pandas/frame.py
append
Flyangz/spark
python
def append(self, other: 'DataFrame', ignore_index: bool=False, verify_integrity: bool=False, sort: bool=False) -> 'DataFrame': "\n Append rows of other to the end of caller, returning a new object.\n\n Columns in other that are not in the caller are added as new columns.\n\n Parameters\n ...
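An illustrative `append` sketch (data invented):
>>> df1 = ps.DataFrame([[1, 2], [3, 4]], columns=list('AB'))
>>> df2 = ps.DataFrame([[5, 6]], columns=list('AB'))
>>> df1.append(df2)                      # rows of df2 stacked under df1, original indexes kept
>>> df1.append(df2, ignore_index=True)   # renumber the resulting index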
def update(self, other: 'DataFrame', join: str='left', overwrite: bool=True) -> None: "\n Modify in place using non-NA values from another DataFrame.\n Aligns on indices. There is no return value.\n\n Parameters\n ----------\n other : DataFrame, or Series\n join : 'left', d...
-386,408,235,323,198,900
Modify in place using non-NA values from another DataFrame. Aligns on indices. There is no return value. Parameters ---------- other : DataFrame, or Series join : 'left', default 'left' Only left join is implemented, keeping the index and columns of the original object. overwrite : bool, default True How to ha...
python/pyspark/pandas/frame.py
update
Flyangz/spark
python
def update(self, other: 'DataFrame', join: str='left', overwrite: bool=True) -> None: "\n Modify in place using non-NA values from another DataFrame.\n Aligns on indices. There is no return value.\n\n Parameters\n ----------\n other : DataFrame, or Series\n join : 'left', d...
def cov(self, min_periods: Optional[int]=None) -> 'DataFrame': "\n Compute pairwise covariance of columns, excluding NA/null values.\n\n Compute the pairwise covariance among the series of a DataFrame.\n The returned data frame is the `covariance matrix\n <https://en.wikipedia.org/wiki/C...
2,049,463,742,787,191,000
Compute pairwise covariance of columns, excluding NA/null values. Compute the pairwise covariance among the series of a DataFrame. The returned data frame is the `covariance matrix <https://en.wikipedia.org/wiki/Covariance_matrix>`__ of the columns of the DataFrame. Both NA and null values are automatically excluded ...
python/pyspark/pandas/frame.py
cov
Flyangz/spark
python
def cov(self, min_periods: Optional[int]=None) -> 'DataFrame': "\n Compute pairwise covariance of columns, excluding NA/null values.\n\n Compute the pairwise covariance among the series of a DataFrame.\n The returned data frame is the `covariance matrix\n <https://en.wikipedia.org/wiki/C...
def sample(self, n: Optional[int]=None, frac: Optional[float]=None, replace: bool=False, random_state: Optional[int]=None) -> 'DataFrame': "\n Return a random sample of items from an axis of object.\n\n Please call this function using named argument by specifying the ``frac`` argument.\n\n You ...
-4,820,223,393,553,322,000
Return a random sample of items from an axis of object. Please call this function using named argument by specifying the ``frac`` argument. You can use `random_state` for reproducibility. However, note that different from pandas, specifying a seed in pandas-on-Spark/Spark does not guarantee the sampled rows will be f...
python/pyspark/pandas/frame.py
sample
Flyangz/spark
python
def sample(self, n: Optional[int]=None, frac: Optional[float]=None, replace: bool=False, random_state: Optional[int]=None) -> 'DataFrame': "\n Return a random sample of items from an axis of object.\n\n Please call this function using named argument by specifying the ``frac`` argument.\n\n You ...
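A minimal `sample` sketch; per the docstring above, the fraction is passed by name (data is made up):
>>> psdf = ps.DataFrame({'num_legs': [2, 4, 8, 0]})
>>> psdf.sample(frac=0.5)                   # roughly half of the rows
>>> psdf.sample(frac=0.5, random_state=1)   # seeded, though the sampled rows may differ from pandas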
def astype(self, dtype: Union[(str, Dtype, Dict[(Name, Union[(str, Dtype)])])]) -> 'DataFrame': "\n Cast a pandas-on-Spark object to a specified dtype ``dtype``.\n\n Parameters\n ----------\n dtype : data type, or dict of column name -> data type\n Use a numpy.dtype or Python ...
-1,261,696,734,745,360,000
Cast a pandas-on-Spark object to a specified dtype ``dtype``. Parameters ---------- dtype : data type, or dict of column name -> data type Use a numpy.dtype or Python type to cast entire pandas-on-Spark object to the same type. Alternatively, use {col: dtype, ...}, where col is a column label and dtype is ...
python/pyspark/pandas/frame.py
astype
Flyangz/spark
python
def astype(self, dtype: Union[(str, Dtype, Dict[(Name, Union[(str, Dtype)])])]) -> 'DataFrame': "\n Cast a pandas-on-Spark object to a specified dtype ``dtype``.\n\n Parameters\n ----------\n dtype : data type, or dict of column name -> data type\n Use a numpy.dtype or Python ...
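A short `astype` sketch with assumed column names and dtypes:
>>> psdf = ps.DataFrame({'a': [1, 2, 3], 'b': [1, 2, 3]})
>>> psdf.astype('float64')                        # cast every column
>>> psdf.astype({'a': 'float64', 'b': 'int64'})   # per-column casts via a dict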
def add_prefix(self, prefix: str) -> 'DataFrame': "\n Prefix labels with string `prefix`.\n\n For Series, the row labels are prefixed.\n For DataFrame, the column labels are prefixed.\n\n Parameters\n ----------\n prefix : str\n The string to add before each label...
-6,918,965,661,863,779,000
Prefix labels with string `prefix`. For Series, the row labels are prefixed. For DataFrame, the column labels are prefixed. Parameters ---------- prefix : str The string to add before each label. Returns ------- DataFrame New DataFrame with updated labels. See Also -------- Series.add_prefix: Prefix row label...
python/pyspark/pandas/frame.py
add_prefix
Flyangz/spark
python
def add_prefix(self, prefix: str) -> 'DataFrame': "\n Prefix labels with string `prefix`.\n\n For Series, the row labels are prefixed.\n For DataFrame, the column labels are prefixed.\n\n Parameters\n ----------\n prefix : str\n The string to add before each label...
def add_suffix(self, suffix: str) -> 'DataFrame': "\n Suffix labels with string `suffix`.\n\n For Series, the row labels are suffixed.\n For DataFrame, the column labels are suffixed.\n\n Parameters\n ----------\n suffix : str\n The string to add after each label...
4,785,632,729,825,555,000
Suffix labels with string `suffix`. For Series, the row labels are suffixed. For DataFrame, the column labels are suffixed. Parameters ---------- suffix : str The string to add after each label. Returns ------- DataFrame New DataFrame with updated labels. See Also -------- Series.add_prefix: Prefix row label...
python/pyspark/pandas/frame.py
add_suffix
Flyangz/spark
python
def add_suffix(self, suffix: str) -> 'DataFrame': "\n Suffix labels with string `suffix`.\n\n For Series, the row labels are suffixed.\n For DataFrame, the column labels are suffixed.\n\n Parameters\n ----------\n suffix : str\n The string to add after each label...
def describe(self, percentiles: Optional[List[float]]=None) -> 'DataFrame': "\n Generate descriptive statistics that summarize the central tendency,\n dispersion and shape of a dataset's distribution, excluding\n ``NaN`` values.\n\n Analyzes both numeric and object series, as well\n ...
-3,598,272,818,452,397,000
Generate descriptive statistics that summarize the central tendency, dispersion and shape of a dataset's distribution, excluding ``NaN`` values. Analyzes both numeric and object series, as well as ``DataFrame`` column sets of mixed data types. The output will vary depending on what is provided. Refer to the notes belo...
python/pyspark/pandas/frame.py
describe
Flyangz/spark
python
def describe(self, percentiles: Optional[List[float]]=None) -> 'DataFrame': "\n Generate descriptive statistics that summarize the central tendency,\n dispersion and shape of a dataset's distribution, excluding\n ``NaN`` values.\n\n Analyzes both numeric and object series, as well\n ...
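An illustrative `describe` sketch on numeric columns (values invented; assumes `ps` is `pyspark.pandas`):
>>> psdf = ps.DataFrame({'a': [1, 2, 3], 'b': [4.0, 5.0, 6.0]})
>>> psdf.describe()                                # count, mean, std, min, quartiles, max per numeric column
>>> psdf.describe(percentiles=[0.25, 0.5, 0.75])   # explicit percentiles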