title stringlengths 1 185 | diff stringlengths 0 32.2M | body stringlengths 0 123k ⌀ | url stringlengths 57 58 | created_at stringlengths 20 20 | closed_at stringlengths 20 20 | merged_at stringlengths 20 20 ⌀ | updated_at stringlengths 20 20 |
|---|---|---|---|---|---|---|---|
REF: IntervalIndex.equals defer to IntervalArray.equals | diff --git a/pandas/core/arrays/_mixins.py b/pandas/core/arrays/_mixins.py
index 4d13a18c8ef0b..95a003efbe1d0 100644
--- a/pandas/core/arrays/_mixins.py
+++ b/pandas/core/arrays/_mixins.py
@@ -8,7 +8,9 @@
from pandas.util._decorators import cache_readonly, doc
from pandas.util._validators import validate_fillna_kwargs
+from pandas.core.dtypes.common import is_dtype_equal
from pandas.core.dtypes.inference import is_array_like
+from pandas.core.dtypes.missing import array_equivalent
from pandas.core import missing
from pandas.core.algorithms import take, unique
@@ -115,6 +117,13 @@ def T(self: _T) -> _T:
# ------------------------------------------------------------------------
+ def equals(self, other) -> bool:
+ if type(self) is not type(other):
+ return False
+ if not is_dtype_equal(self.dtype, other.dtype):
+ return False
+ return bool(array_equivalent(self._ndarray, other._ndarray))
+
def _values_for_argsort(self):
return self._ndarray
diff --git a/pandas/core/arrays/interval.py b/pandas/core/arrays/interval.py
index 5105b5b9cc57b..555d6e32c7b9d 100644
--- a/pandas/core/arrays/interval.py
+++ b/pandas/core/arrays/interval.py
@@ -696,6 +696,16 @@ def astype(self, dtype, copy=True):
msg = f"Cannot cast {type(self).__name__} to dtype {dtype}"
raise TypeError(msg) from err
+ def equals(self, other) -> bool:
+ if type(self) != type(other):
+ return False
+
+ return bool(
+ self.closed == other.closed
+ and self.left.equals(other.left)
+ and self.right.equals(other.right)
+ )
+
@classmethod
def _concat_same_type(cls, to_concat):
"""
diff --git a/pandas/core/indexes/interval.py b/pandas/core/indexes/interval.py
index 8855d987af745..fc77876af41f1 100644
--- a/pandas/core/indexes/interval.py
+++ b/pandas/core/indexes/interval.py
@@ -983,19 +983,10 @@ def equals(self, other: object) -> bool:
if self.is_(other):
return True
- # if we can coerce to an IntervalIndex then we can compare
if not isinstance(other, IntervalIndex):
- if not is_interval_dtype(other):
- return False
- other = Index(other)
- if not isinstance(other, IntervalIndex):
- return False
+ return False
- return (
- self.left.equals(other.left)
- and self.right.equals(other.right)
- and self.closed == other.closed
- )
+ return self._data.equals(other._data)
# --------------------------------------------------------------------
# Set Operations
| - [ ] closes #xxxx
- [ ] tests added / passed
- [ ] passes `black pandas`
- [ ] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [ ] whatsnew entry
Small behavior change in that `interval_index.equals(series[interval])` will now return False, matching other Index subclasses. | https://api.github.com/repos/pandas-dev/pandas/pulls/36871 | 2020-10-04T23:34:49Z | 2020-10-06T22:52:57Z | 2020-10-06T22:52:57Z | 2020-10-06T23:10:30Z |
TST: xfailed arithmetic tests | diff --git a/doc/source/whatsnew/v1.2.0.rst b/doc/source/whatsnew/v1.2.0.rst
index a269580bc4453..f0840ac3d2aea 100644
--- a/doc/source/whatsnew/v1.2.0.rst
+++ b/doc/source/whatsnew/v1.2.0.rst
@@ -333,6 +333,7 @@ Numeric
- Bug in :class:`Series` where two :class:`Series` each have a :class:`DatetimeIndex` with different timezones having those indexes incorrectly changed when performing arithmetic operations (:issue:`33671`)
- Bug in :meth:`pd._testing.assert_almost_equal` was incorrect for complex numeric types (:issue:`28235`)
- Bug in :meth:`DataFrame.__rmatmul__` error handling reporting transposed shapes (:issue:`21581`)
+- Bug in :class:`IntegerArray` multiplication with ``timedelta`` and ``np.timedelta64`` objects (:issue:`36870`)
Conversion
^^^^^^^^^^
diff --git a/pandas/core/arrays/integer.py b/pandas/core/arrays/integer.py
index af521a8efacc7..354a519e23df6 100644
--- a/pandas/core/arrays/integer.py
+++ b/pandas/core/arrays/integer.py
@@ -1,10 +1,11 @@
+from datetime import timedelta
import numbers
from typing import TYPE_CHECKING, Dict, List, Optional, Tuple, Type, Union
import warnings
import numpy as np
-from pandas._libs import lib, missing as libmissing
+from pandas._libs import Timedelta, iNaT, lib, missing as libmissing
from pandas._typing import ArrayLike, DtypeObj
from pandas.compat import set_function_name
from pandas.compat.numpy import function as nv
@@ -581,6 +582,12 @@ def _maybe_mask_result(self, result, mask, other, op_name: str):
result[mask] = np.nan
return result
+ if result.dtype == "timedelta64[ns]":
+ from pandas.core.arrays import TimedeltaArray
+
+ result[mask] = iNaT
+ return TimedeltaArray._simple_new(result)
+
return type(self)(result, mask, copy=False)
@classmethod
@@ -609,6 +616,9 @@ def integer_arithmetic_method(self, other):
if not (is_float_dtype(other) or is_integer_dtype(other)):
raise TypeError("can only perform ops with numeric values")
+ elif isinstance(other, (timedelta, np.timedelta64)):
+ other = Timedelta(other)
+
else:
if not (is_float(other) or is_integer(other) or other is libmissing.NA):
raise TypeError("can only perform ops with numeric values")
diff --git a/pandas/core/arrays/numpy_.py b/pandas/core/arrays/numpy_.py
index 05139783456b9..c56cccf2e4a93 100644
--- a/pandas/core/arrays/numpy_.py
+++ b/pandas/core/arrays/numpy_.py
@@ -218,6 +218,16 @@ def __array_ufunc__(self, ufunc, method: str, *inputs, **kwargs):
if not isinstance(x, self._HANDLED_TYPES + (PandasArray,)):
return NotImplemented
+ if ufunc not in [np.logical_or, np.bitwise_or, np.bitwise_xor]:
+ # For binary ops, use our custom dunder methods
+ # We haven't implemented logical dunder funcs, so exclude these
+ # to avoid RecursionError
+ result = ops.maybe_dispatch_ufunc_to_dunder_op(
+ self, ufunc, method, *inputs, **kwargs
+ )
+ if result is not NotImplemented:
+ return result
+
# Defer to the implementation of the ufunc on unwrapped values.
inputs = tuple(x._ndarray if isinstance(x, PandasArray) else x for x in inputs)
if out:
@@ -377,19 +387,28 @@ def arithmetic_method(self, other):
if isinstance(a, np.ndarray):
# for e.g. op vs TimedeltaArray, we may already
# have an ExtensionArray, in which case we do not wrap
- return cls(a), cls(b)
+ return self._wrap_ndarray_result(a), self._wrap_ndarray_result(b)
return a, b
if isinstance(result, np.ndarray):
# for e.g. multiplication vs TimedeltaArray, we may already
# have an ExtensionArray, in which case we do not wrap
- return cls(result)
+ return self._wrap_ndarray_result(result)
return result
return compat.set_function_name(arithmetic_method, f"__{op.__name__}__", cls)
_create_comparison_method = _create_arithmetic_method
+ def _wrap_ndarray_result(self, result: np.ndarray):
+ # If we have timedelta64[ns] result, return a TimedeltaArray instead
+ # of a PandasArray
+ if result.dtype == "timedelta64[ns]":
+ from pandas.core.arrays import TimedeltaArray
+
+ return TimedeltaArray._simple_new(result)
+ return type(self)(result)
+
# ------------------------------------------------------------------------
# String methods interface
_str_na_value = np.nan
diff --git a/pandas/tests/arithmetic/common.py b/pandas/tests/arithmetic/common.py
index a663c2f3a0175..e26bb513838a5 100644
--- a/pandas/tests/arithmetic/common.py
+++ b/pandas/tests/arithmetic/common.py
@@ -6,6 +6,7 @@
from pandas import DataFrame, Index, Series, array as pd_array
import pandas._testing as tm
+from pandas.core.arrays import PandasArray
def assert_invalid_addsub_type(left, right, msg=None):
@@ -56,18 +57,25 @@ def assert_invalid_comparison(left, right, box):
# Note: not quite the same as how we do this for tm.box_expected
xbox = box if box not in [Index, pd_array] else np.array
- result = left == right
+ def xbox2(x):
+ # Eventually we'd like this to be tighter, but for now we'll
+ # just exclude PandasArray[bool]
+ if isinstance(x, PandasArray):
+ return x._ndarray
+ return x
+
+ result = xbox2(left == right)
expected = xbox(np.zeros(result.shape, dtype=np.bool_))
tm.assert_equal(result, expected)
- result = right == left
+ result = xbox2(right == left)
tm.assert_equal(result, expected)
- result = left != right
+ result = xbox2(left != right)
tm.assert_equal(result, ~expected)
- result = right != left
+ result = xbox2(right != left)
tm.assert_equal(result, ~expected)
msg = "|".join(
diff --git a/pandas/tests/arithmetic/conftest.py b/pandas/tests/arithmetic/conftest.py
index 6286711ac6113..3f161b46b34b1 100644
--- a/pandas/tests/arithmetic/conftest.py
+++ b/pandas/tests/arithmetic/conftest.py
@@ -221,15 +221,6 @@ def mismatched_freq(request):
# ------------------------------------------------------------------
-@pytest.fixture(params=[pd.Index, pd.Series, pd.DataFrame], ids=id_func)
-def box(request):
- """
- Several array-like containers that should have effectively identical
- behavior with respect to arithmetic operations.
- """
- return request.param
-
-
@pytest.fixture(params=[pd.Index, pd.Series, pd.DataFrame, pd.array], ids=id_func)
def box_with_array(request):
"""
diff --git a/pandas/tests/arithmetic/test_datetime64.py b/pandas/tests/arithmetic/test_datetime64.py
index e9dc83d106651..c0ae36017f47a 100644
--- a/pandas/tests/arithmetic/test_datetime64.py
+++ b/pandas/tests/arithmetic/test_datetime64.py
@@ -205,8 +205,6 @@ def test_nat_comparisons(self, dtype, index_or_series, reverse, pair):
def test_comparison_invalid(self, tz_naive_fixture, box_with_array):
# GH#4968
# invalid date/int comparisons
- if box_with_array is pd.array:
- pytest.xfail("assert_invalid_comparison doesnt handle BooleanArray yet")
tz = tz_naive_fixture
ser = Series(range(5))
ser2 = Series(pd.date_range("20010101", periods=5, tz=tz))
@@ -226,32 +224,36 @@ def test_comparison_invalid(self, tz_naive_fixture, box_with_array):
)
@pytest.mark.parametrize("dtype", [None, object])
def test_nat_comparisons_scalar(self, dtype, data, box_with_array):
+ box = box_with_array
if box_with_array is tm.to_array and dtype is object:
# dont bother testing ndarray comparison methods as this fails
# on older numpys (since they check object identity)
return
- if box_with_array is pd.array and dtype is object:
- pytest.xfail("reversed comparisons give BooleanArray, not ndarray")
- xbox = (
- box_with_array if box_with_array not in [pd.Index, pd.array] else np.ndarray
- )
+ xbox = box if box not in [pd.Index, pd.array] else np.ndarray
left = Series(data, dtype=dtype)
- left = tm.box_expected(left, box_with_array)
+ left = tm.box_expected(left, box)
expected = [False, False, False]
expected = tm.box_expected(expected, xbox)
+ if box is pd.array and dtype is object:
+ expected = pd.array(expected, dtype="bool")
+
tm.assert_equal(left == NaT, expected)
tm.assert_equal(NaT == left, expected)
expected = [True, True, True]
expected = tm.box_expected(expected, xbox)
+ if box is pd.array and dtype is object:
+ expected = pd.array(expected, dtype="bool")
tm.assert_equal(left != NaT, expected)
tm.assert_equal(NaT != left, expected)
expected = [False, False, False]
expected = tm.box_expected(expected, xbox)
+ if box is pd.array and dtype is object:
+ expected = pd.array(expected, dtype="bool")
tm.assert_equal(left < NaT, expected)
tm.assert_equal(NaT > left, expected)
tm.assert_equal(left <= NaT, expected)
diff --git a/pandas/tests/arithmetic/test_numeric.py b/pandas/tests/arithmetic/test_numeric.py
index df98b43e11f4a..d6ece84d0a329 100644
--- a/pandas/tests/arithmetic/test_numeric.py
+++ b/pandas/tests/arithmetic/test_numeric.py
@@ -187,10 +187,6 @@ def test_ops_series(self):
def test_numeric_arr_mul_tdscalar(self, scalar_td, numeric_idx, box_with_array):
# GH#19333
box = box_with_array
- if box is pd.array:
- pytest.xfail(
- "we get a PandasArray[timedelta64[ns]] instead of TimedeltaArray"
- )
index = numeric_idx
expected = pd.TimedeltaIndex([pd.Timedelta(days=n) for n in range(5)])
@@ -214,8 +210,6 @@ def test_numeric_arr_mul_tdscalar(self, scalar_td, numeric_idx, box_with_array):
)
def test_numeric_arr_mul_tdscalar_numexpr_path(self, scalar_td, box_with_array):
box = box_with_array
- if box is pd.array:
- pytest.xfail("IntegerArray.__mul__ doesnt handle timedeltas")
arr = np.arange(2 * 10 ** 4).astype(np.int64)
obj = tm.box_expected(arr, box, transpose=False)
@@ -231,8 +225,6 @@ def test_numeric_arr_mul_tdscalar_numexpr_path(self, scalar_td, box_with_array):
def test_numeric_arr_rdiv_tdscalar(self, three_days, numeric_idx, box_with_array):
box = box_with_array
- if box is pd.array:
- pytest.xfail("We get PandasArray[td64] instead of TimedeltaArray")
index = numeric_idx[1:3]
@@ -263,8 +255,6 @@ def test_numeric_arr_rdiv_tdscalar(self, three_days, numeric_idx, box_with_array
)
def test_add_sub_timedeltalike_invalid(self, numeric_idx, other, box_with_array):
box = box_with_array
- if box is pd.array:
- pytest.xfail("PandasArray[int].__add__ doesnt raise on td64")
left = tm.box_expected(numeric_idx, box)
msg = (
| - [ ] closes #xxxx
- [ ] tests added / passed
- [ ] passes `black pandas`
- [ ] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [ ] whatsnew entry
| https://api.github.com/repos/pandas-dev/pandas/pulls/36870 | 2020-10-04T22:56:03Z | 2020-10-06T22:06:03Z | 2020-10-06T22:06:03Z | 2020-10-06T22:07:35Z |
DOC: Distinguish between different types of boolean indexing #10492 | diff --git a/doc/source/user_guide/indexing.rst b/doc/source/user_guide/indexing.rst
index 530fdfba7d12c..2bc7e13e39ec4 100644
--- a/doc/source/user_guide/indexing.rst
+++ b/doc/source/user_guide/indexing.rst
@@ -933,6 +933,24 @@ and :ref:`Advanced Indexing <advanced>` you may select along more than one axis
df2.loc[criterion & (df2['b'] == 'x'), 'b':'c']
+.. warning::
+
+ ``iloc`` supports two kinds of boolean indexing. If the indexer is a boolean ``Series``,
+ an error will be raised. For instance, in the following example, ``df.iloc[s.values, 1]`` is ok.
+ The boolean indexer is an array. But ``df.iloc[s, 1]`` would raise ``ValueError``.
+
+ .. ipython:: python
+
+ df = pd.DataFrame([[1, 2], [3, 4], [5, 6]],
+ index=list('abc'),
+ columns=['A', 'B'])
+ s = (df['A'] > 2)
+ s
+
+ df.loc[s, 'B']
+
+ df.iloc[s.values, 1]
+
.. _indexing.basics.indexing_isin:
Indexing with isin
| - [x] closes #10492
- [x] tests added / passed
- [x] passes `black pandas`
- [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
| https://api.github.com/repos/pandas-dev/pandas/pulls/36869 | 2020-10-04T21:00:55Z | 2020-10-08T01:23:14Z | 2020-10-08T01:23:14Z | 2020-10-15T06:09:22Z |
BUG: df.replace over pd.Period columns (#34871) | diff --git a/doc/source/whatsnew/v1.2.0.rst b/doc/source/whatsnew/v1.2.0.rst
index 28f7df98cb86b..d56f3c978384c 100644
--- a/doc/source/whatsnew/v1.2.0.rst
+++ b/doc/source/whatsnew/v1.2.0.rst
@@ -562,6 +562,8 @@ Strings
Interval
^^^^^^^^
+
+- Bug in :meth:`DataFrame.replace` and :meth:`Series.replace` where :class:`Interval` dtypes would be converted to object dtypes (:issue:`34871`)
- Bug in :meth:`IntervalIndex.take` with negative indices and ``fill_value=None`` (:issue:`37330`)
-
-
@@ -626,6 +628,11 @@ I/O
- :meth:`to_excel` and :meth:`to_markdown` support writing to fsspec URLs such as S3 and Google Cloud Storage (:issue:`33987`)
- Bug in :meth:`read_fw` was not skipping blank lines (even with ``skip_blank_lines=True``) (:issue:`37758`)
+Period
+^^^^^^
+
+- Bug in :meth:`DataFrame.replace` and :meth:`Series.replace` where :class:`Period` dtypes would be converted to object dtypes (:issue:`34871`)
+
Plotting
^^^^^^^^
diff --git a/pandas/core/internals/blocks.py b/pandas/core/internals/blocks.py
index 967e218078a28..f1759fbda0b8c 100644
--- a/pandas/core/internals/blocks.py
+++ b/pandas/core/internals/blocks.py
@@ -2041,6 +2041,16 @@ class ObjectValuesExtensionBlock(ExtensionBlock):
def external_values(self):
return self.values.astype(object)
+ def _can_hold_element(self, element: Any) -> bool:
+ if is_valid_nat_for_dtype(element, self.dtype):
+ return True
+ if isinstance(element, list) and len(element) == 0:
+ return True
+ tipo = maybe_infer_dtype_type(element)
+ if tipo is not None:
+ return issubclass(tipo.type, self.dtype.type)
+ return isinstance(element, self.dtype.type)
+
class NumericBlock(Block):
__slots__ = ()
diff --git a/pandas/tests/frame/methods/test_replace.py b/pandas/tests/frame/methods/test_replace.py
index 8f3dcc96ddc3d..8e59dd959ab57 100644
--- a/pandas/tests/frame/methods/test_replace.py
+++ b/pandas/tests/frame/methods/test_replace.py
@@ -1523,12 +1523,10 @@ def test_replace_with_duplicate_columns(self, replacement):
tm.assert_frame_equal(result, expected)
- def test_replace_period_ignore_float(self, frame_or_series):
- """
- Regression test for GH#34871: if df.replace(1.0, 0.0) is called on a df
- with a Period column the old, faulty behavior is to raise TypeError.
- """
- obj = DataFrame({"Per": [pd.Period("2020-01")] * 3})
+ @pytest.mark.parametrize("value", [pd.Period("2020-01"), pd.Interval(0, 5)])
+ def test_replace_ea_ignore_float(self, frame_or_series, value):
+ # GH#34871
+ obj = DataFrame({"Per": [value] * 3})
if frame_or_series is not DataFrame:
obj = obj["Per"]
| - [x] closes #34871
- [x] tests added / passed
- [x] passes `black pandas`
- [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [x] whatsnew entry
This commit ensures that PeriodArrays return False for _can_hold_element for any element that is not a pd.Period. This prevents upstream code from casting the dtype to object. Also un-xfail test written in #34904 | https://api.github.com/repos/pandas-dev/pandas/pulls/36867 | 2020-10-04T19:01:03Z | 2020-11-17T01:17:29Z | 2020-11-17T01:17:29Z | 2020-11-17T01:31:42Z |
TST: insert 'match' to bare pytest raises in pandas/tests/tseries/off… | diff --git a/pandas/tests/tseries/offsets/test_offsets.py b/pandas/tests/tseries/offsets/test_offsets.py
index 35fef0637dc76..6a87c05384689 100644
--- a/pandas/tests/tseries/offsets/test_offsets.py
+++ b/pandas/tests/tseries/offsets/test_offsets.py
@@ -266,9 +266,10 @@ class TestCommon(Base):
def test_immutable(self, offset_types):
# GH#21341 check that __setattr__ raises
offset = self._get_offset(offset_types)
- with pytest.raises(AttributeError):
+ msg = "objects is not writable|DateOffset objects are immutable"
+ with pytest.raises(AttributeError, match=msg):
offset.normalize = True
- with pytest.raises(AttributeError):
+ with pytest.raises(AttributeError, match=msg):
offset.n = 91
def test_return_type(self, offset_types):
@@ -2328,11 +2329,14 @@ def setup_method(self, method):
def test_constructor_errors(self):
from datetime import time as dt_time
- with pytest.raises(ValueError):
+ msg = "time data must be specified only with hour and minute"
+ with pytest.raises(ValueError, match=msg):
CustomBusinessHour(start=dt_time(11, 0, 5))
- with pytest.raises(ValueError):
+ msg = "time data must match '%H:%M' format"
+ with pytest.raises(ValueError, match=msg):
CustomBusinessHour(start="AAA")
- with pytest.raises(ValueError):
+ msg = "time data must match '%H:%M' format"
+ with pytest.raises(ValueError, match=msg):
CustomBusinessHour(start="14:00:05")
def test_different_normalize_equals(self):
@@ -3195,7 +3199,7 @@ def test_repr(self):
assert repr(Week(n=-2, weekday=0)) == "<-2 * Weeks: weekday=0>"
def test_corner(self):
- with pytest.raises(ValueError):
+ with pytest.raises(ValueError, match="Day must be"):
Week(weekday=7)
with pytest.raises(ValueError, match="Day must be"):
@@ -4315,7 +4319,8 @@ def test_valid_month_attributes(kwd, month_classes):
# GH#18226
cls = month_classes
# check that we cannot create e.g. MonthEnd(weeks=3)
- with pytest.raises(TypeError):
+ msg = rf"__init__\(\) got an unexpected keyword argument '{kwd}'"
+ with pytest.raises(TypeError, match=msg):
cls(**{kwd: 3})
@@ -4338,24 +4343,25 @@ def test_valid_tick_attributes(kwd, tick_classes):
# GH#18226
cls = tick_classes
# check that we cannot create e.g. Hour(weeks=3)
- with pytest.raises(TypeError):
+ msg = rf"__init__\(\) got an unexpected keyword argument '{kwd}'"
+ with pytest.raises(TypeError, match=msg):
cls(**{kwd: 3})
def test_validate_n_error():
- with pytest.raises(TypeError):
+ with pytest.raises(TypeError, match="argument must be an integer"):
DateOffset(n="Doh!")
- with pytest.raises(TypeError):
+ with pytest.raises(TypeError, match="argument must be an integer"):
MonthBegin(n=timedelta(1))
- with pytest.raises(TypeError):
+ with pytest.raises(TypeError, match="argument must be an integer"):
BDay(n=np.array([1, 2], dtype=np.int64))
def test_require_integers(offset_types):
cls = offset_types
- with pytest.raises(ValueError):
+ with pytest.raises(ValueError, match="argument must be an integer"):
cls(n=1.5)
@@ -4363,7 +4369,8 @@ def test_tick_normalize_raises(tick_classes):
# check that trying to create a Tick object with normalize=True raises
# GH#21427
cls = tick_classes
- with pytest.raises(ValueError):
+ msg = "Tick offset with `normalize=True` are not allowed."
+ with pytest.raises(ValueError, match=msg):
cls(n=3, normalize=True)
| …sets/test_offsets.py
- [ ] ref https://github.com/pandas-dev/pandas/issues/30999
- [ ] tests added / passed
- [ ] passes `black pandas`
- [ ] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [ ] whatsnew entry
| https://api.github.com/repos/pandas-dev/pandas/pulls/36865 | 2020-10-04T18:27:10Z | 2020-10-11T19:21:27Z | 2020-10-11T19:21:27Z | 2020-10-11T19:21:32Z |
CI: Update error message for np_dev | diff --git a/pandas/tests/series/indexing/test_indexing.py b/pandas/tests/series/indexing/test_indexing.py
index 1fafdf00393e1..fbdac2bb2d8e8 100644
--- a/pandas/tests/series/indexing/test_indexing.py
+++ b/pandas/tests/series/indexing/test_indexing.py
@@ -367,14 +367,17 @@ def test_2d_to_1d_assignment_raises():
x = np.random.randn(2, 2)
y = pd.Series(range(2))
- msg = (
- r"shape mismatch: value array of shape \(2,2\) could not be "
- r"broadcast to indexing result of shape \(2,\)"
+ msg = "|".join(
+ [
+ r"shape mismatch: value array of shape \(2,2\) could not be "
+ r"broadcast to indexing result of shape \(2,\)",
+ r"cannot reshape array of size 4 into shape \(2,\)",
+ ]
)
with pytest.raises(ValueError, match=msg):
y.loc[range(2)] = x
- msg = r"could not broadcast input array from shape \(2,2\) into shape \(2\)"
+ msg = r"could not broadcast input array from shape \(2,2\) into shape \(2,?\)"
with pytest.raises(ValueError, match=msg):
y.loc[:] = x
| https://dev.azure.com/pandas-dev/pandas/_build/results?buildId=44153&view=logs&jobId=eab14f69-13b6-5db7-daeb-7b778629410b&j=eab14f69-13b6-5db7-daeb-7b778629410b&t=ce687173-08c6-5301-838d-71b2dda24510 | https://api.github.com/repos/pandas-dev/pandas/pulls/36864 | 2020-10-04T18:20:23Z | 2020-10-04T22:41:02Z | 2020-10-04T22:41:02Z | 2020-10-05T10:47:02Z |
CI: Show ipython directive errors | diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml
index 149acef72db26..46de8d466dd11 100644
--- a/.github/workflows/ci.yml
+++ b/.github/workflows/ci.yml
@@ -125,7 +125,7 @@ jobs:
# This can be removed when the ipython directive fails when there are errors,
# including the `tee sphinx.log` in te previous step (https://github.com/ipython/ipython/issues/11547)
- name: Check ipython directive errors
- run: "! grep -B1 \"^<<<-------------------------------------------------------------------------$\" sphinx.log"
+ run: "! grep -B10 \"^<<<-------------------------------------------------------------------------$\" sphinx.log"
- name: Install ssh key
run: |
| Current output of ipython directive errors in CI is essentially a blank line, so adding a bit more to try to show what error is happening.
```
(pandas-dev) ➜ ~ grep -B1 "^<<<-------------------------------------------------------------------------$" sphinx.log
<<<-------------------------------------------------------------------------
```
vs.
```
(pandas-dev) ➜ ~ grep -B10 "^<<<-------------------------------------------------------------------------$" sphinx.log
>>>-------------------------------------------------------------------------
Exception in /Users/danielsaxton/pandas/doc/source/user_guide/enhancingperf.rst at block ending on line 834
Specify :okexcept: as an option in the ipython:: block to suppress this message
File "<ipython-input-66-d3ab2b9618fd>", line 1
df.query("strings == "a" and nums == 1")
^
SyntaxError: invalid syntax
<<<-------------------------------------------------------------------------
``` | https://api.github.com/repos/pandas-dev/pandas/pulls/36863 | 2020-10-04T17:29:04Z | 2020-10-06T00:06:59Z | 2020-10-06T00:06:59Z | 2020-10-06T00:07:04Z |
DOC: make return type documentation of series methods consistent #35409 | diff --git a/pandas/core/arrays/categorical.py b/pandas/core/arrays/categorical.py
index 1a8861af10ed1..8dc5e6c0ff2aa 100644
--- a/pandas/core/arrays/categorical.py
+++ b/pandas/core/arrays/categorical.py
@@ -723,8 +723,8 @@ def as_ordered(self, inplace=False):
Returns
-------
- Categorical
- Ordered Categorical.
+ Categorical or None
+ Ordered Categorical or None if ``inplace=True``.
"""
inplace = validate_bool_kwarg(inplace, "inplace")
return self.set_ordered(True, inplace=inplace)
@@ -741,8 +741,8 @@ def as_unordered(self, inplace=False):
Returns
-------
- Categorical
- Unordered Categorical.
+ Categorical or None
+ Unordered Categorical or None if ``inplace=True``.
"""
inplace = validate_bool_kwarg(inplace, "inplace")
return self.set_ordered(False, inplace=inplace)
@@ -848,8 +848,7 @@ def rename_categories(self, new_categories, inplace=False):
Returns
-------
cat : Categorical or None
- With ``inplace=False``, the new categorical is returned.
- With ``inplace=True``, there is no return value.
+ Categorical with removed categories or None if ``inplace=True``.
Raises
------
@@ -917,7 +916,8 @@ def reorder_categories(self, new_categories, ordered=None, inplace=False):
Returns
-------
- cat : Categorical with reordered categories or None if inplace.
+ cat : Categorical or None
+ Categorical with removed categories or None if ``inplace=True``.
Raises
------
@@ -957,7 +957,8 @@ def add_categories(self, new_categories, inplace=False):
Returns
-------
- cat : Categorical with new categories added or None if inplace.
+ cat : Categorical or None
+ Categorical with new categories added or None if ``inplace=True``.
Raises
------
@@ -1007,7 +1008,8 @@ def remove_categories(self, removals, inplace=False):
Returns
-------
- cat : Categorical with removed categories or None if inplace.
+ cat : Categorical or None
+ Categorical with removed categories or None if ``inplace=True``.
Raises
------
@@ -1054,7 +1056,8 @@ def remove_unused_categories(self, inplace=False):
Returns
-------
- cat : Categorical with unused categories dropped or None if inplace.
+ cat : Categorical or None
+ Categorical with unused categories dropped or None if ``inplace=True``.
See Also
--------
diff --git a/pandas/core/computation/eval.py b/pandas/core/computation/eval.py
index 913f135b449f3..b77204861f0a4 100644
--- a/pandas/core/computation/eval.py
+++ b/pandas/core/computation/eval.py
@@ -242,7 +242,8 @@ def eval(
Returns
-------
- ndarray, numeric scalar, DataFrame, Series
+ ndarray, numeric scalar, DataFrame, Series, or None
+ The completion value of evaluating the given code or None if ``inplace=True``.
Raises
------
diff --git a/pandas/core/frame.py b/pandas/core/frame.py
index 1f9987d9d3f5b..8cddad2df0746 100644
--- a/pandas/core/frame.py
+++ b/pandas/core/frame.py
@@ -3251,8 +3251,9 @@ def query(self, expr, inplace=False, **kwargs):
Returns
-------
- DataFrame
- DataFrame resulting from the provided query expression.
+ DataFrame or None
+ DataFrame resulting from the provided query expression or
+ None if ``inplace=True``.
See Also
--------
@@ -3399,8 +3400,8 @@ def eval(self, expr, inplace=False, **kwargs):
Returns
-------
- ndarray, scalar, or pandas object
- The result of the evaluation.
+ ndarray, scalar, pandas object, or None
+ The result of the evaluation or None if ``inplace=True``.
See Also
--------
@@ -4117,8 +4118,9 @@ def drop(
Returns
-------
- DataFrame
- DataFrame without the removed index or column labels.
+ DataFrame or None
+ DataFrame without the removed index or column labels or
+ None if ``inplace=True``.
Raises
------
@@ -4272,8 +4274,8 @@ def rename(
Returns
-------
- DataFrame
- DataFrame with the renamed axis labels.
+ DataFrame or None
+ DataFrame with the renamed axis labels or None if ``inplace=True``.
Raises
------
@@ -4501,8 +4503,8 @@ def set_index(
Returns
-------
- DataFrame
- Changed row labels.
+ DataFrame or None
+ Changed row labels or None if ``inplace=True``.
See Also
--------
@@ -4964,8 +4966,8 @@ def dropna(self, axis=0, how="any", thresh=None, subset=None, inplace=False):
Returns
-------
- DataFrame
- DataFrame with NA entries dropped from it.
+ DataFrame or None
+ DataFrame with NA entries dropped from it or None if ``inplace=True``.
See Also
--------
@@ -5100,7 +5102,7 @@ def drop_duplicates(
Returns
-------
- DataFrame
+ DataFrame or None
DataFrame with duplicates removed or None if ``inplace=True``.
See Also
@@ -5423,8 +5425,8 @@ def sort_index(
Returns
-------
- DataFrame
- The original DataFrame sorted by the labels.
+ DataFrame or None
+ The original DataFrame sorted by the labels or None if ``inplace=True``.
See Also
--------
diff --git a/pandas/core/generic.py b/pandas/core/generic.py
index 04e1fc91c5fd4..93ba035b58376 100644
--- a/pandas/core/generic.py
+++ b/pandas/core/generic.py
@@ -649,7 +649,7 @@ def set_axis(self, labels, axis: Axis = 0, inplace: bool = False):
Returns
-------
renamed : %(klass)s or None
- An object of type %(klass)s if inplace=False, None otherwise.
+ An object of type %(klass)s or None if ``inplace=True``.
See Also
--------
@@ -1096,7 +1096,7 @@ def rename_axis(self, mapper=lib.no_default, **kwargs):
Returns
-------
Series, DataFrame, or None
- The same type as the caller or None if `inplace` is True.
+ The same type as the caller or None if ``inplace=True``.
See Also
--------
@@ -4326,7 +4326,7 @@ def sort_values(
Returns
-------
DataFrame or None
- DataFrame with sorted values if inplace=False, None otherwise.
+ DataFrame with sorted values or None if ``inplace=True``.
See Also
--------
@@ -6461,8 +6461,8 @@ def replace(
Returns
-------
- {klass}
- Object after replacement.
+ {klass} or None
+ Object after replacement or None if ``inplace=True``.
Raises
------
@@ -6896,9 +6896,9 @@ def interpolate(
Returns
-------
- Series or DataFrame
+ Series or DataFrame or None
Returns the same object type as the caller, interpolated at
- some or all ``NaN`` values.
+ some or all ``NaN`` values or None if ``inplace=True``.
See Also
--------
@@ -7494,9 +7494,9 @@ def clip(
Returns
-------
- Series or DataFrame
+ Series or DataFrame or None
Same type as calling object with the values outside the
- clip boundaries replaced.
+ clip boundaries replaced or None if ``inplace=True``.
See Also
--------
@@ -9050,7 +9050,7 @@ def where(
Returns
-------
- Same type as caller
+ Same type as caller or None if ``inplace=True``.
See Also
--------
diff --git a/pandas/core/indexes/base.py b/pandas/core/indexes/base.py
index ff3d8bf05f9a5..84c33f9d0ea4a 100644
--- a/pandas/core/indexes/base.py
+++ b/pandas/core/indexes/base.py
@@ -1345,8 +1345,8 @@ def set_names(self, names, level=None, inplace: bool = False):
Returns
-------
- Index
- The same type as the caller or None if inplace is True.
+ Index or None
+ The same type as the caller or None if ``inplace=True``.
See Also
--------
@@ -1421,8 +1421,8 @@ def rename(self, name, inplace=False):
Returns
-------
- Index
- The same type as the caller or None if inplace is True.
+ Index or None
+ The same type as the caller or None if ``inplace=True``.
See Also
--------
diff --git a/pandas/core/indexes/multi.py b/pandas/core/indexes/multi.py
index a157fdfdde447..c81175c577cc4 100644
--- a/pandas/core/indexes/multi.py
+++ b/pandas/core/indexes/multi.py
@@ -782,7 +782,8 @@ def set_levels(self, levels, level=None, inplace=None, verify_integrity=True):
Returns
-------
- new index (of same type and class...etc)
+ new index (of same type and class...etc) or None
+ The same type as the caller or None if ``inplace=True``.
Examples
--------
@@ -961,7 +962,8 @@ def set_codes(self, codes, level=None, inplace=None, verify_integrity=True):
Returns
-------
- new index (of same type and class...etc)
+ new index (of same type and class...etc) or None
+ The same type as the caller or None if ``inplace=True``.
Examples
--------
diff --git a/pandas/core/series.py b/pandas/core/series.py
index 2b972d33d7cdd..41d88ba4c705c 100644
--- a/pandas/core/series.py
+++ b/pandas/core/series.py
@@ -1192,7 +1192,7 @@ def reset_index(self, level=None, drop=False, name=None, inplace=False):
Returns
-------
- Series or DataFrame
+ Series or DataFrame or None
When `drop` is False (the default), a DataFrame is returned.
The newly created columns will come first in the DataFrame,
followed by the original Series values.
@@ -1911,8 +1911,8 @@ def drop_duplicates(self, keep="first", inplace=False) -> Optional["Series"]:
Returns
-------
- Series
- Series with duplicates dropped.
+ Series or None
+ Series with duplicates dropped or None if ``inplace=True``.
See Also
--------
@@ -3129,8 +3129,8 @@ def sort_values(
Returns
-------
- Series
- Series ordered by values.
+ Series or None
+ Series ordered by values or None if ``inplace=True``.
See Also
--------
@@ -3375,8 +3375,8 @@ def sort_index(
Returns
-------
- Series
- The original Series sorted by the labels.
+ Series or None
+ The original Series sorted by the labels or None if ``inplace=True``.
See Also
--------
@@ -4304,8 +4304,8 @@ def rename(
Returns
-------
- Series
- Series with index labels or name altered.
+ Series or None
+ Series with index labels or name altered or None if ``inplace=True``.
See Also
--------
@@ -4418,8 +4418,8 @@ def drop(
Returns
-------
- Series
- Series with specified index labels removed.
+ Series or None
+ Series with specified index labels removed or None if ``inplace=True``.
Raises
------
@@ -4807,8 +4807,8 @@ def dropna(self, axis=0, inplace=False, how=None):
Returns
-------
- Series
- Series with NA entries dropped from it.
+ Series or None
+ Series with NA entries dropped from it or None if ``inplace=True``.
See Also
--------
| - [x] closes #35409
- [x] tests added / passed
- [x] passes `black pandas`
- [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` | https://api.github.com/repos/pandas-dev/pandas/pulls/36862 | 2020-10-04T16:49:03Z | 2020-10-10T19:41:57Z | 2020-10-10T19:41:57Z | 2020-10-15T06:09:08Z |
REF/CLN: pandas/io/pytables.py | diff --git a/pandas/io/pytables.py b/pandas/io/pytables.py
index a3d6975c00a95..3e3330fa4378f 100644
--- a/pandas/io/pytables.py
+++ b/pandas/io/pytables.py
@@ -2,12 +2,24 @@
High level interface to PyTables for reading and writing pandas data structures
to disk
"""
+from contextlib import suppress
import copy
from datetime import date, tzinfo
import itertools
import os
import re
-from typing import TYPE_CHECKING, Any, Dict, List, Optional, Tuple, Type, Union
+from textwrap import dedent
+from typing import (
+ TYPE_CHECKING,
+ Any,
+ Dict,
+ List,
+ Optional,
+ Sequence,
+ Tuple,
+ Type,
+ Union,
+)
import warnings
import numpy as np
@@ -202,12 +214,10 @@ def _tables():
# set the file open policy
# return the file open policy; this changes as of pytables 3.1
# depending on the HDF5 version
- try:
+ with suppress(AttributeError):
_table_file_open_policy_is_strict = (
tables.file._FILE_OPEN_POLICY == "strict"
)
- except AttributeError:
- pass
return _table_mod
@@ -423,10 +433,8 @@ def read_hdf(
except (ValueError, TypeError, KeyError):
if not isinstance(path_or_buf, HDFStore):
# if there is an error, close the store if we opened it.
- try:
+ with suppress(AttributeError):
store.close()
- except AttributeError:
- pass
raise
@@ -667,12 +675,10 @@ def open(self, mode: str = "a", **kwargs):
tables = _tables()
if self._mode != mode:
-
# if we are changing a write mode to read, ok
if self._mode in ["a", "w"] and mode in ["r", "r+"]:
pass
elif mode in ["w"]:
-
# this would truncate, raise here
if self.is_open:
raise PossibleDataLossError(
@@ -691,41 +697,14 @@ def open(self, mode: str = "a", **kwargs):
self._complevel, self._complib, fletcher32=self._fletcher32
)
- try:
- self._handle = tables.open_file(self._path, self._mode, **kwargs)
- except OSError as err: # pragma: no cover
- if "can not be written" in str(err):
- print(f"Opening {self._path} in read-only mode")
- self._handle = tables.open_file(self._path, "r", **kwargs)
- else:
- raise
-
- except ValueError as err:
-
- # trap PyTables >= 3.1 FILE_OPEN_POLICY exception
- # to provide an updated message
- if "FILE_OPEN_POLICY" in str(err):
- hdf_version = tables.get_hdf5_version()
- err = ValueError(
- f"PyTables [{tables.__version__}] no longer supports "
- "opening multiple files\n"
- "even in read-only mode on this HDF5 version "
- f"[{hdf_version}]. You can accept this\n"
- "and not open the same file multiple times at once,\n"
- "upgrade the HDF5 version, or downgrade to PyTables 3.0.0 "
- "which allows\n"
- "files to be opened multiple times at once\n"
- )
-
- raise err
-
- except Exception as err:
+ if _table_file_open_policy_is_strict and self.is_open:
+ msg = (
+ "Cannot open HDF5 file, which is already opened, "
+ "even in read-only mode."
+ )
+ raise ValueError(msg)
- # trying to read from a non-existent file causes an error which
- # is not part of IOError, make it one
- if self._mode == "r" and "Unable to open/create file" in str(err):
- raise OSError(str(err)) from err
- raise
+ self._handle = tables.open_file(self._path, self._mode, **kwargs)
def close(self):
"""
@@ -763,10 +742,8 @@ def flush(self, fsync: bool = False):
if self._handle is not None:
self._handle.flush()
if fsync:
- try:
+ with suppress(OSError):
os.fsync(self._handle.fileno())
- except OSError:
- pass
def get(self, key: str):
"""
@@ -814,20 +791,20 @@ def select(
Parameters
----------
key : str
- Object being retrieved from file.
- where : list, default None
- List of Term (or convertible) objects, optional.
- start : int, default None
- Row number to start selection.
+ Object being retrieved from file.
+ where : list or None
+ List of Term (or convertible) objects, optional.
+ start : int or None
+ Row number to start selection.
stop : int, default None
- Row number to stop selection.
- columns : list, default None
- A list of columns that if not None, will limit the return columns.
- iterator : bool, default False
- Returns an iterator.
- chunksize : int, default None
- Number or rows to include in iteration, return an iterator.
- auto_close : bool, default False
+ Row number to stop selection.
+ columns : list or None
+ A list of columns that if not None, will limit the return columns.
+ iterator : bool or False
+ Returns an iterator.
+ chunksize : int or None
+ Number or rows to include in iteration, return an iterator.
+ auto_close : bool or False
Should automatically close the store when finished.
Returns
@@ -1090,17 +1067,14 @@ def put(
Table format. Write as a PyTables Table structure which may perform
worse but allow more flexible operations like searching / selecting
subsets of the data.
- append : bool, default False
- This will force Table format, append the input data to the
- existing.
+ append : bool, default False
+ This will force Table format, append the input data to the existing.
data_columns : list, default None
- List of columns to create as data columns, or True to
- use all columns. See `here
+ List of columns to create as data columns, or True to use all columns.
+ See `here
<https://pandas.pydata.org/pandas-docs/stable/user_guide/io.html#query-via-data-columns>`__.
encoding : str, default None
Provide an encoding for strings.
- dropna : bool, default False, do not write an ALL nan row to
- The store settable by the option 'io.hdf.dropna_table'.
track_times : bool, default True
Parameter is propagated to 'create_table' method of 'PyTables'.
If set to False it enables to have the same h5 files (same hashes)
@@ -1521,11 +1495,12 @@ def copy(
Parameters
----------
- propindexes: bool, default True
+ propindexes : bool, default True
Restore indexes in copied file.
- keys : list of keys to include in the copy (defaults to all)
- overwrite : overwrite (remove and replace) existing nodes in the
- new store (default is True)
+ keys : list, optional
+ List of keys to include in the copy (defaults to all).
+ overwrite : bool, default True
+ Whether to overwrite (remove and replace) existing nodes in the new store.
mode, complib, complevel, fletcher32 same as in HDFStore.__init__
Returns
@@ -1648,7 +1623,6 @@ def error(t):
# infer the pt from the passed value
if pt is None:
if value is None:
-
_tables()
assert _table_mod is not None # for mypy
if getattr(group, "table", None) or isinstance(
@@ -1680,10 +1654,8 @@ def error(t):
# existing node (and must be a table)
if tt is None:
-
# if we are a writer, determine the tt
if value is not None:
-
if pt == "series_table":
index = getattr(value, "index", None)
if index is not None:
@@ -1735,38 +1707,12 @@ def _write_to_group(
errors: str = "strict",
track_times: bool = True,
):
- group = self.get_node(key)
-
- # we make this assertion for mypy; the get_node call will already
- # have raised if this is incorrect
- assert self._handle is not None
-
- # remove the node if we are not appending
- if group is not None and not append:
- self._handle.remove_node(group, recursive=True)
- group = None
-
# we don't want to store a table node at all if our object is 0-len
# as there are not dtypes
if getattr(value, "empty", None) and (format == "table" or append):
return
- if group is None:
- paths = key.split("/")
-
- # recursively create the groups
- path = "/"
- for p in paths:
- if not len(p):
- continue
- new_path = path
- if not path.endswith("/"):
- new_path += "/"
- new_path += p
- group = self.get_node(new_path)
- if group is None:
- group = self._handle.create_group(path, p)
- path = new_path
+ group = self._identify_group(key, append)
s = self._create_storer(group, format, value, encoding=encoding, errors=errors)
if append:
@@ -1807,6 +1753,45 @@ def _read_group(self, group: "Node"):
s.infer_axes()
return s.read()
+ def _identify_group(self, key: str, append: bool) -> "Node":
+ """Identify HDF5 group based on key, delete/create group if needed."""
+ group = self.get_node(key)
+
+ # we make this assertion for mypy; the get_node call will already
+ # have raised if this is incorrect
+ assert self._handle is not None
+
+ # remove the node if we are not appending
+ if group is not None and not append:
+ self._handle.remove_node(group, recursive=True)
+ group = None
+
+ if group is None:
+ group = self._create_nodes_and_group(key)
+
+ return group
+
+ def _create_nodes_and_group(self, key: str) -> "Node":
+ """Create nodes from key and return group name."""
+ # assertion for mypy
+ assert self._handle is not None
+
+ paths = key.split("/")
+ # recursively create the groups
+ path = "/"
+ for p in paths:
+ if not len(p):
+ continue
+ new_path = path
+ if not path.endswith("/"):
+ new_path += "/"
+ new_path += p
+ group = self.get_node(new_path)
+ if group is None:
+ group = self._handle.create_group(path, p)
+ path = new_path
+ return group
+
class TableIterator:
"""
@@ -1875,11 +1860,9 @@ def __init__(
self.auto_close = auto_close
def __iter__(self):
-
# iterate
current = self.start
while current < self.stop:
-
stop = min(current + self.chunksize, self.stop)
value = self.func(None, None, self.coordinates[current:stop])
current = stop
@@ -1895,7 +1878,6 @@ def close(self):
self.store.close()
def get_result(self, coordinates: bool = False):
-
# return the actual iterator
if self.chunksize is not None:
if not isinstance(self.s, Table):
@@ -2094,7 +2076,6 @@ def maybe_set_size(self, min_itemsize=None):
with an integer size
"""
if _ensure_decoded(self.kind) == "string":
-
if isinstance(min_itemsize, dict):
min_itemsize = min_itemsize.get(self.name)
@@ -2152,7 +2133,6 @@ def update_info(self, info):
existing_value = idx.get(key)
if key in idx and value is not None and existing_value != value:
-
# frequency/name just warn
if key in ["freq", "index_name"]:
ws = attribute_conflict_doc % (key, existing_value, value)
@@ -2345,10 +2325,8 @@ def _get_atom(cls, values: ArrayLike) -> "Col":
atom = cls.get_atom_timedelta64(shape)
elif is_complex_dtype(dtype):
atom = _tables().ComplexCol(itemsize=itemsize, shape=shape[0])
-
elif is_string_dtype(dtype):
atom = cls.get_atom_string(shape, itemsize)
-
else:
atom = cls.get_atom_data(shape, kind=dtype.name)
@@ -2454,7 +2432,6 @@ def convert(self, values: np.ndarray, nan_rep, encoding: str, errors: str):
# reverse converts
if dtype == "datetime64":
-
# recreate with tz if indicated
converted = _set_tz(converted, tz, coerce=True)
@@ -2471,7 +2448,6 @@ def convert(self, values: np.ndarray, nan_rep, encoding: str, errors: str):
)
elif meta == "category":
-
# we have a categorical
categories = metadata
codes = converted.ravel()
@@ -2826,7 +2802,6 @@ def read_array(
ret = node[start:stop]
if dtype == "datetime64":
-
# reconstruct a timezone if indicated
tz = getattr(attrs, "tz", None)
ret = _set_tz(ret, tz, coerce=True)
@@ -3011,11 +2986,9 @@ def write_array(self, key: str, value: ArrayLike, items: Optional[Index] = None)
atom = None
if self._filters is not None:
- try:
+ with suppress(ValueError):
# get the atom for this datatype
atom = _tables().Atom.from_dtype(value.dtype)
- except ValueError:
- pass
if atom is not None:
# We only get here if self._filters is non-None and
@@ -3032,7 +3005,6 @@ def write_array(self, key: str, value: ArrayLike, items: Optional[Index] = None)
self.write_array_empty(key, value)
elif value.dtype.type == np.object_:
-
# infer the type, warn if we have a non-string type here (for
# performance)
inferred_type = lib.infer_dtype(value, skipna=False)
@@ -3716,7 +3688,6 @@ def validate_data_columns(self, data_columns, min_itemsize, non_index_axes):
# if min_itemsize is a dict, add the keys (exclude 'values')
if isinstance(min_itemsize, dict):
-
existing_data_columns = set(data_columns)
data_columns = list(data_columns) # ensure we do not modify
data_columns.extend(
@@ -4152,7 +4123,6 @@ def read_column(
# find the axes
for a in self.axes:
if column == a.name:
-
if not a.is_data_indexable:
raise ValueError(
f"column [{column}] can not be extracted individually; "
@@ -4278,9 +4248,7 @@ def write_data(self, chunksize: Optional[int], dropna: bool = False):
# if dropna==True, then drop ALL nan rows
masks = []
if dropna:
-
for a in self.values_axes:
-
# figure the mask: only do if we can successfully process this
# column, otherwise ignore the mask
mask = isna(a.data).all(axis=0)
@@ -4860,7 +4828,6 @@ def _unconvert_index(
def _maybe_convert_for_string_atom(
name: str, block, existing_col, min_itemsize, nan_rep, encoding, errors
):
-
if not block.is_object:
return block.values
@@ -4893,7 +4860,6 @@ def _maybe_convert_for_string_atom(
# we cannot serialize this data, so report an exception on a column
# by column basis
for i in range(len(block.shape[0])):
-
col = block.iget(i)
inferred_type = lib.infer_dtype(col, skipna=False)
if inferred_type != "string":
@@ -5018,7 +4984,7 @@ def _need_convert(kind: str) -> bool:
return False
-def _maybe_adjust_name(name: str, version) -> str:
+def _maybe_adjust_name(name: str, version: Sequence[int]) -> str:
"""
Prior to 0.10.1, we named values blocks like: values_block_0 an the
name values_0, adjust the given name if necessary.
@@ -5032,14 +4998,14 @@ def _maybe_adjust_name(name: str, version) -> str:
-------
str
"""
- try:
- if version[0] == 0 and version[1] <= 10 and version[2] == 0:
- m = re.search(r"values_block_(\d+)", name)
- if m:
- grp = m.groups()[0]
- name = f"values_{grp}"
- except IndexError:
- pass
+ if isinstance(version, str) or len(version) < 3:
+ raise ValueError("Version is incorrect, expected sequence of 3 integers.")
+
+ if version[0] == 0 and version[1] <= 10 and version[2] == 0:
+ m = re.search(r"values_block_(\d+)", name)
+ if m:
+ grp = m.groups()[0]
+ name = f"values_{grp}"
return name
@@ -5129,7 +5095,7 @@ def __init__(
if is_list_like(where):
# see if we have a passed coordinate like
- try:
+ with suppress(ValueError):
inferred = lib.infer_dtype(where, skipna=False)
if inferred == "integer" or inferred == "boolean":
where = np.asarray(where)
@@ -5149,9 +5115,6 @@ def __init__(
)
self.coordinates = where
- except ValueError:
- pass
-
if self.coordinates is None:
self.terms = self.generate(where)
@@ -5172,15 +5135,16 @@ def generate(self, where):
# raise a nice message, suggesting that the user should use
# data_columns
qkeys = ",".join(q.keys())
- raise ValueError(
- f"The passed where expression: {where}\n"
- " contains an invalid variable reference\n"
- " all of the variable references must be a "
- "reference to\n"
- " an axis (e.g. 'index' or 'columns'), or a "
- "data_column\n"
- f" The currently defined references are: {qkeys}\n"
- ) from err
+ msg = dedent(
+ f"""\
+ The passed where expression: {where}
+ contains an invalid variable reference
+ all of the variable references must be a reference to
+ an axis (e.g. 'index' or 'columns'), or a data_column
+ The currently defined references are: {qkeys}
+ """
+ )
+ raise ValueError(msg) from err
def select(self):
"""
diff --git a/pandas/tests/io/pytables/test_store.py b/pandas/tests/io/pytables/test_store.py
index c1938db12a0bc..1e1c9e91faa4b 100644
--- a/pandas/tests/io/pytables/test_store.py
+++ b/pandas/tests/io/pytables/test_store.py
@@ -49,6 +49,7 @@
HDFStore,
PossibleDataLossError,
Term,
+ _maybe_adjust_name,
read_hdf,
)
@@ -4921,3 +4922,10 @@ def test_unsuppored_hdf_file_error(self, datapath):
with pytest.raises(ValueError, match=message):
pd.read_hdf(data_path)
+
+
+@pytest.mark.parametrize("bad_version", [(1, 2), (1,), [], "12", "123"])
+def test_maybe_adjust_name_bad_version_raises(bad_version):
+ msg = "Version is incorrect, expected sequence of 3 integers"
+ with pytest.raises(ValueError, match=msg):
+ _maybe_adjust_name("values_block_0", version=bad_version)
| - [ ] closes #xxxx
- [ ] tests added / passed
- [ ] passes `black pandas`
- [ ] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [ ] whatsnew entry
Clean-up and minor refactor of ``pandas/io/pytables.py``.
- Extract method ``_identify_group``
- Clean-up some docstrings (spacing)
- Use suppress instead of try/except/pass
- Format error message using dedent
- Remove unnecessary empty lines as they compromise readability of some blocks | https://api.github.com/repos/pandas-dev/pandas/pulls/36859 | 2020-10-04T16:17:03Z | 2020-10-06T22:44:23Z | 2020-10-06T22:44:23Z | 2020-10-07T09:44:10Z |
DOC: doc/source/whatsnew | diff --git a/doc/source/whatsnew/v0.10.0.rst b/doc/source/whatsnew/v0.10.0.rst
index 443250592a4a7..aa2749c85a232 100644
--- a/doc/source/whatsnew/v0.10.0.rst
+++ b/doc/source/whatsnew/v0.10.0.rst
@@ -49,8 +49,8 @@ talking about:
:okwarning:
import pandas as pd
- df = pd.DataFrame(np.random.randn(6, 4),
- index=pd.date_range('1/1/2000', periods=6))
+
+ df = pd.DataFrame(np.random.randn(6, 4), index=pd.date_range("1/1/2000", periods=6))
df
# deprecated now
df - df[0]
@@ -184,12 +184,14 @@ labeled the aggregated group with the end of the interval: the next day).
import io
- data = ('a,b,c\n'
- '1,Yes,2\n'
- '3,No,4')
+ data = """
+ a,b,c
+ 1,Yes,2
+ 3,No,4
+ """
print(data)
pd.read_csv(io.StringIO(data), header=None)
- pd.read_csv(io.StringIO(data), header=None, prefix='X')
+ pd.read_csv(io.StringIO(data), header=None, prefix="X")
- Values like ``'Yes'`` and ``'No'`` are not interpreted as boolean by default,
though this can be controlled by new ``true_values`` and ``false_values``
@@ -199,7 +201,7 @@ labeled the aggregated group with the end of the interval: the next day).
print(data)
pd.read_csv(io.StringIO(data))
- pd.read_csv(io.StringIO(data), true_values=['Yes'], false_values=['No'])
+ pd.read_csv(io.StringIO(data), true_values=["Yes"], false_values=["No"])
- The file parsers will not recognize non-string values arising from a
converter function as NA if passed in the ``na_values`` argument. It's better
@@ -210,10 +212,10 @@ labeled the aggregated group with the end of the interval: the next day).
.. ipython:: python
- s = pd.Series([np.nan, 1., 2., np.nan, 4])
+ s = pd.Series([np.nan, 1.0, 2.0, np.nan, 4])
s
s.fillna(0)
- s.fillna(method='pad')
+ s.fillna(method="pad")
Convenience methods ``ffill`` and ``bfill`` have been added:
@@ -229,7 +231,8 @@ Convenience methods ``ffill`` and ``bfill`` have been added:
.. ipython:: python
def f(x):
- return pd.Series([x, x**2], index=['x', 'x^2'])
+ return pd.Series([x, x ** 2], index=["x", "x^2"])
+
s = pd.Series(np.random.rand(5))
s
@@ -272,20 +275,20 @@ The old behavior of printing out summary information can be achieved via the
.. ipython:: python
- pd.set_option('expand_frame_repr', False)
+ pd.set_option("expand_frame_repr", False)
wide_frame
.. ipython:: python
:suppress:
- pd.reset_option('expand_frame_repr')
+ pd.reset_option("expand_frame_repr")
The width of each line can be changed via 'line_width' (80 by default):
.. code-block:: python
- pd.set_option('line_width', 40)
+ pd.set_option("line_width", 40)
wide_frame
diff --git a/doc/source/whatsnew/v0.10.1.rst b/doc/source/whatsnew/v0.10.1.rst
index 3dc680c46a4d9..d71a0d5ca68cd 100644
--- a/doc/source/whatsnew/v0.10.1.rst
+++ b/doc/source/whatsnew/v0.10.1.rst
@@ -45,29 +45,31 @@ You may need to upgrade your existing data files. Please visit the
import os
- os.remove('store.h5')
+ os.remove("store.h5")
You can designate (and index) certain columns that you want to be able to
perform queries on a table, by passing a list to ``data_columns``
.. ipython:: python
- store = pd.HDFStore('store.h5')
- df = pd.DataFrame(np.random.randn(8, 3),
- index=pd.date_range('1/1/2000', periods=8),
- columns=['A', 'B', 'C'])
- df['string'] = 'foo'
- df.loc[df.index[4:6], 'string'] = np.nan
- df.loc[df.index[7:9], 'string'] = 'bar'
- df['string2'] = 'cool'
+ store = pd.HDFStore("store.h5")
+ df = pd.DataFrame(
+ np.random.randn(8, 3),
+ index=pd.date_range("1/1/2000", periods=8),
+ columns=["A", "B", "C"],
+ )
+ df["string"] = "foo"
+ df.loc[df.index[4:6], "string"] = np.nan
+ df.loc[df.index[7:9], "string"] = "bar"
+ df["string2"] = "cool"
df
# on-disk operations
- store.append('df', df, data_columns=['B', 'C', 'string', 'string2'])
- store.select('df', "B>0 and string=='foo'")
+ store.append("df", df, data_columns=["B", "C", "string", "string2"])
+ store.select("df", "B>0 and string=='foo'")
# this is in-memory version of this type of selection
- df[(df.B > 0) & (df.string == 'foo')]
+ df[(df.B > 0) & (df.string == "foo")]
Retrieving unique values in an indexable or data column.
@@ -75,19 +77,19 @@ Retrieving unique values in an indexable or data column.
# note that this is deprecated as of 0.14.0
# can be replicated by: store.select_column('df','index').unique()
- store.unique('df', 'index')
- store.unique('df', 'string')
+ store.unique("df", "index")
+ store.unique("df", "string")
You can now store ``datetime64`` in data columns
.. ipython:: python
df_mixed = df.copy()
- df_mixed['datetime64'] = pd.Timestamp('20010102')
- df_mixed.loc[df_mixed.index[3:4], ['A', 'B']] = np.nan
+ df_mixed["datetime64"] = pd.Timestamp("20010102")
+ df_mixed.loc[df_mixed.index[3:4], ["A", "B"]] = np.nan
- store.append('df_mixed', df_mixed)
- df_mixed1 = store.select('df_mixed')
+ store.append("df_mixed", df_mixed)
+ df_mixed1 = store.select("df_mixed")
df_mixed1
df_mixed1.dtypes.value_counts()
@@ -97,7 +99,7 @@ columns, this is equivalent to passing a
.. ipython:: python
- store.select('df', columns=['A', 'B'])
+ store.select("df", columns=["A", "B"])
``HDFStore`` now serializes MultiIndex dataframes when appending tables.
@@ -160,29 +162,31 @@ combined result, by using ``where`` on a selector table.
.. ipython:: python
- df_mt = pd.DataFrame(np.random.randn(8, 6),
- index=pd.date_range('1/1/2000', periods=8),
- columns=['A', 'B', 'C', 'D', 'E', 'F'])
- df_mt['foo'] = 'bar'
+ df_mt = pd.DataFrame(
+ np.random.randn(8, 6),
+ index=pd.date_range("1/1/2000", periods=8),
+ columns=["A", "B", "C", "D", "E", "F"],
+ )
+ df_mt["foo"] = "bar"
# you can also create the tables individually
- store.append_to_multiple({'df1_mt': ['A', 'B'], 'df2_mt': None},
- df_mt, selector='df1_mt')
+ store.append_to_multiple(
+ {"df1_mt": ["A", "B"], "df2_mt": None}, df_mt, selector="df1_mt"
+ )
store
# individual tables were created
- store.select('df1_mt')
- store.select('df2_mt')
+ store.select("df1_mt")
+ store.select("df2_mt")
# as a multiple
- store.select_as_multiple(['df1_mt', 'df2_mt'], where=['A>0', 'B>0'],
- selector='df1_mt')
+ store.select_as_multiple(["df1_mt", "df2_mt"], where=["A>0", "B>0"], selector="df1_mt")
.. ipython:: python
:suppress:
store.close()
- os.remove('store.h5')
+ os.remove("store.h5")
**Enhancements**
diff --git a/doc/source/whatsnew/v0.12.0.rst b/doc/source/whatsnew/v0.12.0.rst
index 9971ae22822f6..4de76510c6bc1 100644
--- a/doc/source/whatsnew/v0.12.0.rst
+++ b/doc/source/whatsnew/v0.12.0.rst
@@ -47,7 +47,7 @@ API changes
.. ipython:: python
- p = pd.DataFrame({'first': [4, 5, 8], 'second': [0, 0, 3]})
+ p = pd.DataFrame({"first": [4, 5, 8], "second": [0, 0, 3]})
p % 0
p % p
p / p
@@ -95,8 +95,8 @@ API changes
.. ipython:: python
- df = pd.DataFrame(range(5), index=list('ABCDE'), columns=['a'])
- mask = (df.a % 2 == 0)
+ df = pd.DataFrame(range(5), index=list("ABCDE"), columns=["a"])
+ mask = df.a % 2 == 0
mask
# this is what you should use
@@ -141,21 +141,24 @@ API changes
.. code-block:: python
from pandas.io.parsers import ExcelFile
- xls = ExcelFile('path_to_file.xls')
- xls.parse('Sheet1', index_col=None, na_values=['NA'])
+
+ xls = ExcelFile("path_to_file.xls")
+ xls.parse("Sheet1", index_col=None, na_values=["NA"])
With
.. code-block:: python
import pandas as pd
- pd.read_excel('path_to_file.xls', 'Sheet1', index_col=None, na_values=['NA'])
+
+ pd.read_excel("path_to_file.xls", "Sheet1", index_col=None, na_values=["NA"])
- added top-level function ``read_sql`` that is equivalent to the following
.. code-block:: python
from pandas.io.sql import read_frame
+
read_frame(...)
- ``DataFrame.to_html`` and ``DataFrame.to_latex`` now accept a path for
@@ -200,7 +203,7 @@ IO enhancements
.. ipython:: python
:okwarning:
- df = pd.DataFrame({'a': range(3), 'b': list('abc')})
+ df = pd.DataFrame({"a": range(3), "b": list("abc")})
print(df)
html = df.to_html()
alist = pd.read_html(html, index_col=0)
@@ -248,16 +251,18 @@ IO enhancements
.. ipython:: python
from pandas._testing import makeCustomDataframe as mkdf
+
df = mkdf(5, 3, r_idx_nlevels=2, c_idx_nlevels=4)
- df.to_csv('mi.csv')
- print(open('mi.csv').read())
- pd.read_csv('mi.csv', header=[0, 1, 2, 3], index_col=[0, 1])
+ df.to_csv("mi.csv")
+ print(open("mi.csv").read())
+ pd.read_csv("mi.csv", header=[0, 1, 2, 3], index_col=[0, 1])
.. ipython:: python
:suppress:
import os
- os.remove('mi.csv')
+
+ os.remove("mi.csv")
- Support for ``HDFStore`` (via ``PyTables 3.0.0``) on Python3
@@ -304,8 +309,8 @@ Other enhancements
.. ipython:: python
- df = pd.DataFrame({'a': list('ab..'), 'b': [1, 2, 3, 4]})
- df.replace(regex=r'\s*\.\s*', value=np.nan)
+ df = pd.DataFrame({"a": list("ab.."), "b": [1, 2, 3, 4]})
+ df.replace(regex=r"\s*\.\s*", value=np.nan)
to replace all occurrences of the string ``'.'`` with zero or more
instances of surrounding white space with ``NaN``.
@@ -314,7 +319,7 @@ Other enhancements
.. ipython:: python
- df.replace('.', np.nan)
+ df.replace(".", np.nan)
to replace all occurrences of the string ``'.'`` with ``NaN``.
@@ -359,8 +364,8 @@ Other enhancements
.. ipython:: python
- dff = pd.DataFrame({'A': np.arange(8), 'B': list('aabbbbcc')})
- dff.groupby('B').filter(lambda x: len(x) > 2)
+ dff = pd.DataFrame({"A": np.arange(8), "B": list("aabbbbcc")})
+ dff.groupby("B").filter(lambda x: len(x) > 2)
Alternatively, instead of dropping the offending groups, we can return a
like-indexed objects where the groups that do not pass the filter are
@@ -368,7 +373,7 @@ Other enhancements
.. ipython:: python
- dff.groupby('B').filter(lambda x: len(x) > 2, dropna=False)
+ dff.groupby("B").filter(lambda x: len(x) > 2, dropna=False)
- Series and DataFrame hist methods now take a ``figsize`` argument (:issue:`3834`)
@@ -397,17 +402,18 @@ Experimental features
from pandas.tseries.offsets import CustomBusinessDay
from datetime import datetime
+
# As an interesting example, let's look at Egypt where
# a Friday-Saturday weekend is observed.
- weekmask_egypt = 'Sun Mon Tue Wed Thu'
+ weekmask_egypt = "Sun Mon Tue Wed Thu"
# They also observe International Workers' Day so let's
# add that for a couple of years
- holidays = ['2012-05-01', datetime(2013, 5, 1), np.datetime64('2014-05-01')]
+ holidays = ["2012-05-01", datetime(2013, 5, 1), np.datetime64("2014-05-01")]
bday_egypt = CustomBusinessDay(holidays=holidays, weekmask=weekmask_egypt)
dt = datetime(2013, 4, 30)
print(dt + 2 * bday_egypt)
dts = pd.date_range(dt, periods=5, freq=bday_egypt)
- print(pd.Series(dts.weekday, dts).map(pd.Series('Mon Tue Wed Thu Fri Sat Sun'.split())))
+ print(pd.Series(dts.weekday, dts).map(pd.Series("Mon Tue Wed Thu Fri Sat Sun".split())))
Bug fixes
~~~~~~~~~
@@ -430,14 +436,14 @@ Bug fixes
.. ipython:: python
:okwarning:
- strs = 'go', 'bow', 'joe', 'slow'
+ strs = "go", "bow", "joe", "slow"
ds = pd.Series(strs)
for s in ds.str:
print(s)
s
- s.dropna().values.item() == 'w'
+ s.dropna().values.item() == "w"
The last element yielded by the iterator will be a ``Series`` containing
the last element of the longest string in the ``Series`` with all other
diff --git a/doc/source/whatsnew/v0.13.1.rst b/doc/source/whatsnew/v0.13.1.rst
index 9e416f8eeb3f1..1215786b4cccc 100644
--- a/doc/source/whatsnew/v0.13.1.rst
+++ b/doc/source/whatsnew/v0.13.1.rst
@@ -31,16 +31,16 @@ Highlights include:
.. ipython:: python
- df = pd.DataFrame({'A': np.array(['foo', 'bar', 'bah', 'foo', 'bar'])})
- df['A'].iloc[0] = np.nan
+ df = pd.DataFrame({"A": np.array(["foo", "bar", "bah", "foo", "bar"])})
+ df["A"].iloc[0] = np.nan
df
The recommended way to do this type of assignment is:
.. ipython:: python
- df = pd.DataFrame({'A': np.array(['foo', 'bar', 'bah', 'foo', 'bar'])})
- df.loc[0, 'A'] = np.nan
+ df = pd.DataFrame({"A": np.array(["foo", "bar", "bah", "foo", "bar"])})
+ df.loc[0, "A"] = np.nan
df
Output formatting enhancements
@@ -52,24 +52,27 @@ Output formatting enhancements
.. ipython:: python
- max_info_rows = pd.get_option('max_info_rows')
+ max_info_rows = pd.get_option("max_info_rows")
- df = pd.DataFrame({'A': np.random.randn(10),
- 'B': np.random.randn(10),
- 'C': pd.date_range('20130101', periods=10)
- })
+ df = pd.DataFrame(
+ {
+ "A": np.random.randn(10),
+ "B": np.random.randn(10),
+ "C": pd.date_range("20130101", periods=10),
+ }
+ )
df.iloc[3:6, [0, 2]] = np.nan
.. ipython:: python
# set to not display the null counts
- pd.set_option('max_info_rows', 0)
+ pd.set_option("max_info_rows", 0)
df.info()
.. ipython:: python
# this is the default (same as in 0.13.0)
- pd.set_option('max_info_rows', max_info_rows)
+ pd.set_option("max_info_rows", max_info_rows)
df.info()
- Add ``show_dimensions`` display option for the new DataFrame repr to control whether the dimensions print.
@@ -77,10 +80,10 @@ Output formatting enhancements
.. ipython:: python
df = pd.DataFrame([[1, 2], [3, 4]])
- pd.set_option('show_dimensions', False)
+ pd.set_option("show_dimensions", False)
df
- pd.set_option('show_dimensions', True)
+ pd.set_option("show_dimensions", True)
df
- The ``ArrayFormatter`` for ``datetime`` and ``timedelta64`` now intelligently
@@ -98,10 +101,9 @@ Output formatting enhancements
.. ipython:: python
- df = pd.DataFrame([pd.Timestamp('20010101'),
- pd.Timestamp('20040601')], columns=['age'])
- df['today'] = pd.Timestamp('20130419')
- df['diff'] = df['today'] - df['age']
+ df = pd.DataFrame([pd.Timestamp("20010101"), pd.Timestamp("20040601")], columns=["age"])
+ df["today"] = pd.Timestamp("20130419")
+ df["diff"] = df["today"] - df["age"]
df
API changes
@@ -115,8 +117,8 @@ API changes
.. ipython:: python
- s = pd.Series(['a', 'a|b', np.nan, 'a|c'])
- s.str.get_dummies(sep='|')
+ s = pd.Series(["a", "a|b", np.nan, "a|c"])
+ s.str.get_dummies(sep="|")
- Added the ``NDFrame.equals()`` method to compare if two NDFrames are
equal have equal axes, dtypes, and values. Added the
@@ -126,8 +128,8 @@ API changes
.. code-block:: python
- df = pd.DataFrame({'col': ['foo', 0, np.nan]})
- df2 = pd.DataFrame({'col': [np.nan, 0, 'foo']}, index=[2, 1, 0])
+ df = pd.DataFrame({"col": ["foo", 0, np.nan]})
+ df2 = pd.DataFrame({"col": [np.nan, 0, "foo"]}, index=[2, 1, 0])
df.equals(df2)
df.equals(df2.sort_index())
@@ -204,8 +206,7 @@ Enhancements
.. code-block:: python
# Try to infer the format for the index column
- df = pd.read_csv('foo.csv', index_col=0, parse_dates=True,
- infer_datetime_format=True)
+ df = pd.read_csv("foo.csv", index_col=0, parse_dates=True, infer_datetime_format=True)
- ``date_format`` and ``datetime_format`` keywords can now be specified when writing to ``excel``
files (:issue:`4133`)
@@ -215,10 +216,10 @@ Enhancements
.. ipython:: python
- shades = ['light', 'dark']
- colors = ['red', 'green', 'blue']
+ shades = ["light", "dark"]
+ colors = ["red", "green", "blue"]
- pd.MultiIndex.from_product([shades, colors], names=['shade', 'color'])
+ pd.MultiIndex.from_product([shades, colors], names=["shade", "color"])
- Panel :meth:`~pandas.Panel.apply` will work on non-ufuncs. See :ref:`the docs<basics.apply>`.
diff --git a/doc/source/whatsnew/v0.14.1.rst b/doc/source/whatsnew/v0.14.1.rst
index 354d67a525d0e..78fd182ea86c3 100644
--- a/doc/source/whatsnew/v0.14.1.rst
+++ b/doc/source/whatsnew/v0.14.1.rst
@@ -68,7 +68,8 @@ API changes
:suppress:
import pandas.tseries.offsets as offsets
- d = pd.Timestamp('2014-01-01 09:00')
+
+ d = pd.Timestamp("2014-01-01 09:00")
.. ipython:: python
@@ -100,10 +101,10 @@ Enhancements
import pandas.tseries.offsets as offsets
day = offsets.Day()
- day.apply(pd.Timestamp('2014-01-01 09:00'))
+ day.apply(pd.Timestamp("2014-01-01 09:00"))
day = offsets.Day(normalize=True)
- day.apply(pd.Timestamp('2014-01-01 09:00'))
+ day.apply(pd.Timestamp("2014-01-01 09:00"))
- ``PeriodIndex`` is represented as the same format as ``DatetimeIndex`` (:issue:`7601`)
- ``StringMethods`` now work on empty Series (:issue:`7242`)
@@ -123,8 +124,7 @@ Enhancements
.. ipython:: python
- rng = pd.date_range('3/6/2012 00:00', periods=10, freq='D',
- tz='dateutil/Europe/London')
+ rng = pd.date_range("3/6/2012 00:00", periods=10, freq="D", tz="dateutil/Europe/London")
rng.tz
See :ref:`the docs <timeseries.timezone>`.
diff --git a/doc/source/whatsnew/v0.15.1.rst b/doc/source/whatsnew/v0.15.1.rst
index da56f07e84d9f..a1d4f9d14a905 100644
--- a/doc/source/whatsnew/v0.15.1.rst
+++ b/doc/source/whatsnew/v0.15.1.rst
@@ -23,7 +23,7 @@ API changes
.. ipython:: python
- s = pd.Series(pd.date_range('20130101', periods=5, freq='D'))
+ s = pd.Series(pd.date_range("20130101", periods=5, freq="D"))
s.iloc[2] = np.nan
s
@@ -52,8 +52,7 @@ API changes
.. ipython:: python
np.random.seed(2718281)
- df = pd.DataFrame(np.random.randint(0, 100, (10, 2)),
- columns=['jim', 'joe'])
+ df = pd.DataFrame(np.random.randint(0, 100, (10, 2)), columns=["jim", "joe"])
df.head()
ts = pd.Series(5 * np.random.randint(0, 3, 10))
@@ -80,9 +79,9 @@ API changes
.. ipython:: python
- df = pd.DataFrame({'jim': range(5), 'joe': range(5, 10)})
+ df = pd.DataFrame({"jim": range(5), "joe": range(5, 10)})
df
- gr = df.groupby(df['jim'] < 2)
+ gr = df.groupby(df["jim"] < 2)
previous behavior (excludes 1st column from output):
@@ -106,7 +105,7 @@ API changes
.. ipython:: python
- s = pd.Series(['a', 'b', 'c', 'd'], [4, 3, 2, 1])
+ s = pd.Series(["a", "b", "c", "d"], [4, 3, 2, 1])
s
previous behavior:
@@ -208,6 +207,7 @@ Enhancements
.. ipython:: python
from collections import deque
+
df1 = pd.DataFrame([1, 2, 3])
df2 = pd.DataFrame([4, 5, 6])
@@ -228,8 +228,9 @@ Enhancements
.. ipython:: python
- dfi = pd.DataFrame(1, index=pd.MultiIndex.from_product([['a'],
- range(1000)]), columns=['A'])
+ dfi = pd.DataFrame(
+ 1, index=pd.MultiIndex.from_product([["a"], range(1000)]), columns=["A"]
+ )
previous behavior:
diff --git a/doc/source/whatsnew/v0.16.1.rst b/doc/source/whatsnew/v0.16.1.rst
index a89ede8f024a0..39767684c01d0 100644
--- a/doc/source/whatsnew/v0.16.1.rst
+++ b/doc/source/whatsnew/v0.16.1.rst
@@ -209,9 +209,8 @@ when sampling from rows.
.. ipython:: python
- df = pd.DataFrame({'col1': [9, 8, 7, 6],
- 'weight_column': [0.5, 0.4, 0.1, 0]})
- df.sample(n=3, weights='weight_column')
+ df = pd.DataFrame({"col1": [9, 8, 7, 6], "weight_column": [0.5, 0.4, 0.1, 0]})
+ df.sample(n=3, weights="weight_column")
.. _whatsnew_0161.enhancements.string:
@@ -229,7 +228,7 @@ enhancements make string operations easier and more consistent with standard pyt
.. ipython:: python
- idx = pd.Index([' jack', 'jill ', ' jesse ', 'frank'])
+ idx = pd.Index([" jack", "jill ", " jesse ", "frank"])
idx.str.strip()
One special case for the ``.str`` accessor on ``Index`` is that if a string method returns ``bool``, the ``.str`` accessor
@@ -238,11 +237,11 @@ enhancements make string operations easier and more consistent with standard pyt
.. ipython:: python
- idx = pd.Index(['a1', 'a2', 'b1', 'b2'])
+ idx = pd.Index(["a1", "a2", "b1", "b2"])
s = pd.Series(range(4), index=idx)
s
- idx.str.startswith('a')
- s[s.index.str.startswith('a')]
+ idx.str.startswith("a")
+ s[s.index.str.startswith("a")]
- The following new methods are accessible via ``.str`` accessor to apply the function to each values. (:issue:`9766`, :issue:`9773`, :issue:`10031`, :issue:`10045`, :issue:`10052`)
@@ -257,21 +256,21 @@ enhancements make string operations easier and more consistent with standard pyt
.. ipython:: python
- s = pd.Series(['a,b', 'a,c', 'b,c'])
+ s = pd.Series(["a,b", "a,c", "b,c"])
# return Series
- s.str.split(',')
+ s.str.split(",")
# return DataFrame
- s.str.split(',', expand=True)
+ s.str.split(",", expand=True)
- idx = pd.Index(['a,b', 'a,c', 'b,c'])
+ idx = pd.Index(["a,b", "a,c", "b,c"])
# return Index
- idx.str.split(',')
+ idx.str.split(",")
# return MultiIndex
- idx.str.split(',', expand=True)
+ idx.str.split(",", expand=True)
- Improved ``extract`` and ``get_dummies`` methods for ``Index.str`` (:issue:`9980`)
@@ -286,9 +285,9 @@ Other enhancements
.. ipython:: python
- pd.Timestamp('2014-08-01 09:00') + pd.tseries.offsets.BusinessHour()
- pd.Timestamp('2014-08-01 07:00') + pd.tseries.offsets.BusinessHour()
- pd.Timestamp('2014-08-01 16:30') + pd.tseries.offsets.BusinessHour()
+ pd.Timestamp("2014-08-01 09:00") + pd.tseries.offsets.BusinessHour()
+ pd.Timestamp("2014-08-01 07:00") + pd.tseries.offsets.BusinessHour()
+ pd.Timestamp("2014-08-01 16:30") + pd.tseries.offsets.BusinessHour()
- ``DataFrame.diff`` now takes an ``axis`` parameter that determines the direction of differencing (:issue:`9727`)
@@ -300,8 +299,8 @@ Other enhancements
.. ipython:: python
- df = pd.DataFrame(np.random.randn(3, 3), columns=['A', 'B', 'C'])
- df.drop(['A', 'X'], axis=1, errors='ignore')
+ df = pd.DataFrame(np.random.randn(3, 3), columns=["A", "B", "C"])
+ df.drop(["A", "X"], axis=1, errors="ignore")
- Add support for separating years and quarters using dashes, for
example 2014-Q1. (:issue:`9688`)
@@ -382,19 +381,16 @@ New behavior
.. ipython:: python
- pd.set_option('display.width', 80)
- pd.Index(range(4), name='foo')
- pd.Index(range(30), name='foo')
- pd.Index(range(104), name='foo')
- pd.CategoricalIndex(['a', 'bb', 'ccc', 'dddd'],
- ordered=True, name='foobar')
- pd.CategoricalIndex(['a', 'bb', 'ccc', 'dddd'] * 10,
- ordered=True, name='foobar')
- pd.CategoricalIndex(['a', 'bb', 'ccc', 'dddd'] * 100,
- ordered=True, name='foobar')
- pd.date_range('20130101', periods=4, name='foo', tz='US/Eastern')
- pd.date_range('20130101', periods=25, freq='D')
- pd.date_range('20130101', periods=104, name='foo', tz='US/Eastern')
+ pd.set_option("display.width", 80)
+ pd.Index(range(4), name="foo")
+ pd.Index(range(30), name="foo")
+ pd.Index(range(104), name="foo")
+ pd.CategoricalIndex(["a", "bb", "ccc", "dddd"], ordered=True, name="foobar")
+ pd.CategoricalIndex(["a", "bb", "ccc", "dddd"] * 10, ordered=True, name="foobar")
+ pd.CategoricalIndex(["a", "bb", "ccc", "dddd"] * 100, ordered=True, name="foobar")
+ pd.date_range("20130101", periods=4, name="foo", tz="US/Eastern")
+ pd.date_range("20130101", periods=25, freq="D")
+ pd.date_range("20130101", periods=104, name="foo", tz="US/Eastern")
.. _whatsnew_0161.performance:
diff --git a/doc/source/whatsnew/v0.16.2.rst b/doc/source/whatsnew/v0.16.2.rst
index 2cb0cbec68eff..bb2aa166419b4 100644
--- a/doc/source/whatsnew/v0.16.2.rst
+++ b/doc/source/whatsnew/v0.16.2.rst
@@ -48,9 +48,10 @@ This can be rewritten as
.. code-block:: python
- (df.pipe(h) # noqa F821
- .pipe(g, arg1=1) # noqa F821
- .pipe(f, arg2=2, arg3=3) # noqa F821
+ (
+ df.pipe(h) # noqa F821
+ .pipe(g, arg1=1) # noqa F821
+ .pipe(f, arg2=2, arg3=3) # noqa F821
)
Now both the code and the logic flow from top to bottom. Keyword arguments are next to
@@ -64,15 +65,16 @@ of ``(function, keyword)`` indicating where the DataFrame should flow. For examp
import statsmodels.formula.api as sm
- bb = pd.read_csv('data/baseball.csv', index_col='id')
+ bb = pd.read_csv("data/baseball.csv", index_col="id")
# sm.ols takes (formula, data)
- (bb.query('h > 0')
- .assign(ln_h=lambda df: np.log(df.h))
- .pipe((sm.ols, 'data'), 'hr ~ ln_h + year + g + C(lg)')
- .fit()
- .summary()
- )
+ (
+ bb.query("h > 0")
+ .assign(ln_h=lambda df: np.log(df.h))
+ .pipe((sm.ols, "data"), "hr ~ ln_h + year + g + C(lg)")
+ .fit()
+ .summary()
+ )
The pipe method is inspired by unix pipes, which stream text through
processes. More recently dplyr_ and magrittr_ have introduced the
diff --git a/doc/source/whatsnew/v0.17.0.rst b/doc/source/whatsnew/v0.17.0.rst
index e8f37a72f6417..9f700dacf38c7 100644
--- a/doc/source/whatsnew/v0.17.0.rst
+++ b/doc/source/whatsnew/v0.17.0.rst
@@ -80,9 +80,13 @@ The new implementation allows for having a single-timezone across all rows, with
.. ipython:: python
- df = pd.DataFrame({'A': pd.date_range('20130101', periods=3),
- 'B': pd.date_range('20130101', periods=3, tz='US/Eastern'),
- 'C': pd.date_range('20130101', periods=3, tz='CET')})
+ df = pd.DataFrame(
+ {
+ "A": pd.date_range("20130101", periods=3),
+ "B": pd.date_range("20130101", periods=3, tz="US/Eastern"),
+ "C": pd.date_range("20130101", periods=3, tz="CET"),
+ }
+ )
df
df.dtypes
@@ -95,8 +99,8 @@ This uses a new-dtype representation as well, that is very similar in look-and-f
.. ipython:: python
- df['B'].dtype
- type(df['B'].dtype)
+ df["B"].dtype
+ type(df["B"].dtype)
.. note::
@@ -119,8 +123,8 @@ This uses a new-dtype representation as well, that is very similar in look-and-f
.. ipython:: python
- pd.date_range('20130101', periods=3, tz='US/Eastern')
- pd.date_range('20130101', periods=3, tz='US/Eastern').dtype
+ pd.date_range("20130101", periods=3, tz="US/Eastern")
+ pd.date_range("20130101", periods=3, tz="US/Eastern").dtype
.. _whatsnew_0170.gil:
@@ -138,9 +142,10 @@ as well as the ``.sum()`` operation.
N = 1000000
ngroups = 10
- df = DataFrame({'key': np.random.randint(0, ngroups, size=N),
- 'data': np.random.randn(N)})
- df.groupby('key')['data'].sum()
+ df = DataFrame(
+ {"key": np.random.randint(0, ngroups, size=N), "data": np.random.randn(N)}
+ )
+ df.groupby("key")["data"].sum()
Releasing of the GIL could benefit an application that uses threads for user interactions (e.g. QT_), or performing multi-threaded computations. A nice example of a library that can handle these types of computation-in-parallel is the dask_ library.
@@ -189,16 +194,16 @@ We are now supporting a ``Series.dt.strftime`` method for datetime-likes to gene
.. ipython:: python
# DatetimeIndex
- s = pd.Series(pd.date_range('20130101', periods=4))
+ s = pd.Series(pd.date_range("20130101", periods=4))
s
- s.dt.strftime('%Y/%m/%d')
+ s.dt.strftime("%Y/%m/%d")
.. ipython:: python
# PeriodIndex
- s = pd.Series(pd.period_range('20130101', periods=4))
+ s = pd.Series(pd.period_range("20130101", periods=4))
s
- s.dt.strftime('%Y/%m/%d')
+ s.dt.strftime("%Y/%m/%d")
The string format is as the python standard library and details can be found `here <https://docs.python.org/2/library/datetime.html#strftime-and-strptime-behavior>`_
@@ -210,7 +215,7 @@ Series.dt.total_seconds
.. ipython:: python
# TimedeltaIndex
- s = pd.Series(pd.timedelta_range('1 minutes', periods=4))
+ s = pd.Series(pd.timedelta_range("1 minutes", periods=4))
s
s.dt.total_seconds()
@@ -225,18 +230,18 @@ A multiplied freq represents a span of corresponding length. The example below c
.. ipython:: python
- p = pd.Period('2015-08-01', freq='3D')
+ p = pd.Period("2015-08-01", freq="3D")
p
p + 1
p - 2
p.to_timestamp()
- p.to_timestamp(how='E')
+ p.to_timestamp(how="E")
You can use the multiplied freq in ``PeriodIndex`` and ``period_range``.
.. ipython:: python
- idx = pd.period_range('2015-08-01', periods=4, freq='2D')
+ idx = pd.period_range("2015-08-01", periods=4, freq="2D")
idx
idx + 1
@@ -249,14 +254,14 @@ Support for SAS XPORT files
.. code-block:: python
- df = pd.read_sas('sas_xport.xpt')
+ df = pd.read_sas("sas_xport.xpt")
It is also possible to obtain an iterator and read an XPORT file
incrementally.
.. code-block:: python
- for df in pd.read_sas('sas_xport.xpt', chunksize=10000):
+ for df in pd.read_sas("sas_xport.xpt", chunksize=10000):
do_something(df)
See the :ref:`docs <io.sas>` for more details.
@@ -270,7 +275,7 @@ Support for math functions in .eval()
.. code-block:: python
- df = pd.DataFrame({'a': np.random.randn(10)})
+ df = pd.DataFrame({"a": np.random.randn(10)})
df.eval("b = sin(a)")
The support math functions are ``sin``, ``cos``, ``exp``, ``log``, ``expm1``, ``log1p``,
@@ -292,23 +297,26 @@ See the :ref:`documentation <io.excel>` for more details.
.. ipython:: python
- df = pd.DataFrame([[1, 2, 3, 4], [5, 6, 7, 8]],
- columns=pd.MultiIndex.from_product(
- [['foo', 'bar'], ['a', 'b']], names=['col1', 'col2']),
- index=pd.MultiIndex.from_product([['j'], ['l', 'k']],
- names=['i1', 'i2']))
+ df = pd.DataFrame(
+ [[1, 2, 3, 4], [5, 6, 7, 8]],
+ columns=pd.MultiIndex.from_product(
+ [["foo", "bar"], ["a", "b"]], names=["col1", "col2"]
+ ),
+ index=pd.MultiIndex.from_product([["j"], ["l", "k"]], names=["i1", "i2"]),
+ )
df
- df.to_excel('test.xlsx')
+ df.to_excel("test.xlsx")
- df = pd.read_excel('test.xlsx', header=[0, 1], index_col=[0, 1])
+ df = pd.read_excel("test.xlsx", header=[0, 1], index_col=[0, 1])
df
.. ipython:: python
:suppress:
import os
- os.remove('test.xlsx')
+
+ os.remove("test.xlsx")
Previously, it was necessary to specify the ``has_index_names`` argument in ``read_excel``,
if the serialized data had index names. For version 0.17.0 the output format of ``to_excel``
@@ -354,14 +362,14 @@ Some East Asian countries use Unicode characters its width is corresponding to 2
.. ipython:: python
- df = pd.DataFrame({u'国籍': ['UK', u'日本'], u'名前': ['Alice', u'しのぶ']})
+ df = pd.DataFrame({u"国籍": ["UK", u"日本"], u"名前": ["Alice", u"しのぶ"]})
df;
.. image:: ../_static/option_unicode01.png
.. ipython:: python
- pd.set_option('display.unicode.east_asian_width', True)
+ pd.set_option("display.unicode.east_asian_width", True)
df;
.. image:: ../_static/option_unicode02.png
@@ -371,7 +379,7 @@ For further details, see :ref:`here <options.east_asian_width>`
.. ipython:: python
:suppress:
- pd.set_option('display.unicode.east_asian_width', False)
+ pd.set_option("display.unicode.east_asian_width", False)
.. _whatsnew_0170.enhancements.other:
@@ -391,9 +399,9 @@ Other enhancements
.. ipython:: python
- df1 = pd.DataFrame({'col1':[0,1], 'col_left':['a','b']})
- df2 = pd.DataFrame({'col1':[1,2,2],'col_right':[2,2,2]})
- pd.merge(df1, df2, on='col1', how='outer', indicator=True)
+ df1 = pd.DataFrame({"col1": [0, 1], "col_left": ["a", "b"]})
+ df2 = pd.DataFrame({"col1": [1, 2, 2], "col_right": [2, 2, 2]})
+ pd.merge(df1, df2, on="col1", how="outer", indicator=True)
For more, see the :ref:`updated docs <merging.indicator>`
@@ -407,7 +415,7 @@ Other enhancements
.. ipython:: python
- foo = pd.Series([1, 2], name='foo')
+ foo = pd.Series([1, 2], name="foo")
bar = pd.Series([1, 2])
baz = pd.Series([4, 5])
@@ -434,46 +442,43 @@ Other enhancements
.. ipython:: python
ser = pd.Series([np.nan, np.nan, 5, np.nan, np.nan, np.nan, 13])
- ser.interpolate(limit=1, limit_direction='both')
+ ser.interpolate(limit=1, limit_direction="both")
- Added a ``DataFrame.round`` method to round the values to a variable number of decimal places (:issue:`10568`).
.. ipython:: python
- df = pd.DataFrame(np.random.random([3, 3]),
- columns=['A', 'B', 'C'],
- index=['first', 'second', 'third'])
+ df = pd.DataFrame(
+ np.random.random([3, 3]),
+ columns=["A", "B", "C"],
+ index=["first", "second", "third"],
+ )
df
df.round(2)
- df.round({'A': 0, 'C': 2})
+ df.round({"A": 0, "C": 2})
- ``drop_duplicates`` and ``duplicated`` now accept a ``keep`` keyword to target first, last, and all duplicates. The ``take_last`` keyword is deprecated, see :ref:`here <whatsnew_0170.deprecations>` (:issue:`6511`, :issue:`8505`)
.. ipython:: python
- s = pd.Series(['A', 'B', 'C', 'A', 'B', 'D'])
+ s = pd.Series(["A", "B", "C", "A", "B", "D"])
s.drop_duplicates()
- s.drop_duplicates(keep='last')
+ s.drop_duplicates(keep="last")
s.drop_duplicates(keep=False)
- Reindex now has a ``tolerance`` argument that allows for finer control of :ref:`basics.limits_on_reindex_fill` (:issue:`10411`):
.. ipython:: python
- df = pd.DataFrame({'x': range(5),
- 't': pd.date_range('2000-01-01', periods=5)})
- df.reindex([0.1, 1.9, 3.5],
- method='nearest',
- tolerance=0.2)
+ df = pd.DataFrame({"x": range(5), "t": pd.date_range("2000-01-01", periods=5)})
+ df.reindex([0.1, 1.9, 3.5], method="nearest", tolerance=0.2)
When used on a ``DatetimeIndex``, ``TimedeltaIndex`` or ``PeriodIndex``, ``tolerance`` will coerced into a ``Timedelta`` if possible. This allows you to specify tolerance with a string:
.. ipython:: python
- df = df.set_index('t')
- df.reindex(pd.to_datetime(['1999-12-31']),
- method='nearest',
- tolerance='1 day')
+ df = df.set_index("t")
+ df.reindex(pd.to_datetime(["1999-12-31"]), method="nearest", tolerance="1 day")
``tolerance`` is also exposed by the lower level ``Index.get_indexer`` and ``Index.get_loc`` methods.
@@ -627,13 +632,13 @@ Of course you can coerce this as well.
.. ipython:: python
- pd.to_datetime(['2009-07-31', 'asd'], errors='coerce')
+ pd.to_datetime(["2009-07-31", "asd"], errors="coerce")
To keep the previous behavior, you can use ``errors='ignore'``:
.. ipython:: python
- pd.to_datetime(['2009-07-31', 'asd'], errors='ignore')
+ pd.to_datetime(["2009-07-31", "asd"], errors="ignore")
Furthermore, ``pd.to_timedelta`` has gained a similar API, of ``errors='raise'|'ignore'|'coerce'``, and the ``coerce`` keyword
has been deprecated in favor of ``errors='coerce'``.
@@ -667,9 +672,9 @@ New behavior:
.. ipython:: python
- pd.Timestamp('2012Q2')
- pd.Timestamp('2014')
- pd.DatetimeIndex(['2012Q2', '2014'])
+ pd.Timestamp("2012Q2")
+ pd.Timestamp("2014")
+ pd.DatetimeIndex(["2012Q2", "2014"])
.. note::
@@ -678,6 +683,7 @@ New behavior:
.. ipython:: python
import pandas.tseries.offsets as offsets
+
pd.Timestamp.now()
pd.Timestamp.now() + offsets.DateOffset(years=1)
@@ -780,8 +786,7 @@ Previous behavior:
.. ipython:: python
- df_with_missing = pd.DataFrame({'col1': [0, np.nan, 2],
- 'col2': [1, np.nan, np.nan]})
+ df_with_missing = pd.DataFrame({"col1": [0, np.nan, 2], "col2": [1, np.nan, np.nan]})
df_with_missing
@@ -806,18 +811,16 @@ New behavior:
.. ipython:: python
- df_with_missing.to_hdf('file.h5',
- 'df_with_missing',
- format='table',
- mode='w')
+ df_with_missing.to_hdf("file.h5", "df_with_missing", format="table", mode="w")
- pd.read_hdf('file.h5', 'df_with_missing')
+ pd.read_hdf("file.h5", "df_with_missing")
.. ipython:: python
:suppress:
import os
- os.remove('file.h5')
+
+ os.remove("file.h5")
See the :ref:`docs <io.hdf5>` for more details.
@@ -848,8 +851,8 @@ regular formatting as well as scientific notation, similar to how numpy's ``prec
.. ipython:: python
- pd.set_option('display.precision', 2)
- pd.DataFrame({'x': [123.456789]})
+ pd.set_option("display.precision", 2)
+ pd.DataFrame({"x": [123.456789]})
To preserve output behavior with prior versions the default value of ``display.precision`` has been reduced to ``6``
from ``7``.
@@ -857,7 +860,7 @@ from ``7``.
.. ipython:: python
:suppress:
- pd.set_option('display.precision', 6)
+ pd.set_option("display.precision", 6)
.. _whatsnew_0170.api_breaking.categorical_unique:
@@ -871,14 +874,11 @@ Changes to ``Categorical.unique``
.. ipython:: python
- cat = pd.Categorical(['C', 'A', 'B', 'C'],
- categories=['A', 'B', 'C'],
- ordered=True)
+ cat = pd.Categorical(["C", "A", "B", "C"], categories=["A", "B", "C"], ordered=True)
cat
cat.unique()
- cat = pd.Categorical(['C', 'A', 'B', 'C'],
- categories=['A', 'B', 'C'])
+ cat = pd.Categorical(["C", "A", "B", "C"], categories=["A", "B", "C"])
cat
cat.unique()
@@ -980,9 +980,11 @@ Removal of prior version deprecations/changes
.. ipython:: python
np.random.seed(1234)
- df = pd.DataFrame(np.random.randn(5, 2),
- columns=list('AB'),
- index=pd.date_range('2013-01-01', periods=5))
+ df = pd.DataFrame(
+ np.random.randn(5, 2),
+ columns=list("AB"),
+ index=pd.date_range("2013-01-01", periods=5),
+ )
df
Previously
@@ -1005,7 +1007,7 @@ Removal of prior version deprecations/changes
.. ipython:: python
- df.add(df.A, axis='index')
+ df.add(df.A, axis="index")
- Remove ``table`` keyword in ``HDFStore.put/append``, in favor of using ``format=`` (:issue:`4645`)
diff --git a/doc/source/whatsnew/v0.17.1.rst b/doc/source/whatsnew/v0.17.1.rst
index 5d15a01aee5a0..6b0a28ec47568 100644
--- a/doc/source/whatsnew/v0.17.1.rst
+++ b/doc/source/whatsnew/v0.17.1.rst
@@ -52,8 +52,8 @@ Here's a quick example:
.. ipython:: python
np.random.seed(123)
- df = pd.DataFrame(np.random.randn(10, 5), columns=list('abcde'))
- html = df.style.background_gradient(cmap='viridis', low=.5)
+ df = pd.DataFrame(np.random.randn(10, 5), columns=list("abcde"))
+ html = df.style.background_gradient(cmap="viridis", low=0.5)
We can render the HTML to get the following table.
@@ -80,14 +80,14 @@ Enhancements
.. ipython:: python
- df = pd.DataFrame({'A': ['foo'] * 1000}) # noqa: F821
- df['B'] = df['A'].astype('category')
+ df = pd.DataFrame({"A": ["foo"] * 1000}) # noqa: F821
+ df["B"] = df["A"].astype("category")
# shows the '+' as we have object dtypes
df.info()
# we have an accurate memory assessment (but can be expensive to compute this)
- df.info(memory_usage='deep')
+ df.info(memory_usage="deep")
- ``Index`` now has a ``fillna`` method (:issue:`10089`)
@@ -99,11 +99,11 @@ Enhancements
.. ipython:: python
- s = pd.Series(list('aabb')).astype('category')
+ s = pd.Series(list("aabb")).astype("category")
s
s.str.contains("a")
- date = pd.Series(pd.date_range('1/1/2015', periods=5)).astype('category')
+ date = pd.Series(pd.date_range("1/1/2015", periods=5)).astype("category")
date
date.dt.day
diff --git a/doc/source/whatsnew/v0.18.1.rst b/doc/source/whatsnew/v0.18.1.rst
index 13ed6bc38163b..3db00f686d62c 100644
--- a/doc/source/whatsnew/v0.18.1.rst
+++ b/doc/source/whatsnew/v0.18.1.rst
@@ -42,6 +42,7 @@ see :ref:`Custom Business Hour <timeseries.custombusinesshour>` (:issue:`11514`)
from pandas.tseries.offsets import CustomBusinessHour
from pandas.tseries.holiday import USFederalHolidayCalendar
+
bhour_us = CustomBusinessHour(calendar=USFederalHolidayCalendar())
Friday before MLK Day
@@ -49,6 +50,7 @@ Friday before MLK Day
.. ipython:: python
import datetime
+
dt = datetime.datetime(2014, 1, 17, 15)
dt + bhour_us
@@ -72,41 +74,42 @@ Previously you would have to do this to get a rolling window mean per-group:
.. ipython:: python
- df = pd.DataFrame({'A': [1] * 20 + [2] * 12 + [3] * 8,
- 'B': np.arange(40)})
+ df = pd.DataFrame({"A": [1] * 20 + [2] * 12 + [3] * 8, "B": np.arange(40)})
df
.. ipython:: python
- df.groupby('A').apply(lambda x: x.rolling(4).B.mean())
+ df.groupby("A").apply(lambda x: x.rolling(4).B.mean())
Now you can do:
.. ipython:: python
- df.groupby('A').rolling(4).B.mean()
+ df.groupby("A").rolling(4).B.mean()
For ``.resample(..)`` type of operations, previously you would have to:
.. ipython:: python
- df = pd.DataFrame({'date': pd.date_range(start='2016-01-01',
- periods=4,
- freq='W'),
- 'group': [1, 1, 2, 2],
- 'val': [5, 6, 7, 8]}).set_index('date')
+ df = pd.DataFrame(
+ {
+ "date": pd.date_range(start="2016-01-01", periods=4, freq="W"),
+ "group": [1, 1, 2, 2],
+ "val": [5, 6, 7, 8],
+ }
+ ).set_index("date")
df
.. ipython:: python
- df.groupby('group').apply(lambda x: x.resample('1D').ffill())
+ df.groupby("group").apply(lambda x: x.resample("1D").ffill())
Now you can do:
.. ipython:: python
- df.groupby('group').resample('1D').ffill()
+ df.groupby("group").resample("1D").ffill()
.. _whatsnew_0181.enhancements.method_chain:
@@ -129,9 +132,7 @@ arguments.
.. ipython:: python
- df = pd.DataFrame({'A': [1, 2, 3],
- 'B': [4, 5, 6],
- 'C': [7, 8, 9]})
+ df = pd.DataFrame({"A": [1, 2, 3], "B": [4, 5, 6], "C": [7, 8, 9]})
df.where(lambda x: x > 4, lambda x: x + 10)
Methods ``.loc[]``, ``.iloc[]``, ``.ix[]``
@@ -146,7 +147,7 @@ can return a valid boolean indexer or anything which is valid for these indexer'
df.loc[lambda x: x.A >= 2, lambda x: x.sum() > 10]
# callable returns list of labels
- df.loc[lambda x: [1, 2], lambda x: ['A', 'B']]
+ df.loc[lambda x: [1, 2], lambda x: ["A", "B"]]
Indexing with``[]``
"""""""""""""""""""
@@ -157,17 +158,15 @@ class and index type.
.. ipython:: python
- df[lambda x: 'A']
+ df[lambda x: "A"]
Using these methods / indexers, you can chain data selection operations
without using temporary variable.
.. ipython:: python
- bb = pd.read_csv('data/baseball.csv', index_col='id')
- (bb.groupby(['year', 'team'])
- .sum()
- .loc[lambda df: df.r > 100])
+ bb = pd.read_csv("data/baseball.csv", index_col="id")
+ (bb.groupby(["year", "team"]).sum().loc[lambda df: df.r > 100])
.. _whatsnew_0181.partial_string_indexing:
@@ -180,13 +179,13 @@ Partial string indexing now matches on ``DateTimeIndex`` when part of a ``MultiI
dft2 = pd.DataFrame(
np.random.randn(20, 1),
- columns=['A'],
- index=pd.MultiIndex.from_product([pd.date_range('20130101',
- periods=10,
- freq='12H'),
- ['a', 'b']]))
+ columns=["A"],
+ index=pd.MultiIndex.from_product(
+ [pd.date_range("20130101", periods=10, freq="12H"), ["a", "b"]]
+ ),
+ )
dft2
- dft2.loc['2013-01-05']
+ dft2.loc["2013-01-05"]
On other levels
@@ -195,7 +194,7 @@ On other levels
idx = pd.IndexSlice
dft2 = dft2.swaplevel(0, 1).sort_index()
dft2
- dft2.loc[idx[:, '2013-01-05'], :]
+ dft2.loc[idx[:, "2013-01-05"], :]
.. _whatsnew_0181.enhancements.assembling:
@@ -206,10 +205,9 @@ Assembling datetimes
.. ipython:: python
- df = pd.DataFrame({'year': [2015, 2016],
- 'month': [2, 3],
- 'day': [4, 5],
- 'hour': [2, 3]})
+ df = pd.DataFrame(
+ {"year": [2015, 2016], "month": [2, 3], "day": [4, 5], "hour": [2, 3]}
+ )
df
Assembling using the passed frame.
@@ -222,7 +220,7 @@ You can pass only the columns that you need to assemble.
.. ipython:: python
- pd.to_datetime(df[['year', 'month', 'day']])
+ pd.to_datetime(df[["year", "month", "day"]])
.. _whatsnew_0181.other:
@@ -243,7 +241,7 @@ Other enhancements
.. ipython:: python
- idx = pd.Index([1., 2., 3., 4.], dtype='float')
+ idx = pd.Index([1.0, 2.0, 3.0, 4.0], dtype="float")
# default, allow_fill=True, fill_value=None
idx.take([2, -1])
@@ -253,8 +251,8 @@ Other enhancements
.. ipython:: python
- idx = pd.Index(['a|b', 'a|c', 'b|c'])
- idx.str.get_dummies('|')
+ idx = pd.Index(["a|b", "a|c", "b|c"])
+ idx.str.get_dummies("|")
- ``pd.crosstab()`` has gained a ``normalize`` argument for normalizing frequency tables (:issue:`12569`). Examples in the updated docs :ref:`here <reshaping.crosstabulations>`.
@@ -313,8 +311,7 @@ The index in ``.groupby(..).nth()`` output is now more consistent when the ``as_
.. ipython:: python
- df = pd.DataFrame({'A': ['a', 'b', 'a'],
- 'B': [1, 2, 3]})
+ df = pd.DataFrame({"A": ["a", "b", "a"], "B": [1, 2, 3]})
df
Previous behavior:
@@ -337,16 +334,16 @@ New behavior:
.. ipython:: python
- df.groupby('A', as_index=True)['B'].nth(0)
- df.groupby('A', as_index=False)['B'].nth(0)
+ df.groupby("A", as_index=True)["B"].nth(0)
+ df.groupby("A", as_index=False)["B"].nth(0)
Furthermore, previously, a ``.groupby`` would always sort, regardless if ``sort=False`` was passed with ``.nth()``.
.. ipython:: python
np.random.seed(1234)
- df = pd.DataFrame(np.random.randn(100, 2), columns=['a', 'b'])
- df['c'] = np.random.randint(0, 4, 100)
+ df = pd.DataFrame(np.random.randn(100, 2), columns=["a", "b"])
+ df["c"] = np.random.randint(0, 4, 100)
Previous behavior:
@@ -374,8 +371,8 @@ New behavior:
.. ipython:: python
- df.groupby('c', sort=True).nth(1)
- df.groupby('c', sort=False).nth(1)
+ df.groupby("c", sort=True).nth(1)
+ df.groupby("c", sort=False).nth(1)
.. _whatsnew_0181.numpy_compatibility:
@@ -421,8 +418,9 @@ Using ``apply`` on resampling groupby operations (using a ``pd.TimeGrouper``) no
.. ipython:: python
- df = pd.DataFrame({'date': pd.to_datetime(['10/10/2000', '11/10/2000']),
- 'value': [10, 13]})
+ df = pd.DataFrame(
+ {"date": pd.to_datetime(["10/10/2000", "11/10/2000"]), "value": [10, 13]}
+ )
df
Previous behavior:
diff --git a/doc/source/whatsnew/v0.19.0.rst b/doc/source/whatsnew/v0.19.0.rst
index 6e8c4273a0550..4acf9d7181781 100644
--- a/doc/source/whatsnew/v0.19.0.rst
+++ b/doc/source/whatsnew/v0.19.0.rst
@@ -49,10 +49,8 @@ except that we match on nearest key rather than equal keys.
.. ipython:: python
- left = pd.DataFrame({'a': [1, 5, 10],
- 'left_val': ['a', 'b', 'c']})
- right = pd.DataFrame({'a': [1, 2, 3, 6, 7],
- 'right_val': [1, 2, 3, 6, 7]})
+ left = pd.DataFrame({"a": [1, 5, 10], "left_val": ["a", "b", "c"]})
+ right = pd.DataFrame({"a": [1, 2, 3, 6, 7], "right_val": [1, 2, 3, 6, 7]})
left
right
@@ -62,13 +60,13 @@ recent value otherwise.
.. ipython:: python
- pd.merge_asof(left, right, on='a')
+ pd.merge_asof(left, right, on="a")
We can also match rows ONLY with prior data, and not an exact match.
.. ipython:: python
- pd.merge_asof(left, right, on='a', allow_exact_matches=False)
+ pd.merge_asof(left, right, on="a", allow_exact_matches=False)
In a typical time-series example, we have ``trades`` and ``quotes`` and we want to ``asof-join`` them.
@@ -76,36 +74,44 @@ This also illustrates using the ``by`` parameter to group data before merging.
.. ipython:: python
- trades = pd.DataFrame({
- 'time': pd.to_datetime(['20160525 13:30:00.023',
- '20160525 13:30:00.038',
- '20160525 13:30:00.048',
- '20160525 13:30:00.048',
- '20160525 13:30:00.048']),
- 'ticker': ['MSFT', 'MSFT',
- 'GOOG', 'GOOG', 'AAPL'],
- 'price': [51.95, 51.95,
- 720.77, 720.92, 98.00],
- 'quantity': [75, 155,
- 100, 100, 100]},
- columns=['time', 'ticker', 'price', 'quantity'])
-
- quotes = pd.DataFrame({
- 'time': pd.to_datetime(['20160525 13:30:00.023',
- '20160525 13:30:00.023',
- '20160525 13:30:00.030',
- '20160525 13:30:00.041',
- '20160525 13:30:00.048',
- '20160525 13:30:00.049',
- '20160525 13:30:00.072',
- '20160525 13:30:00.075']),
- 'ticker': ['GOOG', 'MSFT', 'MSFT', 'MSFT',
- 'GOOG', 'AAPL', 'GOOG', 'MSFT'],
- 'bid': [720.50, 51.95, 51.97, 51.99,
- 720.50, 97.99, 720.50, 52.01],
- 'ask': [720.93, 51.96, 51.98, 52.00,
- 720.93, 98.01, 720.88, 52.03]},
- columns=['time', 'ticker', 'bid', 'ask'])
+ trades = pd.DataFrame(
+ {
+ "time": pd.to_datetime(
+ [
+ "20160525 13:30:00.023",
+ "20160525 13:30:00.038",
+ "20160525 13:30:00.048",
+ "20160525 13:30:00.048",
+ "20160525 13:30:00.048",
+ ]
+ ),
+ "ticker": ["MSFT", "MSFT", "GOOG", "GOOG", "AAPL"],
+ "price": [51.95, 51.95, 720.77, 720.92, 98.00],
+ "quantity": [75, 155, 100, 100, 100],
+ },
+ columns=["time", "ticker", "price", "quantity"],
+ )
+
+ quotes = pd.DataFrame(
+ {
+ "time": pd.to_datetime(
+ [
+ "20160525 13:30:00.023",
+ "20160525 13:30:00.023",
+ "20160525 13:30:00.030",
+ "20160525 13:30:00.041",
+ "20160525 13:30:00.048",
+ "20160525 13:30:00.049",
+ "20160525 13:30:00.072",
+ "20160525 13:30:00.075",
+ ]
+ ),
+ "ticker": ["GOOG", "MSFT", "MSFT", "MSFT", "GOOG", "AAPL", "GOOG", "MSFT"],
+ "bid": [720.50, 51.95, 51.97, 51.99, 720.50, 97.99, 720.50, 52.01],
+ "ask": [720.93, 51.96, 51.98, 52.00, 720.93, 98.01, 720.88, 52.03],
+ },
+ columns=["time", "ticker", "bid", "ask"],
+ )
.. ipython:: python
@@ -118,9 +124,7 @@ that forward filling happens automatically taking the most recent non-NaN value.
.. ipython:: python
- pd.merge_asof(trades, quotes,
- on='time',
- by='ticker')
+ pd.merge_asof(trades, quotes, on="time", by="ticker")
This returns a merged DataFrame with the entries in the same order as the original left
passed DataFrame (``trades`` in this case), with the fields of the ``quotes`` merged.
@@ -135,9 +139,10 @@ See the full documentation :ref:`here <stats.moments.ts>`.
.. ipython:: python
- dft = pd.DataFrame({'B': [0, 1, 2, np.nan, 4]},
- index=pd.date_range('20130101 09:00:00',
- periods=5, freq='s'))
+ dft = pd.DataFrame(
+ {"B": [0, 1, 2, np.nan, 4]},
+ index=pd.date_range("20130101 09:00:00", periods=5, freq="s"),
+ )
dft
This is a regular frequency index. Using an integer window parameter works to roll along the window frequency.
@@ -151,20 +156,26 @@ Specifying an offset allows a more intuitive specification of the rolling freque
.. ipython:: python
- dft.rolling('2s').sum()
+ dft.rolling("2s").sum()
Using a non-regular, but still monotonic index, rolling with an integer window does not impart any special calculation.
.. ipython:: python
- dft = pd.DataFrame({'B': [0, 1, 2, np.nan, 4]},
- index=pd.Index([pd.Timestamp('20130101 09:00:00'),
- pd.Timestamp('20130101 09:00:02'),
- pd.Timestamp('20130101 09:00:03'),
- pd.Timestamp('20130101 09:00:05'),
- pd.Timestamp('20130101 09:00:06')],
- name='foo'))
+ dft = pd.DataFrame(
+ {"B": [0, 1, 2, np.nan, 4]},
+ index=pd.Index(
+ [
+ pd.Timestamp("20130101 09:00:00"),
+ pd.Timestamp("20130101 09:00:02"),
+ pd.Timestamp("20130101 09:00:03"),
+ pd.Timestamp("20130101 09:00:05"),
+ pd.Timestamp("20130101 09:00:06"),
+ ],
+ name="foo",
+ ),
+ )
dft
dft.rolling(2).sum()
@@ -173,7 +184,7 @@ Using the time-specification generates variable windows for this sparse data.
.. ipython:: python
- dft.rolling('2s').sum()
+ dft.rolling("2s").sum()
Furthermore, we now allow an optional ``on`` parameter to specify a column (rather than the
default of the index) in a DataFrame.
@@ -182,7 +193,7 @@ default of the index) in a DataFrame.
dft = dft.reset_index()
dft
- dft.rolling('2s', on='foo').sum()
+ dft.rolling("2s", on="foo").sum()
.. _whatsnew_0190.enhancements.read_csv_dupe_col_names_support:
@@ -199,8 +210,8 @@ they are in the file or passed in as the ``names`` parameter (:issue:`7160`, :is
.. ipython:: python
- data = '0,1,2\n3,4,5'
- names = ['a', 'b', 'a']
+ data = "0,1,2\n3,4,5"
+ names = ["a", "b", "a"]
**Previous behavior**:
@@ -235,17 +246,22 @@ converting to ``Categorical`` after parsing. See the io :ref:`docs here <io.cat
.. ipython:: python
- data = 'col1,col2,col3\na,b,1\na,b,2\nc,d,3'
+ data = """
+ col1,col2,col3
+ a,b,1
+ a,b,2
+ c,d,3
+ """
pd.read_csv(StringIO(data))
pd.read_csv(StringIO(data)).dtypes
- pd.read_csv(StringIO(data), dtype='category').dtypes
+ pd.read_csv(StringIO(data), dtype="category").dtypes
Individual columns can be parsed as a ``Categorical`` using a dict specification
.. ipython:: python
- pd.read_csv(StringIO(data), dtype={'col1': 'category'}).dtypes
+ pd.read_csv(StringIO(data), dtype={"col1": "category"}).dtypes
.. note::
@@ -256,11 +272,11 @@ Individual columns can be parsed as a ``Categorical`` using a dict specification
.. ipython:: python
- df = pd.read_csv(StringIO(data), dtype='category')
+ df = pd.read_csv(StringIO(data), dtype="category")
df.dtypes
- df['col3']
- df['col3'].cat.categories = pd.to_numeric(df['col3'].cat.categories)
- df['col3']
+ df["col3"]
+ df["col3"].cat.categories = pd.to_numeric(df["col3"].cat.categories)
+ df["col3"]
.. _whatsnew_0190.enhancements.union_categoricals:
@@ -272,6 +288,7 @@ Categorical concatenation
.. ipython:: python
from pandas.api.types import union_categoricals
+
a = pd.Categorical(["b", "c"])
b = pd.Categorical(["a", "b"])
union_categoricals([a, b])
@@ -280,8 +297,8 @@ Categorical concatenation
.. ipython:: python
- s1 = pd.Series(['a', 'b'], dtype='category')
- s2 = pd.Series(['b', 'c'], dtype='category')
+ s1 = pd.Series(["a", "b"], dtype="category")
+ s2 = pd.Series(["b", "c"], dtype="category")
**Previous behavior**:
@@ -313,25 +330,25 @@ These provide date offsets anchored (by default) to the 15th and end of month, a
.. ipython:: python
- pd.Timestamp('2016-01-01') + SemiMonthEnd()
+ pd.Timestamp("2016-01-01") + SemiMonthEnd()
- pd.date_range('2015-01-01', freq='SM', periods=4)
+ pd.date_range("2015-01-01", freq="SM", periods=4)
**SemiMonthBegin**:
.. ipython:: python
- pd.Timestamp('2016-01-01') + SemiMonthBegin()
+ pd.Timestamp("2016-01-01") + SemiMonthBegin()
- pd.date_range('2015-01-01', freq='SMS', periods=4)
+ pd.date_range("2015-01-01", freq="SMS", periods=4)
Using the anchoring suffix, you can also specify the day of month to use instead of the 15th.
.. ipython:: python
- pd.date_range('2015-01-01', freq='SMS-16', periods=4)
+ pd.date_range("2015-01-01", freq="SMS-16", periods=4)
- pd.date_range('2015-01-01', freq='SM-14', periods=4)
+ pd.date_range("2015-01-01", freq="SM-14", periods=4)
.. _whatsnew_0190.enhancements.index:
@@ -344,7 +361,7 @@ The following methods and options are added to ``Index``, to be more consistent
.. ipython:: python
- idx = pd.Index(['a', 'b', 'c'])
+ idx = pd.Index(["a", "b", "c"])
idx.where([True, False, True])
@@ -360,11 +377,10 @@ For ``MultiIndex``, values are dropped if any level is missing by default. Speci
.. ipython:: python
- midx = pd.MultiIndex.from_arrays([[1, 2, np.nan, 4],
- [1, 2, np.nan, np.nan]])
+ midx = pd.MultiIndex.from_arrays([[1, 2, np.nan, 4], [1, 2, np.nan, np.nan]])
midx
midx.dropna()
- midx.dropna(how='all')
+ midx.dropna(how="all")
``Index`` now supports ``.str.extractall()`` which returns a ``DataFrame``, see the :ref:`docs here <text.extractall>` (:issue:`10008`, :issue:`13156`)
@@ -415,7 +431,7 @@ The ``pd.get_dummies`` function now returns dummy-encoded columns as small integ
.. ipython:: python
- pd.get_dummies(['a', 'b', 'a', 'c']).dtypes
+ pd.get_dummies(["a", "b", "a", "c"]).dtypes
.. _whatsnew_0190.enhancements.to_numeric_downcast:
@@ -427,9 +443,9 @@ Downcast values to smallest possible dtype in ``to_numeric``
.. ipython:: python
- s = ['1', 2, 3]
- pd.to_numeric(s, downcast='unsigned')
- pd.to_numeric(s, downcast='integer')
+ s = ["1", 2, 3]
+ pd.to_numeric(s, downcast="unsigned")
+ pd.to_numeric(s, downcast="integer")
.. _whatsnew_0190.dev_api:
@@ -447,7 +463,8 @@ The following are now part of this API:
import pprint
from pandas.api import types
- funcs = [f for f in dir(types) if not f.startswith('_')]
+
+ funcs = [f for f in dir(types) if not f.startswith("_")]
pprint.pprint(funcs)
.. note::
@@ -472,16 +489,16 @@ Other enhancements
.. ipython:: python
- df = pd.DataFrame({'date': pd.date_range('2015-01-01', freq='W', periods=5),
- 'a': np.arange(5)},
- index=pd.MultiIndex.from_arrays([[1, 2, 3, 4, 5],
- pd.date_range('2015-01-01',
- freq='W',
- periods=5)
- ], names=['v', 'd']))
+ df = pd.DataFrame(
+ {"date": pd.date_range("2015-01-01", freq="W", periods=5), "a": np.arange(5)},
+ index=pd.MultiIndex.from_arrays(
+ [[1, 2, 3, 4, 5], pd.date_range("2015-01-01", freq="W", periods=5)],
+ names=["v", "d"],
+ ),
+ )
df
- df.resample('M', on='date').sum()
- df.resample('M', level='d').sum()
+ df.resample("M", on="date").sum()
+ df.resample("M", level="d").sum()
- The ``.get_credentials()`` method of ``GbqConnector`` can now first try to fetch `the application default credentials <https://developers.google.com/identity/protocols/application-default-credentials>`__. See the docs for more details (:issue:`13577`).
- The ``.tz_localize()`` method of ``DatetimeIndex`` and ``Timestamp`` has gained the ``errors`` keyword, so you can potentially coerce nonexistent timestamps to ``NaT``. The default behavior remains to raising a ``NonExistentTimeError`` (:issue:`13057`)
@@ -507,10 +524,9 @@ Other enhancements
.. ipython:: python
- df = pd.DataFrame({'A': [2, 7], 'B': [3, 5], 'C': [4, 8]},
- index=['row1', 'row2'])
+ df = pd.DataFrame({"A": [2, 7], "B": [3, 5], "C": [4, 8]}, index=["row1", "row2"])
df
- df.sort_values(by='row2', axis=1)
+ df.sort_values(by="row2", axis=1)
- Added documentation to :ref:`I/O<io.dtypes>` regarding the perils of reading in columns with mixed dtypes and how to handle it (:issue:`13746`)
- :meth:`~DataFrame.to_html` now has a ``border`` argument to control the value in the opening ``<table>`` tag. The default is the value of the ``html.border`` option, which defaults to 1. This also affects the notebook HTML repr, but since Jupyter's CSS includes a border-width attribute, the visual effect is the same. (:issue:`11563`).
@@ -583,12 +599,12 @@ Arithmetic operators align both ``index`` (no changes).
.. ipython:: python
- s1 = pd.Series([1, 2, 3], index=list('ABC'))
- s2 = pd.Series([2, 2, 2], index=list('ABD'))
+ s1 = pd.Series([1, 2, 3], index=list("ABC"))
+ s2 = pd.Series([2, 2, 2], index=list("ABD"))
s1 + s2
- df1 = pd.DataFrame([1, 2, 3], index=list('ABC'))
- df2 = pd.DataFrame([2, 2, 2], index=list('ABD'))
+ df1 = pd.DataFrame([1, 2, 3], index=list("ABC"))
+ df2 = pd.DataFrame([2, 2, 2], index=list("ABD"))
df1 + df2
Comparison operators
@@ -661,8 +677,8 @@ Logical operators align both ``.index`` of left and right hand side.
.. ipython:: python
- s1 = pd.Series([True, False, True], index=list('ABC'))
- s2 = pd.Series([True, True, True], index=list('ABD'))
+ s1 = pd.Series([True, False, True], index=list("ABC"))
+ s2 = pd.Series([True, True, True], index=list("ABD"))
s1 & s2
.. note::
@@ -679,8 +695,8 @@ Logical operators align both ``.index`` of left and right hand side.
.. ipython:: python
- df1 = pd.DataFrame([True, False, True], index=list('ABC'))
- df2 = pd.DataFrame([True, True, True], index=list('ABD'))
+ df1 = pd.DataFrame([True, False, True], index=list("ABC"))
+ df2 = pd.DataFrame([True, True, True], index=list("ABD"))
df1 & df2
Flexible comparison methods
@@ -691,8 +707,8 @@ which has the different ``index``.
.. ipython:: python
- s1 = pd.Series([1, 2, 3], index=['a', 'b', 'c'])
- s2 = pd.Series([2, 2, 2], index=['b', 'c', 'd'])
+ s1 = pd.Series([1, 2, 3], index=["a", "b", "c"])
+ s2 = pd.Series([2, 2, 2], index=["b", "c", "d"])
s1.eq(s2)
s1.ge(s2)
@@ -749,7 +765,7 @@ This will now convert integers/floats with the default unit of ``ns``.
.. ipython:: python
- pd.to_datetime([1, 'foo'], errors='coerce')
+ pd.to_datetime([1, "foo"], errors="coerce")
Bug fixes related to ``.to_datetime()``:
@@ -768,9 +784,9 @@ Merging will now preserve the dtype of the join keys (:issue:`8596`)
.. ipython:: python
- df1 = pd.DataFrame({'key': [1], 'v1': [10]})
+ df1 = pd.DataFrame({"key": [1], "v1": [10]})
df1
- df2 = pd.DataFrame({'key': [1, 2], 'v1': [20, 30]})
+ df2 = pd.DataFrame({"key": [1, 2], "v1": [20, 30]})
df2
**Previous behavior**:
@@ -796,16 +812,16 @@ We are able to preserve the join keys
.. ipython:: python
- pd.merge(df1, df2, how='outer')
- pd.merge(df1, df2, how='outer').dtypes
+ pd.merge(df1, df2, how="outer")
+ pd.merge(df1, df2, how="outer").dtypes
Of course if you have missing values that are introduced, then the
resulting dtype will be upcast, which is unchanged from previous.
.. ipython:: python
- pd.merge(df1, df2, how='outer', on='key')
- pd.merge(df1, df2, how='outer', on='key').dtypes
+ pd.merge(df1, df2, how="outer", on="key")
+ pd.merge(df1, df2, how="outer", on="key").dtypes
.. _whatsnew_0190.api.describe:
@@ -889,7 +905,7 @@ As a consequence of this change, ``PeriodIndex`` no longer has an integer dtype:
.. ipython:: python
- pi = pd.PeriodIndex(['2016-08-01'], freq='D')
+ pi = pd.PeriodIndex(["2016-08-01"], freq="D")
pi
pd.api.types.is_integer_dtype(pi)
pd.api.types.is_period_dtype(pi)
@@ -916,7 +932,7 @@ These result in ``pd.NaT`` without providing ``freq`` option.
.. ipython:: python
- pd.Period('NaT')
+ pd.Period("NaT")
pd.Period(None)
@@ -955,7 +971,7 @@ of integers (:issue:`13988`).
.. ipython:: python
- pi = pd.PeriodIndex(['2011-01', '2011-02'], freq='M')
+ pi = pd.PeriodIndex(["2011-01", "2011-02"], freq="M")
pi.values
@@ -985,7 +1001,7 @@ Previous behavior:
.. ipython:: python
- pd.Index(['a', 'b']) + pd.Index(['a', 'c'])
+ pd.Index(["a", "b"]) + pd.Index(["a", "c"])
Note that numeric Index objects already performed element-wise operations.
For example, the behavior of adding two integer Indexes is unchanged.
@@ -1011,8 +1027,10 @@ DatetimeIndex objects resulting in a TimedeltaIndex:
.. ipython:: python
- (pd.DatetimeIndex(['2016-01-01', '2016-01-02'])
- - pd.DatetimeIndex(['2016-01-02', '2016-01-03']))
+ (
+ pd.DatetimeIndex(["2016-01-01", "2016-01-02"])
+ - pd.DatetimeIndex(["2016-01-02", "2016-01-03"])
+ )
.. _whatsnew_0190.api.difference:
@@ -1073,8 +1091,7 @@ Previously, most ``Index`` classes returned ``np.ndarray``, and ``DatetimeIndex`
.. ipython:: python
pd.Index([1, 2, 3]).unique()
- pd.DatetimeIndex(['2011-01-01', '2011-01-02', '2011-01-03'],
- tz='Asia/Tokyo').unique()
+ pd.DatetimeIndex(["2011-01-01", "2011-01-02", "2011-01-03"], tz="Asia/Tokyo").unique()
.. _whatsnew_0190.api.multiindex:
@@ -1086,8 +1103,8 @@ in ``MultiIndex`` levels (:issue:`13743`, :issue:`13854`).
.. ipython:: python
- cat = pd.Categorical(['a', 'b'], categories=list("bac"))
- lvl1 = ['foo', 'bar']
+ cat = pd.Categorical(["a", "b"], categories=list("bac"))
+ lvl1 = ["foo", "bar"]
midx = pd.MultiIndex.from_arrays([cat, lvl1])
midx
@@ -1113,9 +1130,9 @@ As a consequence, ``groupby`` and ``set_index`` also preserve categorical dtypes
.. ipython:: python
- df = pd.DataFrame({'A': [0, 1], 'B': [10, 11], 'C': cat})
- df_grouped = df.groupby(by=['A', 'C']).first()
- df_set_idx = df.set_index(['A', 'C'])
+ df = pd.DataFrame({"A": [0, 1], "B": [10, 11], "C": cat})
+ df_grouped = df.groupby(by=["A", "C"]).first()
+ df_set_idx = df.set_index(["A", "C"])
**Previous behavior**:
@@ -1163,7 +1180,7 @@ the result of calling :func:`read_csv` without the ``chunksize=`` argument
.. ipython:: python
- data = 'A,B\n0,1\n2,3\n4,5\n6,7'
+ data = "A,B\n0,1\n2,3\n4,5\n6,7"
**Previous behavior**:
@@ -1248,7 +1265,7 @@ Operators now preserve dtypes
.. code-block:: python
- s = pd.SparseSeries([1., 0., 2., 0.], fill_value=0)
+ s = pd.SparseSeries([1.0, 0.0, 2.0, 0.0], fill_value=0)
s
s.astype(np.int64)
diff --git a/doc/source/whatsnew/v0.19.1.rst b/doc/source/whatsnew/v0.19.1.rst
index f8b60f457b33f..6ff3fb6900a99 100644
--- a/doc/source/whatsnew/v0.19.1.rst
+++ b/doc/source/whatsnew/v0.19.1.rst
@@ -8,7 +8,7 @@ Version 0.19.1 (November 3, 2016)
.. ipython:: python
:suppress:
- from pandas import * # noqa F401, F403
+ from pandas import * # noqa F401, F403
This is a minor bug-fix release from 0.19.0 and includes some small regression fixes,
diff --git a/doc/source/whatsnew/v0.19.2.rst b/doc/source/whatsnew/v0.19.2.rst
index 924c95f21ceff..bba89d78be869 100644
--- a/doc/source/whatsnew/v0.19.2.rst
+++ b/doc/source/whatsnew/v0.19.2.rst
@@ -8,7 +8,7 @@ Version 0.19.2 (December 24, 2016)
.. ipython:: python
:suppress:
- from pandas import * # noqa F401, F403
+ from pandas import * # noqa F401, F403
This is a minor bug-fix release in the 0.19.x series and includes some small regression fixes,
diff --git a/doc/source/whatsnew/v0.20.2.rst b/doc/source/whatsnew/v0.20.2.rst
index 7f84c6b3f17bd..430a39d2d2e97 100644
--- a/doc/source/whatsnew/v0.20.2.rst
+++ b/doc/source/whatsnew/v0.20.2.rst
@@ -8,7 +8,7 @@ Version 0.20.2 (June 4, 2017)
.. ipython:: python
:suppress:
- from pandas import * # noqa F401, F403
+ from pandas import * # noqa F401, F403
This is a minor bug-fix release in the 0.20.x series and includes some small regression fixes,
diff --git a/doc/source/whatsnew/v0.20.3.rst b/doc/source/whatsnew/v0.20.3.rst
index 888d0048ca9f3..ff28f6830783e 100644
--- a/doc/source/whatsnew/v0.20.3.rst
+++ b/doc/source/whatsnew/v0.20.3.rst
@@ -8,7 +8,7 @@ Version 0.20.3 (July 7, 2017)
.. ipython:: python
:suppress:
- from pandas import * # noqa F401, F403
+ from pandas import * # noqa F401, F403
This is a minor bug-fix release in the 0.20.x series and includes some small regression fixes
diff --git a/doc/source/whatsnew/v0.21.1.rst b/doc/source/whatsnew/v0.21.1.rst
index f930dfac869cd..fc4391bdba975 100644
--- a/doc/source/whatsnew/v0.21.1.rst
+++ b/doc/source/whatsnew/v0.21.1.rst
@@ -8,7 +8,7 @@ Version 0.21.1 (December 12, 2017)
.. ipython:: python
:suppress:
- from pandas import * # noqa F401, F403
+ from pandas import * # noqa F401, F403
This is a minor bug-fix release in the 0.21.x series and includes some small regression fixes,
diff --git a/doc/source/whatsnew/v0.22.0.rst b/doc/source/whatsnew/v0.22.0.rst
index 66d3ab3305565..1ee6db47a4d8e 100644
--- a/doc/source/whatsnew/v0.22.0.rst
+++ b/doc/source/whatsnew/v0.22.0.rst
@@ -8,7 +8,7 @@ Version 0.22.0 (December 29, 2017)
.. ipython:: python
:suppress:
- from pandas import * # noqa F401, F403
+ from pandas import * # noqa F401, F403
This is a major release from 0.21.1 and includes a single, API-breaking change.
@@ -119,7 +119,7 @@ instead of ``NaN``.
.. ipython:: python
- grouper = pd.Categorical(['a', 'a'], categories=['a', 'b'])
+ grouper = pd.Categorical(["a", "a"], categories=["a", "b"])
pd.Series([1, 2]).groupby(grouper).sum()
To restore the 0.21 behavior of returning ``NaN`` for unobserved groups,
@@ -159,15 +159,14 @@ sum and ``1`` for product.
.. ipython:: python
- s = pd.Series([1, 1, np.nan, np.nan],
- index=pd.date_range('2017', periods=4))
- s.resample('2d').sum()
+ s = pd.Series([1, 1, np.nan, np.nan], index=pd.date_range("2017", periods=4))
+ s.resample("2d").sum()
To restore the 0.21 behavior of returning ``NaN``, use ``min_count>=1``.
.. ipython:: python
- s.resample('2d').sum(min_count=1)
+ s.resample("2d").sum(min_count=1)
In particular, upsampling and taking the sum or product is affected, as
upsampling introduces missing values even if the original series was
@@ -190,7 +189,7 @@ entirely valid.
.. ipython:: python
- idx = pd.DatetimeIndex(['2017-01-01', '2017-01-02'])
+ idx = pd.DatetimeIndex(["2017-01-01", "2017-01-02"])
pd.Series([1, 2], index=idx).resample("12H").sum()
Once again, the ``min_count`` keyword is available to restore the 0.21 behavior.
diff --git a/doc/source/whatsnew/v0.5.0.rst b/doc/source/whatsnew/v0.5.0.rst
index 7ccb141260f18..7447a10fa1d6b 100644
--- a/doc/source/whatsnew/v0.5.0.rst
+++ b/doc/source/whatsnew/v0.5.0.rst
@@ -9,7 +9,7 @@ Version 0.5.0 (October 24, 2011)
.. ipython:: python
:suppress:
- from pandas import * # noqa F401, F403
+ from pandas import * # noqa F401, F403
New features
diff --git a/doc/source/whatsnew/v0.6.0.rst b/doc/source/whatsnew/v0.6.0.rst
index 1cb9dcbe159aa..8ff688eaa91e7 100644
--- a/doc/source/whatsnew/v0.6.0.rst
+++ b/doc/source/whatsnew/v0.6.0.rst
@@ -8,7 +8,7 @@ Version 0.6.0 (November 25, 2011)
.. ipython:: python
:suppress:
- from pandas import * # noqa F401, F403
+ from pandas import * # noqa F401, F403
New features
diff --git a/doc/source/whatsnew/v0.7.3.rst b/doc/source/whatsnew/v0.7.3.rst
index 5ed48c0d8d6d9..4ca31baf560bb 100644
--- a/doc/source/whatsnew/v0.7.3.rst
+++ b/doc/source/whatsnew/v0.7.3.rst
@@ -23,7 +23,8 @@ New features
.. code-block:: python
from pandas.tools.plotting import scatter_matrix
- scatter_matrix(df, alpha=0.2) # noqa F821
+
+ scatter_matrix(df, alpha=0.2) # noqa F821
- Add ``stacked`` argument to Series and DataFrame's ``plot`` method for
@@ -31,12 +32,12 @@ New features
.. code-block:: python
- df.plot(kind='bar', stacked=True) # noqa F821
+ df.plot(kind="bar", stacked=True) # noqa F821
.. code-block:: python
- df.plot(kind='barh', stacked=True) # noqa F821
+ df.plot(kind="barh", stacked=True) # noqa F821
- Add log x and y :ref:`scaling options <visualization.basic>` to
@@ -52,9 +53,9 @@ Reverted some changes to how NA values (represented typically as ``NaN`` or
.. ipython:: python
- series = pd.Series(['Steve', np.nan, 'Joe'])
- series == 'Steve'
- series != 'Steve'
+ series = pd.Series(["Steve", np.nan, "Joe"])
+ series == "Steve"
+ series != "Steve"
In comparisons, NA / NaN will always come through as ``False`` except with
``!=`` which is ``True``. *Be very careful* with boolean arithmetic, especially
@@ -63,7 +64,7 @@ filter into boolean array operations if you are worried about this:
.. ipython:: python
- mask = series == 'Steve'
+ mask = series == "Steve"
series[mask & series.notnull()]
While propagating NA in comparisons may seem like the right behavior to some
@@ -82,15 +83,18 @@ Series, to be more consistent with the ``groupby`` behavior with DataFrame:
.. ipython:: python
:okwarning:
- df = pd.DataFrame({'A': ['foo', 'bar', 'foo', 'bar',
- 'foo', 'bar', 'foo', 'foo'],
- 'B': ['one', 'one', 'two', 'three',
- 'two', 'two', 'one', 'three'],
- 'C': np.random.randn(8), 'D': np.random.randn(8)})
+ df = pd.DataFrame(
+ {
+ "A": ["foo", "bar", "foo", "bar", "foo", "bar", "foo", "foo"],
+ "B": ["one", "one", "two", "three", "two", "two", "one", "three"],
+ "C": np.random.randn(8),
+ "D": np.random.randn(8),
+ }
+ )
df
- grouped = df.groupby('A')['C']
+ grouped = df.groupby("A")["C"]
grouped.describe()
- grouped.apply(lambda x: x.sort_values()[-2:]) # top 2 values
+ grouped.apply(lambda x: x.sort_values()[-2:]) # top 2 values
.. _whatsnew_0.7.3.contributors:
diff --git a/doc/source/whatsnew/v0.8.0.rst b/doc/source/whatsnew/v0.8.0.rst
index 9bba68d8c331d..8a84630a28b34 100644
--- a/doc/source/whatsnew/v0.8.0.rst
+++ b/doc/source/whatsnew/v0.8.0.rst
@@ -159,7 +159,8 @@ New plotting methods
.. code-block:: python
import pandas as pd
- fx = pd.read_pickle('data/fx_prices')
+
+ fx = pd.read_pickle("data/fx_prices")
import matplotlib.pyplot as plt
``Series.plot`` now supports a ``secondary_y`` option:
@@ -168,20 +169,19 @@ New plotting methods
plt.figure()
- fx['FR'].plot(style='g')
+ fx["FR"].plot(style="g")
- fx['IT'].plot(style='k--', secondary_y=True)
+ fx["IT"].plot(style="k--", secondary_y=True)
Vytautas Jancauskas, the 2012 GSOC participant, has added many new plot
types. For example, ``'kde'`` is a new option:
.. ipython:: python
- s = pd.Series(np.concatenate((np.random.randn(1000),
- np.random.randn(1000) * 0.5 + 3)))
+ s = pd.Series(np.concatenate((np.random.randn(1000), np.random.randn(1000) * 0.5 + 3)))
plt.figure()
s.hist(density=True, alpha=0.2)
- s.plot(kind='kde')
+ s.plot(kind="kde")
See :ref:`the plotting page <visualization.other>` for much more.
@@ -205,7 +205,8 @@ with code using scalar values because you are handing control over to NumPy:
.. ipython:: python
import datetime
- rng = pd.date_range('1/1/2000', periods=10)
+
+ rng = pd.date_range("1/1/2000", periods=10)
rng[5]
isinstance(rng[5], datetime.datetime)
rng_asarray = np.asarray(rng)
@@ -251,7 +252,7 @@ type. See `matplotlib documentation
.. ipython:: python
- rng = pd.date_range('1/1/2000', periods=10)
+ rng = pd.date_range("1/1/2000", periods=10)
rng
np.asarray(rng)
converted = np.asarray(rng, dtype=object)
diff --git a/doc/source/whatsnew/v0.9.0.rst b/doc/source/whatsnew/v0.9.0.rst
index 5172b1989765d..44ded51e31fda 100644
--- a/doc/source/whatsnew/v0.9.0.rst
+++ b/doc/source/whatsnew/v0.9.0.rst
@@ -41,9 +41,11 @@ API changes
import io
- data = ('0,0,1\n'
- '1,1,0\n'
- '0,1,0')
+ data = """
+ 0,0,1
+ 1,1,0
+ 0,1,0
+ """
df = pd.read_csv(io.StringIO(data), header=None)
df
@@ -59,7 +61,7 @@ API changes
s1 = pd.Series([1, 2, 3])
s1
- s2 = pd.Series(s1, index=['foo', 'bar', 'baz'])
+ s2 = pd.Series(s1, index=["foo", "bar", "baz"])
s2
- Deprecated ``day_of_year`` API removed from PeriodIndex, use ``dayofyear``
| - [x] xref #36777
- [ ] tests added / passed
- [x] passes `black pandas`
- [ ] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [ ] whatsnew entry
| https://api.github.com/repos/pandas-dev/pandas/pulls/36857 | 2020-10-04T10:13:38Z | 2020-10-05T12:58:10Z | 2020-10-05T12:58:10Z | 2020-10-06T08:15:18Z |
DOC: blacken-docs doc/source/ecosystem.rst | diff --git a/doc/source/ecosystem.rst b/doc/source/ecosystem.rst
index ed6ce7e9759b6..4086f64817568 100644
--- a/doc/source/ecosystem.rst
+++ b/doc/source/ecosystem.rst
@@ -170,7 +170,9 @@ invoked with the following command
.. code:: python
- import dtale; dtale.show(df)
+ import dtale
+
+ dtale.show(df)
D-Tale integrates seamlessly with jupyter notebooks, python terminals, kaggle
& Google Colab. Here are some demos of the `grid <http://alphatechadmin.pythonanywhere.com/>`__
| - [x] xref #36777
- [ ] tests added / passed
- [x] passes `black pandas`
- [ ] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [ ] whatsnew entry
| https://api.github.com/repos/pandas-dev/pandas/pulls/36856 | 2020-10-04T09:41:10Z | 2020-10-04T11:45:43Z | 2020-10-04T11:45:43Z | 2020-10-04T11:45:52Z |
TYP: update setup.cfg | diff --git a/setup.cfg b/setup.cfg
index 73986f692b6cd..48948e0d754c9 100644
--- a/setup.cfg
+++ b/setup.cfg
@@ -207,9 +207,6 @@ check_untyped_defs=False
[mypy-pandas.core.indexes.multi]
check_untyped_defs=False
-[mypy-pandas.core.indexes.period]
-check_untyped_defs=False
-
[mypy-pandas.core.indexes.range]
check_untyped_defs=False
@@ -243,9 +240,6 @@ check_untyped_defs=False
[mypy-pandas.core.series]
check_untyped_defs=False
-[mypy-pandas.core.strings]
-check_untyped_defs=False
-
[mypy-pandas.core.window.common]
check_untyped_defs=False
| https://api.github.com/repos/pandas-dev/pandas/pulls/36854 | 2020-10-04T08:35:04Z | 2020-10-04T22:39:46Z | 2020-10-04T22:39:46Z | 2020-10-05T09:36:00Z | |
REF/CLN: pandas/io/parsers.py | diff --git a/pandas/io/parsers.py b/pandas/io/parsers.py
index 63c3f9899d915..866854ff1fe33 100644
--- a/pandas/io/parsers.py
+++ b/pandas/io/parsers.py
@@ -606,7 +606,7 @@ def read_csv(
del kwds["filepath_or_buffer"]
del kwds["sep"]
- kwds_defaults = _check_defaults_read(
+ kwds_defaults = _refine_defaults_read(
dialect, delimiter, delim_whitespace, engine, sep, defaults={"delimiter": ","}
)
kwds.update(kwds_defaults)
@@ -684,7 +684,7 @@ def read_table(
del kwds["filepath_or_buffer"]
del kwds["sep"]
- kwds_defaults = _check_defaults_read(
+ kwds_defaults = _refine_defaults_read(
dialect, delimiter, delim_whitespace, engine, sep, defaults={"delimiter": "\t"}
)
kwds.update(kwds_defaults)
@@ -789,63 +789,16 @@ def __init__(self, f, engine=None, **kwds):
else:
engine = "python"
engine_specified = False
-
+ self.engine = engine
self._engine_specified = kwds.get("engine_specified", engine_specified)
+ _validate_skipfooter(kwds)
+
if kwds.get("dialect") is not None:
dialect = kwds["dialect"]
if dialect in csv.list_dialects():
dialect = csv.get_dialect(dialect)
-
- # Any valid dialect should have these attributes.
- # If any are missing, we will raise automatically.
- for param in (
- "delimiter",
- "doublequote",
- "escapechar",
- "skipinitialspace",
- "quotechar",
- "quoting",
- ):
- try:
- dialect_val = getattr(dialect, param)
- except AttributeError as err:
- raise ValueError(
- f"Invalid dialect {kwds['dialect']} provided"
- ) from err
- parser_default = _parser_defaults[param]
- provided = kwds.get(param, parser_default)
-
- # Messages for conflicting values between the dialect
- # instance and the actual parameters provided.
- conflict_msgs = []
-
- # Don't warn if the default parameter was passed in,
- # even if it conflicts with the dialect (gh-23761).
- if provided != parser_default and provided != dialect_val:
- msg = (
- f"Conflicting values for '{param}': '{provided}' was "
- f"provided, but the dialect specifies '{dialect_val}'. "
- "Using the dialect-specified value."
- )
-
- # Annoying corner case for not warning about
- # conflicts between dialect and delimiter parameter.
- # Refer to the outer "_read_" function for more info.
- if not (param == "delimiter" and kwds.pop("sep_override", False)):
- conflict_msgs.append(msg)
-
- if conflict_msgs:
- warnings.warn(
- "\n\n".join(conflict_msgs), ParserWarning, stacklevel=2
- )
- kwds[param] = dialect_val
-
- if kwds.get("skipfooter"):
- if kwds.get("iterator") or kwds.get("chunksize"):
- raise ValueError("'skipfooter' not supported for 'iteration'")
- if kwds.get("nrows"):
- raise ValueError("'skipfooter' not supported with 'nrows'")
+ kwds = _merge_with_dialect_properties(dialect, kwds)
if kwds.get("header", "infer") == "infer":
kwds["header"] = 0 if kwds.get("names") is None else None
@@ -853,7 +806,6 @@ def __init__(self, f, engine=None, **kwds):
self.orig_options = kwds
# miscellanea
- self.engine = engine
self._currow = 0
options = self._get_options_with_defaults(engine)
@@ -928,7 +880,6 @@ def _check_file_or_buffer(self, f, engine):
def _clean_options(self, options, engine):
result = options.copy()
- engine_specified = self._engine_specified
fallback_reason = None
# C engine not supported yet
@@ -992,7 +943,7 @@ def _clean_options(self, options, engine):
)
engine = "python"
- if fallback_reason and engine_specified:
+ if fallback_reason and self._engine_specified:
raise ValueError(fallback_reason)
if engine == "c":
@@ -1028,25 +979,18 @@ def _clean_options(self, options, engine):
validate_header_arg(options["header"])
- depr_warning = ""
-
for arg in _deprecated_args:
parser_default = _c_parser_defaults[arg]
depr_default = _deprecated_defaults[arg]
-
- msg = (
- f"The {repr(arg)} argument has been deprecated and will be "
- "removed in a future version."
- )
-
if result.get(arg, depr_default) != depr_default:
- depr_warning += msg + "\n\n"
+ msg = (
+ f"The {arg} argument has been deprecated and will be "
+ "removed in a future version.\n\n"
+ )
+ warnings.warn(msg, FutureWarning, stacklevel=2)
else:
result[arg] = parser_default
- if depr_warning != "":
- warnings.warn(depr_warning, FutureWarning, stacklevel=2)
-
if index_col is True:
raise ValueError("The value of index_col couldn't be 'True'")
if _is_index_col(index_col):
@@ -3706,7 +3650,7 @@ def _make_reader(self, f):
)
-def _check_defaults_read(
+def _refine_defaults_read(
dialect: Union[str, csv.Dialect],
delimiter: Union[str, object],
delim_whitespace: bool,
@@ -3714,7 +3658,7 @@ def _check_defaults_read(
sep: Union[str, object],
defaults: Dict[str, Any],
):
- """Check default values of input parameters of read_csv, read_table.
+ """Validate/refine default values of input parameters of read_csv, read_table.
Parameters
----------
@@ -3766,7 +3710,7 @@ def _check_defaults_read(
# the comparison to dialect values by checking if default values
# for BOTH "delimiter" and "sep" were provided.
if dialect is not None:
- kwds["sep_override"] = (delimiter is None) and (
+ kwds["sep_override"] = delimiter is None and (
sep is lib.no_default or sep == delim_default
)
@@ -3793,3 +3737,95 @@ def _check_defaults_read(
kwds["engine_specified"] = False
return kwds
+
+
+def _merge_with_dialect_properties(
+ dialect: csv.Dialect,
+ defaults: Dict[str, Any],
+) -> Dict[str, Any]:
+ """
+ Merge default kwargs in TextFileReader with dialect parameters.
+
+ Parameters
+ ----------
+ dialect : csv.Dialect
+ Concrete csv dialect. See csv.Dialect documentation for more details.
+ defaults : dict
+ Keyword arguments passed to TextFileReader.
+
+ Returns
+ -------
+ kwds : dict
+ Updated keyword arguments, merged with dialect parameters.
+
+ Raises
+ ------
+ ValueError
+ If incorrect dialect is provided.
+ """
+ kwds = defaults.copy()
+
+ # Any valid dialect should have these attributes.
+ # If any are missing, we will raise automatically.
+ mandatory_dialect_attrs = (
+ "delimiter",
+ "doublequote",
+ "escapechar",
+ "skipinitialspace",
+ "quotechar",
+ "quoting",
+ )
+
+ for param in mandatory_dialect_attrs:
+ try:
+ dialect_val = getattr(dialect, param)
+ except AttributeError as err:
+ raise ValueError(f"Invalid dialect {dialect} provided") from err
+
+ parser_default = _parser_defaults[param]
+ provided = kwds.get(param, parser_default)
+
+ # Messages for conflicting values between the dialect
+ # instance and the actual parameters provided.
+ conflict_msgs = []
+
+ # Don't warn if the default parameter was passed in,
+ # even if it conflicts with the dialect (gh-23761).
+ if provided != parser_default and provided != dialect_val:
+ msg = (
+ f"Conflicting values for '{param}': '{provided}' was "
+ f"provided, but the dialect specifies '{dialect_val}'. "
+ "Using the dialect-specified value."
+ )
+
+ # Annoying corner case for not warning about
+ # conflicts between dialect and delimiter parameter.
+ # Refer to the outer "_read_" function for more info.
+ if not (param == "delimiter" and kwds.pop("sep_override", False)):
+ conflict_msgs.append(msg)
+
+ if conflict_msgs:
+ warnings.warn("\n\n".join(conflict_msgs), ParserWarning, stacklevel=2)
+ kwds[param] = dialect_val
+ return kwds
+
+
+def _validate_skipfooter(kwds: Dict[str, Any]) -> None:
+ """
+ Check whether skipfooter is compatible with other kwargs in TextFileReader.
+
+ Parameters
+ ----------
+ kwds : dict
+ Keyword arguments passed to TextFileReader.
+
+ Raises
+ ------
+ ValueError
+ If skipfooter is not compatible with other parameters.
+ """
+ if kwds.get("skipfooter"):
+ if kwds.get("iterator") or kwds.get("chunksize"):
+ raise ValueError("'skipfooter' not supported for 'iteration'")
+ if kwds.get("nrows"):
+ raise ValueError("'skipfooter' not supported with 'nrows'")
| - [ ] closes #xxxx
- [ ] tests added / passed
- [ ] passes `black pandas`
- [ ] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [ ] whatsnew entry
Refactor/cleanup ``pandas/io/parsers.py``
- Extract method _refresh_kwargs_based_on_dialect
- Extract method _validate_skipfooter
- Drop local variable engine_specified
- Clean-up FutureWarning issue
| https://api.github.com/repos/pandas-dev/pandas/pulls/36852 | 2020-10-04T07:17:43Z | 2020-10-14T17:54:46Z | 2020-10-14T17:54:46Z | 2020-10-22T07:31:10Z |
DOC: Standardize references to pandas in the documentation for #32316 | diff --git a/doc/source/development/code_style.rst b/doc/source/development/code_style.rst
index 387f65ea583a0..5aa1c1099d6e0 100644
--- a/doc/source/development/code_style.rst
+++ b/doc/source/development/code_style.rst
@@ -9,7 +9,7 @@ pandas code style guide
.. contents:: Table of contents:
:local:
-*pandas* follows the `PEP8 <https://www.python.org/dev/peps/pep-0008/>`_
+pandas follows the `PEP8 <https://www.python.org/dev/peps/pep-0008/>`_
standard and uses `Black <https://black.readthedocs.io/en/stable/>`_
and `Flake8 <https://flake8.pycqa.org/en/latest/>`_ to ensure a
consistent code format throughout the project. For details see the
diff --git a/doc/source/development/contributing.rst b/doc/source/development/contributing.rst
index d6955c5d4b8d2..d3802ef71d1af 100644
--- a/doc/source/development/contributing.rst
+++ b/doc/source/development/contributing.rst
@@ -134,7 +134,7 @@ want to clone your fork to your machine::
git remote add upstream https://github.com/pandas-dev/pandas.git
This creates the directory ``pandas-yourname`` and connects your repository to
-the upstream (main project) *pandas* repository.
+the upstream (main project) pandas repository.
Note that performing a shallow clone (with ``--depth==N``, for some ``N`` greater
or equal to 1) might break some tests and features as ``pd.show_versions()``
diff --git a/doc/source/ecosystem.rst b/doc/source/ecosystem.rst
index ed6ce7e9759b6..8f52c83f6bd35 100644
--- a/doc/source/ecosystem.rst
+++ b/doc/source/ecosystem.rst
@@ -98,7 +98,7 @@ With Altair, you can spend more time understanding your data and its
meaning. Altair's API is simple, friendly and consistent and built on
top of the powerful Vega-Lite JSON specification. This elegant
simplicity produces beautiful and effective visualizations with a
-minimal amount of code. Altair works with Pandas DataFrames.
+minimal amount of code. Altair works with pandas DataFrames.
`Bokeh <https://bokeh.pydata.org>`__
@@ -110,7 +110,7 @@ graphics in the style of Protovis/D3, while delivering high-performance interact
large data to thin clients.
`Pandas-Bokeh <https://github.com/PatrikHlobil/Pandas-Bokeh>`__ provides a high level API
-for Bokeh that can be loaded as a native Pandas plotting backend via
+for Bokeh that can be loaded as a native pandas plotting backend via
.. code:: python
@@ -185,7 +185,7 @@ IDE
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
IPython is an interactive command shell and distributed computing
-environment. IPython tab completion works with Pandas methods and also
+environment. IPython tab completion works with pandas methods and also
attributes like DataFrame columns.
`Jupyter Notebook / Jupyter Lab <https://jupyter.org>`__
@@ -274,13 +274,13 @@ The following data feeds are available:
`Quandl/Python <https://github.com/quandl/Python>`__
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Quandl API for Python wraps the Quandl REST API to return
-Pandas DataFrames with timeseries indexes.
+pandas DataFrames with timeseries indexes.
`Pydatastream <https://github.com/vfilimonov/pydatastream>`__
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
PyDatastream is a Python interface to the
`Refinitiv Datastream (DWS) <https://www.refinitiv.com/en/products/datastream-macroeconomic-analysis>`__
-REST API to return indexed Pandas DataFrames with financial data.
+REST API to return indexed pandas DataFrames with financial data.
This package requires valid credentials for this API (non free).
`pandaSDMX <https://pandasdmx.readthedocs.io>`__
@@ -401,7 +401,7 @@ If also displays progress bars.
`Ray <https://ray.readthedocs.io/en/latest/pandas_on_ray.html>`__
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-Pandas on Ray is an early stage DataFrame library that wraps Pandas and transparently distributes the data and computation. The user does not need to know how many cores their system has, nor do they need to specify how to distribute the data. In fact, users can continue using their previous Pandas notebooks while experiencing a considerable speedup from Pandas on Ray, even on a single machine. Only a modification of the import statement is needed, as we demonstrate below. Once you’ve changed your import statement, you’re ready to use Pandas on Ray just like you would Pandas.
+Pandas on Ray is an early stage DataFrame library that wraps pandas and transparently distributes the data and computation. The user does not need to know how many cores their system has, nor do they need to specify how to distribute the data. In fact, users can continue using their previous Pandas notebooks while experiencing a considerable speedup from Pandas on Ray, even on a single machine. Only a modification of the import statement is needed, as we demonstrate below. Once you’ve changed your import statement, you’re ready to use Pandas on Ray just like you would Pandas.
.. code:: python
@@ -412,7 +412,7 @@ Pandas on Ray is an early stage DataFrame library that wraps Pandas and transpar
`Vaex <https://docs.vaex.io/>`__
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-Increasingly, packages are being built on top of pandas to address specific needs in data preparation, analysis and visualization. Vaex is a python library for Out-of-Core DataFrames (similar to Pandas), to visualize and explore big tabular datasets. It can calculate statistics such as mean, sum, count, standard deviation etc, on an N-dimensional grid up to a billion (10\ :sup:`9`) objects/rows per second. Visualization is done using histograms, density plots and 3d volume rendering, allowing interactive exploration of big data. Vaex uses memory mapping, zero memory copy policy and lazy computations for best performance (no memory wasted).
+Increasingly, packages are being built on top of pandas to address specific needs in data preparation, analysis and visualization. Vaex is a python library for Out-of-Core DataFrames (similar to pandas), to visualize and explore big tabular datasets. It can calculate statistics such as mean, sum, count, standard deviation etc, on an N-dimensional grid up to a billion (10\ :sup:`9`) objects/rows per second. Visualization is done using histograms, density plots and 3d volume rendering, allowing interactive exploration of big data. Vaex uses memory mapping, zero memory copy policy and lazy computations for best performance (no memory wasted).
* vaex.from_pandas
* vaex.to_pandas_df
diff --git a/doc/source/getting_started/install.rst b/doc/source/getting_started/install.rst
index a6341451b1b80..e9c8d0b9b9f53 100644
--- a/doc/source/getting_started/install.rst
+++ b/doc/source/getting_started/install.rst
@@ -184,7 +184,7 @@ You can find simple installation instructions for pandas in this document: ``ins
Installing from source
~~~~~~~~~~~~~~~~~~~~~~
-See the :ref:`contributing guide <contributing>` for complete instructions on building from the git source tree. Further, see :ref:`creating a development environment <contributing.dev_env>` if you wish to create a *pandas* development environment.
+See the :ref:`contributing guide <contributing>` for complete instructions on building from the git source tree. Further, see :ref:`creating a development environment <contributing.dev_env>` if you wish to create a pandas development environment.
Running the test suite
----------------------
diff --git a/doc/source/getting_started/overview.rst b/doc/source/getting_started/overview.rst
index 57d87d4ec8a91..3043cf25c5312 100644
--- a/doc/source/getting_started/overview.rst
+++ b/doc/source/getting_started/overview.rst
@@ -6,7 +6,7 @@
Package overview
****************
-**pandas** is a `Python <https://www.python.org>`__ package providing fast,
+pandas is a `Python <https://www.python.org>`__ package providing fast,
flexible, and expressive data structures designed to make working with
"relational" or "labeled" data both easy and intuitive. It aims to be the
fundamental high-level building block for doing practical, **real-world** data
diff --git a/doc/source/user_guide/scale.rst b/doc/source/user_guide/scale.rst
index 206d8dd0f4739..02e7840889fa6 100644
--- a/doc/source/user_guide/scale.rst
+++ b/doc/source/user_guide/scale.rst
@@ -260,7 +260,7 @@ Inspecting the ``ddf`` object, we see a few things
* There are new attributes like ``.npartitions`` and ``.divisions``
The partitions and divisions are how Dask parallelizes computation. A **Dask**
-DataFrame is made up of many **Pandas** DataFrames. A single method call on a
+DataFrame is made up of many pandas DataFrames. A single method call on a
Dask DataFrame ends up making many pandas method calls, and Dask knows how to
coordinate everything to get the result.
diff --git a/doc/source/whatsnew/v0.11.0.rst b/doc/source/whatsnew/v0.11.0.rst
index eb91ac427063f..fa6bd2de62fc3 100644
--- a/doc/source/whatsnew/v0.11.0.rst
+++ b/doc/source/whatsnew/v0.11.0.rst
@@ -12,7 +12,7 @@ Data have had quite a number of additions, and Dtype support is now full-fledged
There are also a number of important API changes that long-time pandas users should
pay close attention to.
-There is a new section in the documentation, :ref:`10 Minutes to Pandas <10min>`,
+There is a new section in the documentation, :ref:`10 Minutes to pandas <10min>`,
primarily geared to new users.
There is a new section in the documentation, :ref:`Cookbook <cookbook>`, a collection
diff --git a/doc/source/whatsnew/v0.13.0.rst b/doc/source/whatsnew/v0.13.0.rst
index bc607409546c6..3c6b70fb21383 100644
--- a/doc/source/whatsnew/v0.13.0.rst
+++ b/doc/source/whatsnew/v0.13.0.rst
@@ -668,7 +668,7 @@ Enhancements
- ``Series`` now supports a ``to_frame`` method to convert it to a single-column DataFrame (:issue:`5164`)
-- All R datasets listed here http://stat.ethz.ch/R-manual/R-devel/library/datasets/html/00Index.html can now be loaded into Pandas objects
+- All R datasets listed here http://stat.ethz.ch/R-manual/R-devel/library/datasets/html/00Index.html can now be loaded into pandas objects
.. code-block:: python
diff --git a/doc/source/whatsnew/v0.13.1.rst b/doc/source/whatsnew/v0.13.1.rst
index 9e416f8eeb3f1..9a62debbc7b0f 100644
--- a/doc/source/whatsnew/v0.13.1.rst
+++ b/doc/source/whatsnew/v0.13.1.rst
@@ -17,7 +17,7 @@ Highlights include:
- Will intelligently limit display precision for datetime/timedelta formats.
- Enhanced Panel :meth:`~pandas.Panel.apply` method.
- Suggested tutorials in new :ref:`Tutorials<tutorials>` section.
-- Our pandas ecosystem is growing, We now feature related projects in a new :ref:`Pandas Ecosystem<ecosystem>` section.
+- Our pandas ecosystem is growing, We now feature related projects in a new :ref:`pandas Ecosystem<ecosystem>` section.
- Much work has been taking place on improving the docs, and a new :ref:`Contributing<contributing>` section has been added.
- Even though it may only be of interest to devs, we <3 our new CI status page: `ScatterCI <http://scatterci.github.io/pydata/pandas>`__.
diff --git a/doc/source/whatsnew/v0.16.1.rst b/doc/source/whatsnew/v0.16.1.rst
index a89ede8f024a0..186656ae151e9 100644
--- a/doc/source/whatsnew/v0.16.1.rst
+++ b/doc/source/whatsnew/v0.16.1.rst
@@ -13,7 +13,7 @@ We recommend that all users upgrade to this version.
Highlights include:
- Support for a ``CategoricalIndex``, a category based index, see :ref:`here <whatsnew_0161.enhancements.categoricalindex>`
-- New section on how-to-contribute to *pandas*, see :ref:`here <contributing>`
+- New section on how-to-contribute to pandas, see :ref:`here <contributing>`
- Revised "Merge, join, and concatenate" documentation, including graphical examples to make it easier to understand each operations, see :ref:`here <merging>`
- New method ``sample`` for drawing random samples from Series, DataFrames and Panels. See :ref:`here <whatsnew_0161.enhancements.sample>`
- The default ``Index`` printing has changed to a more uniform format, see :ref:`here <whatsnew_0161.index_repr>`
diff --git a/doc/source/whatsnew/v0.16.2.rst b/doc/source/whatsnew/v0.16.2.rst
index 2cb0cbec68eff..fc43d21e5aed5 100644
--- a/doc/source/whatsnew/v0.16.2.rst
+++ b/doc/source/whatsnew/v0.16.2.rst
@@ -14,7 +14,7 @@ We recommend that all users upgrade to this version.
Highlights include:
- A new ``pipe`` method, see :ref:`here <whatsnew_0162.enhancements.pipe>`
-- Documentation on how to use numba_ with *pandas*, see :ref:`here <enhancingperf.numba>`
+- Documentation on how to use numba_ with pandas, see :ref:`here <enhancingperf.numba>`
.. contents:: What's new in v0.16.2
diff --git a/doc/source/whatsnew/v0.17.0.rst b/doc/source/whatsnew/v0.17.0.rst
index e8f37a72f6417..43bbb57b38303 100644
--- a/doc/source/whatsnew/v0.17.0.rst
+++ b/doc/source/whatsnew/v0.17.0.rst
@@ -50,7 +50,7 @@ Highlights include:
- Development installed versions of pandas will now have ``PEP440`` compliant version strings (:issue:`9518`)
- Development support for benchmarking with the `Air Speed Velocity library <https://github.com/spacetelescope/asv/>`_ (:issue:`8361`)
- Support for reading SAS xport files, see :ref:`here <whatsnew_0170.enhancements.sas_xport>`
-- Documentation comparing SAS to *pandas*, see :ref:`here <compare_with_sas>`
+- Documentation comparing SAS to pandas, see :ref:`here <compare_with_sas>`
- Removal of the automatic TimeSeries broadcasting, deprecated since 0.8.0, see :ref:`here <whatsnew_0170.prior_deprecations>`
- Display format with plain text can optionally align with Unicode East Asian Width, see :ref:`here <whatsnew_0170.east_asian_width>`
- Compatibility with Python 3.5 (:issue:`11097`)
diff --git a/doc/source/whatsnew/v0.17.1.rst b/doc/source/whatsnew/v0.17.1.rst
index 5d15a01aee5a0..f6bddd0b50bf7 100644
--- a/doc/source/whatsnew/v0.17.1.rst
+++ b/doc/source/whatsnew/v0.17.1.rst
@@ -8,7 +8,7 @@ Version 0.17.1 (November 21, 2015)
.. note::
- We are proud to announce that *pandas* has become a sponsored project of the (`NumFOCUS organization`_). This will help ensure the success of development of *pandas* as a world-class open-source project.
+ We are proud to announce that pandas has become a sponsored project of the (`NumFOCUS organization`_). This will help ensure the success of development of *pandas* as a world-class open-source project.
.. _numfocus organization: http://www.numfocus.org/blog/numfocus-announces-new-fiscally-sponsored-project-pandas
diff --git a/doc/source/whatsnew/v0.18.0.rst b/doc/source/whatsnew/v0.18.0.rst
index ef5242b0e33c8..c32aa441f9376 100644
--- a/doc/source/whatsnew/v0.18.0.rst
+++ b/doc/source/whatsnew/v0.18.0.rst
@@ -1274,7 +1274,7 @@ Bug fixes
- Bug in ``.groupby`` where a ``KeyError`` was not raised for a wrong column if there was only one row in the dataframe (:issue:`11741`)
- Bug in ``.read_csv`` with dtype specified on empty data producing an error (:issue:`12048`)
- Bug in ``.read_csv`` where strings like ``'2E'`` are treated as valid floats (:issue:`12237`)
-- Bug in building *pandas* with debugging symbols (:issue:`12123`)
+- Bug in building pandas with debugging symbols (:issue:`12123`)
- Removed ``millisecond`` property of ``DatetimeIndex``. This would always raise a ``ValueError`` (:issue:`12019`).
diff --git a/doc/source/whatsnew/v0.23.0.rst b/doc/source/whatsnew/v0.23.0.rst
index cb811fd83d90d..1b356a314c2a3 100644
--- a/doc/source/whatsnew/v0.23.0.rst
+++ b/doc/source/whatsnew/v0.23.0.rst
@@ -884,7 +884,7 @@ Extraction of matching patterns from strings
By default, extracting matching patterns from strings with :func:`str.extract` used to return a
``Series`` if a single group was being extracted (a ``DataFrame`` if more than one group was
-extracted). As of Pandas 0.23.0 :func:`str.extract` always returns a ``DataFrame``, unless
+extracted). As of pandas 0.23.0 :func:`str.extract` always returns a ``DataFrame``, unless
``expand`` is set to ``False``. Finally, ``None`` was an accepted value for
the ``expand`` parameter (which was equivalent to ``False``), but now raises a ``ValueError``. (:issue:`11386`)
@@ -1175,7 +1175,7 @@ Performance improvements
Documentation changes
~~~~~~~~~~~~~~~~~~~~~
-Thanks to all of the contributors who participated in the Pandas Documentation
+Thanks to all of the contributors who participated in the pandas Documentation
Sprint, which took place on March 10th. We had about 500 participants from over
30 locations across the world. You should notice that many of the
:ref:`API docstrings <api>` have greatly improved.
diff --git a/doc/source/whatsnew/v0.25.0.rst b/doc/source/whatsnew/v0.25.0.rst
index 7b4440148677b..cd253fc45539e 100644
--- a/doc/source/whatsnew/v0.25.0.rst
+++ b/doc/source/whatsnew/v0.25.0.rst
@@ -545,13 +545,13 @@ with :attr:`numpy.nan` in the case of an empty :class:`DataFrame` (:issue:`26397
``__str__`` methods now call ``__repr__`` rather than vice versa
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
-Pandas has until now mostly defined string representations in a Pandas objects's
+Pandas has until now mostly defined string representations in a pandas objects's
``__str__``/``__unicode__``/``__bytes__`` methods, and called ``__str__`` from the ``__repr__``
method, if a specific ``__repr__`` method is not found. This is not needed for Python3.
-In Pandas 0.25, the string representations of Pandas objects are now generally
+In Pandas 0.25, the string representations of pandas objects are now generally
defined in ``__repr__``, and calls to ``__str__`` in general now pass the call on to
the ``__repr__``, if a specific ``__str__`` method doesn't exist, as is standard for Python.
-This change is backward compatible for direct usage of Pandas, but if you subclass
+This change is backward compatible for direct usage of pandas, but if you subclass
Pandas objects *and* give your subclasses specific ``__str__``/``__repr__`` methods,
you may have to adjust your ``__str__``/``__repr__`` methods (:issue:`26495`).
@@ -881,7 +881,7 @@ Other API changes
- Bug in :meth:`DatetimeIndex.snap` which didn't preserving the ``name`` of the input :class:`Index` (:issue:`25575`)
- The ``arg`` argument in :meth:`pandas.core.groupby.DataFrameGroupBy.agg` has been renamed to ``func`` (:issue:`26089`)
- The ``arg`` argument in :meth:`pandas.core.window._Window.aggregate` has been renamed to ``func`` (:issue:`26372`)
-- Most Pandas classes had a ``__bytes__`` method, which was used for getting a python2-style bytestring representation of the object. This method has been removed as a part of dropping Python2 (:issue:`26447`)
+- Most pandas classes had a ``__bytes__`` method, which was used for getting a python2-style bytestring representation of the object. This method has been removed as a part of dropping Python2 (:issue:`26447`)
- The ``.str``-accessor has been disabled for 1-level :class:`MultiIndex`, use :meth:`MultiIndex.to_flat_index` if necessary (:issue:`23679`)
- Removed support of gtk package for clipboards (:issue:`26563`)
- Using an unsupported version of Beautiful Soup 4 will now raise an ``ImportError`` instead of a ``ValueError`` (:issue:`27063`)
diff --git a/doc/source/whatsnew/v1.0.0.rst b/doc/source/whatsnew/v1.0.0.rst
index 32175d344c320..ff23f0b30d93d 100755
--- a/doc/source/whatsnew/v1.0.0.rst
+++ b/doc/source/whatsnew/v1.0.0.rst
@@ -18,7 +18,7 @@ including other versions of pandas.
New deprecation policy
~~~~~~~~~~~~~~~~~~~~~~
-Starting with Pandas 1.0.0, pandas will adopt a variant of `SemVer`_ to
+Starting with pandas 1.0.0, pandas will adopt a variant of `SemVer`_ to
version releases. Briefly,
* Deprecations will be introduced in minor releases (e.g. 1.1.0, 1.2.0, 2.1.0, ...)
@@ -763,7 +763,7 @@ Other API changes
- :class:`core.groupby.GroupBy.transform` now raises on invalid operation names (:issue:`27489`)
- :meth:`pandas.api.types.infer_dtype` will now return "integer-na" for integer and ``np.nan`` mix (:issue:`27283`)
- :meth:`MultiIndex.from_arrays` will no longer infer names from arrays if ``names=None`` is explicitly provided (:issue:`27292`)
-- In order to improve tab-completion, Pandas does not include most deprecated attributes when introspecting a pandas object using ``dir`` (e.g. ``dir(df)``).
+- In order to improve tab-completion, pandas does not include most deprecated attributes when introspecting a pandas object using ``dir`` (e.g. ``dir(df)``).
To see which attributes are excluded, see an object's ``_deprecations`` attribute, for example ``pd.DataFrame._deprecations`` (:issue:`28805`).
- The returned dtype of :func:`unique` now matches the input dtype. (:issue:`27874`)
- Changed the default configuration value for ``options.matplotlib.register_converters`` from ``True`` to ``"auto"`` (:issue:`18720`).
|
Standardize references to pandas in the documentation for #32316 | https://api.github.com/repos/pandas-dev/pandas/pulls/36851 | 2020-10-04T05:49:02Z | 2020-10-04T21:05:48Z | null | 2020-10-04T21:05:49Z |
DOC: black enhancingperf.rst and 10min.rst code style | diff --git a/doc/source/user_guide/10min.rst b/doc/source/user_guide/10min.rst
index 673f8689736f1..8270b2ee49bd8 100644
--- a/doc/source/user_guide/10min.rst
+++ b/doc/source/user_guide/10min.rst
@@ -34,9 +34,9 @@ and labeled columns:
.. ipython:: python
- dates = pd.date_range('20130101', periods=6)
+ dates = pd.date_range("20130101", periods=6)
dates
- df = pd.DataFrame(np.random.randn(6, 4), index=dates, columns=list('ABCD'))
+ df = pd.DataFrame(np.random.randn(6, 4), index=dates, columns=list("ABCD"))
df
Creating a :class:`DataFrame` by passing a dict of objects that can be converted to series-like.
@@ -156,7 +156,7 @@ Sorting by values:
.. ipython:: python
- df.sort_values(by='B')
+ df.sort_values(by="B")
Selection
---------
@@ -178,14 +178,14 @@ equivalent to ``df.A``:
.. ipython:: python
- df['A']
+ df["A"]
Selecting via ``[]``, which slices the rows.
.. ipython:: python
df[0:3]
- df['20130102':'20130104']
+ df["20130102":"20130104"]
Selection by label
~~~~~~~~~~~~~~~~~~
@@ -202,31 +202,31 @@ Selecting on a multi-axis by label:
.. ipython:: python
- df.loc[:, ['A', 'B']]
+ df.loc[:, ["A", "B"]]
Showing label slicing, both endpoints are *included*:
.. ipython:: python
- df.loc['20130102':'20130104', ['A', 'B']]
+ df.loc["20130102":"20130104", ["A", "B"]]
Reduction in the dimensions of the returned object:
.. ipython:: python
- df.loc['20130102', ['A', 'B']]
+ df.loc["20130102", ["A", "B"]]
For getting a scalar value:
.. ipython:: python
- df.loc[dates[0], 'A']
+ df.loc[dates[0], "A"]
For getting fast access to a scalar (equivalent to the prior method):
.. ipython:: python
- df.at[dates[0], 'A']
+ df.at[dates[0], "A"]
Selection by position
~~~~~~~~~~~~~~~~~~~~~
@@ -282,7 +282,7 @@ Using a single column's values to select data.
.. ipython:: python
- df[df['A'] > 0]
+ df[df["A"] > 0]
Selecting values from a DataFrame where a boolean condition is met.
@@ -295,9 +295,9 @@ Using the :func:`~Series.isin` method for filtering:
.. ipython:: python
df2 = df.copy()
- df2['E'] = ['one', 'one', 'two', 'three', 'four', 'three']
+ df2["E"] = ["one", "one", "two", "three", "four", "three"]
df2
- df2[df2['E'].isin(['two', 'four'])]
+ df2[df2["E"].isin(["two", "four"])]
Setting
~~~~~~~
@@ -307,15 +307,15 @@ by the indexes.
.. ipython:: python
- s1 = pd.Series([1, 2, 3, 4, 5, 6], index=pd.date_range('20130102', periods=6))
+ s1 = pd.Series([1, 2, 3, 4, 5, 6], index=pd.date_range("20130102", periods=6))
s1
- df['F'] = s1
+ df["F"] = s1
Setting values by label:
.. ipython:: python
- df.at[dates[0], 'A'] = 0
+ df.at[dates[0], "A"] = 0
Setting values by position:
@@ -327,7 +327,7 @@ Setting by assigning with a NumPy array:
.. ipython:: python
- df.loc[:, 'D'] = np.array([5] * len(df))
+ df.loc[:, "D"] = np.array([5] * len(df))
The result of the prior setting operations.
@@ -356,15 +356,15 @@ returns a copy of the data.
.. ipython:: python
- df1 = df.reindex(index=dates[0:4], columns=list(df.columns) + ['E'])
- df1.loc[dates[0]:dates[1], 'E'] = 1
+ df1 = df.reindex(index=dates[0:4], columns=list(df.columns) + ["E"])
+ df1.loc[dates[0] : dates[1], "E"] = 1
df1
To drop any rows that have missing data.
.. ipython:: python
- df1.dropna(how='any')
+ df1.dropna(how="any")
Filling missing data.
@@ -408,7 +408,7 @@ In addition, pandas automatically broadcasts along the specified dimension.
s = pd.Series([1, 3, 5, np.nan, 6, 8], index=dates).shift(2)
s
- df.sub(s, axis='index')
+ df.sub(s, axis="index")
Apply
@@ -444,7 +444,7 @@ some cases always uses them). See more at :ref:`Vectorized String Methods
.. ipython:: python
- s = pd.Series(['A', 'B', 'C', 'Aaba', 'Baca', np.nan, 'CABA', 'dog', 'cat'])
+ s = pd.Series(["A", "B", "C", "Aaba", "Baca", np.nan, "CABA", "dog", "cat"])
s.str.lower()
Merge
@@ -486,21 +486,21 @@ SQL style merges. See the :ref:`Database style joining <merging.join>` section.
.. ipython:: python
- left = pd.DataFrame({'key': ['foo', 'foo'], 'lval': [1, 2]})
- right = pd.DataFrame({'key': ['foo', 'foo'], 'rval': [4, 5]})
+ left = pd.DataFrame({"key": ["foo", "foo"], "lval": [1, 2]})
+ right = pd.DataFrame({"key": ["foo", "foo"], "rval": [4, 5]})
left
right
- pd.merge(left, right, on='key')
+ pd.merge(left, right, on="key")
Another example that can be given is:
.. ipython:: python
- left = pd.DataFrame({'key': ['foo', 'bar'], 'lval': [1, 2]})
- right = pd.DataFrame({'key': ['foo', 'bar'], 'rval': [4, 5]})
+ left = pd.DataFrame({"key": ["foo", "bar"], "lval": [1, 2]})
+ right = pd.DataFrame({"key": ["foo", "bar"], "rval": [4, 5]})
left
right
- pd.merge(left, right, on='key')
+ pd.merge(left, right, on="key")
Grouping
--------
@@ -531,14 +531,14 @@ groups.
.. ipython:: python
- df.groupby('A').sum()
+ df.groupby("A").sum()
Grouping by multiple columns forms a hierarchical index, and again we can
apply the :meth:`~pandas.core.groupby.GroupBy.sum` function.
.. ipython:: python
- df.groupby(['A', 'B']).sum()
+ df.groupby(["A", "B"]).sum()
Reshaping
---------
@@ -559,8 +559,8 @@ Stack
]
)
)
- index = pd.MultiIndex.from_tuples(tuples, names=['first', 'second'])
- df = pd.DataFrame(np.random.randn(8, 2), index=index, columns=['A', 'B'])
+ index = pd.MultiIndex.from_tuples(tuples, names=["first", "second"])
+ df = pd.DataFrame(np.random.randn(8, 2), index=index, columns=["A", "B"])
df2 = df[:4]
df2
@@ -603,7 +603,7 @@ We can produce pivot tables from this data very easily:
.. ipython:: python
- pd.pivot_table(df, values='D', index=['A', 'B'], columns=['C'])
+ pd.pivot_table(df, values="D", index=["A", "B"], columns=["C"])
Time series
@@ -616,31 +616,31 @@ financial applications. See the :ref:`Time Series section <timeseries>`.
.. ipython:: python
- rng = pd.date_range('1/1/2012', periods=100, freq='S')
+ rng = pd.date_range("1/1/2012", periods=100, freq="S")
ts = pd.Series(np.random.randint(0, 500, len(rng)), index=rng)
- ts.resample('5Min').sum()
+ ts.resample("5Min").sum()
Time zone representation:
.. ipython:: python
- rng = pd.date_range('3/6/2012 00:00', periods=5, freq='D')
+ rng = pd.date_range("3/6/2012 00:00", periods=5, freq="D")
ts = pd.Series(np.random.randn(len(rng)), rng)
ts
- ts_utc = ts.tz_localize('UTC')
+ ts_utc = ts.tz_localize("UTC")
ts_utc
Converting to another time zone:
.. ipython:: python
- ts_utc.tz_convert('US/Eastern')
+ ts_utc.tz_convert("US/Eastern")
Converting between time span representations:
.. ipython:: python
- rng = pd.date_range('1/1/2012', periods=5, freq='M')
+ rng = pd.date_range("1/1/2012", periods=5, freq="M")
ts = pd.Series(np.random.randn(len(rng)), index=rng)
ts
ps = ts.to_period()
@@ -654,9 +654,9 @@ the quarter end:
.. ipython:: python
- prng = pd.period_range('1990Q1', '2000Q4', freq='Q-NOV')
+ prng = pd.period_range("1990Q1", "2000Q4", freq="Q-NOV")
ts = pd.Series(np.random.randn(len(prng)), prng)
- ts.index = (prng.asfreq('M', 'e') + 1).asfreq('H', 's') + 9
+ ts.index = (prng.asfreq("M", "e") + 1).asfreq("H", "s") + 9
ts.head()
Categoricals
@@ -754,19 +754,20 @@ CSV
.. ipython:: python
- df.to_csv('foo.csv')
+ df.to_csv("foo.csv")
:ref:`Reading from a csv file. <io.read_csv_table>`
.. ipython:: python
- pd.read_csv('foo.csv')
+ pd.read_csv("foo.csv")
.. ipython:: python
:suppress:
import os
- os.remove('foo.csv')
+
+ os.remove("foo.csv")
HDF5
~~~~
@@ -777,18 +778,18 @@ Writing to a HDF5 Store.
.. ipython:: python
- df.to_hdf('foo.h5', 'df')
+ df.to_hdf("foo.h5", "df")
Reading from a HDF5 Store.
.. ipython:: python
- pd.read_hdf('foo.h5', 'df')
+ pd.read_hdf("foo.h5", "df")
.. ipython:: python
:suppress:
- os.remove('foo.h5')
+ os.remove("foo.h5")
Excel
~~~~~
@@ -799,18 +800,18 @@ Writing to an excel file.
.. ipython:: python
- df.to_excel('foo.xlsx', sheet_name='Sheet1')
+ df.to_excel("foo.xlsx", sheet_name="Sheet1")
Reading from an excel file.
.. ipython:: python
- pd.read_excel('foo.xlsx', 'Sheet1', index_col=None, na_values=['NA'])
+ pd.read_excel("foo.xlsx", "Sheet1", index_col=None, na_values=["NA"])
.. ipython:: python
:suppress:
- os.remove('foo.xlsx')
+ os.remove("foo.xlsx")
Gotchas
-------
diff --git a/doc/source/user_guide/enhancingperf.rst b/doc/source/user_guide/enhancingperf.rst
index ce9db0a5279c3..d30554986607d 100644
--- a/doc/source/user_guide/enhancingperf.rst
+++ b/doc/source/user_guide/enhancingperf.rst
@@ -48,10 +48,14 @@ We have a ``DataFrame`` to which we want to apply a function row-wise.
.. ipython:: python
- df = pd.DataFrame({'a': np.random.randn(1000),
- 'b': np.random.randn(1000),
- 'N': np.random.randint(100, 1000, (1000)),
- 'x': 'x'})
+ df = pd.DataFrame(
+ {
+ "a": np.random.randn(1000),
+ "b": np.random.randn(1000),
+ "N": np.random.randint(100, 1000, (1000)),
+ "x": "x",
+ }
+ )
df
Here's the function in pure Python:
@@ -61,6 +65,7 @@ Here's the function in pure Python:
def f(x):
return x * (x - 1)
+
def integrate_f(a, b, N):
s = 0
dx = (b - a) / N
@@ -72,7 +77,7 @@ We achieve our result by using ``apply`` (row-wise):
.. code-block:: ipython
- In [7]: %timeit df.apply(lambda x: integrate_f(x['a'], x['b'], x['N']), axis=1)
+ In [7]: %timeit df.apply(lambda x: integrate_f(x["a"], x["b"], x["N"]), axis=1)
10 loops, best of 3: 174 ms per loop
But clearly this isn't fast enough for us. Let's take a look and see where the
@@ -81,7 +86,7 @@ four calls) using the `prun ipython magic function <https://ipython.readthedocs.
.. ipython:: python
- %prun -l 4 df.apply(lambda x: integrate_f(x['a'], x['b'], x['N']), axis=1) # noqa E999
+ %prun -l 4 df.apply(lambda x: integrate_f(x["a"], x["b"], x["N"]), axis=1) # noqa E999
By far the majority of time is spend inside either ``integrate_f`` or ``f``,
hence we'll concentrate our efforts cythonizing these two functions.
@@ -123,7 +128,7 @@ is here to distinguish between function versions):
.. code-block:: ipython
- In [4]: %timeit df.apply(lambda x: integrate_f_plain(x['a'], x['b'], x['N']), axis=1)
+ In [4]: %timeit df.apply(lambda x: integrate_f_plain(x["a"], x["b"], x["N"]), axis=1)
10 loops, best of 3: 85.5 ms per loop
Already this has shaved a third off, not too bad for a simple copy and paste.
@@ -152,7 +157,7 @@ We get another huge improvement simply by providing type information:
.. code-block:: ipython
- In [4]: %timeit df.apply(lambda x: integrate_f_typed(x['a'], x['b'], x['N']), axis=1)
+ In [4]: %timeit df.apply(lambda x: integrate_f_typed(x["a"], x["b"], x["N"]), axis=1)
10 loops, best of 3: 20.3 ms per loop
Now, we're talking! It's now over ten times faster than the original python
@@ -161,7 +166,7 @@ look at what's eating up time:
.. ipython:: python
- %prun -l 4 df.apply(lambda x: integrate_f_typed(x['a'], x['b'], x['N']), axis=1)
+ %prun -l 4 df.apply(lambda x: integrate_f_typed(x["a"], x["b"], x["N"]), axis=1)
.. _enhancingperf.ndarray:
@@ -220,15 +225,13 @@ the rows, applying our ``integrate_f_typed``, and putting this in the zeros arra
.. code-block:: python
- apply_integrate_f(df['a'], df['b'], df['N'])
+ apply_integrate_f(df["a"], df["b"], df["N"])
But rather, use :meth:`Series.to_numpy` to get the underlying ``ndarray``:
.. code-block:: python
- apply_integrate_f(df['a'].to_numpy(),
- df['b'].to_numpy(),
- df['N'].to_numpy())
+ apply_integrate_f(df["a"].to_numpy(), df["b"].to_numpy(), df["N"].to_numpy())
.. note::
@@ -237,18 +240,14 @@ the rows, applying our ``integrate_f_typed``, and putting this in the zeros arra
.. code-block:: ipython
- In [4]: %timeit apply_integrate_f(df['a'].to_numpy(),
- df['b'].to_numpy(),
- df['N'].to_numpy())
+ In [4]: %timeit apply_integrate_f(df["a"].to_numpy(), df["b"].to_numpy(), df["N"].to_numpy())
1000 loops, best of 3: 1.25 ms per loop
We've gotten another big improvement. Let's check again where the time is spent:
.. ipython:: python
- %%prun -l 4 apply_integrate_f(df['a'].to_numpy(),
- df['b'].to_numpy(),
- df['N'].to_numpy())
+ %%prun -l 4 apply_integrate_f(df["a"].to_numpy(), df["b"].to_numpy(), df["N"].to_numpy())
As one might expect, the majority of the time is now spent in ``apply_integrate_f``,
so if we wanted to make anymore efficiencies we must continue to concentrate our
@@ -293,9 +292,7 @@ advanced Cython techniques:
.. code-block:: ipython
- In [4]: %timeit apply_integrate_f_wrap(df['a'].to_numpy(),
- df['b'].to_numpy(),
- df['N'].to_numpy())
+ In [4]: %timeit apply_integrate_f_wrap(df["a"].to_numpy(), df["b"].to_numpy(), df["N"].to_numpy())
1000 loops, best of 3: 987 us per loop
Even faster, with the caveat that a bug in our Cython code (an off-by-one error,
@@ -350,7 +347,7 @@ take the plain Python code from above and annotate with the ``@jit`` decorator.
@numba.jit
def apply_integrate_f_numba(col_a, col_b, col_N):
n = len(col_N)
- result = np.empty(n, dtype='float64')
+ result = np.empty(n, dtype="float64")
assert len(col_a) == len(col_b) == n
for i in range(n):
result[i] = integrate_f_numba(col_a[i], col_b[i], col_N[i])
@@ -358,10 +355,10 @@ take the plain Python code from above and annotate with the ``@jit`` decorator.
def compute_numba(df):
- result = apply_integrate_f_numba(df['a'].to_numpy(),
- df['b'].to_numpy(),
- df['N'].to_numpy())
- return pd.Series(result, index=df.index, name='result')
+ result = apply_integrate_f_numba(
+ df["a"].to_numpy(), df["b"].to_numpy(), df["N"].to_numpy()
+ )
+ return pd.Series(result, index=df.index, name="result")
Note that we directly pass NumPy arrays to the Numba function. ``compute_numba`` is just a wrapper that provides a
nicer interface by passing/returning pandas objects.
@@ -403,15 +400,15 @@ Consider the following toy example of doubling each observation:
.. code-block:: ipython
# Custom function without numba
- In [5]: %timeit df['col1_doubled'] = df['a'].apply(double_every_value_nonumba) # noqa E501
+ In [5]: %timeit df["col1_doubled"] = df["a"].apply(double_every_value_nonumba) # noqa E501
1000 loops, best of 3: 797 us per loop
# Standard implementation (faster than a custom function)
- In [6]: %timeit df['col1_doubled'] = df['a'] * 2
+ In [6]: %timeit df["col1_doubled"] = df["a"] * 2
1000 loops, best of 3: 233 us per loop
# Custom function with numba
- In [7]: %timeit df['col1_doubled'] = double_every_value_withnumba(df['a'].to_numpy())
+ In [7]: %timeit df["col1_doubled"] = double_every_value_withnumba(df["a"].to_numpy())
1000 loops, best of 3: 145 us per loop
Caveats
@@ -487,7 +484,7 @@ These operations are supported by :func:`pandas.eval`:
* ``list`` and ``tuple`` literals, e.g., ``[1, 2]`` or ``(1, 2)``
* Attribute access, e.g., ``df.a``
* Subscript expressions, e.g., ``df[0]``
-* Simple variable evaluation, e.g., ``pd.eval('df')`` (this is not very useful)
+* Simple variable evaluation, e.g., ``pd.eval("df")`` (this is not very useful)
* Math functions: ``sin``, ``cos``, ``exp``, ``log``, ``expm1``, ``log1p``,
``sqrt``, ``sinh``, ``cosh``, ``tanh``, ``arcsin``, ``arccos``, ``arctan``, ``arccosh``,
``arcsinh``, ``arctanh``, ``abs``, ``arctan2`` and ``log10``.
@@ -537,7 +534,7 @@ Now let's compare adding them together using plain ol' Python versus
.. ipython:: python
- %timeit pd.eval('df1 + df2 + df3 + df4')
+ %timeit pd.eval("df1 + df2 + df3 + df4")
Now let's do the same thing but with comparisons:
@@ -548,7 +545,7 @@ Now let's do the same thing but with comparisons:
.. ipython:: python
- %timeit pd.eval('(df1 > 0) & (df2 > 0) & (df3 > 0) & (df4 > 0)')
+ %timeit pd.eval("(df1 > 0) & (df2 > 0) & (df3 > 0) & (df4 > 0)")
:func:`~pandas.eval` also works with unaligned pandas objects:
@@ -560,7 +557,7 @@ Now let's do the same thing but with comparisons:
.. ipython:: python
- %timeit pd.eval('df1 + df2 + df3 + df4 + s')
+ %timeit pd.eval("df1 + df2 + df3 + df4 + s")
.. note::
@@ -587,19 +584,19 @@ evaluate an expression in the "context" of a :class:`~pandas.DataFrame`.
:suppress:
try:
- del a
+ del a
except NameError:
- pass
+ pass
try:
- del b
+ del b
except NameError:
- pass
+ pass
.. ipython:: python
- df = pd.DataFrame(np.random.randn(5, 2), columns=['a', 'b'])
- df.eval('a + b')
+ df = pd.DataFrame(np.random.randn(5, 2), columns=["a", "b"])
+ df.eval("a + b")
Any expression that is a valid :func:`pandas.eval` expression is also a valid
:meth:`DataFrame.eval` expression, with the added benefit that you don't have to
@@ -617,9 +614,9 @@ on the original ``DataFrame`` or return a copy with the new column.
.. ipython:: python
df = pd.DataFrame(dict(a=range(5), b=range(5, 10)))
- df.eval('c = a + b', inplace=True)
- df.eval('d = a + b + c', inplace=True)
- df.eval('a = 1', inplace=True)
+ df.eval("c = a + b", inplace=True)
+ df.eval("d = a + b + c", inplace=True)
+ df.eval("a = 1", inplace=True)
df
When ``inplace`` is set to ``False``, the default, a copy of the ``DataFrame`` with the
@@ -628,7 +625,7 @@ new or modified columns is returned and the original frame is unchanged.
.. ipython:: python
df
- df.eval('e = a - c', inplace=False)
+ df.eval("e = a - c", inplace=False)
df
As a convenience, multiple assignments can be performed by using a
@@ -636,19 +633,22 @@ multi-line string.
.. ipython:: python
- df.eval("""
+ df.eval(
+ """
c = a + b
d = a + b + c
- a = 1""", inplace=False)
+ a = 1""",
+ inplace=False,
+ )
The equivalent in standard Python would be
.. ipython:: python
df = pd.DataFrame(dict(a=range(5), b=range(5, 10)))
- df['c'] = df['a'] + df['b']
- df['d'] = df['a'] + df['b'] + df['c']
- df['a'] = 1
+ df["c"] = df["a"] + df["b"]
+ df["d"] = df["a"] + df["b"] + df["c"]
+ df["a"] = 1
df
The ``query`` method has a ``inplace`` keyword which determines
@@ -657,8 +657,8 @@ whether the query modifies the original frame.
.. ipython:: python
df = pd.DataFrame(dict(a=range(5), b=range(5, 10)))
- df.query('a > 2')
- df.query('a > 2', inplace=True)
+ df.query("a > 2")
+ df.query("a > 2", inplace=True)
df
Local variables
@@ -669,10 +669,10 @@ expression by placing the ``@`` character in front of the name. For example,
.. ipython:: python
- df = pd.DataFrame(np.random.randn(5, 2), columns=list('ab'))
+ df = pd.DataFrame(np.random.randn(5, 2), columns=list("ab"))
newcol = np.random.randn(len(df))
- df.eval('b + @newcol')
- df.query('b < @newcol')
+ df.eval("b + @newcol")
+ df.query("b < @newcol")
If you don't prefix the local variable with ``@``, pandas will raise an
exception telling you the variable is undefined.
@@ -685,8 +685,8 @@ name in an expression.
.. ipython:: python
a = np.random.randn()
- df.query('@a < a')
- df.loc[a < df['a']] # same as the previous expression
+ df.query("@a < a")
+ df.loc[a < df["a"]] # same as the previous expression
With :func:`pandas.eval` you cannot use the ``@`` prefix *at all*, because it
isn't defined in that context. ``pandas`` will let you know this if you try to
@@ -696,14 +696,14 @@ use ``@`` in a top-level call to :func:`pandas.eval`. For example,
:okexcept:
a, b = 1, 2
- pd.eval('@a + b')
+ pd.eval("@a + b")
In this case, you should simply refer to the variables like you would in
standard Python.
.. ipython:: python
- pd.eval('a + b')
+ pd.eval("a + b")
:func:`pandas.eval` parsers
@@ -723,10 +723,10 @@ semantics.
.. ipython:: python
- expr = '(df1 > 0) & (df2 > 0) & (df3 > 0) & (df4 > 0)'
- x = pd.eval(expr, parser='python')
- expr_no_parens = 'df1 > 0 & df2 > 0 & df3 > 0 & df4 > 0'
- y = pd.eval(expr_no_parens, parser='pandas')
+ expr = "(df1 > 0) & (df2 > 0) & (df3 > 0) & (df4 > 0)"
+ x = pd.eval(expr, parser="python")
+ expr_no_parens = "df1 > 0 & df2 > 0 & df3 > 0 & df4 > 0"
+ y = pd.eval(expr_no_parens, parser="pandas")
np.all(x == y)
@@ -735,10 +735,10 @@ well:
.. ipython:: python
- expr = '(df1 > 0) & (df2 > 0) & (df3 > 0) & (df4 > 0)'
- x = pd.eval(expr, parser='python')
- expr_with_ands = 'df1 > 0 and df2 > 0 and df3 > 0 and df4 > 0'
- y = pd.eval(expr_with_ands, parser='pandas')
+ expr = "(df1 > 0) & (df2 > 0) & (df3 > 0) & (df4 > 0)"
+ x = pd.eval(expr, parser="python")
+ expr_with_ands = "df1 > 0 and df2 > 0 and df3 > 0 and df4 > 0"
+ y = pd.eval(expr_with_ands, parser="pandas")
np.all(x == y)
@@ -768,7 +768,7 @@ is a bit slower (not by much) than evaluating the same expression in Python
.. ipython:: python
- %timeit pd.eval('df1 + df2 + df3 + df4', engine='python')
+ %timeit pd.eval("df1 + df2 + df3 + df4", engine="python")
:func:`pandas.eval` performance
@@ -812,10 +812,11 @@ you have an expression--for example
.. ipython:: python
- df = pd.DataFrame({'strings': np.repeat(list('cba'), 3),
- 'nums': np.repeat(range(3), 3)})
+ df = pd.DataFrame(
+ {"strings": np.repeat(list("cba"), 3), "nums": np.repeat(range(3), 3)}
+ )
df
- df.query('strings == "a" and nums == 1')
+ df.query("strings == 'a' and nums == 1")
the numeric part of the comparison (``nums == 1``) will be evaluated by
``numexpr``.
diff --git a/setup.cfg b/setup.cfg
index 73986f692b6cd..8702e903d825b 100644
--- a/setup.cfg
+++ b/setup.cfg
@@ -39,7 +39,8 @@ bootstrap =
import pandas as pd
np # avoiding error when importing again numpy or pandas
pd # (in some cases we want to do it to show users)
-ignore = E402, # module level import not at top of file
+ignore = E203, # space before : (needed for how black formats slicing)
+ E402, # module level import not at top of file
W503, # line break before binary operator
# Classes/functions in different blocks can generate those errors
E302, # expected 2 blank lines, found 0
| ref #36777 | https://api.github.com/repos/pandas-dev/pandas/pulls/36849 | 2020-10-04T04:38:34Z | 2020-10-04T17:39:24Z | 2020-10-04T17:39:24Z | 2020-10-05T05:57:39Z |
REF: IndexOpsMixin wrapping | diff --git a/pandas/core/base.py b/pandas/core/base.py
index 24bbd28ddc421..1063e742e38c8 100644
--- a/pandas/core/base.py
+++ b/pandas/core/base.py
@@ -4,7 +4,7 @@
import builtins
import textwrap
-from typing import Any, Callable, Dict, FrozenSet, List, Optional, Union, cast
+from typing import Any, Callable, Dict, FrozenSet, List, Optional, TypeVar, Union, cast
import numpy as np
@@ -43,6 +43,8 @@
duplicated="IndexOpsMixin",
)
+_T = TypeVar("_T", bound="IndexOpsMixin")
+
class PandasObject(DirNamesMixin):
"""
@@ -604,7 +606,7 @@ def _values(self) -> Union[ExtensionArray, np.ndarray]:
# must be defined here as a property for mypy
raise AbstractMethodError(self)
- def transpose(self, *args, **kwargs):
+ def transpose(self: _T, *args, **kwargs) -> _T:
"""
Return the transpose, which is by definition self.
@@ -851,10 +853,10 @@ def to_numpy(self, dtype=None, copy=False, na_value=lib.no_default, **kwargs):
return result
@property
- def empty(self):
+ def empty(self) -> bool:
return not self.size
- def max(self, axis=None, skipna=True, *args, **kwargs):
+ def max(self, axis=None, skipna: bool = True, *args, **kwargs):
"""
Return the maximum value of the Index.
@@ -899,7 +901,7 @@ def max(self, axis=None, skipna=True, *args, **kwargs):
return nanops.nanmax(self._values, skipna=skipna)
@doc(op="max", oppose="min", value="largest")
- def argmax(self, axis=None, skipna=True, *args, **kwargs):
+ def argmax(self, axis=None, skipna: bool = True, *args, **kwargs) -> int:
"""
Return int position of the {value} value in the Series.
@@ -954,7 +956,7 @@ def argmax(self, axis=None, skipna=True, *args, **kwargs):
nv.validate_argmax_with_skipna(skipna, args, kwargs)
return nanops.nanargmax(self._values, skipna=skipna)
- def min(self, axis=None, skipna=True, *args, **kwargs):
+ def min(self, axis=None, skipna: bool = True, *args, **kwargs):
"""
Return the minimum value of the Index.
@@ -999,7 +1001,7 @@ def min(self, axis=None, skipna=True, *args, **kwargs):
return nanops.nanmin(self._values, skipna=skipna)
@doc(argmax, op="min", oppose="max", value="smallest")
- def argmin(self, axis=None, skipna=True, *args, **kwargs):
+ def argmin(self, axis=None, skipna=True, *args, **kwargs) -> int:
nv.validate_minmax_axis(axis)
nv.validate_argmax_with_skipna(skipna, args, kwargs)
return nanops.nanargmin(self._values, skipna=skipna)
@@ -1054,6 +1056,9 @@ def hasnans(self):
"""
return bool(isna(self).any())
+ def isna(self):
+ return isna(self._values)
+
def _reduce(
self,
op,
@@ -1161,7 +1166,12 @@ def map_f(values, f):
return new_values
def value_counts(
- self, normalize=False, sort=True, ascending=False, bins=None, dropna=True
+ self,
+ normalize: bool = False,
+ sort: bool = True,
+ ascending: bool = False,
+ bins=None,
+ dropna: bool = True,
):
"""
Return a Series containing counts of unique values.
@@ -1500,20 +1510,9 @@ def searchsorted(self, value, side="left", sorter=None) -> np.ndarray:
return algorithms.searchsorted(self._values, value, side=side, sorter=sorter)
def drop_duplicates(self, keep="first"):
- if isinstance(self, ABCIndexClass):
- if self.is_unique:
- return self._shallow_copy()
-
duplicated = self.duplicated(keep=keep)
result = self[np.logical_not(duplicated)]
return result
def duplicated(self, keep="first"):
- if isinstance(self, ABCIndexClass):
- if self.is_unique:
- return np.zeros(len(self), dtype=bool)
- return duplicated(self, keep=keep)
- else:
- return self._constructor(
- duplicated(self, keep=keep), index=self.index
- ).__finalize__(self, method="duplicated")
+ return duplicated(self._values, keep=keep)
diff --git a/pandas/core/indexes/base.py b/pandas/core/indexes/base.py
index 99d9568926df4..deec01cf88a7b 100644
--- a/pandas/core/indexes/base.py
+++ b/pandas/core/indexes/base.py
@@ -2366,6 +2366,9 @@ def drop_duplicates(self, keep="first"):
>>> idx.drop_duplicates(keep=False)
Index(['cow', 'beetle', 'hippo'], dtype='object')
"""
+ if self.is_unique:
+ return self._shallow_copy()
+
return super().drop_duplicates(keep=keep)
def duplicated(self, keep="first"):
@@ -2422,6 +2425,9 @@ def duplicated(self, keep="first"):
>>> idx.duplicated(keep=False)
array([ True, False, True, False, True])
"""
+ if self.is_unique:
+ # fastpath available bc we are immutable
+ return np.zeros(len(self), dtype=bool)
return super().duplicated(keep=keep)
def _get_unique_index(self, dropna: bool = False):
diff --git a/pandas/core/series.py b/pandas/core/series.py
index 5cc163807fac6..3ad4efa4a43c1 100644
--- a/pandas/core/series.py
+++ b/pandas/core/series.py
@@ -2053,7 +2053,9 @@ def duplicated(self, keep="first") -> "Series":
4 True
dtype: bool
"""
- return super().duplicated(keep=keep)
+ res = base.IndexOpsMixin.duplicated(self, keep=keep)
+ result = self._constructor(res, index=self.index)
+ return result.__finalize__(self, method="duplicated")
def idxmin(self, axis=0, skipna=True, *args, **kwargs):
"""
@@ -4776,7 +4778,7 @@ def _convert_dtypes(
@doc(NDFrame.isna, klass=_shared_doc_kwargs["klass"])
def isna(self) -> "Series":
- return super().isna()
+ return generic.NDFrame.isna(self)
@doc(NDFrame.isna, klass=_shared_doc_kwargs["klass"])
def isnull(self) -> "Series":
| - [ ] closes #xxxx
- [ ] tests added / passed
- [ ] passes `black pandas`
- [ ] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [ ] whatsnew entry
| https://api.github.com/repos/pandas-dev/pandas/pulls/36848 | 2020-10-04T02:54:10Z | 2020-10-08T20:46:38Z | 2020-10-08T20:46:38Z | 2020-10-08T21:19:55Z |
CI: pin pymysql #36465 | diff --git a/ci/deps/travis-37-cov.yaml b/ci/deps/travis-37-cov.yaml
index 7d5104a58ce83..c89b42ef06a2e 100644
--- a/ci/deps/travis-37-cov.yaml
+++ b/ci/deps/travis-37-cov.yaml
@@ -32,7 +32,7 @@ dependencies:
- google-cloud-bigquery>=1.27.2 # GH 36436
- psycopg2
- pyarrow>=0.15.0
- - pymysql=0.7.11
+ - pymysql<0.10.0 # temporary pin, GH 36465
- pytables
- python-snappy
- python-dateutil
@@ -40,7 +40,7 @@ dependencies:
- s3fs>=0.4.0
- scikit-learn
- scipy
- - sqlalchemy=1.3.0
+ - sqlalchemy
- statsmodels
- xarray
- xlrd
| Part of #36465
| https://api.github.com/repos/pandas-dev/pandas/pulls/36847 | 2020-10-04T02:51:52Z | 2020-10-10T16:24:07Z | 2020-10-10T16:24:06Z | 2020-10-10T21:32:25Z |
Update documentation | diff --git a/README.md b/README.md
index a2f2f1c04442a..4ab684f36c054 100644
--- a/README.md
+++ b/README.md
@@ -20,7 +20,7 @@
## What is it?
-**pandas** is a Python package that provides fast, flexible, and expressive data
+**Pandas** is a Python package that provides fast, flexible, and expressive data
structures designed to make working with "relational" or "labeled" data both
easy and intuitive. It aims to be the fundamental high-level building block for
doing practical, **real world** data analysis in Python. Additionally, it has
| Capitalization error
| https://api.github.com/repos/pandas-dev/pandas/pulls/36846 | 2020-10-04T02:40:08Z | 2020-10-04T03:18:12Z | null | 2020-10-04T03:18:19Z |
DOC: normalize usage of word "pandas" | diff --git a/doc/source/development/code_style.rst b/doc/source/development/code_style.rst
index 387f65ea583a0..5aa1c1099d6e0 100644
--- a/doc/source/development/code_style.rst
+++ b/doc/source/development/code_style.rst
@@ -9,7 +9,7 @@ pandas code style guide
.. contents:: Table of contents:
:local:
-*pandas* follows the `PEP8 <https://www.python.org/dev/peps/pep-0008/>`_
+pandas follows the `PEP8 <https://www.python.org/dev/peps/pep-0008/>`_
standard and uses `Black <https://black.readthedocs.io/en/stable/>`_
and `Flake8 <https://flake8.pycqa.org/en/latest/>`_ to ensure a
consistent code format throughout the project. For details see the
diff --git a/doc/source/development/contributing.rst b/doc/source/development/contributing.rst
index d6955c5d4b8d2..17eba825d1c29 100644
--- a/doc/source/development/contributing.rst
+++ b/doc/source/development/contributing.rst
@@ -155,7 +155,7 @@ Using a Docker container
Instead of manually setting up a development environment, you can use `Docker
<https://docs.docker.com/get-docker/>`_ to automatically create the environment with just several
-commands. Pandas provides a ``DockerFile`` in the root directory to build a Docker image
+commands. pandas provides a ``DockerFile`` in the root directory to build a Docker image
with a full pandas development environment.
**Docker Commands**
@@ -190,7 +190,7 @@ Note that you might need to rebuild the C extensions if/when you merge with upst
Installing a C compiler
~~~~~~~~~~~~~~~~~~~~~~~
-Pandas uses C extensions (mostly written using Cython) to speed up certain
+pandas uses C extensions (mostly written using Cython) to speed up certain
operations. To install pandas from source, you need to compile these C
extensions, which means you need a C compiler. This process depends on which
platform you're using.
@@ -1219,7 +1219,7 @@ This test shows off several useful features of Hypothesis, as well as
demonstrating a good use-case: checking properties that should hold over
a large or complicated domain of inputs.
-To keep the Pandas test suite running quickly, parametrized tests are
+To keep the pandas test suite running quickly, parametrized tests are
preferred if the inputs or logic are simple, with Hypothesis tests reserved
for cases with complex logic or where there are too many combinations of
options or subtle interactions to test (or think of!) all of them.
diff --git a/doc/source/development/maintaining.rst b/doc/source/development/maintaining.rst
index cd084ab263477..2a21704c27005 100644
--- a/doc/source/development/maintaining.rst
+++ b/doc/source/development/maintaining.rst
@@ -207,7 +207,7 @@ Only core team members can merge pull requests. We have a few guidelines.
1. You should typically not self-merge your own pull requests. Exceptions include
things like small changes to fix CI (e.g. pinning a package version).
2. You should not merge pull requests that have an active discussion, or pull
- requests that has any ``-1`` votes from a core maintainer. Pandas operates
+ requests that has any ``-1`` votes from a core maintainer. pandas operates
by consensus.
3. For larger changes, it's good to have a +1 from at least two core team members.
diff --git a/doc/source/ecosystem.rst b/doc/source/ecosystem.rst
index ed6ce7e9759b6..cef9dae74dfd2 100644
--- a/doc/source/ecosystem.rst
+++ b/doc/source/ecosystem.rst
@@ -98,7 +98,7 @@ With Altair, you can spend more time understanding your data and its
meaning. Altair's API is simple, friendly and consistent and built on
top of the powerful Vega-Lite JSON specification. This elegant
simplicity produces beautiful and effective visualizations with a
-minimal amount of code. Altair works with Pandas DataFrames.
+minimal amount of code. Altair works with pandas DataFrames.
`Bokeh <https://bokeh.pydata.org>`__
@@ -110,7 +110,7 @@ graphics in the style of Protovis/D3, while delivering high-performance interact
large data to thin clients.
`Pandas-Bokeh <https://github.com/PatrikHlobil/Pandas-Bokeh>`__ provides a high level API
-for Bokeh that can be loaded as a native Pandas plotting backend via
+for Bokeh that can be loaded as a native pandas plotting backend via
.. code:: python
@@ -185,7 +185,7 @@ IDE
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
IPython is an interactive command shell and distributed computing
-environment. IPython tab completion works with Pandas methods and also
+environment. IPython tab completion works with pandas methods and also
attributes like DataFrame columns.
`Jupyter Notebook / Jupyter Lab <https://jupyter.org>`__
@@ -199,7 +199,7 @@ Jupyter notebooks can be converted to a number of open standard output formats
Python) through 'Download As' in the web interface and ``jupyter convert``
in a shell.
-Pandas DataFrames implement ``_repr_html_``and ``_repr_latex`` methods
+pandas DataFrames implement ``_repr_html_``and ``_repr_latex`` methods
which are utilized by Jupyter Notebook for displaying
(abbreviated) HTML or LaTeX tables. LaTeX output is properly escaped.
(Note: HTML tables may or may not be
@@ -227,7 +227,7 @@ Its `Variable Explorer <https://docs.spyder-ide.org/variableexplorer.html>`__
allows users to view, manipulate and edit pandas ``Index``, ``Series``,
and ``DataFrame`` objects like a "spreadsheet", including copying and modifying
values, sorting, displaying a "heatmap", converting data types and more.
-Pandas objects can also be renamed, duplicated, new columns added,
+pandas objects can also be renamed, duplicated, new columns added,
copyed/pasted to/from the clipboard (as TSV), and saved/loaded to/from a file.
Spyder can also import data from a variety of plain text and binary files
or the clipboard into a new pandas DataFrame via a sophisticated import wizard.
@@ -274,13 +274,13 @@ The following data feeds are available:
`Quandl/Python <https://github.com/quandl/Python>`__
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Quandl API for Python wraps the Quandl REST API to return
-Pandas DataFrames with timeseries indexes.
+pandas DataFrames with timeseries indexes.
`Pydatastream <https://github.com/vfilimonov/pydatastream>`__
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
PyDatastream is a Python interface to the
`Refinitiv Datastream (DWS) <https://www.refinitiv.com/en/products/datastream-macroeconomic-analysis>`__
-REST API to return indexed Pandas DataFrames with financial data.
+REST API to return indexed pandas DataFrames with financial data.
This package requires valid credentials for this API (non free).
`pandaSDMX <https://pandasdmx.readthedocs.io>`__
@@ -355,7 +355,7 @@ Out-of-core
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Blaze provides a standard API for doing computations with various
-in-memory and on-disk backends: NumPy, Pandas, SQLAlchemy, MongoDB, PyTables,
+in-memory and on-disk backends: NumPy, pandas, SQLAlchemy, MongoDB, PyTables,
PySpark.
`Dask <https://dask.readthedocs.io/en/latest/>`__
@@ -401,7 +401,7 @@ If also displays progress bars.
`Ray <https://ray.readthedocs.io/en/latest/pandas_on_ray.html>`__
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-Pandas on Ray is an early stage DataFrame library that wraps Pandas and transparently distributes the data and computation. The user does not need to know how many cores their system has, nor do they need to specify how to distribute the data. In fact, users can continue using their previous Pandas notebooks while experiencing a considerable speedup from Pandas on Ray, even on a single machine. Only a modification of the import statement is needed, as we demonstrate below. Once you’ve changed your import statement, you’re ready to use Pandas on Ray just like you would Pandas.
+pandas on Ray is an early stage DataFrame library that wraps pandas and transparently distributes the data and computation. The user does not need to know how many cores their system has, nor do they need to specify how to distribute the data. In fact, users can continue using their previous pandas notebooks while experiencing a considerable speedup from pandas on Ray, even on a single machine. Only a modification of the import statement is needed, as we demonstrate below. Once you’ve changed your import statement, you’re ready to use pandas on Ray just like you would pandas.
.. code:: python
@@ -412,7 +412,7 @@ Pandas on Ray is an early stage DataFrame library that wraps Pandas and transpar
`Vaex <https://docs.vaex.io/>`__
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-Increasingly, packages are being built on top of pandas to address specific needs in data preparation, analysis and visualization. Vaex is a python library for Out-of-Core DataFrames (similar to Pandas), to visualize and explore big tabular datasets. It can calculate statistics such as mean, sum, count, standard deviation etc, on an N-dimensional grid up to a billion (10\ :sup:`9`) objects/rows per second. Visualization is done using histograms, density plots and 3d volume rendering, allowing interactive exploration of big data. Vaex uses memory mapping, zero memory copy policy and lazy computations for best performance (no memory wasted).
+Increasingly, packages are being built on top of pandas to address specific needs in data preparation, analysis and visualization. Vaex is a python library for Out-of-Core DataFrames (similar to pandas), to visualize and explore big tabular datasets. It can calculate statistics such as mean, sum, count, standard deviation etc, on an N-dimensional grid up to a billion (10\ :sup:`9`) objects/rows per second. Visualization is done using histograms, density plots and 3d volume rendering, allowing interactive exploration of big data. Vaex uses memory mapping, zero memory copy policy and lazy computations for best performance (no memory wasted).
* vaex.from_pandas
* vaex.to_pandas_df
@@ -422,7 +422,7 @@ Increasingly, packages are being built on top of pandas to address specific need
Extension data types
--------------------
-Pandas provides an interface for defining
+pandas provides an interface for defining
:ref:`extension types <extending.extension-types>` to extend NumPy's type
system. The following libraries implement that interface to provide types not
found in NumPy or pandas, which work well with pandas' data containers.
diff --git a/doc/source/getting_started/comparison/comparison_with_r.rst b/doc/source/getting_started/comparison/comparison_with_r.rst
index 358bb6ad951f0..864081002086b 100644
--- a/doc/source/getting_started/comparison/comparison_with_r.rst
+++ b/doc/source/getting_started/comparison/comparison_with_r.rst
@@ -5,11 +5,11 @@
Comparison with R / R libraries
*******************************
-Since ``pandas`` aims to provide a lot of the data manipulation and analysis
+Since pandas aims to provide a lot of the data manipulation and analysis
functionality that people use `R <https://www.r-project.org/>`__ for, this page
was started to provide a more detailed look at the `R language
<https://en.wikipedia.org/wiki/R_(programming_language)>`__ and its many third
-party libraries as they relate to ``pandas``. In comparisons with R and CRAN
+party libraries as they relate to pandas. In comparisons with R and CRAN
libraries, we care about the following things:
* **Functionality / flexibility**: what can/cannot be done with each tool
@@ -21,7 +21,7 @@ libraries, we care about the following things:
This page is also here to offer a bit of a translation guide for users of these
R packages.
-For transfer of ``DataFrame`` objects from ``pandas`` to R, one option is to
+For transfer of ``DataFrame`` objects from pandas to R, one option is to
use HDF5 files, see :ref:`io.external_compatibility` for an
example.
@@ -118,7 +118,7 @@ or by integer location
df <- data.frame(matrix(rnorm(1000), ncol=100))
df[, c(1:10, 25:30, 40, 50:100)]
-Selecting multiple columns by name in ``pandas`` is straightforward
+Selecting multiple columns by name in pandas is straightforward
.. ipython:: python
@@ -235,7 +235,7 @@ since the subclass sizes are possibly irregular. Using a data.frame called
tapply(baseball$batting.average, baseball.example$team,
max)
-In ``pandas`` we may use :meth:`~pandas.pivot_table` method to handle this:
+In pandas we may use :meth:`~pandas.pivot_table` method to handle this:
.. ipython:: python
@@ -268,7 +268,7 @@ column's values are less than another column's values:
subset(df, a <= b)
df[df$a <= df$b,] # note the comma
-In ``pandas``, there are a few ways to perform subsetting. You can use
+In pandas, there are a few ways to perform subsetting. You can use
:meth:`~pandas.DataFrame.query` or pass an expression as if it were an
index/slice as well as standard boolean indexing:
@@ -295,7 +295,7 @@ An expression using a data.frame called ``df`` in R with the columns ``a`` and
with(df, a + b)
df$a + df$b # same as the previous expression
-In ``pandas`` the equivalent expression, using the
+In pandas the equivalent expression, using the
:meth:`~pandas.DataFrame.eval` method, would be:
.. ipython:: python
@@ -347,7 +347,7 @@ summarize ``x`` by ``month``:
mean = round(mean(x), 2),
sd = round(sd(x), 2))
-In ``pandas`` the equivalent expression, using the
+In pandas the equivalent expression, using the
:meth:`~pandas.DataFrame.groupby` method, would be:
.. ipython:: python
diff --git a/doc/source/getting_started/comparison/comparison_with_stata.rst b/doc/source/getting_started/comparison/comparison_with_stata.rst
index 7b8d9c6be61db..014506cc18327 100644
--- a/doc/source/getting_started/comparison/comparison_with_stata.rst
+++ b/doc/source/getting_started/comparison/comparison_with_stata.rst
@@ -146,7 +146,7 @@ the pandas command would be:
# alternatively, read_table is an alias to read_csv with tab delimiter
tips = pd.read_table("tips.csv", header=None)
-Pandas can also read Stata data sets in ``.dta`` format with the :func:`read_stata` function.
+pandas can also read Stata data sets in ``.dta`` format with the :func:`read_stata` function.
.. code-block:: python
@@ -172,7 +172,7 @@ Similarly in pandas, the opposite of ``read_csv`` is :meth:`DataFrame.to_csv`.
tips.to_csv("tips2.csv")
-Pandas can also export to Stata file format with the :meth:`DataFrame.to_stata` method.
+pandas can also export to Stata file format with the :meth:`DataFrame.to_stata` method.
.. code-block:: python
@@ -583,7 +583,7 @@ should be used for comparisons.
outer_join[pd.isna(outer_join["value_x"])]
outer_join[pd.notna(outer_join["value_x"])]
-Pandas also provides a variety of methods to work with missing data -- some of
+pandas also provides a variety of methods to work with missing data -- some of
which would be challenging to express in Stata. For example, there are methods to
drop all rows with any missing values, replacing missing values with a specified
value, like the mean, or forward filling from previous rows. See the
@@ -674,7 +674,7 @@ Other considerations
Disk vs memory
~~~~~~~~~~~~~~
-Pandas and Stata both operate exclusively in memory. This means that the size of
+pandas and Stata both operate exclusively in memory. This means that the size of
data able to be loaded in pandas is limited by your machine's memory.
If out of core processing is needed, one possibility is the
`dask.dataframe <https://dask.pydata.org/en/latest/dataframe.html>`_
diff --git a/doc/source/getting_started/install.rst b/doc/source/getting_started/install.rst
index a6341451b1b80..70d145c54e919 100644
--- a/doc/source/getting_started/install.rst
+++ b/doc/source/getting_started/install.rst
@@ -184,7 +184,7 @@ You can find simple installation instructions for pandas in this document: ``ins
Installing from source
~~~~~~~~~~~~~~~~~~~~~~
-See the :ref:`contributing guide <contributing>` for complete instructions on building from the git source tree. Further, see :ref:`creating a development environment <contributing.dev_env>` if you wish to create a *pandas* development environment.
+See the :ref:`contributing guide <contributing>` for complete instructions on building from the git source tree. Further, see :ref:`creating a development environment <contributing.dev_env>` if you wish to create a pandas development environment.
Running the test suite
----------------------
@@ -249,7 +249,7 @@ Recommended dependencies
Optional dependencies
~~~~~~~~~~~~~~~~~~~~~
-Pandas has many optional dependencies that are only used for specific methods.
+pandas has many optional dependencies that are only used for specific methods.
For example, :func:`pandas.read_hdf` requires the ``pytables`` package, while
:meth:`DataFrame.to_markdown` requires the ``tabulate`` package. If the
optional dependency is not installed, pandas will raise an ``ImportError`` when
diff --git a/doc/source/getting_started/overview.rst b/doc/source/getting_started/overview.rst
index 57d87d4ec8a91..3043cf25c5312 100644
--- a/doc/source/getting_started/overview.rst
+++ b/doc/source/getting_started/overview.rst
@@ -6,7 +6,7 @@
Package overview
****************
-**pandas** is a `Python <https://www.python.org>`__ package providing fast,
+pandas is a `Python <https://www.python.org>`__ package providing fast,
flexible, and expressive data structures designed to make working with
"relational" or "labeled" data both easy and intuitive. It aims to be the
fundamental high-level building block for doing practical, **real-world** data
diff --git a/doc/source/reference/arrays.rst b/doc/source/reference/arrays.rst
index 1725c415fa020..5c068d8404cd6 100644
--- a/doc/source/reference/arrays.rst
+++ b/doc/source/reference/arrays.rst
@@ -16,7 +16,7 @@ For some data types, pandas extends NumPy's type system. String aliases for thes
can be found at :ref:`basics.dtypes`.
=================== ========================= ================== =============================
-Kind of Data Pandas Data Type Scalar Array
+Kind of Data pandas Data Type Scalar Array
=================== ========================= ================== =============================
TZ-aware datetime :class:`DatetimeTZDtype` :class:`Timestamp` :ref:`api.arrays.datetime`
Timedeltas (none) :class:`Timedelta` :ref:`api.arrays.timedelta`
@@ -29,7 +29,7 @@ Strings :class:`StringDtype` :class:`str` :ref:`api.array
Boolean (with NA) :class:`BooleanDtype` :class:`bool` :ref:`api.arrays.bool`
=================== ========================= ================== =============================
-Pandas and third-party libraries can extend NumPy's type system (see :ref:`extending.extension-types`).
+pandas and third-party libraries can extend NumPy's type system (see :ref:`extending.extension-types`).
The top-level :meth:`array` method can be used to create a new array, which may be
stored in a :class:`Series`, :class:`Index`, or as a column in a :class:`DataFrame`.
@@ -43,7 +43,7 @@ stored in a :class:`Series`, :class:`Index`, or as a column in a :class:`DataFra
Datetime data
-------------
-NumPy cannot natively represent timezone-aware datetimes. Pandas supports this
+NumPy cannot natively represent timezone-aware datetimes. pandas supports this
with the :class:`arrays.DatetimeArray` extension array, which can hold timezone-naive
or timezone-aware values.
@@ -162,7 +162,7 @@ If the data are tz-aware, then every value in the array must have the same timez
Timedelta data
--------------
-NumPy can natively represent timedeltas. Pandas provides :class:`Timedelta`
+NumPy can natively represent timedeltas. pandas provides :class:`Timedelta`
for symmetry with :class:`Timestamp`.
.. autosummary::
@@ -217,7 +217,7 @@ A collection of timedeltas may be stored in a :class:`TimedeltaArray`.
Timespan data
-------------
-Pandas represents spans of times as :class:`Period` objects.
+pandas represents spans of times as :class:`Period` objects.
Period
------
@@ -352,7 +352,7 @@ Nullable integer
----------------
:class:`numpy.ndarray` cannot natively represent integer-data with missing values.
-Pandas provides this through :class:`arrays.IntegerArray`.
+pandas provides this through :class:`arrays.IntegerArray`.
.. autosummary::
:toctree: api/
@@ -378,7 +378,7 @@ Pandas provides this through :class:`arrays.IntegerArray`.
Categorical data
----------------
-Pandas defines a custom data type for representing data that can take only a
+pandas defines a custom data type for representing data that can take only a
limited, fixed set of values. The dtype of a ``Categorical`` can be described by
a :class:`pandas.api.types.CategoricalDtype`.
diff --git a/doc/source/reference/series.rst b/doc/source/reference/series.rst
index 5131d35334693..f1069e46b56cc 100644
--- a/doc/source/reference/series.rst
+++ b/doc/source/reference/series.rst
@@ -280,7 +280,7 @@ Time Series-related
Accessors
---------
-Pandas provides dtype-specific methods under various accessors.
+pandas provides dtype-specific methods under various accessors.
These are separate namespaces within :class:`Series` that only apply
to specific data types.
diff --git a/doc/source/user_guide/basics.rst b/doc/source/user_guide/basics.rst
index e348111fe7881..5fa214d2ed389 100644
--- a/doc/source/user_guide/basics.rst
+++ b/doc/source/user_guide/basics.rst
@@ -52,7 +52,7 @@ Note, **these attributes can be safely assigned to**!
df.columns = [x.lower() for x in df.columns]
df
-Pandas objects (:class:`Index`, :class:`Series`, :class:`DataFrame`) can be
+pandas objects (:class:`Index`, :class:`Series`, :class:`DataFrame`) can be
thought of as containers for arrays, which hold the actual data and do the
actual computation. For many types, the underlying array is a
:class:`numpy.ndarray`. However, pandas and 3rd party libraries may *extend*
@@ -410,7 +410,7 @@ data structure with a scalar value:
pd.Series(['foo', 'bar', 'baz']) == 'foo'
pd.Index(['foo', 'bar', 'baz']) == 'foo'
-Pandas also handles element-wise comparisons between different array-like
+pandas also handles element-wise comparisons between different array-like
objects of the same length:
.. ipython:: python
@@ -804,7 +804,7 @@ Is equivalent to:
(df_p.pipe(extract_city_name)
.pipe(add_country_name, country_name="US"))
-Pandas encourages the second style, which is known as method chaining.
+pandas encourages the second style, which is known as method chaining.
``pipe`` makes it easy to use your own or another library's functions
in method chains, alongside pandas' methods.
@@ -1498,7 +1498,7 @@ Thus, for example, iterating over a DataFrame gives you the column names:
print(col)
-Pandas objects also have the dict-like :meth:`~DataFrame.items` method to
+pandas objects also have the dict-like :meth:`~DataFrame.items` method to
iterate over the (key, value) pairs.
To iterate over the rows of a DataFrame, you can use the following methods:
@@ -1741,7 +1741,7 @@ always uses them).
.. note::
Prior to pandas 1.0, string methods were only available on ``object`` -dtype
- ``Series``. Pandas 1.0 added the :class:`StringDtype` which is dedicated
+ ``Series``. pandas 1.0 added the :class:`StringDtype` which is dedicated
to strings. See :ref:`text.types` for more.
Please see :ref:`Vectorized String Methods <text.string_methods>` for a complete
@@ -1752,7 +1752,7 @@ description.
Sorting
-------
-Pandas supports three kinds of sorting: sorting by index labels,
+pandas supports three kinds of sorting: sorting by index labels,
sorting by column values, and sorting by a combination of both.
.. _basics.sort_index:
@@ -1995,7 +1995,7 @@ columns of a DataFrame. NumPy provides support for ``float``,
``int``, ``bool``, ``timedelta64[ns]`` and ``datetime64[ns]`` (note that NumPy
does not support timezone-aware datetimes).
-Pandas and third-party libraries *extend* NumPy's type system in a few places.
+pandas and third-party libraries *extend* NumPy's type system in a few places.
This section describes the extensions pandas has made internally.
See :ref:`extending.extension-types` for how to write your own extension that
works with pandas. See :ref:`ecosystem.extensions` for a list of third-party
@@ -2032,7 +2032,7 @@ documentation sections for more on each type.
| Boolean (with NA) | :class:`BooleanDtype` | :class:`bool` | :class:`arrays.BooleanArray` | ``'boolean'`` | :ref:`api.arrays.bool` |
+-------------------+---------------------------+--------------------+-------------------------------+-----------------------------------------+-------------------------------+
-Pandas has two ways to store strings.
+pandas has two ways to store strings.
1. ``object`` dtype, which can hold any Python object, including strings.
2. :class:`StringDtype`, which is dedicated to strings.
@@ -2424,5 +2424,5 @@ All NumPy dtypes are subclasses of ``numpy.generic``:
.. note::
- Pandas also defines the types ``category``, and ``datetime64[ns, tz]``, which are not integrated into the normal
+ pandas also defines the types ``category``, and ``datetime64[ns, tz]``, which are not integrated into the normal
NumPy hierarchy and won't show up with the above function.
diff --git a/doc/source/user_guide/boolean.rst b/doc/source/user_guide/boolean.rst
index d690c1093399a..76c922fcef638 100644
--- a/doc/source/user_guide/boolean.rst
+++ b/doc/source/user_guide/boolean.rst
@@ -82,7 +82,7 @@ the ``NA`` really is ``True`` or ``False``, since ``True & True`` is ``True``,
but ``True & False`` is ``False``, so we can't determine the output.
-This differs from how ``np.nan`` behaves in logical operations. Pandas treated
+This differs from how ``np.nan`` behaves in logical operations. pandas treated
``np.nan`` is *always false in the output*.
In ``or``
diff --git a/doc/source/user_guide/categorical.rst b/doc/source/user_guide/categorical.rst
index 6a8e1767ef7e8..67f11bbb45b02 100644
--- a/doc/source/user_guide/categorical.rst
+++ b/doc/source/user_guide/categorical.rst
@@ -1011,7 +1011,7 @@ The following differences to R's factor functions can be observed:
* In contrast to R's ``factor`` function, using categorical data as the sole input to create a
new categorical series will *not* remove unused categories but create a new categorical series
which is equal to the passed in one!
-* R allows for missing values to be included in its ``levels`` (pandas' ``categories``). Pandas
+* R allows for missing values to be included in its ``levels`` (pandas' ``categories``). pandas
does not allow ``NaN`` categories, but missing values can still be in the ``values``.
@@ -1107,7 +1107,7 @@ are not numeric data (even in the case that ``.categories`` is numeric).
dtype in apply
~~~~~~~~~~~~~~
-Pandas currently does not preserve the dtype in apply functions: If you apply along rows you get
+pandas currently does not preserve the dtype in apply functions: If you apply along rows you get
a ``Series`` of ``object`` ``dtype`` (same as getting a row -> getting one element will return a
basic type) and applying along columns will also convert to object. ``NaN`` values are unaffected.
You can use ``fillna`` to handle missing values before applying a function.
diff --git a/doc/source/user_guide/cookbook.rst b/doc/source/user_guide/cookbook.rst
index 0a30d865f3c23..214b8a680fa7e 100644
--- a/doc/source/user_guide/cookbook.rst
+++ b/doc/source/user_guide/cookbook.rst
@@ -15,7 +15,7 @@ Simplified, condensed, new-user friendly, in-line examples have been inserted wh
augment the Stack-Overflow and GitHub links. Many of the links contain expanded information,
above what the in-line examples offer.
-Pandas (pd) and Numpy (np) are the only two abbreviated imported modules. The rest are kept
+pandas (pd) and NumPy (np) are the only two abbreviated imported modules. The rest are kept
explicitly imported for newer users.
These examples are written for Python 3. Minor tweaks might be necessary for earlier python
diff --git a/doc/source/user_guide/dsintro.rst b/doc/source/user_guide/dsintro.rst
index d698b316d321e..905877cca61db 100644
--- a/doc/source/user_guide/dsintro.rst
+++ b/doc/source/user_guide/dsintro.rst
@@ -78,13 +78,13 @@ Series can be instantiated from dicts:
When the data is a dict, and an index is not passed, the ``Series`` index
will be ordered by the dict's insertion order, if you're using Python
- version >= 3.6 and Pandas version >= 0.23.
+ version >= 3.6 and pandas version >= 0.23.
- If you're using Python < 3.6 or Pandas < 0.23, and an index is not passed,
+ If you're using Python < 3.6 or pandas < 0.23, and an index is not passed,
the ``Series`` index will be the lexically ordered list of dict keys.
In the example above, if you were on a Python version lower than 3.6 or a
-Pandas version lower than 0.23, the ``Series`` would be ordered by the lexical
+pandas version lower than 0.23, the ``Series`` would be ordered by the lexical
order of the dict keys (i.e. ``['a', 'b', 'c']`` rather than ``['b', 'a', 'c']``).
If an index is passed, the values in data corresponding to the labels in the
@@ -151,7 +151,7 @@ index (to disable :ref:`automatic alignment <dsintro.alignment>`, for example).
:attr:`Series.array` will always be an :class:`~pandas.api.extensions.ExtensionArray`.
Briefly, an ExtensionArray is a thin wrapper around one or more *concrete* arrays like a
-:class:`numpy.ndarray`. Pandas knows how to take an ``ExtensionArray`` and
+:class:`numpy.ndarray`. pandas knows how to take an ``ExtensionArray`` and
store it in a ``Series`` or a column of a ``DataFrame``.
See :ref:`basics.dtypes` for more.
@@ -290,9 +290,9 @@ based on common sense rules.
When the data is a dict, and ``columns`` is not specified, the ``DataFrame``
columns will be ordered by the dict's insertion order, if you are using
- Python version >= 3.6 and Pandas >= 0.23.
+ Python version >= 3.6 and pandas >= 0.23.
- If you are using Python < 3.6 or Pandas < 0.23, and ``columns`` is not
+ If you are using Python < 3.6 or pandas < 0.23, and ``columns`` is not
specified, the ``DataFrame`` columns will be the lexically ordered list of dict
keys.
diff --git a/doc/source/user_guide/duplicates.rst b/doc/source/user_guide/duplicates.rst
index 2993ca7799510..7cda067fb24ad 100644
--- a/doc/source/user_guide/duplicates.rst
+++ b/doc/source/user_guide/duplicates.rst
@@ -79,7 +79,7 @@ unique with :attr:`Index.is_unique`:
.. note::
Checking whether an index is unique is somewhat expensive for large datasets.
- Pandas does cache this result, so re-checking on the same index is very fast.
+ pandas does cache this result, so re-checking on the same index is very fast.
:meth:`Index.duplicated` will return a boolean ndarray indicating whether a
label is repeated.
diff --git a/doc/source/user_guide/enhancingperf.rst b/doc/source/user_guide/enhancingperf.rst
index ce9db0a5279c3..baad00076dbe6 100644
--- a/doc/source/user_guide/enhancingperf.rst
+++ b/doc/source/user_guide/enhancingperf.rst
@@ -689,7 +689,7 @@ name in an expression.
df.loc[a < df['a']] # same as the previous expression
With :func:`pandas.eval` you cannot use the ``@`` prefix *at all*, because it
-isn't defined in that context. ``pandas`` will let you know this if you try to
+isn't defined in that context. pandas will let you know this if you try to
use ``@`` in a top-level call to :func:`pandas.eval`. For example,
.. ipython:: python
diff --git a/doc/source/user_guide/groupby.rst b/doc/source/user_guide/groupby.rst
index 9696f14f03b56..1dbabd12dbdc9 100644
--- a/doc/source/user_guide/groupby.rst
+++ b/doc/source/user_guide/groupby.rst
@@ -614,7 +614,7 @@ For a grouped ``DataFrame``, you can rename in a similar manner:
grouped["C"].agg(["sum", "sum"])
- Pandas *does* allow you to provide multiple lambdas. In this case, pandas
+ pandas *does* allow you to provide multiple lambdas. In this case, pandas
will mangle the name of the (nameless) lambda functions, appending ``_<i>``
to each subsequent lambda.
@@ -636,7 +636,7 @@ accepts the special syntax in :meth:`GroupBy.agg`, known as "named aggregation",
- The keywords are the *output* column names
- The values are tuples whose first element is the column to select
- and the second element is the aggregation to apply to that column. Pandas
+ and the second element is the aggregation to apply to that column. pandas
provides the ``pandas.NamedAgg`` namedtuple with the fields ``['column', 'aggfunc']``
to make it clearer what the arguments are. As usual, the aggregation can
be a callable or a string alias.
diff --git a/doc/source/user_guide/indexing.rst b/doc/source/user_guide/indexing.rst
index b11baad1e3eb5..530fdfba7d12c 100644
--- a/doc/source/user_guide/indexing.rst
+++ b/doc/source/user_guide/indexing.rst
@@ -46,7 +46,7 @@ Different choices for indexing
------------------------------
Object selection has had a number of user-requested additions in order to
-support more explicit location based indexing. Pandas now supports three types
+support more explicit location based indexing. pandas now supports three types
of multi-axis indexing.
* ``.loc`` is primarily label based, but may also be used with a boolean array. ``.loc`` will raise ``KeyError`` when the items are not found. Allowed inputs are:
@@ -315,7 +315,7 @@ Selection by label
.. versionchanged:: 1.0.0
- Pandas will raise a ``KeyError`` if indexing with a list with missing labels. See :ref:`list-like Using loc with
+ pandas will raise a ``KeyError`` if indexing with a list with missing labels. See :ref:`list-like Using loc with
missing keys in a list is Deprecated <indexing.deprecate_loc_reindex_listlike>`.
pandas provides a suite of methods in order to have **purely label based indexing**. This is a strict inclusion based protocol.
@@ -433,7 +433,7 @@ Selection by position
This is sometimes called ``chained assignment`` and should be avoided.
See :ref:`Returning a View versus Copy <indexing.view_versus_copy>`.
-Pandas provides a suite of methods in order to get **purely integer based indexing**. The semantics follow closely Python and NumPy slicing. These are ``0-based`` indexing. When slicing, the start bound is *included*, while the upper bound is *excluded*. Trying to use a non-integer, even a **valid** label will raise an ``IndexError``.
+pandas provides a suite of methods in order to get **purely integer based indexing**. The semantics follow closely Python and NumPy slicing. These are ``0-based`` indexing. When slicing, the start bound is *included*, while the upper bound is *excluded*. Trying to use a non-integer, even a **valid** label will raise an ``IndexError``.
The ``.iloc`` attribute is the primary access method. The following are valid inputs:
@@ -1812,7 +1812,7 @@ about!
Sometimes a ``SettingWithCopy`` warning will arise at times when there's no
obvious chained indexing going on. **These** are the bugs that
-``SettingWithCopy`` is designed to catch! Pandas is probably trying to warn you
+``SettingWithCopy`` is designed to catch! pandas is probably trying to warn you
that you've done this:
.. code-block:: python
@@ -1835,7 +1835,7 @@ When you use chained indexing, the order and type of the indexing operation
partially determine whether the result is a slice into the original object, or
a copy of the slice.
-Pandas has the ``SettingWithCopyWarning`` because assigning to a copy of a
+pandas has the ``SettingWithCopyWarning`` because assigning to a copy of a
slice is frequently not intentional, but a mistake caused by chained indexing
returning a copy where a slice was expected.
diff --git a/doc/source/user_guide/integer_na.rst b/doc/source/user_guide/integer_na.rst
index acee1638570f7..be38736f493b5 100644
--- a/doc/source/user_guide/integer_na.rst
+++ b/doc/source/user_guide/integer_na.rst
@@ -30,7 +30,7 @@ numbers.
Construction
------------
-Pandas can represent integer data with possibly missing values using
+pandas can represent integer data with possibly missing values using
:class:`arrays.IntegerArray`. This is an :ref:`extension types <extending.extension-types>`
implemented within pandas.
diff --git a/doc/source/user_guide/io.rst b/doc/source/user_guide/io.rst
index 184894bbafe28..ae22ee836cd8c 100644
--- a/doc/source/user_guide/io.rst
+++ b/doc/source/user_guide/io.rst
@@ -894,7 +894,7 @@ take full advantage of the flexibility of the date parsing API:
)
df
-Pandas will try to call the ``date_parser`` function in three different ways. If
+pandas will try to call the ``date_parser`` function in three different ways. If
an exception is raised, the next one is tried:
1. ``date_parser`` is first called with one or more arrays as arguments,
@@ -926,7 +926,7 @@ Note that performance-wise, you should try these methods of parsing dates in ord
Parsing a CSV with mixed timezones
++++++++++++++++++++++++++++++++++
-Pandas cannot natively represent a column or index with mixed timezones. If your CSV
+pandas cannot natively represent a column or index with mixed timezones. If your CSV
file contains columns with a mixture of timezones, the default result will be
an object-dtype column with strings, even with ``parse_dates``.
@@ -1602,7 +1602,7 @@ python engine is selected explicitly using ``engine='python'``.
Reading/writing remote files
''''''''''''''''''''''''''''
-You can pass in a URL to read or write remote files to many of Pandas' IO
+You can pass in a URL to read or write remote files to many of pandas' IO
functions - the following example shows reading a CSV file:
.. code-block:: python
@@ -2265,7 +2265,7 @@ The full list of types supported are described in the Table Schema
spec. This table shows the mapping from pandas types:
=============== =================
-Pandas type Table Schema type
+pandas type Table Schema type
=============== =================
int64 integer
float64 number
@@ -2661,7 +2661,7 @@ that contain URLs.
url_df = pd.DataFrame(
{
- "name": ["Python", "Pandas"],
+ "name": ["Python", "pandas"],
"url": ["https://www.python.org/", "https://pandas.pydata.org"],
}
)
@@ -3143,7 +3143,7 @@ one can pass an :class:`~pandas.io.excel.ExcelWriter`.
Writing Excel files to memory
+++++++++++++++++++++++++++++
-Pandas supports writing Excel files to buffer-like objects such as ``StringIO`` or
+pandas supports writing Excel files to buffer-like objects such as ``StringIO`` or
``BytesIO`` using :class:`~pandas.io.excel.ExcelWriter`.
.. code-block:: python
@@ -3177,7 +3177,7 @@ Pandas supports writing Excel files to buffer-like objects such as ``StringIO``
Excel writer engines
''''''''''''''''''''
-Pandas chooses an Excel writer via two methods:
+pandas chooses an Excel writer via two methods:
1. the ``engine`` keyword argument
2. the filename extension (via the default specified in config options)
@@ -3474,7 +3474,7 @@ for some advanced strategies
.. warning::
- Pandas uses PyTables for reading and writing HDF5 files, which allows
+ pandas uses PyTables for reading and writing HDF5 files, which allows
serializing object-dtype data with pickle. Loading pickled data received from
untrusted sources can be unsafe.
@@ -4734,7 +4734,7 @@ Several caveats.
* Duplicate column names and non-string columns names are not supported.
* The ``pyarrow`` engine always writes the index to the output, but ``fastparquet`` only writes non-default
- indexes. This extra column can cause problems for non-Pandas consumers that are not expecting it. You can
+ indexes. This extra column can cause problems for non-pandas consumers that are not expecting it. You can
force including or omitting indexes with the ``index`` argument, regardless of the underlying engine.
* Index level names, if specified, must be strings.
* In the ``pyarrow`` engine, categorical dtypes for non-string types can be serialized to parquet, but will de-serialize as their primitive dtype.
@@ -4894,7 +4894,7 @@ ORC
.. versionadded:: 1.0.0
Similar to the :ref:`parquet <io.parquet>` format, the `ORC Format <https://orc.apache.org/>`__ is a binary columnar serialization
-for data frames. It is designed to make reading data frames efficient. Pandas provides *only* a reader for the
+for data frames. It is designed to make reading data frames efficient. pandas provides *only* a reader for the
ORC format, :func:`~pandas.read_orc`. This requires the `pyarrow <https://arrow.apache.org/docs/python/>`__ library.
.. _io.sql:
diff --git a/doc/source/user_guide/missing_data.rst b/doc/source/user_guide/missing_data.rst
index 3c97cc7da6edb..7eb377694910b 100644
--- a/doc/source/user_guide/missing_data.rst
+++ b/doc/source/user_guide/missing_data.rst
@@ -81,7 +81,7 @@ Integer dtypes and missing data
-------------------------------
Because ``NaN`` is a float, a column of integers with even one missing values
-is cast to floating-point dtype (see :ref:`gotchas.intna` for more). Pandas
+is cast to floating-point dtype (see :ref:`gotchas.intna` for more). pandas
provides a nullable integer array, which can be used by explicitly requesting
the dtype:
@@ -735,7 +735,7 @@ However, these can be filled in using :meth:`~DataFrame.fillna` and it will work
reindexed[crit.fillna(False)]
reindexed[crit.fillna(True)]
-Pandas provides a nullable integer dtype, but you must explicitly request it
+pandas provides a nullable integer dtype, but you must explicitly request it
when creating the series or column. Notice that we use a capital "I" in
the ``dtype="Int64"``.
diff --git a/doc/source/user_guide/scale.rst b/doc/source/user_guide/scale.rst
index f36f27269a996..7f2419bc7f19d 100644
--- a/doc/source/user_guide/scale.rst
+++ b/doc/source/user_guide/scale.rst
@@ -4,7 +4,7 @@
Scaling to large datasets
*************************
-Pandas provides data structures for in-memory analytics, which makes using pandas
+pandas provides data structures for in-memory analytics, which makes using pandas
to analyze datasets that are larger than memory datasets somewhat tricky. Even datasets
that are a sizable fraction of memory become unwieldy, as some pandas operations need
to make intermediate copies.
@@ -13,7 +13,7 @@ This document provides a few recommendations for scaling your analysis to larger
It's a complement to :ref:`enhancingperf`, which focuses on speeding up analysis
for datasets that fit in memory.
-But first, it's worth considering *not using pandas*. Pandas isn't the right
+But first, it's worth considering *not using pandas*. pandas isn't the right
tool for all situations. If you're working with very large datasets and a tool
like PostgreSQL fits your needs, then you should probably be using that.
Assuming you want or need the expressiveness and power of pandas, let's carry on.
@@ -230,7 +230,7 @@ different library that implements these out-of-core algorithms for you.
Use other libraries
-------------------
-Pandas is just one library offering a DataFrame API. Because of its popularity,
+pandas is just one library offering a DataFrame API. Because of its popularity,
pandas' API has become something of a standard that other libraries implement.
The pandas documentation maintains a list of libraries implementing a DataFrame API
in :ref:`our ecosystem page <ecosystem.out-of-core>`.
@@ -259,7 +259,7 @@ Inspecting the ``ddf`` object, we see a few things
* There are new attributes like ``.npartitions`` and ``.divisions``
The partitions and divisions are how Dask parallelizes computation. A **Dask**
-DataFrame is made up of many **Pandas** DataFrames. A single method call on a
+DataFrame is made up of many pandas DataFrames. A single method call on a
Dask DataFrame ends up making many pandas method calls, and Dask knows how to
coordinate everything to get the result.
diff --git a/doc/source/user_guide/sparse.rst b/doc/source/user_guide/sparse.rst
index 62e35cb994faf..3156e3088d860 100644
--- a/doc/source/user_guide/sparse.rst
+++ b/doc/source/user_guide/sparse.rst
@@ -6,7 +6,7 @@
Sparse data structures
**********************
-Pandas provides data structures for efficiently storing sparse data.
+pandas provides data structures for efficiently storing sparse data.
These are not necessarily sparse in the typical "mostly 0". Rather, you can view these
objects as being "compressed" where any data matching a specific value (``NaN`` / missing value, though any value
can be chosen, including 0) is omitted. The compressed values are not actually stored in the array.
@@ -116,7 +116,7 @@ Sparse accessor
.. versionadded:: 0.24.0
-Pandas provides a ``.sparse`` accessor, similar to ``.str`` for string data, ``.cat``
+pandas provides a ``.sparse`` accessor, similar to ``.str`` for string data, ``.cat``
for categorical data, and ``.dt`` for datetime-like data. This namespace provides
attributes and methods that are specific to sparse data.
diff --git a/doc/source/user_guide/timedeltas.rst b/doc/source/user_guide/timedeltas.rst
index 971a415088220..cb265d34229dd 100644
--- a/doc/source/user_guide/timedeltas.rst
+++ b/doc/source/user_guide/timedeltas.rst
@@ -100,7 +100,7 @@ The ``unit`` keyword argument specifies the unit of the Timedelta:
Timedelta limitations
~~~~~~~~~~~~~~~~~~~~~
-Pandas represents ``Timedeltas`` in nanosecond resolution using
+pandas represents ``Timedeltas`` in nanosecond resolution using
64 bit integers. As such, the 64 bit integer limits determine
the ``Timedelta`` limits.
diff --git a/doc/source/user_guide/timeseries.rst b/doc/source/user_guide/timeseries.rst
index 11ec90085d9bf..be2c67521dc5d 100644
--- a/doc/source/user_guide/timeseries.rst
+++ b/doc/source/user_guide/timeseries.rst
@@ -1549,7 +1549,7 @@ Converting to Python datetimes
Resampling
----------
-Pandas has a simple, powerful, and efficient functionality for performing
+pandas has a simple, powerful, and efficient functionality for performing
resampling operations during frequency conversion (e.g., converting secondly
data into 5-minutely data). This is extremely common in, but not limited to,
financial applications.
diff --git a/doc/source/user_guide/visualization.rst b/doc/source/user_guide/visualization.rst
index 46ab29a52747a..a6c3d9814b03d 100644
--- a/doc/source/user_guide/visualization.rst
+++ b/doc/source/user_guide/visualization.rst
@@ -776,7 +776,7 @@ See the `matplotlib pie documentation <https://matplotlib.org/api/pyplot_api.htm
Plotting with missing data
--------------------------
-Pandas tries to be pragmatic about plotting ``DataFrames`` or ``Series``
+pandas tries to be pragmatic about plotting ``DataFrames`` or ``Series``
that contain missing data. Missing values are dropped, left out, or filled
depending on the plot type.
@@ -1239,7 +1239,7 @@ Custom formatters for timeseries plots
.. versionchanged:: 1.0.0
-Pandas provides custom formatters for timeseries plots. These change the
+pandas provides custom formatters for timeseries plots. These change the
formatting of the axis labels for dates and times. By default,
the custom formatters are applied only to plots created by pandas with
:meth:`DataFrame.plot` or :meth:`Series.plot`. To have them apply to all
diff --git a/doc/source/whatsnew/v0.11.0.rst b/doc/source/whatsnew/v0.11.0.rst
index eb91ac427063f..a69d1ad1dec3b 100644
--- a/doc/source/whatsnew/v0.11.0.rst
+++ b/doc/source/whatsnew/v0.11.0.rst
@@ -24,7 +24,7 @@ Selection choices
~~~~~~~~~~~~~~~~~
Starting in 0.11.0, object selection has had a number of user-requested additions in
-order to support more explicit location based indexing. Pandas now supports
+order to support more explicit location based indexing. pandas now supports
three types of multi-axis indexing.
- ``.loc`` is strictly label based, will raise ``KeyError`` when the items are not found, allowed inputs are:
diff --git a/doc/source/whatsnew/v0.13.0.rst b/doc/source/whatsnew/v0.13.0.rst
index bc607409546c6..3c6b70fb21383 100644
--- a/doc/source/whatsnew/v0.13.0.rst
+++ b/doc/source/whatsnew/v0.13.0.rst
@@ -668,7 +668,7 @@ Enhancements
- ``Series`` now supports a ``to_frame`` method to convert it to a single-column DataFrame (:issue:`5164`)
-- All R datasets listed here http://stat.ethz.ch/R-manual/R-devel/library/datasets/html/00Index.html can now be loaded into Pandas objects
+- All R datasets listed here http://stat.ethz.ch/R-manual/R-devel/library/datasets/html/00Index.html can now be loaded into pandas objects
.. code-block:: python
diff --git a/doc/source/whatsnew/v0.17.0.rst b/doc/source/whatsnew/v0.17.0.rst
index e8f37a72f6417..3e49bb30401a3 100644
--- a/doc/source/whatsnew/v0.17.0.rst
+++ b/doc/source/whatsnew/v0.17.0.rst
@@ -762,7 +762,7 @@ Usually you simply want to know which values are null.
.. warning::
You generally will want to use ``isnull/notnull`` for these types of comparisons, as ``isnull/notnull`` tells you which elements are null. One has to be
- mindful that ``nan's`` don't compare equal, but ``None's`` do. Note that Pandas/numpy uses the fact that ``np.nan != np.nan``, and treats ``None`` like ``np.nan``.
+ mindful that ``nan's`` don't compare equal, but ``None's`` do. Note that pandas/numpy uses the fact that ``np.nan != np.nan``, and treats ``None`` like ``np.nan``.
.. ipython:: python
@@ -909,7 +909,7 @@ Other API changes
- The metadata properties of subclasses of pandas objects will now be serialized (:issue:`10553`).
- ``groupby`` using ``Categorical`` follows the same rule as ``Categorical.unique`` described above (:issue:`10508`)
- When constructing ``DataFrame`` with an array of ``complex64`` dtype previously meant the corresponding column
- was automatically promoted to the ``complex128`` dtype. Pandas will now preserve the itemsize of the input for complex data (:issue:`10952`)
+ was automatically promoted to the ``complex128`` dtype. pandas will now preserve the itemsize of the input for complex data (:issue:`10952`)
- some numeric reduction operators would return ``ValueError``, rather than ``TypeError`` on object types that includes strings and numbers (:issue:`11131`)
- Passing currently unsupported ``chunksize`` argument to ``read_excel`` or ``ExcelFile.parse`` will now raise ``NotImplementedError`` (:issue:`8011`)
- Allow an ``ExcelFile`` object to be passed into ``read_excel`` (:issue:`11198`)
diff --git a/doc/source/whatsnew/v0.19.0.rst b/doc/source/whatsnew/v0.19.0.rst
index 6e8c4273a0550..5732367a69af2 100644
--- a/doc/source/whatsnew/v0.19.0.rst
+++ b/doc/source/whatsnew/v0.19.0.rst
@@ -301,7 +301,7 @@ Categorical concatenation
Semi-month offsets
^^^^^^^^^^^^^^^^^^
-Pandas has gained new frequency offsets, ``SemiMonthEnd`` ('SM') and ``SemiMonthBegin`` ('SMS').
+pandas has gained new frequency offsets, ``SemiMonthEnd`` ('SM') and ``SemiMonthBegin`` ('SMS').
These provide date offsets anchored (by default) to the 15th and end of month, and 15th and 1st of month respectively.
(:issue:`1543`)
@@ -388,7 +388,7 @@ Google BigQuery enhancements
Fine-grained NumPy errstate
^^^^^^^^^^^^^^^^^^^^^^^^^^^
-Previous versions of pandas would permanently silence numpy's ufunc error handling when ``pandas`` was imported. Pandas did this in order to silence the warnings that would arise from using numpy ufuncs on missing data, which are usually represented as ``NaN`` s. Unfortunately, this silenced legitimate warnings arising in non-pandas code in the application. Starting with 0.19.0, pandas will use the ``numpy.errstate`` context manager to silence these warnings in a more fine-grained manner, only around where these operations are actually used in the pandas code base. (:issue:`13109`, :issue:`13145`)
+Previous versions of pandas would permanently silence numpy's ufunc error handling when ``pandas`` was imported. pandas did this in order to silence the warnings that would arise from using numpy ufuncs on missing data, which are usually represented as ``NaN`` s. Unfortunately, this silenced legitimate warnings arising in non-pandas code in the application. Starting with 0.19.0, pandas will use the ``numpy.errstate`` context manager to silence these warnings in a more fine-grained manner, only around where these operations are actually used in the pandas code base. (:issue:`13109`, :issue:`13145`)
After upgrading pandas, you may see *new* ``RuntimeWarnings`` being issued from your code. These are likely legitimate, and the underlying cause likely existed in the code when using previous versions of pandas that simply silenced the warning. Use `numpy.errstate <https://numpy.org/doc/stable/reference/generated/numpy.errstate.html>`__ around the source of the ``RuntimeWarning`` to control how these conditions are handled.
@@ -1372,7 +1372,7 @@ Deprecations
- ``Timestamp.offset`` property (and named arg in the constructor), has been deprecated in favor of ``freq`` (:issue:`12160`)
- ``pd.tseries.util.pivot_annual`` is deprecated. Use ``pivot_table`` as alternative, an example is :ref:`here <cookbook.pivot>` (:issue:`736`)
- ``pd.tseries.util.isleapyear`` has been deprecated and will be removed in a subsequent release. Datetime-likes now have a ``.is_leap_year`` property (:issue:`13727`)
-- ``Panel4D`` and ``PanelND`` constructors are deprecated and will be removed in a future version. The recommended way to represent these types of n-dimensional data are with the `xarray package <http://xarray.pydata.org/en/stable/>`__. Pandas provides a :meth:`~Panel4D.to_xarray` method to automate this conversion (:issue:`13564`).
+- ``Panel4D`` and ``PanelND`` constructors are deprecated and will be removed in a future version. The recommended way to represent these types of n-dimensional data are with the `xarray package <http://xarray.pydata.org/en/stable/>`__. pandas provides a :meth:`~Panel4D.to_xarray` method to automate this conversion (:issue:`13564`).
- ``pandas.tseries.frequencies.get_standard_freq`` is deprecated. Use ``pandas.tseries.frequencies.to_offset(freq).rule_code`` instead (:issue:`13874`)
- ``pandas.tseries.frequencies.to_offset``'s ``freqstr`` keyword is deprecated in favor of ``freq`` (:issue:`13874`)
- ``Categorical.from_array`` has been deprecated and will be removed in a future version (:issue:`13854`)
diff --git a/doc/source/whatsnew/v0.20.0.rst b/doc/source/whatsnew/v0.20.0.rst
index 3f7a89112958b..a9e57f0039735 100644
--- a/doc/source/whatsnew/v0.20.0.rst
+++ b/doc/source/whatsnew/v0.20.0.rst
@@ -26,7 +26,7 @@ Highlights include:
.. warning::
- Pandas has changed the internal structure and layout of the code base.
+ pandas has changed the internal structure and layout of the code base.
This can affect imports that are not from the top-level ``pandas.*`` namespace, please see the changes :ref:`here <whatsnew_0200.privacy>`.
Check the :ref:`API Changes <whatsnew_0200.api_breaking>` and :ref:`deprecations <whatsnew_0200.deprecations>` before updating.
@@ -243,7 +243,7 @@ The default is to infer the compression type from the extension (``compression='
UInt64 support improved
^^^^^^^^^^^^^^^^^^^^^^^
-Pandas has significantly improved support for operations involving unsigned,
+pandas has significantly improved support for operations involving unsigned,
or purely non-negative, integers. Previously, handling these integers would
result in improper rounding or data-type casting, leading to incorrect results.
Notably, a new numerical index, ``UInt64Index``, has been created (:issue:`14937`)
@@ -333,7 +333,7 @@ You must enable this by setting the ``display.html.table_schema`` option to ``Tr
SciPy sparse matrix from/to SparseDataFrame
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
-Pandas now supports creating sparse dataframes directly from ``scipy.sparse.spmatrix`` instances.
+pandas now supports creating sparse dataframes directly from ``scipy.sparse.spmatrix`` instances.
See the :ref:`documentation <sparse.scipysparse>` for more information. (:issue:`4343`)
All sparse formats are supported, but matrices that are not in :mod:`COOrdinate <scipy.sparse>` format will be converted, copying data as needed.
@@ -1355,7 +1355,7 @@ Deprecate Panel
^^^^^^^^^^^^^^^
``Panel`` is deprecated and will be removed in a future version. The recommended way to represent 3-D data are
-with a ``MultiIndex`` on a ``DataFrame`` via the :meth:`~Panel.to_frame` or with the `xarray package <http://xarray.pydata.org/en/stable/>`__. Pandas
+with a ``MultiIndex`` on a ``DataFrame`` via the :meth:`~Panel.to_frame` or with the `xarray package <http://xarray.pydata.org/en/stable/>`__. pandas
provides a :meth:`~Panel.to_xarray` method to automate this conversion (:issue:`13563`).
.. code-block:: ipython
diff --git a/doc/source/whatsnew/v0.21.0.rst b/doc/source/whatsnew/v0.21.0.rst
index 926bcaa21ac3a..6035b89aa8643 100644
--- a/doc/source/whatsnew/v0.21.0.rst
+++ b/doc/source/whatsnew/v0.21.0.rst
@@ -900,13 +900,13 @@ New behavior:
No automatic Matplotlib converters
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
-Pandas no longer registers our ``date``, ``time``, ``datetime``,
+pandas no longer registers our ``date``, ``time``, ``datetime``,
``datetime64``, and ``Period`` converters with matplotlib when pandas is
imported. Matplotlib plot methods (``plt.plot``, ``ax.plot``, ...), will not
nicely format the x-axis for ``DatetimeIndex`` or ``PeriodIndex`` values. You
must explicitly register these methods:
-Pandas built-in ``Series.plot`` and ``DataFrame.plot`` *will* register these
+pandas built-in ``Series.plot`` and ``DataFrame.plot`` *will* register these
converters on first-use (:issue:`17710`).
.. note::
diff --git a/doc/source/whatsnew/v0.21.1.rst b/doc/source/whatsnew/v0.21.1.rst
index f930dfac869cd..2d72f6470fc81 100644
--- a/doc/source/whatsnew/v0.21.1.rst
+++ b/doc/source/whatsnew/v0.21.1.rst
@@ -34,7 +34,7 @@ Highlights include:
Restore Matplotlib datetime converter registration
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-Pandas implements some matplotlib converters for nicely formatting the axis
+pandas implements some matplotlib converters for nicely formatting the axis
labels on plots with ``datetime`` or ``Period`` values. Prior to pandas 0.21.0,
these were implicitly registered with matplotlib, as a side effect of ``import
pandas``.
diff --git a/doc/source/whatsnew/v0.22.0.rst b/doc/source/whatsnew/v0.22.0.rst
index 66d3ab3305565..92b514ce59660 100644
--- a/doc/source/whatsnew/v0.22.0.rst
+++ b/doc/source/whatsnew/v0.22.0.rst
@@ -20,7 +20,7 @@ release note (singular!).
Backwards incompatible API changes
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-Pandas 0.22.0 changes the handling of empty and all-*NA* sums and products. The
+pandas 0.22.0 changes the handling of empty and all-*NA* sums and products. The
summary is that
* The sum of an empty or all-*NA* ``Series`` is now ``0``
diff --git a/doc/source/whatsnew/v0.23.0.rst b/doc/source/whatsnew/v0.23.0.rst
index cb811fd83d90d..f4caea9d363eb 100644
--- a/doc/source/whatsnew/v0.23.0.rst
+++ b/doc/source/whatsnew/v0.23.0.rst
@@ -189,7 +189,7 @@ resetting indexes. See the :ref:`Sorting by Indexes and Values
Extending pandas with custom types (experimental)
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
-Pandas now supports storing array-like objects that aren't necessarily 1-D NumPy
+pandas now supports storing array-like objects that aren't necessarily 1-D NumPy
arrays as columns in a DataFrame or values in a Series. This allows third-party
libraries to implement extensions to NumPy's types, similar to how pandas
implemented categoricals, datetimes with timezones, periods, and intervals.
@@ -553,7 +553,7 @@ Other enhancements
- :class:`~pandas.tseries.offsets.WeekOfMonth` constructor now supports ``n=0`` (:issue:`20517`).
- :class:`DataFrame` and :class:`Series` now support matrix multiplication (``@``) operator (:issue:`10259`) for Python>=3.5
- Updated :meth:`DataFrame.to_gbq` and :meth:`pandas.read_gbq` signature and documentation to reflect changes from
- the Pandas-GBQ library version 0.4.0. Adds intersphinx mapping to Pandas-GBQ
+ the pandas-gbq library version 0.4.0. Adds intersphinx mapping to pandas-gbq
library. (:issue:`20564`)
- Added new writer for exporting Stata dta files in version 117, ``StataWriter117``. This format supports exporting strings with lengths up to 2,000,000 characters (:issue:`16450`)
- :func:`to_hdf` and :func:`read_hdf` now accept an ``errors`` keyword argument to control encoding error handling (:issue:`20835`)
@@ -593,7 +593,7 @@ Instantiation from dicts preserves dict insertion order for Python 3.6+
Until Python 3.6, dicts in Python had no formally defined ordering. For Python
version 3.6 and later, dicts are ordered by insertion order, see
`PEP 468 <https://www.python.org/dev/peps/pep-0468/>`_.
-Pandas will use the dict's insertion order, when creating a ``Series`` or
+pandas will use the dict's insertion order, when creating a ``Series`` or
``DataFrame`` from a dict and you're using Python version 3.6 or
higher. (:issue:`19884`)
@@ -643,7 +643,7 @@ Deprecate Panel
^^^^^^^^^^^^^^^
``Panel`` was deprecated in the 0.20.x release, showing as a ``DeprecationWarning``. Using ``Panel`` will now show a ``FutureWarning``. The recommended way to represent 3-D data are
-with a ``MultiIndex`` on a ``DataFrame`` via the :meth:`~Panel.to_frame` or with the `xarray package <http://xarray.pydata.org/en/stable/>`__. Pandas
+with a ``MultiIndex`` on a ``DataFrame`` via the :meth:`~Panel.to_frame` or with the `xarray package <http://xarray.pydata.org/en/stable/>`__. pandas
provides a :meth:`~Panel.to_xarray` method to automate this conversion (:issue:`13563`, :issue:`18324`).
.. code-block:: ipython
@@ -884,7 +884,7 @@ Extraction of matching patterns from strings
By default, extracting matching patterns from strings with :func:`str.extract` used to return a
``Series`` if a single group was being extracted (a ``DataFrame`` if more than one group was
-extracted). As of Pandas 0.23.0 :func:`str.extract` always returns a ``DataFrame``, unless
+extracted). As of pandas 0.23.0 :func:`str.extract` always returns a ``DataFrame``, unless
``expand`` is set to ``False``. Finally, ``None`` was an accepted value for
the ``expand`` parameter (which was equivalent to ``False``), but now raises a ``ValueError``. (:issue:`11386`)
@@ -1175,7 +1175,7 @@ Performance improvements
Documentation changes
~~~~~~~~~~~~~~~~~~~~~
-Thanks to all of the contributors who participated in the Pandas Documentation
+Thanks to all of the contributors who participated in the pandas Documentation
Sprint, which took place on March 10th. We had about 500 participants from over
30 locations across the world. You should notice that many of the
:ref:`API docstrings <api>` have greatly improved.
diff --git a/doc/source/whatsnew/v0.23.2.rst b/doc/source/whatsnew/v0.23.2.rst
index 9f24092d1d4ae..99650e8291d3d 100644
--- a/doc/source/whatsnew/v0.23.2.rst
+++ b/doc/source/whatsnew/v0.23.2.rst
@@ -11,7 +11,7 @@ and bug fixes. We recommend that all users upgrade to this version.
.. note::
- Pandas 0.23.2 is first pandas release that's compatible with
+ pandas 0.23.2 is first pandas release that's compatible with
Python 3.7 (:issue:`20552`)
.. warning::
diff --git a/doc/source/whatsnew/v0.24.0.rst b/doc/source/whatsnew/v0.24.0.rst
index 9a2e96f717d9b..9ef50045d5b5e 100644
--- a/doc/source/whatsnew/v0.24.0.rst
+++ b/doc/source/whatsnew/v0.24.0.rst
@@ -38,7 +38,7 @@ Enhancements
Optional integer NA support
^^^^^^^^^^^^^^^^^^^^^^^^^^^
-Pandas has gained the ability to hold integer dtypes with missing values. This long requested feature is enabled through the use of :ref:`extension types <extending.extension-types>`.
+pandas has gained the ability to hold integer dtypes with missing values. This long requested feature is enabled through the use of :ref:`extension types <extending.extension-types>`.
.. note::
@@ -384,7 +384,7 @@ Other enhancements
- :meth:`Series.droplevel` and :meth:`DataFrame.droplevel` are now implemented (:issue:`20342`)
- Added support for reading from/writing to Google Cloud Storage via the ``gcsfs`` library (:issue:`19454`, :issue:`23094`)
- :func:`DataFrame.to_gbq` and :func:`read_gbq` signature and documentation updated to
- reflect changes from the `Pandas-GBQ library version 0.8.0
+ reflect changes from the `pandas-gbq library version 0.8.0
<https://pandas-gbq.readthedocs.io/en/latest/changelog.html#changelog-0-8-0>`__.
Adds a ``credentials`` argument, which enables the use of any kind of
`google-auth credentials
@@ -432,7 +432,7 @@ Other enhancements
Backwards incompatible API changes
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-Pandas 0.24.0 includes a number of API breaking changes.
+pandas 0.24.0 includes a number of API breaking changes.
.. _whatsnew_0240.api_breaking.deps:
@@ -1217,7 +1217,7 @@ Extension type changes
**Equality and hashability**
-Pandas now requires that extension dtypes be hashable (i.e. the respective
+pandas now requires that extension dtypes be hashable (i.e. the respective
``ExtensionDtype`` objects; hashability is not a requirement for the values
of the corresponding ``ExtensionArray``). The base class implements
a default ``__eq__`` and ``__hash__``. If you have a parametrized dtype, you should
@@ -1925,7 +1925,7 @@ Build changes
Other
^^^^^
-- Bug where C variables were declared with external linkage causing import errors if certain other C libraries were imported before Pandas. (:issue:`24113`)
+- Bug where C variables were declared with external linkage causing import errors if certain other C libraries were imported before pandas. (:issue:`24113`)
.. _whatsnew_0.24.0.contributors:
diff --git a/doc/source/whatsnew/v0.25.0.rst b/doc/source/whatsnew/v0.25.0.rst
index 7b4440148677b..43b42c5cb5648 100644
--- a/doc/source/whatsnew/v0.25.0.rst
+++ b/doc/source/whatsnew/v0.25.0.rst
@@ -36,7 +36,7 @@ Enhancements
Groupby aggregation with relabeling
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
-Pandas has added special groupby behavior, known as "named aggregation", for naming the
+pandas has added special groupby behavior, known as "named aggregation", for naming the
output columns when applying multiple aggregation functions to specific columns (:issue:`18366`, :issue:`26512`).
.. ipython:: python
@@ -53,7 +53,7 @@ output columns when applying multiple aggregation functions to specific columns
Pass the desired columns names as the ``**kwargs`` to ``.agg``. The values of ``**kwargs``
should be tuples where the first element is the column selection, and the second element is the
-aggregation function to apply. Pandas provides the ``pandas.NamedAgg`` namedtuple to make it clearer
+aggregation function to apply. pandas provides the ``pandas.NamedAgg`` namedtuple to make it clearer
what the arguments to the function are, but plain tuples are accepted as well.
.. ipython:: python
@@ -425,7 +425,7 @@ of ``object`` dtype. :attr:`Series.str` will now infer the dtype data *within* t
Categorical dtypes are preserved during groupby
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
-Previously, columns that were categorical, but not the groupby key(s) would be converted to ``object`` dtype during groupby operations. Pandas now will preserve these dtypes. (:issue:`18502`)
+Previously, columns that were categorical, but not the groupby key(s) would be converted to ``object`` dtype during groupby operations. pandas now will preserve these dtypes. (:issue:`18502`)
.. ipython:: python
@@ -545,14 +545,14 @@ with :attr:`numpy.nan` in the case of an empty :class:`DataFrame` (:issue:`26397
``__str__`` methods now call ``__repr__`` rather than vice versa
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
-Pandas has until now mostly defined string representations in a Pandas objects's
+pandas has until now mostly defined string representations in a pandas objects'
``__str__``/``__unicode__``/``__bytes__`` methods, and called ``__str__`` from the ``__repr__``
method, if a specific ``__repr__`` method is not found. This is not needed for Python3.
-In Pandas 0.25, the string representations of Pandas objects are now generally
+In pandas 0.25, the string representations of pandas objects are now generally
defined in ``__repr__``, and calls to ``__str__`` in general now pass the call on to
the ``__repr__``, if a specific ``__str__`` method doesn't exist, as is standard for Python.
-This change is backward compatible for direct usage of Pandas, but if you subclass
-Pandas objects *and* give your subclasses specific ``__str__``/``__repr__`` methods,
+This change is backward compatible for direct usage of pandas, but if you subclass
+pandas objects *and* give your subclasses specific ``__str__``/``__repr__`` methods,
you may have to adjust your ``__str__``/``__repr__`` methods (:issue:`26495`).
.. _whatsnew_0250.api_breaking.interval_indexing:
@@ -881,7 +881,7 @@ Other API changes
- Bug in :meth:`DatetimeIndex.snap` which didn't preserving the ``name`` of the input :class:`Index` (:issue:`25575`)
- The ``arg`` argument in :meth:`pandas.core.groupby.DataFrameGroupBy.agg` has been renamed to ``func`` (:issue:`26089`)
- The ``arg`` argument in :meth:`pandas.core.window._Window.aggregate` has been renamed to ``func`` (:issue:`26372`)
-- Most Pandas classes had a ``__bytes__`` method, which was used for getting a python2-style bytestring representation of the object. This method has been removed as a part of dropping Python2 (:issue:`26447`)
+- Most pandas classes had a ``__bytes__`` method, which was used for getting a python2-style bytestring representation of the object. This method has been removed as a part of dropping Python2 (:issue:`26447`)
- The ``.str``-accessor has been disabled for 1-level :class:`MultiIndex`, use :meth:`MultiIndex.to_flat_index` if necessary (:issue:`23679`)
- Removed support of gtk package for clipboards (:issue:`26563`)
- Using an unsupported version of Beautiful Soup 4 will now raise an ``ImportError`` instead of a ``ValueError`` (:issue:`27063`)
diff --git a/doc/source/whatsnew/v0.25.1.rst b/doc/source/whatsnew/v0.25.1.rst
index 2a2b511356a69..8a16bab63f1bf 100644
--- a/doc/source/whatsnew/v0.25.1.rst
+++ b/doc/source/whatsnew/v0.25.1.rst
@@ -10,7 +10,7 @@ I/O and LZMA
~~~~~~~~~~~~
Some users may unknowingly have an incomplete Python installation lacking the ``lzma`` module from the standard library. In this case, ``import pandas`` failed due to an ``ImportError`` (:issue:`27575`).
-Pandas will now warn, rather than raising an ``ImportError`` if the ``lzma`` module is not present. Any subsequent attempt to use ``lzma`` methods will raise a ``RuntimeError``.
+pandas will now warn, rather than raising an ``ImportError`` if the ``lzma`` module is not present. Any subsequent attempt to use ``lzma`` methods will raise a ``RuntimeError``.
A possible fix for the lack of the ``lzma`` module is to ensure you have the necessary libraries and then re-install Python.
For example, on MacOS installing Python with ``pyenv`` may lead to an incomplete Python installation due to unmet system dependencies at compilation time (like ``xz``). Compilation will succeed, but Python might fail at run time. The issue can be solved by installing the necessary dependencies and then re-installing Python.
diff --git a/doc/source/whatsnew/v0.25.2.rst b/doc/source/whatsnew/v0.25.2.rst
index c0c68ce4b1f44..a5ea8933762ab 100644
--- a/doc/source/whatsnew/v0.25.2.rst
+++ b/doc/source/whatsnew/v0.25.2.rst
@@ -8,7 +8,7 @@ including other versions of pandas.
.. note::
- Pandas 0.25.2 adds compatibility for Python 3.8 (:issue:`28147`).
+ pandas 0.25.2 adds compatibility for Python 3.8 (:issue:`28147`).
.. _whatsnew_0252.bug_fixes:
diff --git a/doc/source/whatsnew/v1.0.0.rst b/doc/source/whatsnew/v1.0.0.rst
index 32175d344c320..ddc40d6d40594 100755
--- a/doc/source/whatsnew/v1.0.0.rst
+++ b/doc/source/whatsnew/v1.0.0.rst
@@ -18,7 +18,7 @@ including other versions of pandas.
New deprecation policy
~~~~~~~~~~~~~~~~~~~~~~
-Starting with Pandas 1.0.0, pandas will adopt a variant of `SemVer`_ to
+Starting with pandas 1.0.0, pandas will adopt a variant of `SemVer`_ to
version releases. Briefly,
* Deprecations will be introduced in minor releases (e.g. 1.1.0, 1.2.0, 2.1.0, ...)
@@ -676,7 +676,7 @@ depending on how the results are cast back to the original dtype.
Increased minimum version for Python
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
-Pandas 1.0.0 supports Python 3.6.1 and higher (:issue:`29212`).
+pandas 1.0.0 supports Python 3.6.1 and higher (:issue:`29212`).
.. _whatsnew_100.api_breaking.deps:
@@ -749,7 +749,7 @@ See :ref:`install.dependencies` and :ref:`install.optional_dependencies` for mor
Build changes
^^^^^^^^^^^^^
-Pandas has added a `pyproject.toml <https://www.python.org/dev/peps/pep-0517/>`_ file and will no longer include
+pandas has added a `pyproject.toml <https://www.python.org/dev/peps/pep-0517/>`_ file and will no longer include
cythonized files in the source distribution uploaded to PyPI (:issue:`28341`, :issue:`20775`). If you're installing
a built distribution (wheel) or via conda, this shouldn't have any effect on you. If you're building pandas from
source, you should no longer need to install Cython into your build environment before calling ``pip install pandas``.
@@ -763,7 +763,7 @@ Other API changes
- :class:`core.groupby.GroupBy.transform` now raises on invalid operation names (:issue:`27489`)
- :meth:`pandas.api.types.infer_dtype` will now return "integer-na" for integer and ``np.nan`` mix (:issue:`27283`)
- :meth:`MultiIndex.from_arrays` will no longer infer names from arrays if ``names=None`` is explicitly provided (:issue:`27292`)
-- In order to improve tab-completion, Pandas does not include most deprecated attributes when introspecting a pandas object using ``dir`` (e.g. ``dir(df)``).
+- In order to improve tab-completion, pandas does not include most deprecated attributes when introspecting a pandas object using ``dir`` (e.g. ``dir(df)``).
To see which attributes are excluded, see an object's ``_deprecations`` attribute, for example ``pd.DataFrame._deprecations`` (:issue:`28805`).
- The returned dtype of :func:`unique` now matches the input dtype. (:issue:`27874`)
- Changed the default configuration value for ``options.matplotlib.register_converters`` from ``True`` to ``"auto"`` (:issue:`18720`).
diff --git a/doc/source/whatsnew/v1.1.3.rst b/doc/source/whatsnew/v1.1.3.rst
index acf1dafc59885..af714b1bb2ab1 100644
--- a/doc/source/whatsnew/v1.1.3.rst
+++ b/doc/source/whatsnew/v1.1.3.rst
@@ -16,7 +16,7 @@ Enhancements
Added support for new Python version
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
-Pandas 1.1.3 now supports Python 3.9 (:issue:`36296`).
+pandas 1.1.3 now supports Python 3.9 (:issue:`36296`).
Development Changes
^^^^^^^^^^^^^^^^^^^
diff --git a/doc/source/whatsnew/v1.2.0.rst b/doc/source/whatsnew/v1.2.0.rst
index cb0858fd678f8..2b63f6f99ba12 100644
--- a/doc/source/whatsnew/v1.2.0.rst
+++ b/doc/source/whatsnew/v1.2.0.rst
@@ -33,7 +33,7 @@ By default, duplicates continue to be allowed
pd.Series([1, 2], index=['a', 'a']).set_flags(allows_duplicate_labels=False)
-Pandas will propagate the ``allows_duplicate_labels`` property through many operations.
+pandas will propagate the ``allows_duplicate_labels`` property through many operations.
.. ipython:: python
:okexcept:
@@ -175,7 +175,7 @@ Other enhancements
Increased minimum version for Python
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
-Pandas 1.2.0 supports Python 3.7.1 and higher (:issue:`35214`).
+pandas 1.2.0 supports Python 3.7.1 and higher (:issue:`35214`).
.. _whatsnew_120.api_breaking.deps:
| changes references to the library, pandas, to match the standard lowercase spelling. This changes applicable `.rst` files under the `doc/source` path.
References such as Pandas, **pandas**, *pandas*, and ``pandas`` have been replaced with pandas.
- [x] closes #32316
- [x] tests added / passed
- [x] passes `black pandas`
- [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [ ] whatsnew entry
| https://api.github.com/repos/pandas-dev/pandas/pulls/36845 | 2020-10-04T00:34:52Z | 2020-10-04T20:13:32Z | 2020-10-04T20:13:32Z | 2020-10-04T23:16:35Z |
DOC: Typo fix | diff --git a/doc/source/user_guide/style.ipynb b/doc/source/user_guide/style.ipynb
index 77a1fef28f373..12dd72f761408 100644
--- a/doc/source/user_guide/style.ipynb
+++ b/doc/source/user_guide/style.ipynb
@@ -793,7 +793,7 @@
"source": [
"The next option you have are \"table styles\".\n",
"These are styles that apply to the table as a whole, but don't look at the data.\n",
- "Certain sytlings, including pseudo-selectors like `:hover` can only be used this way."
+ "Certain stylings, including pseudo-selectors like `:hover` can only be used this way."
]
},
{
| Noticed a minor typo when using the docs
| https://api.github.com/repos/pandas-dev/pandas/pulls/36844 | 2020-10-04T00:09:36Z | 2020-10-04T03:27:50Z | 2020-10-04T03:27:50Z | 2020-10-04T03:27:58Z |
REF: separate flex from non-flex DataFrame ops | diff --git a/doc/source/whatsnew/v1.2.0.rst b/doc/source/whatsnew/v1.2.0.rst
index 2f462b16ddf78..ae84ad08270f6 100644
--- a/doc/source/whatsnew/v1.2.0.rst
+++ b/doc/source/whatsnew/v1.2.0.rst
@@ -362,6 +362,7 @@ Numeric
- Bug in :class:`Series` flex arithmetic methods where the result when operating with a ``list``, ``tuple`` or ``np.ndarray`` would have an incorrect name (:issue:`36760`)
- Bug in :class:`IntegerArray` multiplication with ``timedelta`` and ``np.timedelta64`` objects (:issue:`36870`)
- Bug in :meth:`DataFrame.diff` with ``datetime64`` dtypes including ``NaT`` values failing to fill ``NaT`` results correctly (:issue:`32441`)
+- Bug in :class:`DataFrame` arithmetic ops incorrectly accepting keyword arguments (:issue:`36843`)
Conversion
^^^^^^^^^^
diff --git a/pandas/core/ops/__init__.py b/pandas/core/ops/__init__.py
index 0de842e8575af..b656aef64cde9 100644
--- a/pandas/core/ops/__init__.py
+++ b/pandas/core/ops/__init__.py
@@ -533,18 +533,13 @@ def _maybe_align_series_as_frame(frame: "DataFrame", series: "Series", axis: int
return type(frame)(rvalues, index=frame.index, columns=frame.columns)
-def arith_method_FRAME(cls: Type["DataFrame"], op, special: bool):
- # This is the only function where `special` can be either True or False
+def flex_arith_method_FRAME(cls: Type["DataFrame"], op, special: bool):
+ assert not special
op_name = _get_op_name(op, special)
default_axis = None if special else "columns"
na_op = get_array_op(op)
-
- if op_name in _op_descriptions:
- # i.e. include "add" but not "__add__"
- doc = _make_flex_doc(op_name, "dataframe")
- else:
- doc = _arith_doc_FRAME % op_name
+ doc = _make_flex_doc(op_name, "dataframe")
@Appender(doc)
def f(self, other, axis=default_axis, level=None, fill_value=None):
@@ -561,8 +556,6 @@ def f(self, other, axis=default_axis, level=None, fill_value=None):
axis = self._get_axis_number(axis) if axis is not None else 1
- # TODO: why are we passing flex=True instead of flex=not special?
- # 15 tests fail if we pass flex=not special instead
self, other = align_method_FRAME(self, other, axis, flex=True, level=level)
if isinstance(other, ABCDataFrame):
@@ -585,6 +578,29 @@ def f(self, other, axis=default_axis, level=None, fill_value=None):
return f
+def arith_method_FRAME(cls: Type["DataFrame"], op, special: bool):
+ assert special
+ op_name = _get_op_name(op, special)
+ doc = _arith_doc_FRAME % op_name
+
+ @Appender(doc)
+ def f(self, other):
+
+ if _should_reindex_frame_op(self, other, op, 1, 1, None, None):
+ return _frame_arith_method_with_reindex(self, other, op)
+
+ axis = 1 # only relevant for Series other case
+
+ self, other = align_method_FRAME(self, other, axis, flex=True, level=None)
+
+ new_data = dispatch_to_series(self, other, op, axis=axis)
+ return self._construct_result(new_data)
+
+ f.__name__ = op_name
+
+ return f
+
+
def flex_comp_method_FRAME(cls: Type["DataFrame"], op, special: bool):
assert not special # "special" also means "not flex"
op_name = _get_op_name(op, special)
@@ -616,7 +632,7 @@ def comp_method_FRAME(cls: Type["DataFrame"], op, special: bool):
def f(self, other):
axis = 1 # only relevant for Series other case
- self, other = align_method_FRAME(self, other, axis, level=None, flex=False)
+ self, other = align_method_FRAME(self, other, axis, flex=False, level=None)
# See GH#4537 for discussion of scalar op behavior
new_data = dispatch_to_series(self, other, op, axis=axis)
diff --git a/pandas/core/ops/methods.py b/pandas/core/ops/methods.py
index 05da378f8964d..86981f007a678 100644
--- a/pandas/core/ops/methods.py
+++ b/pandas/core/ops/methods.py
@@ -46,6 +46,7 @@ def _get_method_wrappers(cls):
from pandas.core.ops import (
arith_method_FRAME,
comp_method_FRAME,
+ flex_arith_method_FRAME,
flex_comp_method_FRAME,
flex_method_SERIES,
)
@@ -58,7 +59,7 @@ def _get_method_wrappers(cls):
comp_special = None
bool_special = None
elif issubclass(cls, ABCDataFrame):
- arith_flex = arith_method_FRAME
+ arith_flex = flex_arith_method_FRAME
comp_flex = flex_comp_method_FRAME
arith_special = arith_method_FRAME
comp_special = comp_method_FRAME
diff --git a/pandas/tests/frame/test_arithmetic.py b/pandas/tests/frame/test_arithmetic.py
index d9ef19e174700..94f813fd08128 100644
--- a/pandas/tests/frame/test_arithmetic.py
+++ b/pandas/tests/frame/test_arithmetic.py
@@ -1484,6 +1484,13 @@ def test_no_warning(self, all_arithmetic_operators):
df = pd.DataFrame({"A": [0.0, 0.0], "B": [0.0, None]})
b = df["B"]
with tm.assert_produces_warning(None):
+ getattr(df, all_arithmetic_operators)(b)
+
+ def test_dunder_methods_binary(self, all_arithmetic_operators):
+ # GH#??? frame.__foo__ should only accept one argument
+ df = pd.DataFrame({"A": [0.0, 0.0], "B": [0.0, None]})
+ b = df["B"]
+ with pytest.raises(TypeError, match="takes 2 positional arguments"):
getattr(df, all_arithmetic_operators)(b, 0)
diff --git a/pandas/tests/series/test_operators.py b/pandas/tests/series/test_operators.py
index a796023c75b78..df6b8187964e8 100644
--- a/pandas/tests/series/test_operators.py
+++ b/pandas/tests/series/test_operators.py
@@ -276,25 +276,12 @@ def test_scalar_na_logical_ops_corners_aligns(self):
expected = DataFrame(False, index=range(9), columns=["A"] + list(range(9)))
- result = d.__and__(s, axis="columns")
- tm.assert_frame_equal(result, expected)
-
- result = d.__and__(s, axis=1)
- tm.assert_frame_equal(result, expected)
-
result = s & d
tm.assert_frame_equal(result, expected)
result = d & s
tm.assert_frame_equal(result, expected)
- expected = (s & s).to_frame("A")
- result = d.__and__(s, axis="index")
- tm.assert_frame_equal(result, expected)
-
- result = d.__and__(s, axis=0)
- tm.assert_frame_equal(result, expected)
-
@pytest.mark.parametrize("op", [operator.and_, operator.or_, operator.xor])
def test_logical_ops_with_index(self, op):
# GH#22092, GH#19792
| - [x] closes #36796
- [x] tests added / passed
- [x] passes `black pandas`
- [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [x] whatsnew entry
Unless supporting this is intentional, in which case we should deprecate and explicitly implement flex-bool operations.
If/when this is done, we can stop passing `special` and even `cls`. | https://api.github.com/repos/pandas-dev/pandas/pulls/36843 | 2020-10-03T23:42:40Z | 2020-10-10T22:27:19Z | 2020-10-10T22:27:19Z | 2020-10-10T22:45:58Z |
BUG: Groupy dropped nan groups from result when grouping over single column | diff --git a/doc/source/whatsnew/v1.2.0.rst b/doc/source/whatsnew/v1.2.0.rst
index 7c850ffedfcab..1cb8710799d30 100644
--- a/doc/source/whatsnew/v1.2.0.rst
+++ b/doc/source/whatsnew/v1.2.0.rst
@@ -519,6 +519,7 @@ Groupby/resample/rolling
- Using :meth:`Rolling.var()` instead of :meth:`Rolling.std()` avoids numerical issues for :meth:`Rolling.corr()` when :meth:`Rolling.var()` is still within floating point precision while :meth:`Rolling.std()` is not (:issue:`31286`)
- Bug in :meth:`df.groupby(..).quantile() <pandas.core.groupby.DataFrameGroupBy.quantile>` and :meth:`df.resample(..).quantile() <pandas.core.resample.Resampler.quantile>` raised ``TypeError`` when values were of type ``Timedelta`` (:issue:`29485`)
- Bug in :meth:`Rolling.median` and :meth:`Rolling.quantile` returned wrong values for :class:`BaseIndexer` subclasses with non-monotonic starting or ending points for windows (:issue:`37153`)
+- Bug in :meth:`DataFrame.groupby` dropped ``nan`` groups from result with ``dropna=False`` when grouping over a single column (:issue:`35646`, :issue:`35542`)
Reshaping
^^^^^^^^^
diff --git a/pandas/_libs/lib.pyx b/pandas/_libs/lib.pyx
index e493e5e9d41d3..0b0334d52c1e9 100644
--- a/pandas/_libs/lib.pyx
+++ b/pandas/_libs/lib.pyx
@@ -896,21 +896,28 @@ def indices_fast(ndarray index, const int64_t[:] labels, list keys,
if lab != cur:
if lab != -1:
- tup = PyTuple_New(k)
- for j in range(k):
- val = keys[j][sorted_labels[j][i - 1]]
- PyTuple_SET_ITEM(tup, j, val)
- Py_INCREF(val)
-
+ if k == 1:
+ # When k = 1 we do not want to return a tuple as key
+ tup = keys[0][sorted_labels[0][i - 1]]
+ else:
+ tup = PyTuple_New(k)
+ for j in range(k):
+ val = keys[j][sorted_labels[j][i - 1]]
+ PyTuple_SET_ITEM(tup, j, val)
+ Py_INCREF(val)
result[tup] = index[start:i]
start = i
cur = lab
- tup = PyTuple_New(k)
- for j in range(k):
- val = keys[j][sorted_labels[j][n - 1]]
- PyTuple_SET_ITEM(tup, j, val)
- Py_INCREF(val)
+ if k == 1:
+ # When k = 1 we do not want to return a tuple as key
+ tup = keys[0][sorted_labels[0][n - 1]]
+ else:
+ tup = PyTuple_New(k)
+ for j in range(k):
+ val = keys[j][sorted_labels[j][n - 1]]
+ PyTuple_SET_ITEM(tup, j, val)
+ Py_INCREF(val)
result[tup] = index[start:]
return result
diff --git a/pandas/core/groupby/ops.py b/pandas/core/groupby/ops.py
index bca71b5c9646b..ccf23a6f24c42 100644
--- a/pandas/core/groupby/ops.py
+++ b/pandas/core/groupby/ops.py
@@ -229,12 +229,9 @@ def apply(self, f: F, data: FrameOrSeries, axis: int = 0):
@cache_readonly
def indices(self):
""" dict {group name -> group indices} """
- if len(self.groupings) == 1:
- return self.groupings[0].indices
- else:
- codes_list = [ping.codes for ping in self.groupings]
- keys = [ping.group_index for ping in self.groupings]
- return get_indexer_dict(codes_list, keys)
+ codes_list = [ping.codes for ping in self.groupings]
+ keys = [ping.group_index for ping in self.groupings]
+ return get_indexer_dict(codes_list, keys)
@property
def codes(self) -> List[np.ndarray]:
diff --git a/pandas/core/sorting.py b/pandas/core/sorting.py
index 2e32a7572adc7..e390229b5dcba 100644
--- a/pandas/core/sorting.py
+++ b/pandas/core/sorting.py
@@ -4,6 +4,7 @@
TYPE_CHECKING,
Callable,
DefaultDict,
+ Dict,
Iterable,
List,
Optional,
@@ -528,16 +529,22 @@ def get_flattened_list(
return [tuple(array) for array in arrays.values()]
-def get_indexer_dict(label_list, keys):
+def get_indexer_dict(
+ label_list: List[np.ndarray], keys: List["Index"]
+) -> Dict[Union[str, Tuple], np.ndarray]:
"""
Returns
-------
- dict
+ dict:
Labels mapped to indexers.
"""
shape = [len(x) for x in keys]
group_index = get_group_index(label_list, shape, sort=True, xnull=True)
+ if np.all(group_index == -1):
+ # When all keys are nan and dropna=True, indices_fast can't handle this
+ # and the return is empty anyway
+ return {}
ngroups = (
((group_index.size and group_index.max()) + 1)
if is_int64_overflow_possible(shape)
diff --git a/pandas/tests/groupby/test_groupby.py b/pandas/tests/groupby/test_groupby.py
index 2563eeeb68672..a0c228200e73a 100644
--- a/pandas/tests/groupby/test_groupby.py
+++ b/pandas/tests/groupby/test_groupby.py
@@ -1298,6 +1298,13 @@ def test_groupby_nat_exclude():
grouped.get_group(pd.NaT)
+def test_groupby_two_group_keys_all_nan():
+ # GH #36842: Grouping over two group keys shouldn't raise an error
+ df = DataFrame({"a": [np.nan, np.nan], "b": [np.nan, np.nan], "c": [1, 2]})
+ result = df.groupby(["a", "b"]).indices
+ assert result == {}
+
+
def test_groupby_2d_malformed():
d = DataFrame(index=range(2))
d["group"] = ["g1", "g2"]
diff --git a/pandas/tests/groupby/test_groupby_dropna.py b/pandas/tests/groupby/test_groupby_dropna.py
index 29a8f883f0ff5..02ce4dcf2ae2b 100644
--- a/pandas/tests/groupby/test_groupby_dropna.py
+++ b/pandas/tests/groupby/test_groupby_dropna.py
@@ -2,7 +2,7 @@
import pytest
import pandas as pd
-import pandas.testing as tm
+import pandas._testing as tm
@pytest.mark.parametrize(
@@ -335,3 +335,21 @@ def test_groupby_apply_with_dropna_for_multi_index(dropna, data, selected_data,
expected = pd.DataFrame(selected_data, index=mi)
tm.assert_frame_equal(result, expected)
+
+
+def test_groupby_nan_included():
+ # GH 35646
+ data = {"group": ["g1", np.nan, "g1", "g2", np.nan], "B": [0, 1, 2, 3, 4]}
+ df = pd.DataFrame(data)
+ grouped = df.groupby("group", dropna=False)
+ result = grouped.indices
+ dtype = "int64"
+ expected = {
+ "g1": np.array([0, 2], dtype=dtype),
+ "g2": np.array([3], dtype=dtype),
+ np.nan: np.array([1, 4], dtype=dtype),
+ }
+ for result_values, expected_values in zip(result.values(), expected.values()):
+ tm.assert_numpy_array_equal(result_values, expected_values)
+ assert np.isnan(list(result.keys())[2])
+ assert list(result.keys())[0:2] == ["g1", "g2"]
diff --git a/pandas/tests/window/test_rolling.py b/pandas/tests/window/test_rolling.py
index 2c8439aae75e5..02bcfab8d3388 100644
--- a/pandas/tests/window/test_rolling.py
+++ b/pandas/tests/window/test_rolling.py
@@ -1087,3 +1087,18 @@ def test_rolling_corr_timedelta_index(index, window):
result = x.rolling(window).corr(y)
expected = Series([np.nan, np.nan, 1, 1, 1], index=index)
tm.assert_almost_equal(result, expected)
+
+
+def test_groupby_rolling_nan_included():
+ # GH 35542
+ data = {"group": ["g1", np.nan, "g1", "g2", np.nan], "B": [0, 1, 2, 3, 4]}
+ df = DataFrame(data)
+ result = df.groupby("group", dropna=False).rolling(1, min_periods=1).mean()
+ expected = DataFrame(
+ {"B": [0.0, 2.0, 3.0, 1.0, 4.0]},
+ index=pd.MultiIndex.from_tuples(
+ [("g1", 0), ("g1", 2), ("g2", 3), (np.nan, 1), (np.nan, 4)],
+ names=["group", None],
+ ),
+ )
+ tm.assert_frame_equal(result, expected)
| - [x] closes #35646
- [x] closes #35542
- [x] tests added / passed
- [x] passes `black pandas`
- [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [x] whatsnew entry
When grouping over 2 columns with only ``nans`` this raised an error previously. Realised this, when running tests for my fix, because this case was tests with one grouping column. Added test to cover fixed behavior for more than one column.
cc @mroeschke Takes same path as two or more grouping columns now. | https://api.github.com/repos/pandas-dev/pandas/pulls/36842 | 2020-10-03T23:31:55Z | 2020-11-04T02:59:03Z | 2020-11-04T02:59:03Z | 2020-11-04T21:12:38Z |
TST: Verify parsing of data with encoded special characters (16218) | diff --git a/pandas/tests/io/parser/test_encoding.py b/pandas/tests/io/parser/test_encoding.py
index f23b498c7388a..876696ecdad9c 100644
--- a/pandas/tests/io/parser/test_encoding.py
+++ b/pandas/tests/io/parser/test_encoding.py
@@ -10,7 +10,7 @@
import numpy as np
import pytest
-from pandas import DataFrame
+from pandas import DataFrame, read_csv
import pandas._testing as tm
@@ -199,3 +199,17 @@ def test_encoding_named_temp_file(all_parsers):
result = parser.read_csv(f, encoding=encoding)
tm.assert_frame_equal(result, expected)
+
+
+@pytest.mark.parametrize(
+ "encoding", ["utf-8", "utf-16", "utf-16-be", "utf-16-le", "utf-32"]
+)
+def test_parse_encoded_special_characters(encoding):
+ # GH16218 Verify parsing of data with encoded special characters
+ # Data contains a Unicode 'FULLWIDTH COLON' (U+FF1A) at position (0,"a")
+ data = "a\tb\n:foo\t0\nbar\t1\nbaz\t2"
+ encoded_data = BytesIO(data.encode(encoding))
+ result = read_csv(encoded_data, delimiter="\t", encoding=encoding)
+
+ expected = DataFrame(data=[[":foo", 0], ["bar", 1], ["baz", 2]], columns=["a", "b"])
+ tm.assert_frame_equal(result, expected)
|
- [x] closes #16218
- [x] tests added / passed
- [x] passes `black pandas`
- [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [x] whatsnew entry
| https://api.github.com/repos/pandas-dev/pandas/pulls/36841 | 2020-10-03T22:23:18Z | 2020-10-12T06:38:30Z | 2020-10-12T06:38:30Z | 2020-10-12T06:38:38Z |
PERF: Index._shallow_copy shares _cache with copies of self | diff --git a/doc/source/whatsnew/v1.2.0.rst b/doc/source/whatsnew/v1.2.0.rst
index 47ebd962b367c..81e310a2c44f1 100644
--- a/doc/source/whatsnew/v1.2.0.rst
+++ b/doc/source/whatsnew/v1.2.0.rst
@@ -285,6 +285,8 @@ Performance improvements
- ``Styler`` uuid method altered to compress data transmission over web whilst maintaining reasonably low table collision probability (:issue:`36345`)
- Performance improvement in :meth:`pd.to_datetime` with non-ns time unit for ``float`` ``dtype`` columns (:issue:`20445`)
- Performance improvement in setting values on a :class:`IntervalArray` (:issue:`36310`)
+- The internal index method :meth:`~Index._shallow_copy` now makes the new index and original index share cached attributes,
+ avoiding creating these again, if created on either. This can speed up operations that depend on creating copies of existing indexes (:issue:`36840`)
- Performance improvement in :meth:`RollingGroupby.count` (:issue:`35625`)
.. ---------------------------------------------------------------------------
diff --git a/pandas/core/indexes/base.py b/pandas/core/indexes/base.py
index d603797370ce3..4967e13a9855a 100644
--- a/pandas/core/indexes/base.py
+++ b/pandas/core/indexes/base.py
@@ -561,12 +561,12 @@ def _shallow_copy(self, values=None, name: Label = no_default):
name : Label, defaults to self.name
"""
name = self.name if name is no_default else name
- cache = self._cache.copy() if values is None else {}
- if values is None:
- values = self._values
- result = self._simple_new(values, name=name)
- result._cache = cache
+ if values is not None:
+ return self._simple_new(values, name=name)
+
+ result = self._simple_new(self._values, name=name)
+ result._cache = self._cache
return result
def is_(self, other) -> bool:
diff --git a/pandas/core/indexes/datetimelike.py b/pandas/core/indexes/datetimelike.py
index 5128c644e6bcb..4440238dbd493 100644
--- a/pandas/core/indexes/datetimelike.py
+++ b/pandas/core/indexes/datetimelike.py
@@ -671,17 +671,15 @@ def _with_freq(self, freq):
def _shallow_copy(self, values=None, name: Label = lib.no_default):
name = self.name if name is lib.no_default else name
- cache = self._cache.copy() if values is None else {}
- if values is None:
- values = self._data
-
- if isinstance(values, np.ndarray):
+ if values is not None:
# TODO: We would rather not get here
- values = type(self._data)(values, dtype=self.dtype)
+ if isinstance(values, np.ndarray):
+ values = type(self._data)(values, dtype=self.dtype)
+ return self._simple_new(values, name=name)
- result = type(self)._simple_new(values, name=name)
- result._cache = cache
+ result = self._simple_new(self._data, name=name)
+ result._cache = self._cache
return result
# --------------------------------------------------------------------
diff --git a/pandas/core/indexes/interval.py b/pandas/core/indexes/interval.py
index a56f6a5bb0340..4a877621a94c2 100644
--- a/pandas/core/indexes/interval.py
+++ b/pandas/core/indexes/interval.py
@@ -335,12 +335,12 @@ def _shallow_copy(
self, values: Optional[IntervalArray] = None, name: Label = lib.no_default
):
name = self.name if name is lib.no_default else name
- cache = self._cache.copy() if values is None else {}
- if values is None:
- values = self._data
- result = self._simple_new(values, name=name)
- result._cache = cache
+ if values is not None:
+ return self._simple_new(values, name=name)
+
+ result = self._simple_new(self._data, name=name)
+ result._cache = self._cache
return result
@cache_readonly
diff --git a/pandas/core/indexes/period.py b/pandas/core/indexes/period.py
index 27b60747015de..adf7a75b33b38 100644
--- a/pandas/core/indexes/period.py
+++ b/pandas/core/indexes/period.py
@@ -260,12 +260,12 @@ def _has_complex_internals(self) -> bool:
def _shallow_copy(self, values=None, name: Label = no_default):
name = name if name is not no_default else self.name
- cache = self._cache.copy() if values is None else {}
- if values is None:
- values = self._data
- result = self._simple_new(values, name=name)
- result._cache = cache
+ if values is not None:
+ return self._simple_new(values, name=name)
+
+ result = self._simple_new(self._data, name=name)
+ result._cache = self._cache
return result
def _maybe_convert_timedelta(self, other):
diff --git a/pandas/core/indexes/range.py b/pandas/core/indexes/range.py
index 4dffda2605ef7..d5d9a9b5bc0a3 100644
--- a/pandas/core/indexes/range.py
+++ b/pandas/core/indexes/range.py
@@ -397,13 +397,13 @@ def __iter__(self):
def _shallow_copy(self, values=None, name: Label = no_default):
name = self.name if name is no_default else name
- if values is None:
- result = self._simple_new(self._range, name=name)
- result._cache = self._cache.copy()
- return result
- else:
+ if values is not None:
return Int64Index._simple_new(values, name=name)
+ result = self._simple_new(self._range, name=name)
+ result._cache = self._cache
+ return result
+
@doc(Int64Index.copy)
def copy(self, name=None, deep=False, dtype=None, names=None):
name = self._validate_names(name=name, names=names, deep=deep)[0]
diff --git a/pandas/tests/base/test_misc.py b/pandas/tests/base/test_misc.py
index b8468a5acf277..2dc2fe6d2ad07 100644
--- a/pandas/tests/base/test_misc.py
+++ b/pandas/tests/base/test_misc.py
@@ -128,7 +128,8 @@ def test_memory_usage(index_or_series_obj):
)
if len(obj) == 0:
- assert res_deep == res == 0
+ expected = 0 if isinstance(obj, Index) else 80
+ assert res_deep == res == expected
elif is_object or is_categorical:
# only deep will pick them up
assert res_deep > res
diff --git a/pandas/tests/indexes/common.py b/pandas/tests/indexes/common.py
index c40f7b1bc2120..73d2e99d3ff5e 100644
--- a/pandas/tests/indexes/common.py
+++ b/pandas/tests/indexes/common.py
@@ -935,28 +935,22 @@ def test_contains_requires_hashable_raises(self):
with pytest.raises(TypeError, match=msg):
{} in idx._engine
- def test_copy_copies_cache(self):
- # GH32898
+ def test_copy_shares_cache(self):
+ # GH32898, GH36840
idx = self.create_index()
idx.get_loc(idx[0]) # populates the _cache.
copy = idx.copy()
- # check that the copied cache is a copy of the original
- assert idx._cache == copy._cache
- assert idx._cache is not copy._cache
- # cache values should reference the same object
- for key, val in idx._cache.items():
- assert copy._cache[key] is val, key
+ assert copy._cache is idx._cache
- def test_shallow_copy_copies_cache(self):
- # GH32669
+ def test_shallow_copy_shares_cache(self):
+ # GH32669, GH36840
idx = self.create_index()
idx.get_loc(idx[0]) # populates the _cache.
shallow_copy = idx._shallow_copy()
- # check that the shallow_copied cache is a copy of the original
- assert idx._cache == shallow_copy._cache
- assert idx._cache is not shallow_copy._cache
- # cache values should reference the same object
- for key, val in idx._cache.items():
- assert shallow_copy._cache[key] is val, key
+ assert shallow_copy._cache is idx._cache
+
+ shallow_copy = idx._shallow_copy(idx._data)
+ assert shallow_copy._cache is not idx._cache
+ assert shallow_copy._cache == {}
| #32640 copied the cache when copying indexes. Indexes are immutable, so some refactoring allows us to __share__ the cache instead. This means potentially fewer expensive index ops.
```python
>>> idx = pd.CategoricalIndex(np.arange(100_000))
>>> copied = idx.copy()
>>> copied._cache
{}
>>> idx.get_loc(99_999)
>>> copied._cache
{'_engine': <pandas._libs.index.Int32Engine at 0x1fd3a6934c8>} # don't need to recreate _engine on copied
```
Performance example:
```python
>>> idx = pd.CategoricalIndex(np.arange(100_000))
>>> %timeit idx._shallow_copy().get_loc(99_999)
4.09 ms ± 65 µs per loop # master
7.71 µs ± 355 ns per loop # this PR
```
| https://api.github.com/repos/pandas-dev/pandas/pulls/36840 | 2020-10-03T22:20:37Z | 2020-10-06T15:33:14Z | 2020-10-06T15:33:14Z | 2020-11-28T18:25:04Z |
PERF: Add asv benchmarks for select_dtypes (14588) | diff --git a/asv_bench/benchmarks/dtypes.py b/asv_bench/benchmarks/dtypes.py
index bd17b710b108d..a5ed5c389fee4 100644
--- a/asv_bench/benchmarks/dtypes.py
+++ b/asv_bench/benchmarks/dtypes.py
@@ -1,5 +1,9 @@
+import string
+
import numpy as np
+from pandas import DataFrame
+import pandas._testing as tm
from pandas.api.types import pandas_dtype
from .pandas_vb_common import (
@@ -62,4 +66,57 @@ def time_infer(self, dtype):
lib.infer_dtype(self.data_dict[dtype], skipna=False)
+class SelectDtypes:
+
+ params = [
+ tm.ALL_INT_DTYPES
+ + tm.ALL_EA_INT_DTYPES
+ + tm.FLOAT_DTYPES
+ + tm.COMPLEX_DTYPES
+ + tm.DATETIME64_DTYPES
+ + tm.TIMEDELTA64_DTYPES
+ + tm.BOOL_DTYPES
+ ]
+ param_names = ["dtype"]
+
+ def setup(self, dtype):
+ N, K = 5000, 50
+ self.index = tm.makeStringIndex(N)
+ self.columns = tm.makeStringIndex(K)
+
+ def create_df(data):
+ return DataFrame(data, index=self.index, columns=self.columns)
+
+ self.df_int = create_df(np.random.randint(low=100, size=(N, K)))
+ self.df_float = create_df(np.random.randn(N, K))
+ self.df_bool = create_df(np.random.choice([True, False], size=(N, K)))
+ self.df_string = create_df(
+ np.random.choice(list(string.ascii_letters), size=(N, K))
+ )
+
+ def time_select_dtype_int_include(self, dtype):
+ self.df_int.select_dtypes(include=dtype)
+
+ def time_select_dtype_int_exclude(self, dtype):
+ self.df_int.select_dtypes(exclude=dtype)
+
+ def time_select_dtype_float_include(self, dtype):
+ self.df_float.select_dtypes(include=dtype)
+
+ def time_select_dtype_float_exclude(self, dtype):
+ self.df_float.select_dtypes(exclude=dtype)
+
+ def time_select_dtype_bool_include(self, dtype):
+ self.df_bool.select_dtypes(include=dtype)
+
+ def time_select_dtype_bool_exclude(self, dtype):
+ self.df_bool.select_dtypes(exclude=dtype)
+
+ def time_select_dtype_string_include(self, dtype):
+ self.df_string.select_dtypes(include=dtype)
+
+ def time_select_dtype_string_exclude(self, dtype):
+ self.df_string.select_dtypes(exclude=dtype)
+
+
from .pandas_vb_common import setup # noqa: F401 isort:skip
| - [x] closes #14588
- [x] tests added / passed
- [x] passes `black pandas`
- [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [x] whatsnew entry
| https://api.github.com/repos/pandas-dev/pandas/pulls/36839 | 2020-10-03T20:31:48Z | 2020-10-05T17:41:30Z | 2020-10-05T17:41:30Z | 2020-10-05T17:41:34Z |
DEPR: Deprecate use of strings denoting units with 'M', 'Y' or 'y' in pd.to_timedelta (36666) | diff --git a/doc/source/whatsnew/v1.2.0.rst b/doc/source/whatsnew/v1.2.0.rst
index d10cb28a3f588..4e0c1ae01fdf3 100644
--- a/doc/source/whatsnew/v1.2.0.rst
+++ b/doc/source/whatsnew/v1.2.0.rst
@@ -336,6 +336,7 @@ Deprecations
- :meth:`Rolling.count` with ``min_periods=None`` will default to the size of the window in a future version (:issue:`31302`)
- Deprecated slice-indexing on timezone-aware :class:`DatetimeIndex` with naive ``datetime`` objects, to match scalar indexing behavior (:issue:`36148`)
- :meth:`Index.ravel` returning a ``np.ndarray`` is deprecated, in the future this will return a view on the same index (:issue:`19956`)
+- Deprecate use of strings denoting units with 'M', 'Y' or 'y' in :func:`~pandas.to_timedelta` (:issue:`36666`)
.. ---------------------------------------------------------------------------
diff --git a/pandas/_libs/tslibs/timedeltas.pyx b/pandas/_libs/tslibs/timedeltas.pyx
index e21526a8f69e4..45f32d92c7a74 100644
--- a/pandas/_libs/tslibs/timedeltas.pyx
+++ b/pandas/_libs/tslibs/timedeltas.pyx
@@ -1,4 +1,5 @@
import collections
+import warnings
import cython
@@ -466,6 +467,15 @@ cdef inline timedelta_from_spec(object number, object frac, object unit):
try:
unit = ''.join(unit)
+
+ if unit in ["M", "Y", "y"]:
+ warnings.warn(
+ "Units 'M', 'Y' and 'y' do not represent unambiguous "
+ "timedelta values and will be removed in a future version",
+ FutureWarning,
+ stacklevel=2,
+ )
+
if unit == 'M':
# To parse ISO 8601 string, 'M' should be treated as minute,
# not month
@@ -634,9 +644,11 @@ cdef inline int64_t parse_iso_format_string(str ts) except? -1:
else:
neg = 1
elif c in ['W', 'D', 'H', 'M']:
- unit.append(c)
if c in ['H', 'M'] and len(number) > 2:
raise ValueError(err_msg)
+ if c == 'M':
+ c = 'min'
+ unit.append(c)
r = timedelta_from_spec(number, '0', unit)
result += timedelta_as_neg(r, neg)
diff --git a/pandas/core/tools/timedeltas.py b/pandas/core/tools/timedeltas.py
index 372eac29bad9e..e8faebd6b2542 100644
--- a/pandas/core/tools/timedeltas.py
+++ b/pandas/core/tools/timedeltas.py
@@ -25,10 +25,12 @@ def to_timedelta(arg, unit=None, errors="raise"):
Parameters
----------
arg : str, timedelta, list-like or Series
- The data to be converted to timedelta. The character M by itself,
- e.g. '1M', is treated as minute, not month. The characters Y and y
- are treated as the mean length of the Gregorian calendar year -
- 365.2425 days or 365 days 5 hours 49 minutes 12 seconds.
+ The data to be converted to timedelta.
+
+ .. deprecated:: 1.2
+ Strings with units 'M', 'Y' and 'y' do not represent
+ unambiguous timedelta values and will be removed in a future version
+
unit : str, optional
Denotes the unit of the arg for numeric `arg`. Defaults to ``"ns"``.
diff --git a/pandas/tests/indexes/interval/test_interval.py b/pandas/tests/indexes/interval/test_interval.py
index 1fbbb12b64dc5..67e031b53e44e 100644
--- a/pandas/tests/indexes/interval/test_interval.py
+++ b/pandas/tests/indexes/interval/test_interval.py
@@ -86,7 +86,7 @@ def test_properties(self, closed):
[1, 1, 2, 5, 15, 53, 217, 1014, 5335, 31240, 201608],
[-np.inf, -100, -10, 0.5, 1, 1.5, 3.8, 101, 202, np.inf],
pd.to_datetime(["20170101", "20170202", "20170303", "20170404"]),
- pd.to_timedelta(["1ns", "2ms", "3s", "4M", "5H", "6D"]),
+ pd.to_timedelta(["1ns", "2ms", "3s", "4min", "5H", "6D"]),
],
)
def test_length(self, closed, breaks):
diff --git a/pandas/tests/scalar/interval/test_interval.py b/pandas/tests/scalar/interval/test_interval.py
index 8ad9a2c7a9c70..5071c5cdec6c8 100644
--- a/pandas/tests/scalar/interval/test_interval.py
+++ b/pandas/tests/scalar/interval/test_interval.py
@@ -79,8 +79,8 @@ def test_hash(self, interval):
(-np.inf, np.inf, np.inf),
(Timedelta("0 days"), Timedelta("5 days"), Timedelta("5 days")),
(Timedelta("10 days"), Timedelta("10 days"), Timedelta("0 days")),
- (Timedelta("1H10M"), Timedelta("5H5M"), Timedelta("3H55M")),
- (Timedelta("5S"), Timedelta("1H"), Timedelta("59M55S")),
+ (Timedelta("1H10min"), Timedelta("5H5min"), Timedelta("3H55min")),
+ (Timedelta("5S"), Timedelta("1H"), Timedelta("59min55S")),
],
)
def test_length(self, left, right, expected):
diff --git a/pandas/tests/scalar/timedelta/test_timedelta.py b/pandas/tests/scalar/timedelta/test_timedelta.py
index a01921bd6c4c2..89b45b7266daa 100644
--- a/pandas/tests/scalar/timedelta/test_timedelta.py
+++ b/pandas/tests/scalar/timedelta/test_timedelta.py
@@ -160,108 +160,117 @@ def test_nat_converters(self):
assert result.astype("int64") == iNaT
@pytest.mark.parametrize(
- "units, np_unit",
- [
- (["W", "w"], "W"),
- (["D", "d", "days", "day", "Days", "Day"], "D"),
- (
- ["m", "minute", "min", "minutes", "t", "Minute", "Min", "Minutes", "T"],
+ "unit, np_unit",
+ [(value, "W") for value in ["W", "w"]]
+ + [(value, "D") for value in ["D", "d", "days", "day", "Days", "Day"]]
+ + [
+ (value, "m")
+ for value in [
"m",
- ),
- (["s", "seconds", "sec", "second", "S", "Seconds", "Sec", "Second"], "s"),
- (
- [
- "ms",
- "milliseconds",
- "millisecond",
- "milli",
- "millis",
- "l",
- "MS",
- "Milliseconds",
- "Millisecond",
- "Milli",
- "Millis",
- "L",
- ],
+ "minute",
+ "min",
+ "minutes",
+ "t",
+ "Minute",
+ "Min",
+ "Minutes",
+ "T",
+ ]
+ ]
+ + [
+ (value, "s")
+ for value in [
+ "s",
+ "seconds",
+ "sec",
+ "second",
+ "S",
+ "Seconds",
+ "Sec",
+ "Second",
+ ]
+ ]
+ + [
+ (value, "ms")
+ for value in [
"ms",
- ),
- (
- [
- "us",
- "microseconds",
- "microsecond",
- "micro",
- "micros",
- "u",
- "US",
- "Microseconds",
- "Microsecond",
- "Micro",
- "Micros",
- "U",
- ],
+ "milliseconds",
+ "millisecond",
+ "milli",
+ "millis",
+ "l",
+ "MS",
+ "Milliseconds",
+ "Millisecond",
+ "Milli",
+ "Millis",
+ "L",
+ ]
+ ]
+ + [
+ (value, "us")
+ for value in [
"us",
- ),
- (
- [
- "ns",
- "nanoseconds",
- "nanosecond",
- "nano",
- "nanos",
- "n",
- "NS",
- "Nanoseconds",
- "Nanosecond",
- "Nano",
- "Nanos",
- "N",
- ],
+ "microseconds",
+ "microsecond",
+ "micro",
+ "micros",
+ "u",
+ "US",
+ "Microseconds",
+ "Microsecond",
+ "Micro",
+ "Micros",
+ "U",
+ ]
+ ]
+ + [
+ (value, "ns")
+ for value in [
"ns",
- ),
+ "nanoseconds",
+ "nanosecond",
+ "nano",
+ "nanos",
+ "n",
+ "NS",
+ "Nanoseconds",
+ "Nanosecond",
+ "Nano",
+ "Nanos",
+ "N",
+ ]
],
)
@pytest.mark.parametrize("wrapper", [np.array, list, pd.Index])
- def test_unit_parser(self, units, np_unit, wrapper):
+ def test_unit_parser(self, unit, np_unit, wrapper):
# validate all units, GH 6855, GH 21762
- for unit in units:
- # array-likes
- expected = TimedeltaIndex(
- [np.timedelta64(i, np_unit) for i in np.arange(5).tolist()]
- )
- result = to_timedelta(wrapper(range(5)), unit=unit)
- tm.assert_index_equal(result, expected)
- result = TimedeltaIndex(wrapper(range(5)), unit=unit)
- tm.assert_index_equal(result, expected)
-
- if unit == "M":
- # M is treated as minutes in string repr
- expected = TimedeltaIndex(
- [np.timedelta64(i, "m") for i in np.arange(5).tolist()]
- )
-
- str_repr = [f"{x}{unit}" for x in np.arange(5)]
- result = to_timedelta(wrapper(str_repr))
- tm.assert_index_equal(result, expected)
- result = TimedeltaIndex(wrapper(str_repr))
- tm.assert_index_equal(result, expected)
-
- # scalar
- expected = Timedelta(np.timedelta64(2, np_unit).astype("timedelta64[ns]"))
-
- result = to_timedelta(2, unit=unit)
- assert result == expected
- result = Timedelta(2, unit=unit)
- assert result == expected
-
- if unit == "M":
- expected = Timedelta(np.timedelta64(2, "m").astype("timedelta64[ns]"))
-
- result = to_timedelta(f"2{unit}")
- assert result == expected
- result = Timedelta(f"2{unit}")
- assert result == expected
+ # array-likes
+ expected = TimedeltaIndex(
+ [np.timedelta64(i, np_unit) for i in np.arange(5).tolist()]
+ )
+ result = to_timedelta(wrapper(range(5)), unit=unit)
+ tm.assert_index_equal(result, expected)
+ result = TimedeltaIndex(wrapper(range(5)), unit=unit)
+ tm.assert_index_equal(result, expected)
+
+ str_repr = [f"{x}{unit}" for x in np.arange(5)]
+ result = to_timedelta(wrapper(str_repr))
+ tm.assert_index_equal(result, expected)
+ result = to_timedelta(wrapper(str_repr))
+ tm.assert_index_equal(result, expected)
+
+ # scalar
+ expected = Timedelta(np.timedelta64(2, np_unit).astype("timedelta64[ns]"))
+ result = to_timedelta(2, unit=unit)
+ assert result == expected
+ result = Timedelta(2, unit=unit)
+ assert result == expected
+
+ result = to_timedelta(f"2{unit}")
+ assert result == expected
+ result = Timedelta(f"2{unit}")
+ assert result == expected
@pytest.mark.parametrize("unit", ["Y", "y", "M"])
def test_unit_m_y_raises(self, unit):
diff --git a/pandas/tests/series/methods/test_shift.py b/pandas/tests/series/methods/test_shift.py
index 4b23820caeeb4..d38d70abba923 100644
--- a/pandas/tests/series/methods/test_shift.py
+++ b/pandas/tests/series/methods/test_shift.py
@@ -32,7 +32,7 @@ def test_shift_always_copy(self, ser, shift_size):
# GH22397
assert ser.shift(shift_size) is not ser
- @pytest.mark.parametrize("move_by_freq", [pd.Timedelta("1D"), pd.Timedelta("1M")])
+ @pytest.mark.parametrize("move_by_freq", [pd.Timedelta("1D"), pd.Timedelta("1min")])
def test_datetime_shift_always_copy(self, move_by_freq):
# GH#22397
ser = Series(range(5), index=date_range("2017", periods=5))
diff --git a/pandas/tests/tools/test_to_timedelta.py b/pandas/tests/tools/test_to_timedelta.py
index f68d83f7f4d58..8e48295c533cc 100644
--- a/pandas/tests/tools/test_to_timedelta.py
+++ b/pandas/tests/tools/test_to_timedelta.py
@@ -121,6 +121,27 @@ def test_to_timedelta_invalid(self):
invalid_data, to_timedelta(invalid_data, errors="ignore")
)
+ @pytest.mark.parametrize(
+ "val, warning",
+ [
+ ("1M", FutureWarning),
+ ("1 M", FutureWarning),
+ ("1Y", FutureWarning),
+ ("1 Y", FutureWarning),
+ ("1y", FutureWarning),
+ ("1 y", FutureWarning),
+ ("1m", None),
+ ("1 m", None),
+ ("1 day", None),
+ ("2day", None),
+ ],
+ )
+ def test_unambiguous_timedelta_values(self, val, warning):
+ # GH36666 Deprecate use of strings denoting units with 'M', 'Y', 'm' or 'y'
+ # in pd.to_timedelta
+ with tm.assert_produces_warning(warning, check_stacklevel=False):
+ to_timedelta(val)
+
def test_to_timedelta_via_apply(self):
# GH 5458
expected = Series([np.timedelta64(1, "s")])
| - [x] closes #36666
- [x] tests added / passed
- [x] passes `black pandas`
- [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [x] whatsnew entry
| https://api.github.com/repos/pandas-dev/pandas/pulls/36838 | 2020-10-03T17:35:09Z | 2020-10-31T18:52:07Z | 2020-10-31T18:52:07Z | 2020-10-31T18:52:13Z |
DOC: Update docs on estimated time of running full asv suite (36344) | diff --git a/doc/source/development/contributing.rst b/doc/source/development/contributing.rst
index d6955c5d4b8d2..e96da41918259 100644
--- a/doc/source/development/contributing.rst
+++ b/doc/source/development/contributing.rst
@@ -1362,16 +1362,16 @@ environments. If you want to use virtualenv instead, write::
The ``-E virtualenv`` option should be added to all ``asv`` commands
that run benchmarks. The default value is defined in ``asv.conf.json``.
-Running the full test suite can take up to one hour and use up to 3GB of RAM.
-Usually it is sufficient to paste only a subset of the results into the pull
-request to show that the committed changes do not cause unexpected performance
-regressions. You can run specific benchmarks using the ``-b`` flag, which
-takes a regular expression. For example, this will only run tests from a
-``pandas/asv_bench/benchmarks/groupby.py`` file::
+Running the full benchmark suite can be an all-day process, depending on your
+hardware and its resource utilization. However, usually it is sufficient to paste
+only a subset of the results into the pull request to show that the committed changes
+do not cause unexpected performance regressions. You can run specific benchmarks
+using the ``-b`` flag, which takes a regular expression. For example, this will
+only run benchmarks from a ``pandas/asv_bench/benchmarks/groupby.py`` file::
asv continuous -f 1.1 upstream/master HEAD -b ^groupby
-If you want to only run a specific group of tests from a file, you can do it
+If you want to only run a specific group of benchmarks from a file, you can do it
using ``.`` as a separator. For example::
asv continuous -f 1.1 upstream/master HEAD -b groupby.GroupByMethods
| - [x] closes #36344
- [x] tests added / passed
- [x] passes `black pandas`
- [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [x] whatsnew entry
| https://api.github.com/repos/pandas-dev/pandas/pulls/36836 | 2020-10-03T15:32:45Z | 2020-10-08T00:59:25Z | 2020-10-08T00:59:25Z | 2020-10-08T00:59:47Z |
DOC: update code style for remaining intro tutorial docs for #36777 computation.rst | diff --git a/doc/source/user_guide/computation.rst b/doc/source/user_guide/computation.rst
index e7edda90610b5..46531de2fd763 100644
--- a/doc/source/user_guide/computation.rst
+++ b/doc/source/user_guide/computation.rst
@@ -64,7 +64,7 @@ series in the DataFrame, also excluding NA/null values.
.. ipython:: python
frame = pd.DataFrame(np.random.randn(1000, 5),
- columns=['a', 'b', 'c', 'd', 'e'])
+ columns=["a", "b", "c", "d", "e"])
frame.cov()
``DataFrame.cov`` also supports an optional ``min_periods`` keyword that
@@ -73,9 +73,9 @@ in order to have a valid result.
.. ipython:: python
- frame = pd.DataFrame(np.random.randn(20, 3), columns=['a', 'b', 'c'])
- frame.loc[frame.index[:5], 'a'] = np.nan
- frame.loc[frame.index[5:10], 'b'] = np.nan
+ frame = pd.DataFrame(np.random.randn(20, 3), columns=["a", "b", "c"])
+ frame.loc[frame.index[:5], "a"] = np.nan
+ frame.loc[frame.index[5:10], "b"] = np.nan
frame.cov()
@@ -117,12 +117,12 @@ Wikipedia has articles covering the above correlation coefficients:
.. ipython:: python
frame = pd.DataFrame(np.random.randn(1000, 5),
- columns=['a', 'b', 'c', 'd', 'e'])
+ columns=["a", "b", "c", "d", "e"])
frame.iloc[::2] = np.nan
# Series with Series
- frame['a'].corr(frame['b'])
- frame['a'].corr(frame['b'], method='spearman')
+ frame["a"].corr(frame["b"])
+ frame["a"].corr(frame["b"], method="spearman")
# Pairwise correlation of DataFrame columns
frame.corr()
@@ -134,9 +134,9 @@ Like ``cov``, ``corr`` also supports the optional ``min_periods`` keyword:
.. ipython:: python
- frame = pd.DataFrame(np.random.randn(20, 3), columns=['a', 'b', 'c'])
- frame.loc[frame.index[:5], 'a'] = np.nan
- frame.loc[frame.index[5:10], 'b'] = np.nan
+ frame = pd.DataFrame(np.random.randn(20, 3), columns=["a", "b", "c"])
+ frame.loc[frame.index[:5], "a"] = np.nan
+ frame.loc[frame.index[5:10], "b"] = np.nan
frame.corr()
@@ -165,8 +165,8 @@ DataFrame objects.
.. ipython:: python
- index = ['a', 'b', 'c', 'd', 'e']
- columns = ['one', 'two', 'three', 'four']
+ index = ["a", "b", "c", "d", "e"]
+ columns = ["one", "two", "three", "four"]
df1 = pd.DataFrame(np.random.randn(5, 4), index=index, columns=columns)
df2 = pd.DataFrame(np.random.randn(4, 4), index=index[:4], columns=columns)
df1.corrwith(df2)
@@ -182,8 +182,8 @@ assigned the mean of the ranks (by default) for the group:
.. ipython:: python
- s = pd.Series(np.random.randn(5), index=list('abcde'))
- s['d'] = s['b'] # so there's a tie
+ s = pd.Series(np.random.randn(5), index=list("abcde"))
+ s["d"] = s["b"] # so there's a tie
s.rank()
:meth:`~DataFrame.rank` is also a DataFrame method and can rank either the rows
@@ -244,7 +244,7 @@ objects, :class:`~pandas.core.window.Rolling`, :class:`~pandas.core.window.Expan
.. ipython:: python
s = pd.Series(np.random.randn(1000),
- index=pd.date_range('1/1/2000', periods=1000))
+ index=pd.date_range("1/1/2000", periods=1000))
s = s.cumsum()
s
@@ -279,15 +279,15 @@ We can then call methods on these ``rolling`` objects. These return like-indexed
.. ipython:: python
- s.plot(style='k--')
+ s.plot(style="k--")
@savefig rolling_mean_ex.png
- r.mean().plot(style='k')
+ r.mean().plot(style="k")
.. ipython:: python
:suppress:
- plt.close('all')
+ plt.close("all")
They can also be applied to DataFrame objects. This is really just syntactic
sugar for applying the moving window operator to all of the DataFrame's columns:
@@ -295,8 +295,8 @@ sugar for applying the moving window operator to all of the DataFrame's columns:
.. ipython:: python
df = pd.DataFrame(np.random.randn(1000, 4),
- index=pd.date_range('1/1/2000', periods=1000),
- columns=['A', 'B', 'C', 'D'])
+ index=pd.date_range("1/1/2000", periods=1000),
+ columns=["A", "B", "C", "D"])
df = df.cumsum()
@savefig rolling_mean_frame.png
@@ -368,7 +368,7 @@ compute the mean absolute deviation on a rolling basis:
return np.fabs(x - x.mean()).mean()
@savefig rolling_apply_ex.png
- s.rolling(window=60).apply(mad, raw=True).plot(style='k')
+ s.rolling(window=60).apply(mad, raw=True).plot(style="k")
Using the Numba engine
~~~~~~~~~~~~~~~~~~~~~~
@@ -377,7 +377,7 @@ Using the Numba engine
Additionally, :meth:`~Rolling.apply` can leverage `Numba <https://numba.pydata.org/>`__
if installed as an optional dependency. The apply aggregation can be executed using Numba by specifying
-``engine='numba'`` and ``engine_kwargs`` arguments (``raw`` must also be set to ``True``).
+``engine="numba"`` and ``engine_kwargs`` arguments (``raw`` must also be set to ``True``).
Numba will be applied in potentially two routines:
1. If ``func`` is a standard Python function, the engine will `JIT <https://numba.pydata.org/numba-doc/latest/user/overview.html>`__
@@ -407,13 +407,13 @@ and their default values are set to ``False``, ``True`` and ``False`` respective
In [3]: def f(x):
...: return np.sum(x) + 5
# Run the first time, compilation time will affect performance
- In [4]: %timeit -r 1 -n 1 roll.apply(f, engine='numba', raw=True) # noqa: E225
+ In [4]: %timeit -r 1 -n 1 roll.apply(f, engine="numba", raw=True) # noqa: E225
1.23 s ± 0 ns per loop (mean ± std. dev. of 1 run, 1 loop each)
# Function is cached and performance will improve
- In [5]: %timeit roll.apply(f, engine='numba', raw=True)
+ In [5]: %timeit roll.apply(f, engine="numba", raw=True)
188 ms ± 1.93 ms per loop (mean ± std. dev. of 7 runs, 10 loops each)
- In [6]: %timeit roll.apply(f, engine='cython', raw=True)
+ In [6]: %timeit roll.apply(f, engine="cython", raw=True)
3.92 s ± 59 ms per loop (mean ± std. dev. of 7 runs, 1 loop each)
.. _stats.rolling_window:
@@ -454,22 +454,22 @@ The list of recognized types are the `scipy.signal window functions
.. ipython:: python
ser = pd.Series(np.random.randn(10),
- index=pd.date_range('1/1/2000', periods=10))
+ index=pd.date_range("1/1/2000", periods=10))
- ser.rolling(window=5, win_type='triang').mean()
+ ser.rolling(window=5, win_type="triang").mean()
Note that the ``boxcar`` window is equivalent to :meth:`~Rolling.mean`.
.. ipython:: python
- ser.rolling(window=5, win_type='boxcar').mean()
+ ser.rolling(window=5, win_type="boxcar").mean()
ser.rolling(window=5).mean()
For some windowing functions, additional parameters must be specified:
.. ipython:: python
- ser.rolling(window=5, win_type='gaussian').mean(std=0.1)
+ ser.rolling(window=5, win_type="gaussian").mean(std=0.1)
.. _stats.moments.normalization:
@@ -498,10 +498,10 @@ This can be particularly useful for a non-regular time frequency index.
.. ipython:: python
- dft = pd.DataFrame({'B': [0, 1, 2, np.nan, 4]},
- index=pd.date_range('20130101 09:00:00',
+ dft = pd.DataFrame({"B": [0, 1, 2, np.nan, 4]},
+ index=pd.date_range("20130101 09:00:00",
periods=5,
- freq='s'))
+ freq="s"))
dft
This is a regular frequency index. Using an integer window parameter works to roll along the window frequency.
@@ -515,20 +515,24 @@ Specifying an offset allows a more intuitive specification of the rolling freque
.. ipython:: python
- dft.rolling('2s').sum()
+ dft.rolling("2s").sum()
Using a non-regular, but still monotonic index, rolling with an integer window does not impart any special calculation.
.. ipython:: python
- dft = pd.DataFrame({'B': [0, 1, 2, np.nan, 4]},
- index=pd.Index([pd.Timestamp('20130101 09:00:00'),
- pd.Timestamp('20130101 09:00:02'),
- pd.Timestamp('20130101 09:00:03'),
- pd.Timestamp('20130101 09:00:05'),
- pd.Timestamp('20130101 09:00:06')],
- name='foo'))
+ dft = pd.DataFrame({"B": [0, 1, 2, np.nan, 4]},
+ index=pd.Index(
+ [
+ pd.Timestamp("20130101 09:00:00"),
+ pd.Timestamp("20130101 09:00:02"),
+ pd.Timestamp("20130101 09:00:03"),
+ pd.Timestamp("20130101 09:00:05"),
+ pd.Timestamp("20130101 09:00:06")
+ ],
+ name="foo")
+ )
dft
dft.rolling(2).sum()
@@ -537,7 +541,7 @@ Using the time-specification generates variable windows for this sparse data.
.. ipython:: python
- dft.rolling('2s').sum()
+ dft.rolling("2s").sum()
Furthermore, we now allow an optional ``on`` parameter to specify a column (rather than the
default of the index) in a DataFrame.
@@ -546,7 +550,7 @@ default of the index) in a DataFrame.
dft = dft.reset_index()
dft
- dft.rolling('2s', on='foo').sum()
+ dft.rolling("2s", on="foo").sum()
.. _stats.custom_rolling_window:
@@ -569,7 +573,7 @@ For example, if we have the following ``DataFrame``:
use_expanding = [True, False, True, False, True]
use_expanding
- df = pd.DataFrame({'values': range(5)})
+ df = pd.DataFrame({"values": range(5)})
df
and we want to use an expanding window where ``use_expanding`` is ``True`` otherwise a window of size
@@ -615,7 +619,8 @@ rolling operations over a non-fixed offset like a ``BusinessDay``.
.. ipython:: python
from pandas.api.indexers import VariableOffsetWindowIndexer
- df = pd.DataFrame(range(10), index=pd.date_range('2020', periods=10))
+
+ df = pd.DataFrame(range(10), index=pd.date_range("2020", periods=10))
offset = pd.offsets.BDay(1)
indexer = VariableOffsetWindowIndexer(index=df.index, offset=offset)
df
@@ -631,6 +636,7 @@ forward-looking rolling window, and we can use it as follows:
.. ipython:: ipython
from pandas.api.indexers import FixedForwardWindowIndexer
+
indexer = FixedForwardWindowIndexer(window_size=2)
df.rolling(indexer, min_periods=1).sum()
@@ -657,17 +663,21 @@ from present information back to past information. This allows the rolling windo
.. ipython:: python
- df = pd.DataFrame({'x': 1},
- index=[pd.Timestamp('20130101 09:00:01'),
- pd.Timestamp('20130101 09:00:02'),
- pd.Timestamp('20130101 09:00:03'),
- pd.Timestamp('20130101 09:00:04'),
- pd.Timestamp('20130101 09:00:06')])
-
- df["right"] = df.rolling('2s', closed='right').x.sum() # default
- df["both"] = df.rolling('2s', closed='both').x.sum()
- df["left"] = df.rolling('2s', closed='left').x.sum()
- df["neither"] = df.rolling('2s', closed='neither').x.sum()
+ df = pd.DataFrame(
+ {"x": 1},
+ index=[
+ pd.Timestamp("20130101 09:00:01"),
+ pd.Timestamp("20130101 09:00:02"),
+ pd.Timestamp("20130101 09:00:03"),
+ pd.Timestamp("20130101 09:00:04"),
+ pd.Timestamp("20130101 09:00:06")
+ ]
+ )
+
+ df["right"] = df.rolling("2s", closed="right").x.sum() # default
+ df["both"] = df.rolling("2s", closed="both").x.sum()
+ df["left"] = df.rolling("2s", closed="left").x.sum()
+ df["neither"] = df.rolling("2s", closed="neither").x.sum()
df
@@ -746,12 +756,12 @@ For example:
.. ipython:: python
df = pd.DataFrame(np.random.randn(1000, 4),
- index=pd.date_range('1/1/2000', periods=1000),
- columns=['A', 'B', 'C', 'D'])
+ index=pd.date_range("1/1/2000", periods=1000),
+ columns=["A", "B", "C", "D"])
df = df.cumsum()
df2 = df[:20]
- df2.rolling(window=5).corr(df2['B'])
+ df2.rolling(window=5).corr(df2["B"])
.. _stats.moments.corr_pairwise:
@@ -776,14 +786,14 @@ can even be omitted:
.. ipython:: python
- covs = (df[['B', 'C', 'D']].rolling(window=50)
- .cov(df[['A', 'B', 'C']], pairwise=True))
- covs.loc['2002-09-22':]
+ covs = (df[["B", "C", "D"]].rolling(window=50)
+ .cov(df[["A", "B", "C"]], pairwise=True))
+ covs.loc["2002-09-22":]
.. ipython:: python
correls = df.rolling(window=50).corr()
- correls.loc['2002-09-22':]
+ correls.loc["2002-09-22":]
You can efficiently retrieve the time series of correlations between two
columns by reshaping and indexing:
@@ -791,12 +801,12 @@ columns by reshaping and indexing:
.. ipython:: python
:suppress:
- plt.close('all')
+ plt.close("all")
.. ipython:: python
@savefig rolling_corr_pairwise_ex.png
- correls.unstack(1)[('A', 'C')].plot()
+ correls.unstack(1)[("A", "C")].plot()
.. _stats.aggregate:
@@ -811,8 +821,8 @@ perform multiple computations on the data. These operations are similar to the :
.. ipython:: python
dfa = pd.DataFrame(np.random.randn(1000, 3),
- index=pd.date_range('1/1/2000', periods=1000),
- columns=['A', 'B', 'C'])
+ index=pd.date_range("1/1/2000", periods=1000),
+ columns=["A", "B", "C"])
r = dfa.rolling(window=60, min_periods=1)
r
@@ -823,9 +833,9 @@ Series (or multiple Series) via standard ``__getitem__``.
r.aggregate(np.sum)
- r['A'].aggregate(np.sum)
+ r["A"].aggregate(np.sum)
- r[['A', 'B']].aggregate(np.sum)
+ r[["A", "B"]].aggregate(np.sum)
As you can see, the result of the aggregation will have the selected columns, or all
columns if none are selected.
@@ -840,7 +850,7 @@ aggregation with, outputting a DataFrame:
.. ipython:: python
- r['A'].agg([np.sum, np.mean, np.std])
+ r["A"].agg([np.sum, np.mean, np.std])
On a windowed DataFrame, you can pass a list of functions to apply to each
column, which produces an aggregated result with a hierarchical index:
@@ -860,20 +870,20 @@ columns of a ``DataFrame``:
.. ipython:: python
- r.agg({'A': np.sum, 'B': lambda x: np.std(x, ddof=1)})
+ r.agg({"A": np.sum, "B": lambda x: np.std(x, ddof=1)})
The function names can also be strings. In order for a string to be valid it
must be implemented on the windowed object
.. ipython:: python
- r.agg({'A': 'sum', 'B': 'std'})
+ r.agg({"A": "sum", "B": "std"})
Furthermore you can pass a nested dict to indicate different aggregations on different columns.
.. ipython:: python
- r.agg({'A': ['sum', 'std'], 'B': ['mean', 'std']})
+ r.agg({"A": ["sum", "std"], "B": ["mean", "std"]})
.. _stats.moments.expanding:
@@ -967,7 +977,7 @@ all accept are:
sn.expanding().sum()
sn.cumsum()
- sn.cumsum().fillna(method='ffill')
+ sn.cumsum().fillna(method="ffill")
An expanding window statistic will be more stable (and less responsive) than
@@ -978,14 +988,14 @@ relative impact of an individual data point. As an example, here is the
.. ipython:: python
:suppress:
- plt.close('all')
+ plt.close("all")
.. ipython:: python
- s.plot(style='k--')
+ s.plot(style="k--")
@savefig expanding_mean_frame.png
- s.expanding().mean().plot(style='k')
+ s.expanding().mean().plot(style="k")
.. _stats.moments.exponentially_weighted:
@@ -1115,10 +1125,10 @@ of ``times``.
.. ipython:: python
- df = pd.DataFrame({'B': [0, 1, 2, np.nan, 4]})
+ df = pd.DataFrame({"B": [0, 1, 2, np.nan, 4]})
df
- times = ['2020-01-01', '2020-01-03', '2020-01-10', '2020-01-15', '2020-01-17']
- df.ewm(halflife='4 days', times=pd.DatetimeIndex(times)).mean()
+ times = ["2020-01-01", "2020-01-03", "2020-01-10", "2020-01-15", "2020-01-17"]
+ df.ewm(halflife="4 days", times=pd.DatetimeIndex(times)).mean()
The following formula is used to compute exponentially weighted mean with an input vector of times:
@@ -1130,10 +1140,10 @@ Here is an example for a univariate time series:
.. ipython:: python
- s.plot(style='k--')
+ s.plot(style="k--")
@savefig ewma_ex.png
- s.ewm(span=20).mean().plot(style='k')
+ s.ewm(span=20).mean().plot(style="k")
ExponentialMovingWindow has a ``min_periods`` argument, which has the same
meaning it does for all the ``.expanding`` and ``.rolling`` methods:
diff --git a/doc/source/user_guide/gotchas.rst b/doc/source/user_guide/gotchas.rst
index a96c70405d859..3023e47820a59 100644
--- a/doc/source/user_guide/gotchas.rst
+++ b/doc/source/user_guide/gotchas.rst
@@ -21,12 +21,12 @@ when calling :meth:`~DataFrame.info`:
.. ipython:: python
- dtypes = ['int64', 'float64', 'datetime64[ns]', 'timedelta64[ns]',
- 'complex128', 'object', 'bool']
+ dtypes = ["int64", "float64", "datetime64[ns]", "timedelta64[ns]",
+ "complex128", "object", "bool"]
n = 5000
data = {t: np.random.randint(100, size=n).astype(t) for t in dtypes}
df = pd.DataFrame(data)
- df['categorical'] = df['object'].astype('category')
+ df["categorical"] = df["object"].astype("category")
df.info()
@@ -40,7 +40,7 @@ as it can be expensive to do this deeper introspection.
.. ipython:: python
- df.info(memory_usage='deep')
+ df.info(memory_usage="deep")
By default the display option is set to ``True`` but can be explicitly
overridden by passing the ``memory_usage`` argument when invoking ``df.info()``.
@@ -155,9 +155,9 @@ index, not membership among the values.
.. ipython:: python
- s = pd.Series(range(5), index=list('abcde'))
+ s = pd.Series(range(5), index=list("abcde"))
2 in s
- 'b' in s
+ "b" in s
If this behavior is surprising, keep in mind that using ``in`` on a Python
dictionary tests keys, not values, and ``Series`` are dict-like.
@@ -206,11 +206,11 @@ arrays. For example:
.. ipython:: python
- s = pd.Series([1, 2, 3, 4, 5], index=list('abcde'))
+ s = pd.Series([1, 2, 3, 4, 5], index=list("abcde"))
s
s.dtype
- s2 = s.reindex(['a', 'b', 'c', 'f', 'u'])
+ s2 = s.reindex(["a", "b", "c", "f", "u"])
s2
s2.dtype
@@ -227,12 +227,12 @@ the nullable-integer extension dtypes provided by pandas
.. ipython:: python
- s_int = pd.Series([1, 2, 3, 4, 5], index=list('abcde'),
+ s_int = pd.Series([1, 2, 3, 4, 5], index=list("abcde"),
dtype=pd.Int64Dtype())
s_int
s_int.dtype
- s2_int = s_int.reindex(['a', 'b', 'c', 'f', 'u'])
+ s2_int = s_int.reindex(["a", "b", "c", "f", "u"])
s2_int
s2_int.dtype
@@ -334,7 +334,7 @@ constructors using something similar to the following:
.. ipython:: python
- x = np.array(list(range(10)), '>i4') # big endian
+ x = np.array(list(range(10)), ">i4") # big endian
newx = x.byteswap().newbyteorder() # force native byteorder
s = pd.Series(newx)
| …omputation.rst #36832
- [ ] closes #xxxx
- [ ] tests added / passed
- [ ] passes `black pandas`
- [ ] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [ ] whatsnew entry
| https://api.github.com/repos/pandas-dev/pandas/pulls/36835 | 2020-10-03T14:12:21Z | 2020-10-03T14:59:04Z | null | 2020-10-03T14:59:04Z |
Fix/empty string datetimelike conversion/issue 36550 | diff --git a/pandas/_libs/tslib.pyx b/pandas/_libs/tslib.pyx
index b1b38505b9476..b7229c8f56080 100644
--- a/pandas/_libs/tslib.pyx
+++ b/pandas/_libs/tslib.pyx
@@ -485,7 +485,16 @@ cpdef array_to_datetime(
# string
seen_string = True
- if len(val) == 0 or val in nat_strings:
+ if len(val) == 0:
+ if is_coerce:
+ iresult[i] = NPY_NAT
+ continue
+ elif is_ignore:
+ raise TypeError("Empty string is not a valid datetime")
+ else:
+ raise ValueError("Empty string is not a valid datetime")
+
+ if val in nat_strings:
iresult[i] = NPY_NAT
continue
@@ -710,7 +719,10 @@ cdef array_to_datetime_object(
# GH 25978. No need to parse NaT-like or datetime-like vals
oresult[i] = val
elif isinstance(val, str):
- if len(val) == 0 or val in nat_strings:
+ if len(val) == 0:
+ oresult[i] = val
+ continue
+ if val in nat_strings:
oresult[i] = 'NaT'
continue
try:
diff --git a/pandas/_libs/tslibs/conversion.pyx b/pandas/_libs/tslibs/conversion.pyx
index adf1dfbc1ac72..349c0b625de98 100644
--- a/pandas/_libs/tslibs/conversion.pyx
+++ b/pandas/_libs/tslibs/conversion.pyx
@@ -596,7 +596,9 @@ cdef _TSObject _convert_str_to_tsobject(object ts, tzinfo tz, str unit,
int out_local = 0, out_tzoffset = 0
bint do_parse_datetime_string = False
- if len(ts) == 0 or ts in nat_strings:
+ if len(ts) == 0:
+ raise ValueError("Empty string is not a valid timestamp")
+ elif ts in nat_strings:
ts = NaT
elif ts == 'now':
# Issue 9000, we short-circuit rather than going
diff --git a/pandas/_libs/tslibs/timedeltas.pyx b/pandas/_libs/tslibs/timedeltas.pyx
index ee32ed53a908b..1c06178363c43 100644
--- a/pandas/_libs/tslibs/timedeltas.pyx
+++ b/pandas/_libs/tslibs/timedeltas.pyx
@@ -294,7 +294,9 @@ cdef inline int64_t parse_timedelta_string(str ts) except? -1:
# have_value : track if we have at least 1 leading unit
# have_hhmmss : tracks if we have a regular format hh:mm:ss
- if len(ts) == 0 or ts in nat_strings:
+ if len(ts) == 0:
+ raise ValueError("Empty string is not a valid timedelta")
+ if ts in nat_strings:
return NPY_NAT
for c in ts:
diff --git a/pandas/core/indexes/base.py b/pandas/core/indexes/base.py
index 5f2b901844dad..5baadf237e264 100644
--- a/pandas/core/indexes/base.py
+++ b/pandas/core/indexes/base.py
@@ -2182,7 +2182,7 @@ def isna(self):
For datetimes, `NaT` (Not a Time) is considered as an NA value.
>>> idx = pd.DatetimeIndex([pd.Timestamp('1940-04-25'),
- ... pd.Timestamp(''), None, pd.NaT])
+ ... pd.Timestamp('NaT'), None, pd.NaT])
>>> idx
DatetimeIndex(['1940-04-25', 'NaT', 'NaT', 'NaT'],
dtype='datetime64[ns]', freq=None)
diff --git a/pandas/tests/groupby/test_apply.py b/pandas/tests/groupby/test_apply.py
index db5c4af9c6f53..212b45639d7c2 100644
--- a/pandas/tests/groupby/test_apply.py
+++ b/pandas/tests/groupby/test_apply.py
@@ -643,7 +643,7 @@ def predictions(tool):
{
"Key": ["B", "B", "A", "A"],
"State": ["step1", "step2", "step1", "step2"],
- "oTime": ["", "2016-09-19 05:24:33", "", "2016-09-19 23:59:04"],
+ "oTime": ["NaT", "2016-09-19 05:24:33", "NaT", "2016-09-19 23:59:04"],
"Machine": ["23", "36L", "36R", "36R"],
}
)
diff --git a/pandas/tests/io/parser/test_parse_dates.py b/pandas/tests/io/parser/test_parse_dates.py
index 662659982c0b3..c7f166726f397 100644
--- a/pandas/tests/io/parser/test_parse_dates.py
+++ b/pandas/tests/io/parser/test_parse_dates.py
@@ -1129,9 +1129,7 @@ def test_parse_dates_empty_string(all_parsers):
data = "Date,test\n2012-01-01,1\n,2"
result = parser.read_csv(StringIO(data), parse_dates=["Date"], na_filter=False)
- expected = DataFrame(
- [[datetime(2012, 1, 1), 1], [pd.NaT, 2]], columns=["Date", "test"]
- )
+ expected = DataFrame([[datetime(2012, 1, 1), 1], ["", 2]], columns=["Date", "test"])
tm.assert_frame_equal(result, expected)
diff --git a/pandas/tests/scalar/test_nat.py b/pandas/tests/scalar/test_nat.py
index 09d5d9c1677d0..91fac70b886f5 100644
--- a/pandas/tests/scalar/test_nat.py
+++ b/pandas/tests/scalar/test_nat.py
@@ -104,8 +104,13 @@ def test_identity(klass, value):
@pytest.mark.parametrize("klass", [Timestamp, Timedelta, Period])
@pytest.mark.parametrize("value", ["", "nat", "NAT", None, np.nan])
def test_equality(klass, value):
- if klass is Period and value == "":
- pytest.skip("Period cannot parse empty string")
+ if value == "":
+ if klass is Period and value == "":
+ pytest.skip("Period cannot parse empty string")
+ elif klass is Timedelta:
+ pytest.skip("Timedelta cannot parse empty string")
+ elif klass is Timestamp:
+ pytest.skip("Timestamp cannot parse empty string")
assert klass(value).value == iNaT
diff --git a/pandas/tests/scalar/timedelta/test_constructors.py b/pandas/tests/scalar/timedelta/test_constructors.py
index 23fb25b838da6..d48182076ccb2 100644
--- a/pandas/tests/scalar/timedelta/test_constructors.py
+++ b/pandas/tests/scalar/timedelta/test_constructors.py
@@ -131,6 +131,12 @@ def test_construction():
Timedelta("foo bar")
+def test_construction_empty_string():
+ # Issue #36550, empty string
+ with pytest.raises(ValueError):
+ Timedelta("")
+
+
@pytest.mark.parametrize(
"item",
list(
diff --git a/pandas/tests/scalar/timestamp/test_constructors.py b/pandas/tests/scalar/timestamp/test_constructors.py
index d1c3ad508d877..6aa6a5f88a22e 100644
--- a/pandas/tests/scalar/timestamp/test_constructors.py
+++ b/pandas/tests/scalar/timestamp/test_constructors.py
@@ -552,6 +552,10 @@ def test_constructor_fromisocalendar(self):
assert result == expected_stdlib
assert isinstance(result, Timestamp)
+ def test_constructior_empty_string(self):
+ with pytest.raises(ValueError):
+ Timestamp("")
+
def test_constructor_ambigous_dst():
# GH 24329
diff --git a/pandas/tests/series/methods/test_isin.py b/pandas/tests/series/methods/test_isin.py
index 3836c1d56bf87..9badc92c5b89d 100644
--- a/pandas/tests/series/methods/test_isin.py
+++ b/pandas/tests/series/methods/test_isin.py
@@ -80,3 +80,21 @@ def test_isin_empty(self, empty):
result = s.isin(empty)
tm.assert_series_equal(expected, result)
+
+ @pytest.mark.parametrize(
+ "values, in_list, expected",
+ [
+ ([""], ["", pd.Timedelta(0)], [True]),
+ (["", pd.Timedelta(0)], [""], [True, False]),
+ ([""], ["", pd.to_datetime("2020-01-01")], [True]),
+ (["", pd.to_datetime("2020-01-01")], [""], [True, False]),
+ ],
+ )
+ def test_empty_string_category(self, values, in_list, expected):
+ # Issue #36550
+ # Mixed empty string with datetimelike
+ s = pd.Series(values)
+ pd.testing.assert_series_equal(
+ s.isin(in_list),
+ pd.Series(expected),
+ )
diff --git a/pandas/tests/tools/test_to_datetime.py b/pandas/tests/tools/test_to_datetime.py
index 819474e1f32e7..c6231f40debaa 100644
--- a/pandas/tests/tools/test_to_datetime.py
+++ b/pandas/tests/tools/test_to_datetime.py
@@ -1090,6 +1090,16 @@ def test_to_datetime_fixed_offset(self):
result = to_datetime(dates)
assert result.tz == fixed_off
+ def test_to_datetime_empty_string(self):
+ with pytest.raises(ValueError):
+ pd.to_datetime("", errors="raise")
+
+ result = pd.to_datetime("", errors="ignore")
+ assert result == ""
+
+ result = pd.to_datetime("", errors="coerce")
+ assert result is pd.NaT
+
class TestToDatetimeUnit:
@pytest.mark.parametrize("cache", [True, False])
@@ -1574,11 +1584,11 @@ def test_to_datetime_with_apply(self, cache):
def test_to_datetime_types(self, cache):
# empty string
- result = to_datetime("", cache=cache)
- assert result is NaT
+ with pytest.raises(ValueError):
+ result = to_datetime("", cache=cache)
- result = to_datetime(["", ""], cache=cache)
- assert isna(result).all()
+ with pytest.raises(ValueError):
+ result = to_datetime(["", ""], cache=cache)
# ints
result = Timestamp(0)
diff --git a/pandas/tests/tools/test_to_timedelta.py b/pandas/tests/tools/test_to_timedelta.py
index f68d83f7f4d58..26a692970d581 100644
--- a/pandas/tests/tools/test_to_timedelta.py
+++ b/pandas/tests/tools/test_to_timedelta.py
@@ -4,15 +4,15 @@
import pytest
import pandas as pd
-from pandas import Series, TimedeltaIndex, isna, to_timedelta
+from pandas import Series, TimedeltaIndex, to_timedelta
import pandas._testing as tm
class TestTimedeltas:
def test_to_timedelta(self):
- result = to_timedelta(["", ""])
- assert isna(result).all()
+ with pytest.raises(ValueError):
+ to_timedelta(["", ""])
# pass thru
result = to_timedelta(np.array([np.timedelta64(1, "s")]))
| - [x] xref #36550
- [x] tests added / passed
- [x] passes `black pandas`
- [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [ ] whatsnew entry
# Empty string conversion
The actual comportment of Timedelta, Timestamp and to_datetime is to return NaT.
```python
import pandas as pd
pd.to_datetime("", errors="raise")
# NaT
pd.to_datetime("", errors="coerce")
# NaT
pd.to_datetime("", errors="ignore")
# NaT
pd.Timedelta("")
# NaT
pd.Timestamp("")
# NaT
```
I would propose to raise a ValueError instead, same as for string that are not datetimelike, as follow:
```python
import pandas as pd
import pytest
with pytest.raises(ValueError):
pd.to_datetime("", errors="raise")
pd.to_datetime("", errors="coerce")
# NaT
pd.to_datetime("", errors="ignore")
# ''
with pytest.raises(ValueError):
pd.Timedelta("")
with pytest.raises(ValueError):
pd.Timestamp("")
```
# Tests
I added the relevant test for the expected comportment.
I still will have to fix some test that rely on the current behaviors.
# Benchmark
I ran the following command
`asv continuous -E virtualenv -f 1.1 upstream/master HEAD -b "^tslibs.(timestamp|timedelta|tslib)"`
<details>
```
before after ratio
[8f6ec1e8] [4393cc9e]
<master> <fix/empty-string-datetimelike-conversion/issue-36550>
+ 173±1ns 202±10ns 1.17 tslibs.timestamp.TimestampProperties.time_is_leap_year(tzlocal(), 'B')
+ 162±2ns 188±4ns 1.16 tslibs.timestamp.TimestampProperties.time_is_quarter_start(tzlocal(), None)
+ 178±1ns 205±20ns 1.15 tslibs.timestamp.TimestampProperties.time_dayofweek(tzfile('/usr/share/zoneinfo/US/Central'), 'B')
+ 163±2ns 186±10ns 1.15 tslibs.timestamp.TimestampProperties.time_is_year_end(tzlocal(), None)
+ 162±1ns 185±10ns 1.15 tslibs.timestamp.TimestampProperties.time_is_year_end(tzfile('/usr/share/zoneinfo/US/Central'), None)
+ 162±2ns 185±10ns 1.14 tslibs.timestamp.TimestampProperties.time_is_year_end(<UTC>, None)
+ 169±2ns 193±10ns 1.14 tslibs.timestamp.TimestampProperties.time_is_quarter_end(tzlocal(), None)
+ 164±2ns 186±4ns 1.14 tslibs.timestamp.TimestampProperties.time_is_quarter_start(tzutc(), None)
+ 4.60±0.03μs 5.16±0.3μs 1.12 tslibs.timestamp.TimestampProperties.time_is_year_end(datetime.timezone(datetime.timedelta(seconds=3600)), 'B')
+ 147±1ns 163±4ns 1.11 tslibs.timestamp.TimestampProperties.time_microsecond(<DstTzInfo 'Europe/Amsterdam' LMT+0:20:00 STD>, 'B')
+ 170±2ns 188±7ns 1.11 tslibs.timestamp.TimestampProperties.time_is_quarter_end(tzfile('/usr/share/zoneinfo/US/Central'), None)
+ 179±0.7ns 197±10ns 1.10 tslibs.timestamp.TimestampProperties.time_dayofweek(datetime.timezone(datetime.timedelta(seconds=3600)), None)
+ 174±0.3ns 191±10ns 1.10 tslibs.timestamp.TimestampProperties.time_is_leap_year(<DstTzInfo 'Europe/Amsterdam' LMT+0:20:00 STD>, 'B')
SOME BENCHMARKS HAVE CHANGED SIGNIFICANTLY.
PERFORMANCE DECREASED.
```
</details>
| https://api.github.com/repos/pandas-dev/pandas/pulls/36834 | 2020-10-03T13:57:31Z | 2020-11-27T18:47:26Z | null | 2020-11-27T18:47:26Z |
REF: NDFrame describe | diff --git a/pandas/core/generic.py b/pandas/core/generic.py
index 24c1ae971686e..b0b60fea2bf27 100644
--- a/pandas/core/generic.py
+++ b/pandas/core/generic.py
@@ -55,11 +55,7 @@
from pandas.compat.numpy import function as nv
from pandas.errors import AbstractMethodError, InvalidIndexError
from pandas.util._decorators import doc, rewrite_axis_style_signature
-from pandas.util._validators import (
- validate_bool_kwarg,
- validate_fillna_kwargs,
- validate_percentile,
-)
+from pandas.util._validators import validate_bool_kwarg, validate_fillna_kwargs
from pandas.core.dtypes.common import (
ensure_int64,
@@ -109,11 +105,8 @@
from pandas.core.window import Expanding, ExponentialMovingWindow, Rolling, Window
from pandas.io.formats import format as fmt
-from pandas.io.formats.format import (
- DataFrameFormatter,
- DataFrameRenderer,
- format_percentiles,
-)
+from pandas.io.formats.describe import describe_ndframe
+from pandas.io.formats.format import DataFrameFormatter, DataFrameRenderer
from pandas.io.formats.printing import pprint_thing
if TYPE_CHECKING:
@@ -10237,145 +10230,13 @@ def describe(
75% NaN 2.5
max NaN 3.0
"""
- if self.ndim == 2 and self.columns.size == 0:
- raise ValueError("Cannot describe a DataFrame without columns")
-
- if percentiles is not None:
- # explicit conversion of `percentiles` to list
- percentiles = list(percentiles)
-
- # get them all to be in [0, 1]
- validate_percentile(percentiles)
-
- # median should always be included
- if 0.5 not in percentiles:
- percentiles.append(0.5)
- percentiles = np.asarray(percentiles)
- else:
- percentiles = np.array([0.25, 0.5, 0.75])
-
- # sort and check for duplicates
- unique_pcts = np.unique(percentiles)
- if len(unique_pcts) < len(percentiles):
- raise ValueError("percentiles cannot contain duplicates")
- percentiles = unique_pcts
-
- formatted_percentiles = format_percentiles(percentiles)
-
- def describe_numeric_1d(series) -> "Series":
- stat_index = (
- ["count", "mean", "std", "min"] + formatted_percentiles + ["max"]
- )
- d = (
- [series.count(), series.mean(), series.std(), series.min()]
- + series.quantile(percentiles).tolist()
- + [series.max()]
- )
- return pd.Series(d, index=stat_index, name=series.name)
-
- def describe_categorical_1d(data) -> "Series":
- names = ["count", "unique"]
- objcounts = data.value_counts()
- count_unique = len(objcounts[objcounts != 0])
- result = [data.count(), count_unique]
- dtype = None
- if result[1] > 0:
- top, freq = objcounts.index[0], objcounts.iloc[0]
- if is_datetime64_any_dtype(data.dtype):
- if self.ndim == 1:
- stacklevel = 4
- else:
- stacklevel = 5
- warnings.warn(
- "Treating datetime data as categorical rather than numeric in "
- "`.describe` is deprecated and will be removed in a future "
- "version of pandas. Specify `datetime_is_numeric=True` to "
- "silence this warning and adopt the future behavior now.",
- FutureWarning,
- stacklevel=stacklevel,
- )
- tz = data.dt.tz
- asint = data.dropna().values.view("i8")
- top = Timestamp(top)
- if top.tzinfo is not None and tz is not None:
- # Don't tz_localize(None) if key is already tz-aware
- top = top.tz_convert(tz)
- else:
- top = top.tz_localize(tz)
- names += ["top", "freq", "first", "last"]
- result += [
- top,
- freq,
- Timestamp(asint.min(), tz=tz),
- Timestamp(asint.max(), tz=tz),
- ]
- else:
- names += ["top", "freq"]
- result += [top, freq]
-
- # If the DataFrame is empty, set 'top' and 'freq' to None
- # to maintain output shape consistency
- else:
- names += ["top", "freq"]
- result += [np.nan, np.nan]
- dtype = "object"
-
- return pd.Series(result, index=names, name=data.name, dtype=dtype)
-
- def describe_timestamp_1d(data) -> "Series":
- # GH-30164
- stat_index = ["count", "mean", "min"] + formatted_percentiles + ["max"]
- d = (
- [data.count(), data.mean(), data.min()]
- + data.quantile(percentiles).tolist()
- + [data.max()]
- )
- return pd.Series(d, index=stat_index, name=data.name)
-
- def describe_1d(data) -> "Series":
- if is_bool_dtype(data.dtype):
- return describe_categorical_1d(data)
- elif is_numeric_dtype(data):
- return describe_numeric_1d(data)
- elif is_datetime64_any_dtype(data.dtype) and datetime_is_numeric:
- return describe_timestamp_1d(data)
- elif is_timedelta64_dtype(data.dtype):
- return describe_numeric_1d(data)
- else:
- return describe_categorical_1d(data)
-
- if self.ndim == 1:
- # Incompatible return value type
- # (got "Series", expected "FrameOrSeries") [return-value]
- return describe_1d(self) # type:ignore[return-value]
- elif (include is None) and (exclude is None):
- # when some numerics are found, keep only numerics
- default_include = [np.number]
- if datetime_is_numeric:
- default_include.append("datetime")
- data = self.select_dtypes(include=default_include)
- if len(data.columns) == 0:
- data = self
- elif include == "all":
- if exclude is not None:
- msg = "exclude must be None when include is 'all'"
- raise ValueError(msg)
- data = self
- else:
- data = self.select_dtypes(include=include, exclude=exclude)
-
- ldesc = [describe_1d(s) for _, s in data.items()]
- # set a convenient order for rows
- names: List[Label] = []
- ldesc_indexes = sorted((x.index for x in ldesc), key=len)
- for idxnames in ldesc_indexes:
- for name in idxnames:
- if name not in names:
- names.append(name)
-
- d = pd.concat([x.reindex(names, copy=False) for x in ldesc], axis=1, sort=False)
- d.columns = data.columns.copy()
- return d
+ return describe_ndframe(
+ data=self,
+ include=include,
+ exclude=exclude,
+ datetime_is_numeric=datetime_is_numeric,
+ percentiles=percentiles,
+ )
@final
def pct_change(
diff --git a/pandas/io/formats/describe.py b/pandas/io/formats/describe.py
new file mode 100644
index 0000000000000..d9fa236c54899
--- /dev/null
+++ b/pandas/io/formats/describe.py
@@ -0,0 +1,466 @@
+"""Module responsible for execution of NDFrame.describe() method.
+
+Method NDFrame.describe() delegates actual execution to function describe_ndframe().
+
+Strategy pattern is utilized.
+ - The appropriate strategy is selected based on the series datatype.
+ - The strategy is responsible for running proper description.
+"""
+
+from abc import ABC, abstractmethod
+from typing import (
+ TYPE_CHECKING,
+ Any,
+ List,
+ Optional,
+ Sequence,
+ Tuple,
+ Type,
+ Union,
+ cast,
+)
+import warnings
+
+import numpy as np
+
+from pandas._libs.tslibs import Timestamp
+from pandas._typing import Dtype, FrameOrSeries, FrameOrSeriesUnion, Label
+from pandas.util._validators import validate_percentile
+
+from pandas.core.dtypes.common import (
+ is_bool_dtype,
+ is_datetime64_any_dtype,
+ is_numeric_dtype,
+ is_timedelta64_dtype,
+)
+
+from pandas.core.reshape.concat import concat
+
+from pandas.io.formats.format import format_percentiles
+
+if TYPE_CHECKING:
+ from pandas import DataFrame, Series
+
+
+def describe_ndframe(
+ *,
+ data: FrameOrSeries,
+ include: Optional[Union[str, Sequence[str]]],
+ exclude: Optional[Union[str, Sequence[str]]],
+ datetime_is_numeric: bool,
+ percentiles: Optional[Sequence[float]],
+) -> FrameOrSeries:
+ """Describe series or dataframe.
+
+ Called from pandas.core.generic.NDFrame.describe()
+
+ Parameters
+ ----------
+ data : FrameOrSeries
+ Either dataframe or series.
+ include : 'all', list-like of dtypes or None (default), optional
+ A white list of data types to include in the result. Ignored for ``Series``.
+ exclude : list-like of dtypes or None (default), optional,
+ A black list of data types to omit from the result. Ignored for ``Series``.
+ datetime_is_numeric : bool, default False
+ Whether to treat datetime dtypes as numeric.
+ percentiles : list-like of numbers, optional
+ The percentiles to include in the output. All should fall between 0 and 1.
+ The default is ``[.25, .5, .75]``, which returns the 25th, 50th, and
+ 75th percentiles.
+
+ Returns
+ -------
+ FrameOrSeries
+ Dataframe or series description.
+ """
+ describer: "NDFrameDescriber"
+ if data.ndim == 1:
+ series = cast("Series", data)
+ describer = SeriesDescriber(
+ data=series,
+ datetime_is_numeric=datetime_is_numeric,
+ )
+ else:
+ dataframe = cast("DataFrame", data)
+ describer = DataFrameDescriber(
+ data=dataframe,
+ include=include,
+ exclude=exclude,
+ datetime_is_numeric=datetime_is_numeric,
+ )
+ result = describer.describe(percentiles)
+ return cast(FrameOrSeries, result)
+
+
+class StrategyCreatorMixin:
+ """Mixin for creating instance of appropriate strategy for describing series."""
+
+ datetime_is_numeric: bool
+
+ def create_strategy(
+ self,
+ series: "Series",
+ percentiles: Optional[Sequence[float]],
+ ) -> "StrategyAbstract":
+ """Create strategy instance for description."""
+ klass = self._select_strategy(series)
+ return klass(series, percentiles)
+
+ def _select_strategy(self, series: "Series") -> Type["StrategyAbstract"]:
+ """Select strategy for description."""
+ strategy: Type[StrategyAbstract] = CategoricalStrategy
+ if is_bool_dtype(series.dtype):
+ strategy = CategoricalStrategy
+ elif is_numeric_dtype(series):
+ strategy = NumericStrategy
+ elif is_datetime64_any_dtype(series.dtype) and self.datetime_is_numeric:
+ strategy = TimestampStrategy
+ elif is_timedelta64_dtype(series.dtype):
+ strategy = NumericStrategy
+
+ if strategy == CategoricalStrategy and is_datetime64_any_dtype(series.dtype):
+ strategy = TimestampAsCategoricalStrategy
+ warnings.warn(
+ "Treating datetime data as categorical rather than numeric in "
+ "`.describe` is deprecated and will be removed in a future "
+ "version of pandas. Specify `datetime_is_numeric=True` to "
+ "silence this warning and adopt the future behavior now.",
+ FutureWarning,
+ stacklevel=6,
+ )
+ return strategy
+
+
+class NDFrameDescriber(ABC):
+ """Abstract class for describing dataframe or series."""
+
+ @abstractmethod
+ def describe(self, percentiles: Optional[Sequence[float]]) -> FrameOrSeriesUnion:
+ """Do describe either series or dataframe.
+
+ Parameters
+ ----------
+ percentiles : list-like of numbers, optional
+ The percentiles to include in the output. All should fall between 0 and 1.
+ The default is ``[.25, .5, .75]``, which returns the 25th, 50th, and
+ 75th percentiles.
+ """
+
+
+class SeriesDescriber(NDFrameDescriber, StrategyCreatorMixin):
+ """Class responsible for creating series description.
+
+ Parameters
+ ----------
+ data : Series
+ Series to be described.
+ datetime_is_numeric : bool, default False
+ Whether to treat datetime dtypes as numeric.
+ """
+
+ def __init__(
+ self,
+ *,
+ data: "Series",
+ datetime_is_numeric: bool,
+ ):
+ self.data = data
+ self.datetime_is_numeric = datetime_is_numeric
+
+ def describe(self, percentiles: Optional[Sequence[float]]) -> "Series":
+ """Do describe series."""
+ strategy = self.create_strategy(self.data, percentiles)
+ result = strategy.describe()
+ return result
+
+
+class DataFrameDescriber(NDFrameDescriber, StrategyCreatorMixin):
+ """Class responsible for creating dataframe description.
+
+ Parameters
+ ----------
+ data : DataFrame
+ Dataframe to be described.
+ include : 'all', list-like of dtypes or None (default), optional
+ A white list of data types to include in the result.
+ exclude : list-like of dtypes or None (default), optional,
+ A black list of data types to omit from the result.
+ datetime_is_numeric : bool, default False
+ Whether to treat datetime dtypes as numeric.
+ """
+
+ def __init__(
+ self,
+ *,
+ data: "DataFrame",
+ include: Optional[Union[str, Sequence[str]]],
+ exclude: Optional[Union[str, Sequence[str]]],
+ datetime_is_numeric: bool,
+ ):
+ self.include = include
+ self.exclude = exclude
+ self.datetime_is_numeric = datetime_is_numeric
+ self.data: "DataFrame" = self._initialize_data(data)
+
+ def describe(self, percentiles: Optional[Sequence[float]]) -> "DataFrame":
+ """Do describe dataframe."""
+ ldesc: List["Series"] = []
+ for _, series in self.data.items():
+ strategy = self.create_strategy(series, percentiles)
+ ldesc.append(strategy.describe())
+
+ df = concat(
+ self._reindex_columns(ldesc),
+ axis=1,
+ sort=False,
+ )
+ df.columns = self.data.columns.copy()
+ return cast("DataFrame", df)
+
+ def _reindex_columns(self, column_data) -> List["Series"]:
+ """Set a convenient order for rows."""
+ names: List[Label] = []
+ ldesc_indexes = sorted((x.index for x in column_data), key=len)
+ for idxnames in ldesc_indexes:
+ for name in idxnames:
+ if name not in names:
+ names.append(name)
+ return [x.reindex(names, copy=False) for x in column_data]
+
+ def _initialize_data(self, data: "DataFrame") -> "DataFrame":
+ _validate_dframe_size(data)
+
+ if self.include is None and self.exclude is None:
+ return self._extract_numeric_data(data)
+
+ if self.include == "all":
+ if self.exclude is not None:
+ msg = "exclude must be None when include is 'all'"
+ raise ValueError(msg)
+ return data
+
+ return data.select_dtypes(include=self.include, exclude=self.exclude)
+
+ def _extract_numeric_data(self, data: "DataFrame") -> "DataFrame":
+ """When some numerics are found, keep only numerics."""
+ include = [np.number]
+ if self.datetime_is_numeric:
+ include.append("datetime")
+ numeric_only = data.select_dtypes(include=include)
+ if len(numeric_only.columns) == 0:
+ return data
+ else:
+ return numeric_only
+
+
+class StrategyAbstract(ABC):
+ """Abstract strategy for describing series."""
+
+ def __init__(
+ self,
+ data: "Series",
+ percentiles: Optional[Sequence[float]],
+ ):
+ self.data = data
+ self.percentiles = self._initialize_percentiles(percentiles)
+
+ def describe(self) -> "Series":
+ """Describe series."""
+ from pandas.core.series import Series
+
+ return Series(
+ self.array,
+ index=self.names,
+ name=self.data.name,
+ dtype=self.dtype,
+ )
+
+ @property
+ @abstractmethod
+ def array(self) -> List[object]:
+ """Series data."""
+
+ @property
+ @abstractmethod
+ def names(self) -> List[str]:
+ """Series index names."""
+
+ @property
+ @abstractmethod
+ def dtype(self) -> Optional[Dtype]:
+ """Series dtype."""
+
+ @property
+ def formatted_percentiles(self) -> List[str]:
+ """Percentiles formatted as strings, rounded."""
+ return format_percentiles(self.percentiles)
+
+ @staticmethod
+ def _initialize_percentiles(
+ percentiles: Optional[Sequence[float]],
+ ) -> Sequence[float]:
+ if percentiles is None:
+ return np.array([0.25, 0.5, 0.75])
+
+ # explicit conversion of `percentiles` to list
+ percentiles = list(percentiles)
+
+ # get them all to be in [0, 1]
+ validate_percentile(percentiles)
+
+ # median should always be included
+ if 0.5 not in percentiles:
+ percentiles.append(0.5)
+ percentiles = np.asarray(percentiles)
+
+ # sort and check for duplicates
+ unique_pcts = np.unique(percentiles)
+ assert percentiles is not None
+ if len(unique_pcts) < len(percentiles):
+ raise ValueError("percentiles cannot contain duplicates")
+ return unique_pcts
+
+
+class CategoricalStrategy(StrategyAbstract):
+ """Strategy for series with categorical values."""
+
+ def __init__(self, data, percentiles):
+ self.data = data
+ super().__init__(data, percentiles)
+ self.objcounts = self.data.value_counts()
+
+ @property
+ def array(self) -> List[object]:
+ top, freq = self._get_top_and_freq()
+ return [
+ self.count,
+ self.count_unique,
+ top,
+ freq,
+ ]
+
+ @property
+ def names(self) -> List[str]:
+ return ["count", "unique", "top", "freq"]
+
+ @property
+ def dtype(self) -> Optional[Dtype]:
+ if self.count_unique == 0:
+ return "object"
+ return None
+
+ @property
+ def count(self) -> "Series":
+ return self.data.count()
+
+ @property
+ def count_unique(self) -> int:
+ return len(self.objcounts[self.objcounts != 0])
+
+ def _get_top_and_freq(self) -> Tuple[Any, Any]:
+ if self.count_unique > 0:
+ return self.objcounts.index[0], self.objcounts.iloc[0]
+ return np.nan, np.nan
+
+
+class TimestampAsCategoricalStrategy(CategoricalStrategy):
+ """Strategy for series with timestamp values treated as categorical values."""
+
+ @property
+ def array(self) -> List[object]:
+ result = [self.count, self.count_unique]
+ if self.count_unique > 0:
+ top, freq = self.objcounts.index[0], self.objcounts.iloc[0]
+ tz = self.data.dt.tz
+ asint = self.data.dropna().values.view("i8")
+ top = Timestamp(top)
+ if top.tzinfo is not None and tz is not None:
+ # Don't tz_localize(None) if key is already tz-aware
+ top = top.tz_convert(tz)
+ else:
+ top = top.tz_localize(tz)
+
+ result += [
+ top,
+ freq,
+ Timestamp(asint.min(), tz=tz),
+ Timestamp(asint.max(), tz=tz),
+ ]
+
+ # If the DataFrame is empty, set 'top' and 'freq' to None
+ # to maintain output shape consistency
+ else:
+ result += [np.nan, np.nan]
+ return result
+
+ @property
+ def names(self) -> List[str]:
+ names = ["count", "unique"]
+ if self.count_unique > 0:
+ names += ["top", "freq", "first", "last"]
+ return names
+
+
+class NumericStrategy(StrategyAbstract):
+ """Strategy for series with numeric values."""
+
+ @property
+ def array(self) -> List[object]:
+ return [
+ self.data.count(),
+ self.data.mean(),
+ self.data.std(),
+ self.data.min(),
+ *self.data.quantile(self.percentiles).tolist(),
+ self.data.max(),
+ ]
+
+ @property
+ def names(self) -> List[str]:
+ return [
+ "count",
+ "mean",
+ "std",
+ "min",
+ *self.formatted_percentiles,
+ "max",
+ ]
+
+ @property
+ def dtype(self) -> Optional[Dtype]:
+ return None
+
+
+class TimestampStrategy(StrategyAbstract):
+ """Strategy for series with timestamp values."""
+
+ @property
+ def array(self) -> List[object]:
+ return [
+ self.data.count(),
+ self.data.mean(),
+ self.data.min(),
+ *self.data.quantile(self.percentiles).tolist(),
+ self.data.max(),
+ ]
+
+ @property
+ def names(self) -> List[str]:
+ return [
+ "count",
+ "mean",
+ "min",
+ *self.formatted_percentiles,
+ "max",
+ ]
+
+ @property
+ def dtype(self) -> Optional[Dtype]:
+ return None
+
+
+def _validate_dframe_size(df: FrameOrSeriesUnion) -> None:
+ """Validate correct size of dataframe."""
+ if df.ndim == 2 and df.columns.size == 0:
+ raise ValueError("Cannot describe a DataFrame without columns")
diff --git a/pandas/tests/frame/methods/test_describe.py b/pandas/tests/frame/methods/test_describe.py
index f77b7cd4a6c3b..0b7da5e862fd4 100644
--- a/pandas/tests/frame/methods/test_describe.py
+++ b/pandas/tests/frame/methods/test_describe.py
@@ -1,4 +1,5 @@
import numpy as np
+import pytest
import pandas as pd
from pandas import Categorical, DataFrame, Series, Timestamp, date_range
@@ -332,6 +333,16 @@ def test_describe_tz_values2(self):
result = df.describe(include="all")
tm.assert_frame_equal(result, expected)
+ @pytest.mark.parametrize("exclude", ["x", "y", ["x", "y"], ["x", "z"]])
+ def test_describe_when_include_all_exclude_not_allowed(self, exclude):
+ """
+ When include is 'all', then setting exclude != None is not allowed.
+ """
+ df = DataFrame({"x": [1], "y": [2], "z": [3]})
+ msg = "exclude must be None when include is 'all'"
+ with pytest.raises(ValueError, match=msg):
+ df.describe(include="all", exclude=exclude)
+
def test_describe_percentiles_integer_idx(self):
# GH#26660
df = DataFrame({"x": [1]})
| - [ ] closes #xxxx
- [x] tests added / passed
- [x] passes `black pandas`
- [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [ ] whatsnew entry
Refactor ``NDFrame.describe`` method.
- Create module ``pandas/io/formats/describe.py``
- Delegate ``NDFrame.describe`` to function ``describe_ndframe`` in the new module
- Implement polymorphism for describing series and dataframe
- Implement strategy pattern for describing series of different kinds (categorical, numeric, timestamp)
Benefits:
- Reduce complexity in ``pandas/core/generic.py``
- Straightforward logic how to treat each datatype (reduced if/elif/else workflow)
- Enable potential for further extension | https://api.github.com/repos/pandas-dev/pandas/pulls/36833 | 2020-10-03T12:31:44Z | 2020-12-29T20:46:38Z | null | 2021-01-11T12:27:21Z |
DOC: update code style for remaining intro tutorial docs for #36777 computation.rst | diff --git a/doc/source/user_guide/computation.rst b/doc/source/user_guide/computation.rst
index e7edda90610b5..0f323f7796433 100644
--- a/doc/source/user_guide/computation.rst
+++ b/doc/source/user_guide/computation.rst
@@ -64,7 +64,7 @@ series in the DataFrame, also excluding NA/null values.
.. ipython:: python
frame = pd.DataFrame(np.random.randn(1000, 5),
- columns=['a', 'b', 'c', 'd', 'e'])
+ columns=["a", "b", "c", "d", "e"])
frame.cov()
``DataFrame.cov`` also supports an optional ``min_periods`` keyword that
@@ -73,9 +73,9 @@ in order to have a valid result.
.. ipython:: python
- frame = pd.DataFrame(np.random.randn(20, 3), columns=['a', 'b', 'c'])
- frame.loc[frame.index[:5], 'a'] = np.nan
- frame.loc[frame.index[5:10], 'b'] = np.nan
+ frame = pd.DataFrame(np.random.randn(20, 3), columns=["a", "b", "c"])
+ frame.loc[frame.index[:5], "a"] = np.nan
+ frame.loc[frame.index[5:10], "b"] = np.nan
frame.cov()
@@ -117,12 +117,12 @@ Wikipedia has articles covering the above correlation coefficients:
.. ipython:: python
frame = pd.DataFrame(np.random.randn(1000, 5),
- columns=['a', 'b', 'c', 'd', 'e'])
+ columns=["a", "b", "c", "d", "e"])
frame.iloc[::2] = np.nan
# Series with Series
- frame['a'].corr(frame['b'])
- frame['a'].corr(frame['b'], method='spearman')
+ frame["a"].corr(frame["b"])
+ frame["a"].corr(frame["b"], method="spearman")
# Pairwise correlation of DataFrame columns
frame.corr()
@@ -134,9 +134,9 @@ Like ``cov``, ``corr`` also supports the optional ``min_periods`` keyword:
.. ipython:: python
- frame = pd.DataFrame(np.random.randn(20, 3), columns=['a', 'b', 'c'])
- frame.loc[frame.index[:5], 'a'] = np.nan
- frame.loc[frame.index[5:10], 'b'] = np.nan
+ frame = pd.DataFrame(np.random.randn(20, 3), columns=["a", "b", "c"])
+ frame.loc[frame.index[:5], "a"] = np.nan
+ frame.loc[frame.index[5:10], "b"] = np.nan
frame.corr()
@@ -165,8 +165,8 @@ DataFrame objects.
.. ipython:: python
- index = ['a', 'b', 'c', 'd', 'e']
- columns = ['one', 'two', 'three', 'four']
+ index = ["a", "b", "c", "d", "e"]
+ columns = ["one", "two", "three", "four"]
df1 = pd.DataFrame(np.random.randn(5, 4), index=index, columns=columns)
df2 = pd.DataFrame(np.random.randn(4, 4), index=index[:4], columns=columns)
df1.corrwith(df2)
@@ -182,8 +182,8 @@ assigned the mean of the ranks (by default) for the group:
.. ipython:: python
- s = pd.Series(np.random.randn(5), index=list('abcde'))
- s['d'] = s['b'] # so there's a tie
+ s = pd.Series(np.random.randn(5), index=list("abcde"))
+ s["d"] = s["b"] # so there's a tie
s.rank()
:meth:`~DataFrame.rank` is also a DataFrame method and can rank either the rows
@@ -244,7 +244,7 @@ objects, :class:`~pandas.core.window.Rolling`, :class:`~pandas.core.window.Expan
.. ipython:: python
s = pd.Series(np.random.randn(1000),
- index=pd.date_range('1/1/2000', periods=1000))
+ index=pd.date_range("1/1/2000", periods=1000))
s = s.cumsum()
s
@@ -279,15 +279,15 @@ We can then call methods on these ``rolling`` objects. These return like-indexed
.. ipython:: python
- s.plot(style='k--')
+ s.plot(style="k--")
@savefig rolling_mean_ex.png
- r.mean().plot(style='k')
+ r.mean().plot(style="k")
.. ipython:: python
:suppress:
- plt.close('all')
+ plt.close("all")
They can also be applied to DataFrame objects. This is really just syntactic
sugar for applying the moving window operator to all of the DataFrame's columns:
@@ -295,8 +295,8 @@ sugar for applying the moving window operator to all of the DataFrame's columns:
.. ipython:: python
df = pd.DataFrame(np.random.randn(1000, 4),
- index=pd.date_range('1/1/2000', periods=1000),
- columns=['A', 'B', 'C', 'D'])
+ index=pd.date_range("1/1/2000", periods=1000),
+ columns=["A", "B", "C", "D"])
df = df.cumsum()
@savefig rolling_mean_frame.png
@@ -368,7 +368,7 @@ compute the mean absolute deviation on a rolling basis:
return np.fabs(x - x.mean()).mean()
@savefig rolling_apply_ex.png
- s.rolling(window=60).apply(mad, raw=True).plot(style='k')
+ s.rolling(window=60).apply(mad, raw=True).plot(style="k")
Using the Numba engine
~~~~~~~~~~~~~~~~~~~~~~
@@ -377,7 +377,7 @@ Using the Numba engine
Additionally, :meth:`~Rolling.apply` can leverage `Numba <https://numba.pydata.org/>`__
if installed as an optional dependency. The apply aggregation can be executed using Numba by specifying
-``engine='numba'`` and ``engine_kwargs`` arguments (``raw`` must also be set to ``True``).
+``engine="numba"`` and ``engine_kwargs`` arguments (``raw`` must also be set to ``True``).
Numba will be applied in potentially two routines:
1. If ``func`` is a standard Python function, the engine will `JIT <https://numba.pydata.org/numba-doc/latest/user/overview.html>`__
@@ -407,13 +407,13 @@ and their default values are set to ``False``, ``True`` and ``False`` respective
In [3]: def f(x):
...: return np.sum(x) + 5
# Run the first time, compilation time will affect performance
- In [4]: %timeit -r 1 -n 1 roll.apply(f, engine='numba', raw=True) # noqa: E225
+ In [4]: %timeit -r 1 -n 1 roll.apply(f, engine="numba", raw=True) # noqa: E225
1.23 s ± 0 ns per loop (mean ± std. dev. of 1 run, 1 loop each)
# Function is cached and performance will improve
- In [5]: %timeit roll.apply(f, engine='numba', raw=True)
+ In [5]: %timeit roll.apply(f, engine="numba", raw=True)
188 ms ± 1.93 ms per loop (mean ± std. dev. of 7 runs, 10 loops each)
- In [6]: %timeit roll.apply(f, engine='cython', raw=True)
+ In [6]: %timeit roll.apply(f, engine="cython", raw=True)
3.92 s ± 59 ms per loop (mean ± std. dev. of 7 runs, 1 loop each)
.. _stats.rolling_window:
@@ -454,22 +454,22 @@ The list of recognized types are the `scipy.signal window functions
.. ipython:: python
ser = pd.Series(np.random.randn(10),
- index=pd.date_range('1/1/2000', periods=10))
+ index=pd.date_range("1/1/2000", periods=10))
- ser.rolling(window=5, win_type='triang').mean()
+ ser.rolling(window=5, win_type="triang").mean()
Note that the ``boxcar`` window is equivalent to :meth:`~Rolling.mean`.
.. ipython:: python
- ser.rolling(window=5, win_type='boxcar').mean()
+ ser.rolling(window=5, win_type="boxcar").mean()
ser.rolling(window=5).mean()
For some windowing functions, additional parameters must be specified:
.. ipython:: python
- ser.rolling(window=5, win_type='gaussian').mean(std=0.1)
+ ser.rolling(window=5, win_type="gaussian").mean(std=0.1)
.. _stats.moments.normalization:
@@ -498,10 +498,10 @@ This can be particularly useful for a non-regular time frequency index.
.. ipython:: python
- dft = pd.DataFrame({'B': [0, 1, 2, np.nan, 4]},
- index=pd.date_range('20130101 09:00:00',
+ dft = pd.DataFrame({"B": [0, 1, 2, np.nan, 4]},
+ index=pd.date_range("20130101 09:00:00",
periods=5,
- freq='s'))
+ freq="s"))
dft
This is a regular frequency index. Using an integer window parameter works to roll along the window frequency.
@@ -515,20 +515,24 @@ Specifying an offset allows a more intuitive specification of the rolling freque
.. ipython:: python
- dft.rolling('2s').sum()
+ dft.rolling("2s").sum()
Using a non-regular, but still monotonic index, rolling with an integer window does not impart any special calculation.
.. ipython:: python
- dft = pd.DataFrame({'B': [0, 1, 2, np.nan, 4]},
- index=pd.Index([pd.Timestamp('20130101 09:00:00'),
- pd.Timestamp('20130101 09:00:02'),
- pd.Timestamp('20130101 09:00:03'),
- pd.Timestamp('20130101 09:00:05'),
- pd.Timestamp('20130101 09:00:06')],
- name='foo'))
+ dft = pd.DataFrame({"B": [0, 1, 2, np.nan, 4]},
+ index=pd.Index(
+ [
+ pd.Timestamp("20130101 09:00:00"),
+ pd.Timestamp("20130101 09:00:02"),
+ pd.Timestamp("20130101 09:00:03"),
+ pd.Timestamp("20130101 09:00:05"),
+ pd.Timestamp("20130101 09:00:06")
+ ],
+ name="foo")
+ )
dft
dft.rolling(2).sum()
@@ -537,7 +541,7 @@ Using the time-specification generates variable windows for this sparse data.
.. ipython:: python
- dft.rolling('2s').sum()
+ dft.rolling("2s").sum()
Furthermore, we now allow an optional ``on`` parameter to specify a column (rather than the
default of the index) in a DataFrame.
@@ -546,7 +550,7 @@ default of the index) in a DataFrame.
dft = dft.reset_index()
dft
- dft.rolling('2s', on='foo').sum()
+ dft.rolling("2s", on="foo").sum()
.. _stats.custom_rolling_window:
@@ -569,7 +573,7 @@ For example, if we have the following ``DataFrame``:
use_expanding = [True, False, True, False, True]
use_expanding
- df = pd.DataFrame({'values': range(5)})
+ df = pd.DataFrame({"values": range(5)})
df
and we want to use an expanding window where ``use_expanding`` is ``True`` otherwise a window of size
@@ -615,7 +619,8 @@ rolling operations over a non-fixed offset like a ``BusinessDay``.
.. ipython:: python
from pandas.api.indexers import VariableOffsetWindowIndexer
- df = pd.DataFrame(range(10), index=pd.date_range('2020', periods=10))
+
+ df = pd.DataFrame(range(10), index=pd.date_range("2020", periods=10))
offset = pd.offsets.BDay(1)
indexer = VariableOffsetWindowIndexer(index=df.index, offset=offset)
df
@@ -631,6 +636,7 @@ forward-looking rolling window, and we can use it as follows:
.. ipython:: ipython
from pandas.api.indexers import FixedForwardWindowIndexer
+
indexer = FixedForwardWindowIndexer(window_size=2)
df.rolling(indexer, min_periods=1).sum()
@@ -657,17 +663,17 @@ from present information back to past information. This allows the rolling windo
.. ipython:: python
- df = pd.DataFrame({'x': 1},
- index=[pd.Timestamp('20130101 09:00:01'),
- pd.Timestamp('20130101 09:00:02'),
- pd.Timestamp('20130101 09:00:03'),
- pd.Timestamp('20130101 09:00:04'),
- pd.Timestamp('20130101 09:00:06')])
+ df = pd.DataFrame({"x": 1},
+ index=[pd.Timestamp("20130101 09:00:01"),
+ pd.Timestamp("20130101 09:00:02"),
+ pd.Timestamp("20130101 09:00:03"),
+ pd.Timestamp("20130101 09:00:04"),
+ pd.Timestamp("20130101 09:00:06")])
- df["right"] = df.rolling('2s', closed='right').x.sum() # default
- df["both"] = df.rolling('2s', closed='both').x.sum()
- df["left"] = df.rolling('2s', closed='left').x.sum()
- df["neither"] = df.rolling('2s', closed='neither').x.sum()
+ df["right"] = df.rolling("2s", closed="right").x.sum() # default
+ df["both"] = df.rolling("2s", closed="both").x.sum()
+ df["left"] = df.rolling("2s", closed="left").x.sum()
+ df["neither"] = df.rolling("2s", closed="neither").x.sum()
df
@@ -746,12 +752,12 @@ For example:
.. ipython:: python
df = pd.DataFrame(np.random.randn(1000, 4),
- index=pd.date_range('1/1/2000', periods=1000),
- columns=['A', 'B', 'C', 'D'])
+ index=pd.date_range("1/1/2000", periods=1000),
+ columns=["A", "B", "C", "D"])
df = df.cumsum()
df2 = df[:20]
- df2.rolling(window=5).corr(df2['B'])
+ df2.rolling(window=5).corr(df2["B"])
.. _stats.moments.corr_pairwise:
@@ -776,14 +782,14 @@ can even be omitted:
.. ipython:: python
- covs = (df[['B', 'C', 'D']].rolling(window=50)
- .cov(df[['A', 'B', 'C']], pairwise=True))
- covs.loc['2002-09-22':]
+ covs = (df[["B", "C", "D"]].rolling(window=50)
+ .cov(df[["A", "B", "C"]], pairwise=True))
+ covs.loc["2002-09-22":]
.. ipython:: python
correls = df.rolling(window=50).corr()
- correls.loc['2002-09-22':]
+ correls.loc["2002-09-22":]
You can efficiently retrieve the time series of correlations between two
columns by reshaping and indexing:
@@ -791,12 +797,12 @@ columns by reshaping and indexing:
.. ipython:: python
:suppress:
- plt.close('all')
+ plt.close("all")
.. ipython:: python
@savefig rolling_corr_pairwise_ex.png
- correls.unstack(1)[('A', 'C')].plot()
+ correls.unstack(1)[("A", "C")].plot()
.. _stats.aggregate:
@@ -811,8 +817,8 @@ perform multiple computations on the data. These operations are similar to the :
.. ipython:: python
dfa = pd.DataFrame(np.random.randn(1000, 3),
- index=pd.date_range('1/1/2000', periods=1000),
- columns=['A', 'B', 'C'])
+ index=pd.date_range("1/1/2000", periods=1000),
+ columns=["A", "B", "C"])
r = dfa.rolling(window=60, min_periods=1)
r
@@ -823,9 +829,9 @@ Series (or multiple Series) via standard ``__getitem__``.
r.aggregate(np.sum)
- r['A'].aggregate(np.sum)
+ r["A"].aggregate(np.sum)
- r[['A', 'B']].aggregate(np.sum)
+ r[["A", "B"]].aggregate(np.sum)
As you can see, the result of the aggregation will have the selected columns, or all
columns if none are selected.
@@ -840,7 +846,7 @@ aggregation with, outputting a DataFrame:
.. ipython:: python
- r['A'].agg([np.sum, np.mean, np.std])
+ r["A"].agg([np.sum, np.mean, np.std])
On a windowed DataFrame, you can pass a list of functions to apply to each
column, which produces an aggregated result with a hierarchical index:
@@ -860,20 +866,20 @@ columns of a ``DataFrame``:
.. ipython:: python
- r.agg({'A': np.sum, 'B': lambda x: np.std(x, ddof=1)})
+ r.agg({"A": np.sum, "B": lambda x: np.std(x, ddof=1)})
The function names can also be strings. In order for a string to be valid it
must be implemented on the windowed object
.. ipython:: python
- r.agg({'A': 'sum', 'B': 'std'})
+ r.agg({"A": "sum", "B": "std"})
Furthermore you can pass a nested dict to indicate different aggregations on different columns.
.. ipython:: python
- r.agg({'A': ['sum', 'std'], 'B': ['mean', 'std']})
+ r.agg({"A": ["sum", "std"], "B": ["mean", "std"]})
.. _stats.moments.expanding:
@@ -967,7 +973,7 @@ all accept are:
sn.expanding().sum()
sn.cumsum()
- sn.cumsum().fillna(method='ffill')
+ sn.cumsum().fillna(method="ffill")
An expanding window statistic will be more stable (and less responsive) than
@@ -978,14 +984,14 @@ relative impact of an individual data point. As an example, here is the
.. ipython:: python
:suppress:
- plt.close('all')
+ plt.close("all")
.. ipython:: python
- s.plot(style='k--')
+ s.plot(style="k--")
@savefig expanding_mean_frame.png
- s.expanding().mean().plot(style='k')
+ s.expanding().mean().plot(style="k")
.. _stats.moments.exponentially_weighted:
@@ -1115,10 +1121,10 @@ of ``times``.
.. ipython:: python
- df = pd.DataFrame({'B': [0, 1, 2, np.nan, 4]})
+ df = pd.DataFrame({"B": [0, 1, 2, np.nan, 4]})
df
- times = ['2020-01-01', '2020-01-03', '2020-01-10', '2020-01-15', '2020-01-17']
- df.ewm(halflife='4 days', times=pd.DatetimeIndex(times)).mean()
+ times = ["2020-01-01", "2020-01-03", "2020-01-10", "2020-01-15", "2020-01-17"]
+ df.ewm(halflife="4 days", times=pd.DatetimeIndex(times)).mean()
The following formula is used to compute exponentially weighted mean with an input vector of times:
@@ -1130,10 +1136,10 @@ Here is an example for a univariate time series:
.. ipython:: python
- s.plot(style='k--')
+ s.plot(style="k--")
@savefig ewma_ex.png
- s.ewm(span=20).mean().plot(style='k')
+ s.ewm(span=20).mean().plot(style="k")
ExponentialMovingWindow has a ``min_periods`` argument, which has the same
meaning it does for all the ``.expanding`` and ``.rolling`` methods:
| update code style computation.rst for #36777 | https://api.github.com/repos/pandas-dev/pandas/pulls/36832 | 2020-10-03T12:01:16Z | 2020-10-03T14:00:50Z | null | 2020-10-03T14:00:50Z |
DOC: reformat doc groupby.rst | diff --git a/doc/source/user_guide/groupby.rst b/doc/source/user_guide/groupby.rst
index 9696f14f03b56..ec64442319a84 100644
--- a/doc/source/user_guide/groupby.rst
+++ b/doc/source/user_guide/groupby.rst
@@ -677,7 +677,7 @@ and unpack the keyword arguments
animals.groupby("kind").agg(
**{
- "total weight": pd.NamedAgg(column="weight", aggfunc=sum),
+ "total weight": pd.NamedAgg(column="weight", aggfunc=sum)
}
)
| ref #36777
| https://api.github.com/repos/pandas-dev/pandas/pulls/36831 | 2020-10-03T11:12:18Z | 2020-10-03T18:09:22Z | 2020-10-03T18:09:22Z | 2020-10-04T22:33:14Z |
DOC: update code style for remaining intro tutorial docs for #36777 computation.rst | diff --git a/doc/source/user_guide/computation.rst b/doc/source/user_guide/computation.rst
index e7edda90610b5..0f323f7796433 100644
--- a/doc/source/user_guide/computation.rst
+++ b/doc/source/user_guide/computation.rst
@@ -64,7 +64,7 @@ series in the DataFrame, also excluding NA/null values.
.. ipython:: python
frame = pd.DataFrame(np.random.randn(1000, 5),
- columns=['a', 'b', 'c', 'd', 'e'])
+ columns=["a", "b", "c", "d", "e"])
frame.cov()
``DataFrame.cov`` also supports an optional ``min_periods`` keyword that
@@ -73,9 +73,9 @@ in order to have a valid result.
.. ipython:: python
- frame = pd.DataFrame(np.random.randn(20, 3), columns=['a', 'b', 'c'])
- frame.loc[frame.index[:5], 'a'] = np.nan
- frame.loc[frame.index[5:10], 'b'] = np.nan
+ frame = pd.DataFrame(np.random.randn(20, 3), columns=["a", "b", "c"])
+ frame.loc[frame.index[:5], "a"] = np.nan
+ frame.loc[frame.index[5:10], "b"] = np.nan
frame.cov()
@@ -117,12 +117,12 @@ Wikipedia has articles covering the above correlation coefficients:
.. ipython:: python
frame = pd.DataFrame(np.random.randn(1000, 5),
- columns=['a', 'b', 'c', 'd', 'e'])
+ columns=["a", "b", "c", "d", "e"])
frame.iloc[::2] = np.nan
# Series with Series
- frame['a'].corr(frame['b'])
- frame['a'].corr(frame['b'], method='spearman')
+ frame["a"].corr(frame["b"])
+ frame["a"].corr(frame["b"], method="spearman")
# Pairwise correlation of DataFrame columns
frame.corr()
@@ -134,9 +134,9 @@ Like ``cov``, ``corr`` also supports the optional ``min_periods`` keyword:
.. ipython:: python
- frame = pd.DataFrame(np.random.randn(20, 3), columns=['a', 'b', 'c'])
- frame.loc[frame.index[:5], 'a'] = np.nan
- frame.loc[frame.index[5:10], 'b'] = np.nan
+ frame = pd.DataFrame(np.random.randn(20, 3), columns=["a", "b", "c"])
+ frame.loc[frame.index[:5], "a"] = np.nan
+ frame.loc[frame.index[5:10], "b"] = np.nan
frame.corr()
@@ -165,8 +165,8 @@ DataFrame objects.
.. ipython:: python
- index = ['a', 'b', 'c', 'd', 'e']
- columns = ['one', 'two', 'three', 'four']
+ index = ["a", "b", "c", "d", "e"]
+ columns = ["one", "two", "three", "four"]
df1 = pd.DataFrame(np.random.randn(5, 4), index=index, columns=columns)
df2 = pd.DataFrame(np.random.randn(4, 4), index=index[:4], columns=columns)
df1.corrwith(df2)
@@ -182,8 +182,8 @@ assigned the mean of the ranks (by default) for the group:
.. ipython:: python
- s = pd.Series(np.random.randn(5), index=list('abcde'))
- s['d'] = s['b'] # so there's a tie
+ s = pd.Series(np.random.randn(5), index=list("abcde"))
+ s["d"] = s["b"] # so there's a tie
s.rank()
:meth:`~DataFrame.rank` is also a DataFrame method and can rank either the rows
@@ -244,7 +244,7 @@ objects, :class:`~pandas.core.window.Rolling`, :class:`~pandas.core.window.Expan
.. ipython:: python
s = pd.Series(np.random.randn(1000),
- index=pd.date_range('1/1/2000', periods=1000))
+ index=pd.date_range("1/1/2000", periods=1000))
s = s.cumsum()
s
@@ -279,15 +279,15 @@ We can then call methods on these ``rolling`` objects. These return like-indexed
.. ipython:: python
- s.plot(style='k--')
+ s.plot(style="k--")
@savefig rolling_mean_ex.png
- r.mean().plot(style='k')
+ r.mean().plot(style="k")
.. ipython:: python
:suppress:
- plt.close('all')
+ plt.close("all")
They can also be applied to DataFrame objects. This is really just syntactic
sugar for applying the moving window operator to all of the DataFrame's columns:
@@ -295,8 +295,8 @@ sugar for applying the moving window operator to all of the DataFrame's columns:
.. ipython:: python
df = pd.DataFrame(np.random.randn(1000, 4),
- index=pd.date_range('1/1/2000', periods=1000),
- columns=['A', 'B', 'C', 'D'])
+ index=pd.date_range("1/1/2000", periods=1000),
+ columns=["A", "B", "C", "D"])
df = df.cumsum()
@savefig rolling_mean_frame.png
@@ -368,7 +368,7 @@ compute the mean absolute deviation on a rolling basis:
return np.fabs(x - x.mean()).mean()
@savefig rolling_apply_ex.png
- s.rolling(window=60).apply(mad, raw=True).plot(style='k')
+ s.rolling(window=60).apply(mad, raw=True).plot(style="k")
Using the Numba engine
~~~~~~~~~~~~~~~~~~~~~~
@@ -377,7 +377,7 @@ Using the Numba engine
Additionally, :meth:`~Rolling.apply` can leverage `Numba <https://numba.pydata.org/>`__
if installed as an optional dependency. The apply aggregation can be executed using Numba by specifying
-``engine='numba'`` and ``engine_kwargs`` arguments (``raw`` must also be set to ``True``).
+``engine="numba"`` and ``engine_kwargs`` arguments (``raw`` must also be set to ``True``).
Numba will be applied in potentially two routines:
1. If ``func`` is a standard Python function, the engine will `JIT <https://numba.pydata.org/numba-doc/latest/user/overview.html>`__
@@ -407,13 +407,13 @@ and their default values are set to ``False``, ``True`` and ``False`` respective
In [3]: def f(x):
...: return np.sum(x) + 5
# Run the first time, compilation time will affect performance
- In [4]: %timeit -r 1 -n 1 roll.apply(f, engine='numba', raw=True) # noqa: E225
+ In [4]: %timeit -r 1 -n 1 roll.apply(f, engine="numba", raw=True) # noqa: E225
1.23 s ± 0 ns per loop (mean ± std. dev. of 1 run, 1 loop each)
# Function is cached and performance will improve
- In [5]: %timeit roll.apply(f, engine='numba', raw=True)
+ In [5]: %timeit roll.apply(f, engine="numba", raw=True)
188 ms ± 1.93 ms per loop (mean ± std. dev. of 7 runs, 10 loops each)
- In [6]: %timeit roll.apply(f, engine='cython', raw=True)
+ In [6]: %timeit roll.apply(f, engine="cython", raw=True)
3.92 s ± 59 ms per loop (mean ± std. dev. of 7 runs, 1 loop each)
.. _stats.rolling_window:
@@ -454,22 +454,22 @@ The list of recognized types are the `scipy.signal window functions
.. ipython:: python
ser = pd.Series(np.random.randn(10),
- index=pd.date_range('1/1/2000', periods=10))
+ index=pd.date_range("1/1/2000", periods=10))
- ser.rolling(window=5, win_type='triang').mean()
+ ser.rolling(window=5, win_type="triang").mean()
Note that the ``boxcar`` window is equivalent to :meth:`~Rolling.mean`.
.. ipython:: python
- ser.rolling(window=5, win_type='boxcar').mean()
+ ser.rolling(window=5, win_type="boxcar").mean()
ser.rolling(window=5).mean()
For some windowing functions, additional parameters must be specified:
.. ipython:: python
- ser.rolling(window=5, win_type='gaussian').mean(std=0.1)
+ ser.rolling(window=5, win_type="gaussian").mean(std=0.1)
.. _stats.moments.normalization:
@@ -498,10 +498,10 @@ This can be particularly useful for a non-regular time frequency index.
.. ipython:: python
- dft = pd.DataFrame({'B': [0, 1, 2, np.nan, 4]},
- index=pd.date_range('20130101 09:00:00',
+ dft = pd.DataFrame({"B": [0, 1, 2, np.nan, 4]},
+ index=pd.date_range("20130101 09:00:00",
periods=5,
- freq='s'))
+ freq="s"))
dft
This is a regular frequency index. Using an integer window parameter works to roll along the window frequency.
@@ -515,20 +515,24 @@ Specifying an offset allows a more intuitive specification of the rolling freque
.. ipython:: python
- dft.rolling('2s').sum()
+ dft.rolling("2s").sum()
Using a non-regular, but still monotonic index, rolling with an integer window does not impart any special calculation.
.. ipython:: python
- dft = pd.DataFrame({'B': [0, 1, 2, np.nan, 4]},
- index=pd.Index([pd.Timestamp('20130101 09:00:00'),
- pd.Timestamp('20130101 09:00:02'),
- pd.Timestamp('20130101 09:00:03'),
- pd.Timestamp('20130101 09:00:05'),
- pd.Timestamp('20130101 09:00:06')],
- name='foo'))
+ dft = pd.DataFrame({"B": [0, 1, 2, np.nan, 4]},
+ index=pd.Index(
+ [
+ pd.Timestamp("20130101 09:00:00"),
+ pd.Timestamp("20130101 09:00:02"),
+ pd.Timestamp("20130101 09:00:03"),
+ pd.Timestamp("20130101 09:00:05"),
+ pd.Timestamp("20130101 09:00:06")
+ ],
+ name="foo")
+ )
dft
dft.rolling(2).sum()
@@ -537,7 +541,7 @@ Using the time-specification generates variable windows for this sparse data.
.. ipython:: python
- dft.rolling('2s').sum()
+ dft.rolling("2s").sum()
Furthermore, we now allow an optional ``on`` parameter to specify a column (rather than the
default of the index) in a DataFrame.
@@ -546,7 +550,7 @@ default of the index) in a DataFrame.
dft = dft.reset_index()
dft
- dft.rolling('2s', on='foo').sum()
+ dft.rolling("2s", on="foo").sum()
.. _stats.custom_rolling_window:
@@ -569,7 +573,7 @@ For example, if we have the following ``DataFrame``:
use_expanding = [True, False, True, False, True]
use_expanding
- df = pd.DataFrame({'values': range(5)})
+ df = pd.DataFrame({"values": range(5)})
df
and we want to use an expanding window where ``use_expanding`` is ``True`` otherwise a window of size
@@ -615,7 +619,8 @@ rolling operations over a non-fixed offset like a ``BusinessDay``.
.. ipython:: python
from pandas.api.indexers import VariableOffsetWindowIndexer
- df = pd.DataFrame(range(10), index=pd.date_range('2020', periods=10))
+
+ df = pd.DataFrame(range(10), index=pd.date_range("2020", periods=10))
offset = pd.offsets.BDay(1)
indexer = VariableOffsetWindowIndexer(index=df.index, offset=offset)
df
@@ -631,6 +636,7 @@ forward-looking rolling window, and we can use it as follows:
.. ipython:: ipython
from pandas.api.indexers import FixedForwardWindowIndexer
+
indexer = FixedForwardWindowIndexer(window_size=2)
df.rolling(indexer, min_periods=1).sum()
@@ -657,17 +663,17 @@ from present information back to past information. This allows the rolling windo
.. ipython:: python
- df = pd.DataFrame({'x': 1},
- index=[pd.Timestamp('20130101 09:00:01'),
- pd.Timestamp('20130101 09:00:02'),
- pd.Timestamp('20130101 09:00:03'),
- pd.Timestamp('20130101 09:00:04'),
- pd.Timestamp('20130101 09:00:06')])
+ df = pd.DataFrame({"x": 1},
+ index=[pd.Timestamp("20130101 09:00:01"),
+ pd.Timestamp("20130101 09:00:02"),
+ pd.Timestamp("20130101 09:00:03"),
+ pd.Timestamp("20130101 09:00:04"),
+ pd.Timestamp("20130101 09:00:06")])
- df["right"] = df.rolling('2s', closed='right').x.sum() # default
- df["both"] = df.rolling('2s', closed='both').x.sum()
- df["left"] = df.rolling('2s', closed='left').x.sum()
- df["neither"] = df.rolling('2s', closed='neither').x.sum()
+ df["right"] = df.rolling("2s", closed="right").x.sum() # default
+ df["both"] = df.rolling("2s", closed="both").x.sum()
+ df["left"] = df.rolling("2s", closed="left").x.sum()
+ df["neither"] = df.rolling("2s", closed="neither").x.sum()
df
@@ -746,12 +752,12 @@ For example:
.. ipython:: python
df = pd.DataFrame(np.random.randn(1000, 4),
- index=pd.date_range('1/1/2000', periods=1000),
- columns=['A', 'B', 'C', 'D'])
+ index=pd.date_range("1/1/2000", periods=1000),
+ columns=["A", "B", "C", "D"])
df = df.cumsum()
df2 = df[:20]
- df2.rolling(window=5).corr(df2['B'])
+ df2.rolling(window=5).corr(df2["B"])
.. _stats.moments.corr_pairwise:
@@ -776,14 +782,14 @@ can even be omitted:
.. ipython:: python
- covs = (df[['B', 'C', 'D']].rolling(window=50)
- .cov(df[['A', 'B', 'C']], pairwise=True))
- covs.loc['2002-09-22':]
+ covs = (df[["B", "C", "D"]].rolling(window=50)
+ .cov(df[["A", "B", "C"]], pairwise=True))
+ covs.loc["2002-09-22":]
.. ipython:: python
correls = df.rolling(window=50).corr()
- correls.loc['2002-09-22':]
+ correls.loc["2002-09-22":]
You can efficiently retrieve the time series of correlations between two
columns by reshaping and indexing:
@@ -791,12 +797,12 @@ columns by reshaping and indexing:
.. ipython:: python
:suppress:
- plt.close('all')
+ plt.close("all")
.. ipython:: python
@savefig rolling_corr_pairwise_ex.png
- correls.unstack(1)[('A', 'C')].plot()
+ correls.unstack(1)[("A", "C")].plot()
.. _stats.aggregate:
@@ -811,8 +817,8 @@ perform multiple computations on the data. These operations are similar to the :
.. ipython:: python
dfa = pd.DataFrame(np.random.randn(1000, 3),
- index=pd.date_range('1/1/2000', periods=1000),
- columns=['A', 'B', 'C'])
+ index=pd.date_range("1/1/2000", periods=1000),
+ columns=["A", "B", "C"])
r = dfa.rolling(window=60, min_periods=1)
r
@@ -823,9 +829,9 @@ Series (or multiple Series) via standard ``__getitem__``.
r.aggregate(np.sum)
- r['A'].aggregate(np.sum)
+ r["A"].aggregate(np.sum)
- r[['A', 'B']].aggregate(np.sum)
+ r[["A", "B"]].aggregate(np.sum)
As you can see, the result of the aggregation will have the selected columns, or all
columns if none are selected.
@@ -840,7 +846,7 @@ aggregation with, outputting a DataFrame:
.. ipython:: python
- r['A'].agg([np.sum, np.mean, np.std])
+ r["A"].agg([np.sum, np.mean, np.std])
On a windowed DataFrame, you can pass a list of functions to apply to each
column, which produces an aggregated result with a hierarchical index:
@@ -860,20 +866,20 @@ columns of a ``DataFrame``:
.. ipython:: python
- r.agg({'A': np.sum, 'B': lambda x: np.std(x, ddof=1)})
+ r.agg({"A": np.sum, "B": lambda x: np.std(x, ddof=1)})
The function names can also be strings. In order for a string to be valid it
must be implemented on the windowed object
.. ipython:: python
- r.agg({'A': 'sum', 'B': 'std'})
+ r.agg({"A": "sum", "B": "std"})
Furthermore you can pass a nested dict to indicate different aggregations on different columns.
.. ipython:: python
- r.agg({'A': ['sum', 'std'], 'B': ['mean', 'std']})
+ r.agg({"A": ["sum", "std"], "B": ["mean", "std"]})
.. _stats.moments.expanding:
@@ -967,7 +973,7 @@ all accept are:
sn.expanding().sum()
sn.cumsum()
- sn.cumsum().fillna(method='ffill')
+ sn.cumsum().fillna(method="ffill")
An expanding window statistic will be more stable (and less responsive) than
@@ -978,14 +984,14 @@ relative impact of an individual data point. As an example, here is the
.. ipython:: python
:suppress:
- plt.close('all')
+ plt.close("all")
.. ipython:: python
- s.plot(style='k--')
+ s.plot(style="k--")
@savefig expanding_mean_frame.png
- s.expanding().mean().plot(style='k')
+ s.expanding().mean().plot(style="k")
.. _stats.moments.exponentially_weighted:
@@ -1115,10 +1121,10 @@ of ``times``.
.. ipython:: python
- df = pd.DataFrame({'B': [0, 1, 2, np.nan, 4]})
+ df = pd.DataFrame({"B": [0, 1, 2, np.nan, 4]})
df
- times = ['2020-01-01', '2020-01-03', '2020-01-10', '2020-01-15', '2020-01-17']
- df.ewm(halflife='4 days', times=pd.DatetimeIndex(times)).mean()
+ times = ["2020-01-01", "2020-01-03", "2020-01-10", "2020-01-15", "2020-01-17"]
+ df.ewm(halflife="4 days", times=pd.DatetimeIndex(times)).mean()
The following formula is used to compute exponentially weighted mean with an input vector of times:
@@ -1130,10 +1136,10 @@ Here is an example for a univariate time series:
.. ipython:: python
- s.plot(style='k--')
+ s.plot(style="k--")
@savefig ewma_ex.png
- s.ewm(span=20).mean().plot(style='k')
+ s.ewm(span=20).mean().plot(style="k")
ExponentialMovingWindow has a ``min_periods`` argument, which has the same
meaning it does for all the ``.expanding`` and ``.rolling`` methods:
| https://api.github.com/repos/pandas-dev/pandas/pulls/36829 | 2020-10-03T10:39:00Z | 2020-10-03T14:00:04Z | null | 2020-10-03T14:00:04Z | |
CI add end-of-file-fixer | diff --git a/.github/CODE_OF_CONDUCT.md b/.github/CODE_OF_CONDUCT.md
index 7dd2e04249492..87a5b7905fc6d 100644
--- a/.github/CODE_OF_CONDUCT.md
+++ b/.github/CODE_OF_CONDUCT.md
@@ -60,4 +60,3 @@ and the [Swift Code of Conduct][swift].
[homepage]: https://www.contributor-covenant.org
[version]: https://www.contributor-covenant.org/version/1/3/0/
[swift]: https://swift.org/community/#code-of-conduct
-
diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml
index d0c9f12614d0d..64d477507fab7 100644
--- a/.pre-commit-config.yaml
+++ b/.pre-commit-config.yaml
@@ -47,3 +47,8 @@ repos:
rev: v1.2.2
hooks:
- id: yesqa
+- repo: https://github.com/pre-commit/pre-commit-hooks
+ rev: v3.2.0
+ hooks:
+ - id: end-of-file-fixer
+ exclude: '.html$|^LICENSES/|.csv$|.txt$|.svg$|.py$'
diff --git a/AUTHORS.md b/AUTHORS.md
index f576e333f9448..84fcfe05e3043 100644
--- a/AUTHORS.md
+++ b/AUTHORS.md
@@ -54,4 +54,3 @@ pandas is distributed under a 3-clause ("Simplified" or "New") BSD
license. Parts of NumPy, SciPy, numpydoc, bottleneck, which all have
BSD-compatible licenses, are included. Their licenses follow the pandas
license.
-
diff --git a/ci/travis_process_gbq_encryption.sh b/ci/travis_process_gbq_encryption.sh
index fccf8e1e8deff..b5118ad5defc6 100755
--- a/ci/travis_process_gbq_encryption.sh
+++ b/ci/travis_process_gbq_encryption.sh
@@ -10,4 +10,3 @@ elif [[ -n ${!TRAVIS_IV_ENV} ]]; then
export GBQ_PROJECT_ID='pandas-gbq-tests';
echo 'Successfully decrypted gbq credentials'
fi
-
diff --git a/doc/data/iris.data b/doc/data/iris.data
index c19b9c3688515..026e214e5f754 100644
--- a/doc/data/iris.data
+++ b/doc/data/iris.data
@@ -148,4 +148,4 @@ SepalLength,SepalWidth,PetalLength,PetalWidth,Name
6.3,2.5,5.0,1.9,Iris-virginica
6.5,3.0,5.2,2.0,Iris-virginica
6.2,3.4,5.4,2.3,Iris-virginica
-5.9,3.0,5.1,1.8,Iris-virginica
\ No newline at end of file
+5.9,3.0,5.1,1.8,Iris-virginica
diff --git a/doc/source/development/contributing.rst b/doc/source/development/contributing.rst
index d6955c5d4b8d2..27b743f6452db 100644
--- a/doc/source/development/contributing.rst
+++ b/doc/source/development/contributing.rst
@@ -837,6 +837,9 @@ to run its checks by running::
without having to have done ``pre-commit install`` beforehand.
+Note that if you have conflicting installations of ``virtualenv``, then you may get an
+error - see `here <https://github.com/pypa/virtualenv/issues/1875>`_.
+
Backwards compatibility
~~~~~~~~~~~~~~~~~~~~~~~
diff --git a/doc/source/development/developer.rst b/doc/source/development/developer.rst
index bdbcf5ca337b8..d701208792a4c 100644
--- a/doc/source/development/developer.rst
+++ b/doc/source/development/developer.rst
@@ -184,4 +184,4 @@ As an example of fully-formed metadata:
'creator': {
'library': 'pyarrow',
'version': '0.13.0'
- }}
\ No newline at end of file
+ }}
diff --git a/doc/source/getting_started/intro_tutorials/index.rst b/doc/source/getting_started/intro_tutorials/index.rst
index 28e7610866461..c67e18043c175 100644
--- a/doc/source/getting_started/intro_tutorials/index.rst
+++ b/doc/source/getting_started/intro_tutorials/index.rst
@@ -19,4 +19,3 @@ Getting started tutorials
08_combine_dataframes
09_timeseries
10_text_data
-
diff --git a/doc/source/getting_started/overview.rst b/doc/source/getting_started/overview.rst
index 57d87d4ec8a91..613856472a045 100644
--- a/doc/source/getting_started/overview.rst
+++ b/doc/source/getting_started/overview.rst
@@ -174,4 +174,3 @@ License
-------
.. literalinclude:: ../../../LICENSE
-
diff --git a/doc/source/reference/general_utility_functions.rst b/doc/source/reference/general_utility_functions.rst
index 3cba0a81a7011..37fe980dbf68c 100644
--- a/doc/source/reference/general_utility_functions.rst
+++ b/doc/source/reference/general_utility_functions.rst
@@ -122,4 +122,3 @@ Bug report function
:toctree: api/
show_versions
-
diff --git a/doc/source/whatsnew/v0.14.0.rst b/doc/source/whatsnew/v0.14.0.rst
index 421ef81427210..f2401c812a979 100644
--- a/doc/source/whatsnew/v0.14.0.rst
+++ b/doc/source/whatsnew/v0.14.0.rst
@@ -1084,4 +1084,4 @@ Bug fixes
Contributors
~~~~~~~~~~~~
-.. contributors:: v0.13.1..v0.14.0
\ No newline at end of file
+.. contributors:: v0.13.1..v0.14.0
diff --git a/flake8/cython-template.cfg b/flake8/cython-template.cfg
index 61562bd7701b1..3d7b288fd8055 100644
--- a/flake8/cython-template.cfg
+++ b/flake8/cython-template.cfg
@@ -1,4 +1,3 @@
[flake8]
filename = *.pxi.in
select = E501,E302,E203,E111,E114,E221,E303,E231,E126,F403
-
diff --git a/pandas/_libs/src/klib/khash_python.h b/pandas/_libs/src/klib/khash_python.h
index 82251744915a5..2b46d30c3adb6 100644
--- a/pandas/_libs/src/klib/khash_python.h
+++ b/pandas/_libs/src/klib/khash_python.h
@@ -121,4 +121,4 @@ void PANDAS_INLINE kh_destroy_str_starts(kh_str_starts_t* table) {
void PANDAS_INLINE kh_resize_str_starts(kh_str_starts_t* table, khint_t val) {
kh_resize_str(table->table, val);
-}
\ No newline at end of file
+}
diff --git a/pandas/_libs/util.pxd b/pandas/_libs/util.pxd
index 828bccf7d5641..5f234910deede 100644
--- a/pandas/_libs/util.pxd
+++ b/pandas/_libs/util.pxd
@@ -48,4 +48,3 @@ cdef inline void set_array_not_contiguous(ndarray ao) nogil:
# ao->flags &= ~(NPY_ARRAY_C_CONTIGUOUS | NPY_ARRAY_F_CONTIGUOUS);
PyArray_CLEARFLAGS(ao,
(NPY_ARRAY_C_CONTIGUOUS | NPY_ARRAY_F_CONTIGUOUS))
-
diff --git a/pandas/tests/io/json/data/tsframe_iso_v012.json b/pandas/tests/io/json/data/tsframe_iso_v012.json
index bd9ff885ad23a..5fa01d5cd902d 100644
--- a/pandas/tests/io/json/data/tsframe_iso_v012.json
+++ b/pandas/tests/io/json/data/tsframe_iso_v012.json
@@ -1 +1 @@
-{"A":{"2000-01-03T00:00:00":1.56808523,"2000-01-04T00:00:00":-0.2550111,"2000-01-05T00:00:00":1.51493992,"2000-01-06T00:00:00":-0.02765498,"2000-01-07T00:00:00":0.05951614},"B":{"2000-01-03T00:00:00":0.65727391,"2000-01-04T00:00:00":-0.08072427,"2000-01-05T00:00:00":0.11805825,"2000-01-06T00:00:00":0.44679743,"2000-01-07T00:00:00":-2.69652057},"C":{"2000-01-03T00:00:00":1.81021139,"2000-01-04T00:00:00":-0.03202878,"2000-01-05T00:00:00":1.629455,"2000-01-06T00:00:00":0.33192641,"2000-01-07T00:00:00":1.28163262},"D":{"2000-01-03T00:00:00":-0.17251653,"2000-01-04T00:00:00":-0.17581665,"2000-01-05T00:00:00":-1.31506612,"2000-01-06T00:00:00":-0.27885413,"2000-01-07T00:00:00":0.34703478},"date":{"2000-01-03T00:00:00":"1992-01-06T18:21:32.120000","2000-01-04T00:00:00":"1992-01-06T18:21:32.120000","2000-01-05T00:00:00":"1992-01-06T18:21:32.120000","2000-01-06T00:00:00":"2013-01-01T00:00:00","2000-01-07T00:00:00":"1992-01-06T18:21:32.120000"}}
\ No newline at end of file
+{"A":{"2000-01-03T00:00:00":1.56808523,"2000-01-04T00:00:00":-0.2550111,"2000-01-05T00:00:00":1.51493992,"2000-01-06T00:00:00":-0.02765498,"2000-01-07T00:00:00":0.05951614},"B":{"2000-01-03T00:00:00":0.65727391,"2000-01-04T00:00:00":-0.08072427,"2000-01-05T00:00:00":0.11805825,"2000-01-06T00:00:00":0.44679743,"2000-01-07T00:00:00":-2.69652057},"C":{"2000-01-03T00:00:00":1.81021139,"2000-01-04T00:00:00":-0.03202878,"2000-01-05T00:00:00":1.629455,"2000-01-06T00:00:00":0.33192641,"2000-01-07T00:00:00":1.28163262},"D":{"2000-01-03T00:00:00":-0.17251653,"2000-01-04T00:00:00":-0.17581665,"2000-01-05T00:00:00":-1.31506612,"2000-01-06T00:00:00":-0.27885413,"2000-01-07T00:00:00":0.34703478},"date":{"2000-01-03T00:00:00":"1992-01-06T18:21:32.120000","2000-01-04T00:00:00":"1992-01-06T18:21:32.120000","2000-01-05T00:00:00":"1992-01-06T18:21:32.120000","2000-01-06T00:00:00":"2013-01-01T00:00:00","2000-01-07T00:00:00":"1992-01-06T18:21:32.120000"}}
diff --git a/pandas/tests/io/json/data/tsframe_v012.json b/pandas/tests/io/json/data/tsframe_v012.json
index d4474c767855c..1d6a0a45c028e 100644
--- a/pandas/tests/io/json/data/tsframe_v012.json
+++ b/pandas/tests/io/json/data/tsframe_v012.json
@@ -1 +1 @@
-{"A":{"946857600000000000":1.56808523,"946944000000000000":-0.2550111,"947030400000000000":1.51493992,"947116800000000000":-0.02765498,"947203200000000000":0.05951614},"B":{"946857600000000000":0.65727391,"946944000000000000":-0.08072427,"947030400000000000":0.11805825,"947116800000000000":0.44679743,"947203200000000000":-2.69652057},"C":{"946857600000000000":1.81021139,"946944000000000000":-0.03202878,"947030400000000000":1.629455,"947116800000000000":0.33192641,"947203200000000000":1.28163262},"D":{"946857600000000000":-0.17251653,"946944000000000000":-0.17581665,"947030400000000000":-1.31506612,"947116800000000000":-0.27885413,"947203200000000000":0.34703478},"date":{"946857600000000000":694722092120000000,"946944000000000000":694722092120000000,"947030400000000000":694722092120000000,"947116800000000000":1356998400000000000,"947203200000000000":694722092120000000},"modified":{"946857600000000000":694722092120000000,"946944000000000000":null,"947030400000000000":694722092120000000,"947116800000000000":1356998400000000000,"947203200000000000":694722092120000000}}
\ No newline at end of file
+{"A":{"946857600000000000":1.56808523,"946944000000000000":-0.2550111,"947030400000000000":1.51493992,"947116800000000000":-0.02765498,"947203200000000000":0.05951614},"B":{"946857600000000000":0.65727391,"946944000000000000":-0.08072427,"947030400000000000":0.11805825,"947116800000000000":0.44679743,"947203200000000000":-2.69652057},"C":{"946857600000000000":1.81021139,"946944000000000000":-0.03202878,"947030400000000000":1.629455,"947116800000000000":0.33192641,"947203200000000000":1.28163262},"D":{"946857600000000000":-0.17251653,"946944000000000000":-0.17581665,"947030400000000000":-1.31506612,"947116800000000000":-0.27885413,"947203200000000000":0.34703478},"date":{"946857600000000000":694722092120000000,"946944000000000000":694722092120000000,"947030400000000000":694722092120000000,"947116800000000000":1356998400000000000,"947203200000000000":694722092120000000},"modified":{"946857600000000000":694722092120000000,"946944000000000000":null,"947030400000000000":694722092120000000,"947116800000000000":1356998400000000000,"947203200000000000":694722092120000000}}
diff --git a/requirements-dev.txt b/requirements-dev.txt
index 8f3dd20f309aa..e6346e6c38694 100644
--- a/requirements-dev.txt
+++ b/requirements-dev.txt
@@ -79,4 +79,4 @@ tabulate>=0.8.3
natsort
git+https://github.com/pandas-dev/pydata-sphinx-theme.git@master
git+https://github.com/numpy/numpydoc
-pyflakes>=2.2.0
\ No newline at end of file
+pyflakes>=2.2.0
diff --git a/scripts/generate_pip_deps_from_conda.py b/scripts/generate_pip_deps_from_conda.py
index c417f58f6bf1b..c6d00eb58a969 100755
--- a/scripts/generate_pip_deps_from_conda.py
+++ b/scripts/generate_pip_deps_from_conda.py
@@ -94,7 +94,7 @@ def main(conda_fname, pip_fname, compare=False):
f"# This file is auto-generated from {fname}, do not modify.\n"
"# See that file for comments about the need/usage of each dependency.\n\n"
)
- pip_content = header + "\n".join(pip_deps)
+ pip_content = header + "\n".join(pip_deps) + "\n"
if compare:
with open(pip_fname) as pip_fd:
diff --git a/setup.cfg b/setup.cfg
index 73986f692b6cd..3f9842b4ed0b3 100644
--- a/setup.cfg
+++ b/setup.cfg
@@ -305,4 +305,3 @@ check_untyped_defs=False
[mypy-pandas.util._decorators]
check_untyped_defs=False
-
diff --git a/web/pandas/community/coc.md b/web/pandas/community/coc.md
index d2af9c3fdd25b..f6d0c3543840e 100644
--- a/web/pandas/community/coc.md
+++ b/web/pandas/community/coc.md
@@ -63,4 +63,3 @@ and the [Swift Code of Conduct][swift].
[homepage]: https://www.contributor-covenant.org
[version]: https://www.contributor-covenant.org/version/1/3/0/
[swift]: https://swift.org/community/#code-of-conduct
-
| xref https://github.com/pandas-dev/pandas/pull/36600#issuecomment-702976850
> ok if you'd fix the EOF issue can merge (and yes good idea to add to the pre-commit hook / separate PR pls)
Note: in addition to adding the end-of-file-fixer pre-commit hook, I applied the following
```diff
diff --git a/scripts/generate_pip_deps_from_conda.py b/scripts/generate_pip_deps_from_conda.py
index c417f58f6..c6d00eb58 100755
--- a/scripts/generate_pip_deps_from_conda.py
+++ b/scripts/generate_pip_deps_from_conda.py
@@ -94,7 +94,7 @@ def main(conda_fname, pip_fname, compare=False):
f"# This file is auto-generated from {fname}, do not modify.\n"
"# See that file for comments about the need/usage of each dependency.\n\n"
)
- pip_content = header + "\n".join(pip_deps)
+ pip_content = header + "\n".join(pip_deps) + "\n"
if compare:
with open(pip_fname) as pip_fd:
```
----
I don't know how many of these we want to apply this to, for now I'm just excluding licenses (which I don't think should be touched?), html, txt, svg,and csv files | https://api.github.com/repos/pandas-dev/pandas/pulls/36826 | 2020-10-03T06:43:25Z | 2020-10-06T22:54:42Z | 2020-10-06T22:54:42Z | 2020-10-07T07:26:31Z |
DOC: Fix extending.rst code style #36777 | diff --git a/doc/source/development/extending.rst b/doc/source/development/extending.rst
index c708ebb361ed1..afdd2c2a0571e 100644
--- a/doc/source/development/extending.rst
+++ b/doc/source/development/extending.rst
@@ -34,7 +34,7 @@ decorate a class, providing the name of attribute to add. The class's
@staticmethod
def _validate(obj):
# verify there is a column latitude and a column longitude
- if 'latitude' not in obj.columns or 'longitude' not in obj.columns:
+ if "latitude" not in obj.columns or "longitude" not in obj.columns:
raise AttributeError("Must have 'latitude' and 'longitude'.")
@property
@@ -50,8 +50,7 @@ decorate a class, providing the name of attribute to add. The class's
Now users can access your methods using the ``geo`` namespace:
- >>> ds = pd.DataFrame({'longitude': np.linspace(0, 10),
- ... 'latitude': np.linspace(0, 20)})
+ >>> ds = pd.DataFrame({"longitude": np.linspace(0, 10), "latitude": np.linspace(0, 20)})
>>> ds.geo.center
(5.0, 10.0)
>>> ds.geo.plot()
@@ -176,6 +175,7 @@ your ``MyExtensionArray`` class, as follows:
from pandas.api.extensions import ExtensionArray, ExtensionScalarOpsMixin
+
class MyExtensionArray(ExtensionArray, ExtensionScalarOpsMixin):
pass
@@ -271,6 +271,7 @@ included as a column in a pandas DataFrame):
def __arrow_array__(self, type=None):
# convert the underlying array values to a pyarrow Array
import pyarrow
+
return pyarrow.array(..., type=type)
The ``ExtensionDtype.__from_arrow__`` method then controls the conversion
@@ -347,7 +348,6 @@ Below example shows how to define ``SubclassedSeries`` and ``SubclassedDataFrame
.. code-block:: python
class SubclassedSeries(pd.Series):
-
@property
def _constructor(self):
return SubclassedSeries
@@ -358,7 +358,6 @@ Below example shows how to define ``SubclassedSeries`` and ``SubclassedDataFrame
class SubclassedDataFrame(pd.DataFrame):
-
@property
def _constructor(self):
return SubclassedDataFrame
@@ -377,7 +376,7 @@ Below example shows how to define ``SubclassedSeries`` and ``SubclassedDataFrame
>>> type(to_framed)
<class '__main__.SubclassedDataFrame'>
- >>> df = SubclassedDataFrame({'A': [1, 2, 3], 'B': [4, 5, 6], 'C': [7, 8, 9]})
+ >>> df = SubclassedDataFrame({"A": [1, 2, 3], "B": [4, 5, 6], "C": [7, 8, 9]})
>>> df
A B C
0 1 4 7
@@ -387,7 +386,7 @@ Below example shows how to define ``SubclassedSeries`` and ``SubclassedDataFrame
>>> type(df)
<class '__main__.SubclassedDataFrame'>
- >>> sliced1 = df[['A', 'B']]
+ >>> sliced1 = df[["A", "B"]]
>>> sliced1
A B
0 1 4
@@ -397,7 +396,7 @@ Below example shows how to define ``SubclassedSeries`` and ``SubclassedDataFrame
>>> type(sliced1)
<class '__main__.SubclassedDataFrame'>
- >>> sliced2 = df['A']
+ >>> sliced2 = df["A"]
>>> sliced2
0 1
1 2
@@ -422,11 +421,11 @@ Below is an example to define two original properties, "internal_cache" as a tem
class SubclassedDataFrame2(pd.DataFrame):
# temporary properties
- _internal_names = pd.DataFrame._internal_names + ['internal_cache']
+ _internal_names = pd.DataFrame._internal_names + ["internal_cache"]
_internal_names_set = set(_internal_names)
# normal properties
- _metadata = ['added_property']
+ _metadata = ["added_property"]
@property
def _constructor(self):
@@ -434,15 +433,15 @@ Below is an example to define two original properties, "internal_cache" as a tem
.. code-block:: python
- >>> df = SubclassedDataFrame2({'A': [1, 2, 3], 'B': [4, 5, 6], 'C': [7, 8, 9]})
+ >>> df = SubclassedDataFrame2({"A": [1, 2, 3], "B": [4, 5, 6], "C": [7, 8, 9]})
>>> df
A B C
0 1 4 7
1 2 5 8
2 3 6 9
- >>> df.internal_cache = 'cached'
- >>> df.added_property = 'property'
+ >>> df.internal_cache = "cached"
+ >>> df.added_property = "property"
>>> df.internal_cache
cached
@@ -450,11 +449,11 @@ Below is an example to define two original properties, "internal_cache" as a tem
property
# properties defined in _internal_names is reset after manipulation
- >>> df[['A', 'B']].internal_cache
+ >>> df[["A", "B"]].internal_cache
AttributeError: 'SubclassedDataFrame2' object has no attribute 'internal_cache'
# properties defined in _metadata are retained
- >>> df[['A', 'B']].added_property
+ >>> df[["A", "B"]].added_property
property
.. _extending.plotting-backends:
@@ -468,7 +467,7 @@ one based on Matplotlib. For example:
.. code-block:: python
- >>> pd.set_option('plotting.backend', 'backend.module')
+ >>> pd.set_option("plotting.backend", "backend.module")
>>> pd.Series([1, 2, 3]).plot()
This would be more or less equivalent to:
@@ -499,4 +498,4 @@ registers the default "matplotlib" backend as follows.
More information on how to implement a third-party plotting backend can be found at
-https://github.com/pandas-dev/pandas/blob/master/pandas/plotting/__init__.py#L1.
\ No newline at end of file
+https://github.com/pandas-dev/pandas/blob/master/pandas/plotting/__init__.py#L1.
diff --git a/doc/source/user_guide/computation.rst b/doc/source/user_guide/computation.rst
index e7edda90610b5..2f6ac6b06d85e 100644
--- a/doc/source/user_guide/computation.rst
+++ b/doc/source/user_guide/computation.rst
@@ -63,8 +63,7 @@ series in the DataFrame, also excluding NA/null values.
.. ipython:: python
- frame = pd.DataFrame(np.random.randn(1000, 5),
- columns=['a', 'b', 'c', 'd', 'e'])
+ frame = pd.DataFrame(np.random.randn(1000, 5), columns=["a", "b", "c", "d", "e"])
frame.cov()
``DataFrame.cov`` also supports an optional ``min_periods`` keyword that
@@ -73,9 +72,9 @@ in order to have a valid result.
.. ipython:: python
- frame = pd.DataFrame(np.random.randn(20, 3), columns=['a', 'b', 'c'])
- frame.loc[frame.index[:5], 'a'] = np.nan
- frame.loc[frame.index[5:10], 'b'] = np.nan
+ frame = pd.DataFrame(np.random.randn(20, 3), columns=["a", "b", "c"])
+ frame.loc[frame.index[:5], "a"] = np.nan
+ frame.loc[frame.index[5:10], "b"] = np.nan
frame.cov()
@@ -116,13 +115,12 @@ Wikipedia has articles covering the above correlation coefficients:
.. ipython:: python
- frame = pd.DataFrame(np.random.randn(1000, 5),
- columns=['a', 'b', 'c', 'd', 'e'])
+ frame = pd.DataFrame(np.random.randn(1000, 5), columns=["a", "b", "c", "d", "e"])
frame.iloc[::2] = np.nan
# Series with Series
- frame['a'].corr(frame['b'])
- frame['a'].corr(frame['b'], method='spearman')
+ frame["a"].corr(frame["b"])
+ frame["a"].corr(frame["b"], method="spearman")
# Pairwise correlation of DataFrame columns
frame.corr()
@@ -134,9 +132,9 @@ Like ``cov``, ``corr`` also supports the optional ``min_periods`` keyword:
.. ipython:: python
- frame = pd.DataFrame(np.random.randn(20, 3), columns=['a', 'b', 'c'])
- frame.loc[frame.index[:5], 'a'] = np.nan
- frame.loc[frame.index[5:10], 'b'] = np.nan
+ frame = pd.DataFrame(np.random.randn(20, 3), columns=["a", "b", "c"])
+ frame.loc[frame.index[:5], "a"] = np.nan
+ frame.loc[frame.index[5:10], "b"] = np.nan
frame.corr()
@@ -154,8 +152,8 @@ compute the correlation based on histogram intersection:
# histogram intersection
def histogram_intersection(a, b):
- return np.minimum(np.true_divide(a, a.sum()),
- np.true_divide(b, b.sum())).sum()
+ return np.minimum(np.true_divide(a, a.sum()), np.true_divide(b, b.sum())).sum()
+
frame.corr(method=histogram_intersection)
@@ -165,8 +163,8 @@ DataFrame objects.
.. ipython:: python
- index = ['a', 'b', 'c', 'd', 'e']
- columns = ['one', 'two', 'three', 'four']
+ index = ["a", "b", "c", "d", "e"]
+ columns = ["one", "two", "three", "four"]
df1 = pd.DataFrame(np.random.randn(5, 4), index=index, columns=columns)
df2 = pd.DataFrame(np.random.randn(4, 4), index=index[:4], columns=columns)
df1.corrwith(df2)
@@ -182,8 +180,8 @@ assigned the mean of the ranks (by default) for the group:
.. ipython:: python
- s = pd.Series(np.random.randn(5), index=list('abcde'))
- s['d'] = s['b'] # so there's a tie
+ s = pd.Series(np.random.randn(5), index=list("abcde"))
+ s["d"] = s["b"] # so there's a tie
s.rank()
:meth:`~DataFrame.rank` is also a DataFrame method and can rank either the rows
@@ -243,8 +241,7 @@ objects, :class:`~pandas.core.window.Rolling`, :class:`~pandas.core.window.Expan
.. ipython:: python
- s = pd.Series(np.random.randn(1000),
- index=pd.date_range('1/1/2000', periods=1000))
+ s = pd.Series(np.random.randn(1000), index=pd.date_range("1/1/2000", periods=1000))
s = s.cumsum()
s
@@ -279,24 +276,26 @@ We can then call methods on these ``rolling`` objects. These return like-indexed
.. ipython:: python
- s.plot(style='k--')
+ s.plot(style="k--")
@savefig rolling_mean_ex.png
- r.mean().plot(style='k')
+ r.mean().plot(style="k")
.. ipython:: python
:suppress:
- plt.close('all')
+ plt.close("all")
They can also be applied to DataFrame objects. This is really just syntactic
sugar for applying the moving window operator to all of the DataFrame's columns:
.. ipython:: python
- df = pd.DataFrame(np.random.randn(1000, 4),
- index=pd.date_range('1/1/2000', periods=1000),
- columns=['A', 'B', 'C', 'D'])
+ df = pd.DataFrame(
+ np.random.randn(1000, 4),
+ index=pd.date_range("1/1/2000", periods=1000),
+ columns=["A", "B", "C", "D"],
+ )
df = df.cumsum()
@savefig rolling_mean_frame.png
@@ -368,7 +367,7 @@ compute the mean absolute deviation on a rolling basis:
return np.fabs(x - x.mean()).mean()
@savefig rolling_apply_ex.png
- s.rolling(window=60).apply(mad, raw=True).plot(style='k')
+ s.rolling(window=60).apply(mad, raw=True).plot(style="k")
Using the Numba engine
~~~~~~~~~~~~~~~~~~~~~~
@@ -453,23 +452,22 @@ The list of recognized types are the `scipy.signal window functions
.. ipython:: python
- ser = pd.Series(np.random.randn(10),
- index=pd.date_range('1/1/2000', periods=10))
+ ser = pd.Series(np.random.randn(10), index=pd.date_range("1/1/2000", periods=10))
- ser.rolling(window=5, win_type='triang').mean()
+ ser.rolling(window=5, win_type="triang").mean()
Note that the ``boxcar`` window is equivalent to :meth:`~Rolling.mean`.
.. ipython:: python
- ser.rolling(window=5, win_type='boxcar').mean()
+ ser.rolling(window=5, win_type="boxcar").mean()
ser.rolling(window=5).mean()
For some windowing functions, additional parameters must be specified:
.. ipython:: python
- ser.rolling(window=5, win_type='gaussian').mean(std=0.1)
+ ser.rolling(window=5, win_type="gaussian").mean(std=0.1)
.. _stats.moments.normalization:
@@ -498,10 +496,10 @@ This can be particularly useful for a non-regular time frequency index.
.. ipython:: python
- dft = pd.DataFrame({'B': [0, 1, 2, np.nan, 4]},
- index=pd.date_range('20130101 09:00:00',
- periods=5,
- freq='s'))
+ dft = pd.DataFrame(
+ {"B": [0, 1, 2, np.nan, 4]},
+ index=pd.date_range("20130101 09:00:00", periods=5, freq="s"),
+ )
dft
This is a regular frequency index. Using an integer window parameter works to roll along the window frequency.
@@ -515,20 +513,26 @@ Specifying an offset allows a more intuitive specification of the rolling freque
.. ipython:: python
- dft.rolling('2s').sum()
+ dft.rolling("2s").sum()
Using a non-regular, but still monotonic index, rolling with an integer window does not impart any special calculation.
.. ipython:: python
- dft = pd.DataFrame({'B': [0, 1, 2, np.nan, 4]},
- index=pd.Index([pd.Timestamp('20130101 09:00:00'),
- pd.Timestamp('20130101 09:00:02'),
- pd.Timestamp('20130101 09:00:03'),
- pd.Timestamp('20130101 09:00:05'),
- pd.Timestamp('20130101 09:00:06')],
- name='foo'))
+ dft = pd.DataFrame(
+ {"B": [0, 1, 2, np.nan, 4]},
+ index=pd.Index(
+ [
+ pd.Timestamp("20130101 09:00:00"),
+ pd.Timestamp("20130101 09:00:02"),
+ pd.Timestamp("20130101 09:00:03"),
+ pd.Timestamp("20130101 09:00:05"),
+ pd.Timestamp("20130101 09:00:06"),
+ ],
+ name="foo",
+ ),
+ )
dft
dft.rolling(2).sum()
@@ -537,7 +541,7 @@ Using the time-specification generates variable windows for this sparse data.
.. ipython:: python
- dft.rolling('2s').sum()
+ dft.rolling("2s").sum()
Furthermore, we now allow an optional ``on`` parameter to specify a column (rather than the
default of the index) in a DataFrame.
@@ -546,7 +550,7 @@ default of the index) in a DataFrame.
dft = dft.reset_index()
dft
- dft.rolling('2s', on='foo').sum()
+ dft.rolling("2s", on="foo").sum()
.. _stats.custom_rolling_window:
@@ -569,7 +573,7 @@ For example, if we have the following ``DataFrame``:
use_expanding = [True, False, True, False, True]
use_expanding
- df = pd.DataFrame({'values': range(5)})
+ df = pd.DataFrame({"values": range(5)})
df
and we want to use an expanding window where ``use_expanding`` is ``True`` otherwise a window of size
@@ -615,7 +619,8 @@ rolling operations over a non-fixed offset like a ``BusinessDay``.
.. ipython:: python
from pandas.api.indexers import VariableOffsetWindowIndexer
- df = pd.DataFrame(range(10), index=pd.date_range('2020', periods=10))
+
+ df = pd.DataFrame(range(10), index=pd.date_range("2020", periods=10))
offset = pd.offsets.BDay(1)
indexer = VariableOffsetWindowIndexer(index=df.index, offset=offset)
df
@@ -657,17 +662,21 @@ from present information back to past information. This allows the rolling windo
.. ipython:: python
- df = pd.DataFrame({'x': 1},
- index=[pd.Timestamp('20130101 09:00:01'),
- pd.Timestamp('20130101 09:00:02'),
- pd.Timestamp('20130101 09:00:03'),
- pd.Timestamp('20130101 09:00:04'),
- pd.Timestamp('20130101 09:00:06')])
-
- df["right"] = df.rolling('2s', closed='right').x.sum() # default
- df["both"] = df.rolling('2s', closed='both').x.sum()
- df["left"] = df.rolling('2s', closed='left').x.sum()
- df["neither"] = df.rolling('2s', closed='neither').x.sum()
+ df = pd.DataFrame(
+ {"x": 1},
+ index=[
+ pd.Timestamp("20130101 09:00:01"),
+ pd.Timestamp("20130101 09:00:02"),
+ pd.Timestamp("20130101 09:00:03"),
+ pd.Timestamp("20130101 09:00:04"),
+ pd.Timestamp("20130101 09:00:06"),
+ ],
+ )
+
+ df["right"] = df.rolling("2s", closed="right").x.sum() # default
+ df["both"] = df.rolling("2s", closed="both").x.sum()
+ df["left"] = df.rolling("2s", closed="left").x.sum()
+ df["neither"] = df.rolling("2s", closed="neither").x.sum()
df
@@ -745,13 +754,15 @@ For example:
.. ipython:: python
- df = pd.DataFrame(np.random.randn(1000, 4),
- index=pd.date_range('1/1/2000', periods=1000),
- columns=['A', 'B', 'C', 'D'])
+ df = pd.DataFrame(
+ np.random.randn(1000, 4),
+ index=pd.date_range("1/1/2000", periods=1000),
+ columns=["A", "B", "C", "D"],
+ )
df = df.cumsum()
df2 = df[:20]
- df2.rolling(window=5).corr(df2['B'])
+ df2.rolling(window=5).corr(df2["B"])
.. _stats.moments.corr_pairwise:
@@ -776,14 +787,13 @@ can even be omitted:
.. ipython:: python
- covs = (df[['B', 'C', 'D']].rolling(window=50)
- .cov(df[['A', 'B', 'C']], pairwise=True))
- covs.loc['2002-09-22':]
+ covs = df[["B", "C", "D"]].rolling(window=50).cov(df[["A", "B", "C"]], pairwise=True)
+ covs.loc["2002-09-22":]
.. ipython:: python
correls = df.rolling(window=50).corr()
- correls.loc['2002-09-22':]
+ correls.loc["2002-09-22":]
You can efficiently retrieve the time series of correlations between two
columns by reshaping and indexing:
@@ -791,12 +801,12 @@ columns by reshaping and indexing:
.. ipython:: python
:suppress:
- plt.close('all')
+ plt.close("all")
.. ipython:: python
@savefig rolling_corr_pairwise_ex.png
- correls.unstack(1)[('A', 'C')].plot()
+ correls.unstack(1)[("A", "C")].plot()
.. _stats.aggregate:
@@ -810,9 +820,11 @@ perform multiple computations on the data. These operations are similar to the :
.. ipython:: python
- dfa = pd.DataFrame(np.random.randn(1000, 3),
- index=pd.date_range('1/1/2000', periods=1000),
- columns=['A', 'B', 'C'])
+ dfa = pd.DataFrame(
+ np.random.randn(1000, 3),
+ index=pd.date_range("1/1/2000", periods=1000),
+ columns=["A", "B", "C"],
+ )
r = dfa.rolling(window=60, min_periods=1)
r
@@ -823,9 +835,9 @@ Series (or multiple Series) via standard ``__getitem__``.
r.aggregate(np.sum)
- r['A'].aggregate(np.sum)
+ r["A"].aggregate(np.sum)
- r[['A', 'B']].aggregate(np.sum)
+ r[["A", "B"]].aggregate(np.sum)
As you can see, the result of the aggregation will have the selected columns, or all
columns if none are selected.
@@ -840,7 +852,7 @@ aggregation with, outputting a DataFrame:
.. ipython:: python
- r['A'].agg([np.sum, np.mean, np.std])
+ r["A"].agg([np.sum, np.mean, np.std])
On a windowed DataFrame, you can pass a list of functions to apply to each
column, which produces an aggregated result with a hierarchical index:
@@ -860,20 +872,20 @@ columns of a ``DataFrame``:
.. ipython:: python
- r.agg({'A': np.sum, 'B': lambda x: np.std(x, ddof=1)})
+ r.agg({"A": np.sum, "B": lambda x: np.std(x, ddof=1)})
The function names can also be strings. In order for a string to be valid it
must be implemented on the windowed object
.. ipython:: python
- r.agg({'A': 'sum', 'B': 'std'})
+ r.agg({"A": "sum", "B": "std"})
Furthermore you can pass a nested dict to indicate different aggregations on different columns.
.. ipython:: python
- r.agg({'A': ['sum', 'std'], 'B': ['mean', 'std']})
+ r.agg({"A": ["sum", "std"], "B": ["mean", "std"]})
.. _stats.moments.expanding:
@@ -967,7 +979,7 @@ all accept are:
sn.expanding().sum()
sn.cumsum()
- sn.cumsum().fillna(method='ffill')
+ sn.cumsum().fillna(method="ffill")
An expanding window statistic will be more stable (and less responsive) than
@@ -978,14 +990,14 @@ relative impact of an individual data point. As an example, here is the
.. ipython:: python
:suppress:
- plt.close('all')
+ plt.close("all")
.. ipython:: python
- s.plot(style='k--')
+ s.plot(style="k--")
@savefig expanding_mean_frame.png
- s.expanding().mean().plot(style='k')
+ s.expanding().mean().plot(style="k")
.. _stats.moments.exponentially_weighted:
@@ -1115,10 +1127,10 @@ of ``times``.
.. ipython:: python
- df = pd.DataFrame({'B': [0, 1, 2, np.nan, 4]})
+ df = pd.DataFrame({"B": [0, 1, 2, np.nan, 4]})
df
- times = ['2020-01-01', '2020-01-03', '2020-01-10', '2020-01-15', '2020-01-17']
- df.ewm(halflife='4 days', times=pd.DatetimeIndex(times)).mean()
+ times = ["2020-01-01", "2020-01-03", "2020-01-10", "2020-01-15", "2020-01-17"]
+ df.ewm(halflife="4 days", times=pd.DatetimeIndex(times)).mean()
The following formula is used to compute exponentially weighted mean with an input vector of times:
@@ -1130,10 +1142,10 @@ Here is an example for a univariate time series:
.. ipython:: python
- s.plot(style='k--')
+ s.plot(style="k--")
@savefig ewma_ex.png
- s.ewm(span=20).mean().plot(style='k')
+ s.ewm(span=20).mean().plot(style="k")
ExponentialMovingWindow has a ``min_periods`` argument, which has the same
meaning it does for all the ``.expanding`` and ``.rolling`` methods:
diff --git a/doc/source/user_guide/dsintro.rst b/doc/source/user_guide/dsintro.rst
index c27c73d439a0c..d698b316d321e 100644
--- a/doc/source/user_guide/dsintro.rst
+++ b/doc/source/user_guide/dsintro.rst
@@ -51,7 +51,7 @@ index is passed, one will be created having values ``[0, ..., len(data) - 1]``.
.. ipython:: python
- s = pd.Series(np.random.randn(5), index=['a', 'b', 'c', 'd', 'e'])
+ s = pd.Series(np.random.randn(5), index=["a", "b", "c", "d", "e"])
s
s.index
@@ -71,7 +71,7 @@ Series can be instantiated from dicts:
.. ipython:: python
- d = {'b': 1, 'a': 0, 'c': 2}
+ d = {"b": 1, "a": 0, "c": 2}
pd.Series(d)
.. note::
@@ -92,9 +92,9 @@ index will be pulled out.
.. ipython:: python
- d = {'a': 0., 'b': 1., 'c': 2.}
+ d = {"a": 0.0, "b": 1.0, "c": 2.0}
pd.Series(d)
- pd.Series(d, index=['b', 'c', 'd', 'a'])
+ pd.Series(d, index=["b", "c", "d", "a"])
.. note::
@@ -107,7 +107,7 @@ provided. The value will be repeated to match the length of **index**.
.. ipython:: python
- pd.Series(5., index=['a', 'b', 'c', 'd', 'e'])
+ pd.Series(5.0, index=["a", "b", "c", "d", "e"])
Series is ndarray-like
~~~~~~~~~~~~~~~~~~~~~~
@@ -173,26 +173,26 @@ label:
.. ipython:: python
- s['a']
- s['e'] = 12.
+ s["a"]
+ s["e"] = 12.0
s
- 'e' in s
- 'f' in s
+ "e" in s
+ "f" in s
If a label is not contained, an exception is raised:
.. code-block:: python
- >>> s['f']
+ >>> s["f"]
KeyError: 'f'
Using the ``get`` method, a missing label will return None or specified default:
.. ipython:: python
- s.get('f')
+ s.get("f")
- s.get('f', np.nan)
+ s.get("f", np.nan)
See also the :ref:`section on attribute access<indexing.attribute_access>`.
@@ -244,7 +244,7 @@ Series can also have a ``name`` attribute:
.. ipython:: python
- s = pd.Series(np.random.randn(5), name='something')
+ s = pd.Series(np.random.randn(5), name="something")
s
s.name
@@ -306,13 +306,15 @@ keys.
.. ipython:: python
- d = {'one': pd.Series([1., 2., 3.], index=['a', 'b', 'c']),
- 'two': pd.Series([1., 2., 3., 4.], index=['a', 'b', 'c', 'd'])}
+ d = {
+ "one": pd.Series([1.0, 2.0, 3.0], index=["a", "b", "c"]),
+ "two": pd.Series([1.0, 2.0, 3.0, 4.0], index=["a", "b", "c", "d"]),
+ }
df = pd.DataFrame(d)
df
- pd.DataFrame(d, index=['d', 'b', 'a'])
- pd.DataFrame(d, index=['d', 'b', 'a'], columns=['two', 'three'])
+ pd.DataFrame(d, index=["d", "b", "a"])
+ pd.DataFrame(d, index=["d", "b", "a"], columns=["two", "three"])
The row and column labels can be accessed respectively by accessing the
**index** and **columns** attributes:
@@ -336,10 +338,9 @@ result will be ``range(n)``, where ``n`` is the array length.
.. ipython:: python
- d = {'one': [1., 2., 3., 4.],
- 'two': [4., 3., 2., 1.]}
+ d = {"one": [1.0, 2.0, 3.0, 4.0], "two": [4.0, 3.0, 2.0, 1.0]}
pd.DataFrame(d)
- pd.DataFrame(d, index=['a', 'b', 'c', 'd'])
+ pd.DataFrame(d, index=["a", "b", "c", "d"])
From structured or record array
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
@@ -348,12 +349,12 @@ This case is handled identically to a dict of arrays.
.. ipython:: python
- data = np.zeros((2, ), dtype=[('A', 'i4'), ('B', 'f4'), ('C', 'a10')])
- data[:] = [(1, 2., 'Hello'), (2, 3., "World")]
+ data = np.zeros((2,), dtype=[("A", "i4"), ("B", "f4"), ("C", "a10")])
+ data[:] = [(1, 2.0, "Hello"), (2, 3.0, "World")]
pd.DataFrame(data)
- pd.DataFrame(data, index=['first', 'second'])
- pd.DataFrame(data, columns=['C', 'A', 'B'])
+ pd.DataFrame(data, index=["first", "second"])
+ pd.DataFrame(data, columns=["C", "A", "B"])
.. note::
@@ -367,10 +368,10 @@ From a list of dicts
.. ipython:: python
- data2 = [{'a': 1, 'b': 2}, {'a': 5, 'b': 10, 'c': 20}]
+ data2 = [{"a": 1, "b": 2}, {"a": 5, "b": 10, "c": 20}]
pd.DataFrame(data2)
- pd.DataFrame(data2, index=['first', 'second'])
- pd.DataFrame(data2, columns=['a', 'b'])
+ pd.DataFrame(data2, index=["first", "second"])
+ pd.DataFrame(data2, columns=["a", "b"])
.. _basics.dataframe.from_dict_of_tuples:
@@ -382,11 +383,15 @@ dictionary.
.. ipython:: python
- pd.DataFrame({('a', 'b'): {('A', 'B'): 1, ('A', 'C'): 2},
- ('a', 'a'): {('A', 'C'): 3, ('A', 'B'): 4},
- ('a', 'c'): {('A', 'B'): 5, ('A', 'C'): 6},
- ('b', 'a'): {('A', 'C'): 7, ('A', 'B'): 8},
- ('b', 'b'): {('A', 'D'): 9, ('A', 'B'): 10}})
+ pd.DataFrame(
+ {
+ ("a", "b"): {("A", "B"): 1, ("A", "C"): 2},
+ ("a", "a"): {("A", "C"): 3, ("A", "B"): 4},
+ ("a", "c"): {("A", "B"): 5, ("A", "C"): 6},
+ ("b", "a"): {("A", "C"): 7, ("A", "B"): 8},
+ ("b", "b"): {("A", "D"): 9, ("A", "B"): 10},
+ }
+ )
.. _basics.dataframe.from_series:
@@ -414,11 +419,11 @@ first ``namedtuple``, a ``ValueError`` is raised.
from collections import namedtuple
- Point = namedtuple('Point', 'x y')
+ Point = namedtuple("Point", "x y")
pd.DataFrame([Point(0, 0), Point(0, 3), (2, 3)])
- Point3D = namedtuple('Point3D', 'x y z')
+ Point3D = namedtuple("Point3D", "x y z")
pd.DataFrame([Point3D(0, 0, 0), Point3D(0, 3, 5), Point(2, 3)])
@@ -468,15 +473,18 @@ set to ``'index'`` in order to use the dict keys as row labels.
.. ipython:: python
- pd.DataFrame.from_dict(dict([('A', [1, 2, 3]), ('B', [4, 5, 6])]))
+ pd.DataFrame.from_dict(dict([("A", [1, 2, 3]), ("B", [4, 5, 6])]))
If you pass ``orient='index'``, the keys will be the row labels. In this
case, you can also pass the desired column names:
.. ipython:: python
- pd.DataFrame.from_dict(dict([('A', [1, 2, 3]), ('B', [4, 5, 6])]),
- orient='index', columns=['one', 'two', 'three'])
+ pd.DataFrame.from_dict(
+ dict([("A", [1, 2, 3]), ("B", [4, 5, 6])]),
+ orient="index",
+ columns=["one", "two", "three"],
+ )
.. _basics.dataframe.from_records:
@@ -490,7 +498,7 @@ dtype. For example:
.. ipython:: python
data
- pd.DataFrame.from_records(data, index='C')
+ pd.DataFrame.from_records(data, index="C")
.. _basics.dataframe.sel_add_del:
@@ -503,17 +511,17 @@ the analogous dict operations:
.. ipython:: python
- df['one']
- df['three'] = df['one'] * df['two']
- df['flag'] = df['one'] > 2
+ df["one"]
+ df["three"] = df["one"] * df["two"]
+ df["flag"] = df["one"] > 2
df
Columns can be deleted or popped like with a dict:
.. ipython:: python
- del df['two']
- three = df.pop('three')
+ del df["two"]
+ three = df.pop("three")
df
When inserting a scalar value, it will naturally be propagated to fill the
@@ -521,7 +529,7 @@ column:
.. ipython:: python
- df['foo'] = 'bar'
+ df["foo"] = "bar"
df
When inserting a Series that does not have the same index as the DataFrame, it
@@ -529,7 +537,7 @@ will be conformed to the DataFrame's index:
.. ipython:: python
- df['one_trunc'] = df['one'][:2]
+ df["one_trunc"] = df["one"][:2]
df
You can insert raw ndarrays but their length must match the length of the
@@ -540,7 +548,7 @@ available to insert at a particular location in the columns:
.. ipython:: python
- df.insert(1, 'bar', df['one'])
+ df.insert(1, "bar", df["one"])
df
.. _dsintro.chained_assignment:
@@ -556,17 +564,16 @@ derived from existing columns.
.. ipython:: python
- iris = pd.read_csv('data/iris.data')
+ iris = pd.read_csv("data/iris.data")
iris.head()
- (iris.assign(sepal_ratio=iris['SepalWidth'] / iris['SepalLength'])
- .head())
+ iris.assign(sepal_ratio=iris["SepalWidth"] / iris["SepalLength"]).head()
In the example above, we inserted a precomputed value. We can also pass in
a function of one argument to be evaluated on the DataFrame being assigned to.
.. ipython:: python
- iris.assign(sepal_ratio=lambda x: (x['SepalWidth'] / x['SepalLength'])).head()
+ iris.assign(sepal_ratio=lambda x: (x["SepalWidth"] / x["SepalLength"])).head()
``assign`` **always** returns a copy of the data, leaving the original
DataFrame untouched.
@@ -580,10 +587,14 @@ greater than 5, calculate the ratio, and plot:
.. ipython:: python
@savefig basics_assign.png
- (iris.query('SepalLength > 5')
- .assign(SepalRatio=lambda x: x.SepalWidth / x.SepalLength,
- PetalRatio=lambda x: x.PetalWidth / x.PetalLength)
- .plot(kind='scatter', x='SepalRatio', y='PetalRatio'))
+ (
+ iris.query("SepalLength > 5")
+ .assign(
+ SepalRatio=lambda x: x.SepalWidth / x.SepalLength,
+ PetalRatio=lambda x: x.PetalWidth / x.PetalLength,
+ )
+ .plot(kind="scatter", x="SepalRatio", y="PetalRatio")
+ )
Since a function is passed in, the function is computed on the DataFrame
being assigned to. Importantly, this is the DataFrame that's been filtered
@@ -603,10 +614,8 @@ to a column created earlier in the same :meth:`~DataFrame.assign`.
.. ipython:: python
- dfa = pd.DataFrame({"A": [1, 2, 3],
- "B": [4, 5, 6]})
- dfa.assign(C=lambda x: x['A'] + x['B'],
- D=lambda x: x['A'] + x['C'])
+ dfa = pd.DataFrame({"A": [1, 2, 3], "B": [4, 5, 6]})
+ dfa.assign(C=lambda x: x["A"] + x["B"], D=lambda x: x["A"] + x["C"])
In the second expression, ``x['C']`` will refer to the newly created column,
that's equal to ``dfa['A'] + dfa['B']``.
@@ -631,7 +640,7 @@ DataFrame:
.. ipython:: python
- df.loc['b']
+ df.loc["b"]
df.iloc[2]
For a more exhaustive treatment of sophisticated label-based indexing and
@@ -650,8 +659,8 @@ union of the column and row labels.
.. ipython:: python
- df = pd.DataFrame(np.random.randn(10, 4), columns=['A', 'B', 'C', 'D'])
- df2 = pd.DataFrame(np.random.randn(7, 3), columns=['A', 'B', 'C'])
+ df = pd.DataFrame(np.random.randn(10, 4), columns=["A", "B", "C", "D"])
+ df2 = pd.DataFrame(np.random.randn(7, 3), columns=["A", "B", "C"])
df + df2
When doing an operation between DataFrame and Series, the default behavior is
@@ -680,8 +689,8 @@ Boolean operators work as well:
.. ipython:: python
- df1 = pd.DataFrame({'a': [1, 0, 1], 'b': [0, 1, 1]}, dtype=bool)
- df2 = pd.DataFrame({'a': [0, 1, 1], 'b': [1, 1, 0]}, dtype=bool)
+ df1 = pd.DataFrame({"a": [1, 0, 1], "b": [0, 1, 1]}, dtype=bool)
+ df2 = pd.DataFrame({"a": [0, 1, 1], "b": [1, 1, 0]}, dtype=bool)
df1 & df2
df1 | df2
df1 ^ df2
@@ -737,8 +746,8 @@ on two :class:`Series` with differently ordered labels will align before the ope
.. ipython:: python
- ser1 = pd.Series([1, 2, 3], index=['a', 'b', 'c'])
- ser2 = pd.Series([1, 3, 5], index=['b', 'a', 'c'])
+ ser1 = pd.Series([1, 2, 3], index=["a", "b", "c"])
+ ser2 = pd.Series([1, 3, 5], index=["b", "a", "c"])
ser1
ser2
np.remainder(ser1, ser2)
@@ -748,7 +757,7 @@ with missing values.
.. ipython:: python
- ser3 = pd.Series([2, 4, 6], index=['b', 'c', 'd'])
+ ser3 = pd.Series([2, 4, 6], index=["b", "c", "d"])
ser3
np.remainder(ser1, ser3)
@@ -778,11 +787,11 @@ R package):
:suppress:
# force a summary to be printed
- pd.set_option('display.max_rows', 5)
+ pd.set_option("display.max_rows", 5)
.. ipython:: python
- baseball = pd.read_csv('data/baseball.csv')
+ baseball = pd.read_csv("data/baseball.csv")
print(baseball)
baseball.info()
@@ -791,7 +800,7 @@ R package):
:okwarning:
# restore GlobalPrintConfig
- pd.reset_option(r'^display\.')
+ pd.reset_option(r"^display\.")
However, using ``to_string`` will return a string representation of the
DataFrame in tabular form, though it won't always fit the console width:
@@ -812,7 +821,7 @@ option:
.. ipython:: python
- pd.set_option('display.width', 40) # default is 80
+ pd.set_option("display.width", 40) # default is 80
pd.DataFrame(np.random.randn(3, 12))
@@ -820,21 +829,25 @@ You can adjust the max width of the individual columns by setting ``display.max_
.. ipython:: python
- datafile = {'filename': ['filename_01', 'filename_02'],
- 'path': ["media/user_name/storage/folder_01/filename_01",
- "media/user_name/storage/folder_02/filename_02"]}
+ datafile = {
+ "filename": ["filename_01", "filename_02"],
+ "path": [
+ "media/user_name/storage/folder_01/filename_01",
+ "media/user_name/storage/folder_02/filename_02",
+ ],
+ }
- pd.set_option('display.max_colwidth', 30)
+ pd.set_option("display.max_colwidth", 30)
pd.DataFrame(datafile)
- pd.set_option('display.max_colwidth', 100)
+ pd.set_option("display.max_colwidth", 100)
pd.DataFrame(datafile)
.. ipython:: python
:suppress:
- pd.reset_option('display.width')
- pd.reset_option('display.max_colwidth')
+ pd.reset_option("display.width")
+ pd.reset_option("display.max_colwidth")
You can also disable this feature via the ``expand_frame_repr`` option.
This will print the table in one block.
@@ -847,8 +860,7 @@ accessed like an attribute:
.. ipython:: python
- df = pd.DataFrame({'foo1': np.random.randn(5),
- 'foo2': np.random.randn(5)})
+ df = pd.DataFrame({"foo1": np.random.randn(5), "foo2": np.random.randn(5)})
df
df.foo1
diff --git a/doc/source/user_guide/duplicates.rst b/doc/source/user_guide/duplicates.rst
index b65822fab2b23..2993ca7799510 100644
--- a/doc/source/user_guide/duplicates.rst
+++ b/doc/source/user_guide/duplicates.rst
@@ -29,8 +29,8 @@ duplicates present. The output can't be determined, and so pandas raises.
.. ipython:: python
:okexcept:
- s1 = pd.Series([0, 1, 2], index=['a', 'b', 'b'])
- s1.reindex(['a', 'b', 'c'])
+ s1 = pd.Series([0, 1, 2], index=["a", "b", "b"])
+ s1.reindex(["a", "b", "c"])
Other methods, like indexing, can give very surprising results. Typically
indexing with a scalar will *reduce dimensionality*. Slicing a ``DataFrame``
@@ -39,30 +39,30 @@ return a scalar. But with duplicates, this isn't the case.
.. ipython:: python
- df1 = pd.DataFrame([[0, 1, 2], [3, 4, 5]], columns=['A', 'A', 'B'])
+ df1 = pd.DataFrame([[0, 1, 2], [3, 4, 5]], columns=["A", "A", "B"])
df1
We have duplicates in the columns. If we slice ``'B'``, we get back a ``Series``
.. ipython:: python
- df1['B'] # a series
+ df1["B"] # a series
But slicing ``'A'`` returns a ``DataFrame``
.. ipython:: python
- df1['A'] # a DataFrame
+ df1["A"] # a DataFrame
This applies to row labels as well
.. ipython:: python
- df2 = pd.DataFrame({"A": [0, 1, 2]}, index=['a', 'a', 'b'])
+ df2 = pd.DataFrame({"A": [0, 1, 2]}, index=["a", "a", "b"])
df2
- df2.loc['b', 'A'] # a scalar
- df2.loc['a', 'A'] # a Series
+ df2.loc["b", "A"] # a scalar
+ df2.loc["a", "A"] # a Series
Duplicate Label Detection
~~~~~~~~~~~~~~~~~~~~~~~~~
@@ -121,29 +121,24 @@ will be raised.
.. ipython:: python
:okexcept:
- pd.Series(
- [0, 1, 2],
- index=['a', 'b', 'b']
- ).set_flags(allows_duplicate_labels=False)
+ pd.Series([0, 1, 2], index=["a", "b", "b"]).set_flags(allows_duplicate_labels=False)
This applies to both row and column labels for a :class:`DataFrame`
.. ipython:: python
:okexcept:
- pd.DataFrame(
- [[0, 1, 2], [3, 4, 5]], columns=["A", "B", "C"],
- ).set_flags(allows_duplicate_labels=False)
+ pd.DataFrame([[0, 1, 2], [3, 4, 5]], columns=["A", "B", "C"],).set_flags(
+ allows_duplicate_labels=False
+ )
This attribute can be checked or set with :attr:`~DataFrame.flags.allows_duplicate_labels`,
which indicates whether that object can have duplicate labels.
.. ipython:: python
- df = (
- pd.DataFrame({"A": [0, 1, 2, 3]},
- index=['x', 'y', 'X', 'Y'])
- .set_flags(allows_duplicate_labels=False)
+ df = pd.DataFrame({"A": [0, 1, 2, 3]}, index=["x", "y", "X", "Y"]).set_flags(
+ allows_duplicate_labels=False
)
df
df.flags.allows_duplicate_labels
@@ -198,7 +193,7 @@ operations.
.. ipython:: python
:okexcept:
- s1 = pd.Series(0, index=['a', 'b']).set_flags(allows_duplicate_labels=False)
+ s1 = pd.Series(0, index=["a", "b"]).set_flags(allows_duplicate_labels=False)
s1
s1.head().rename({"a": "b"})
diff --git a/doc/source/user_guide/gotchas.rst b/doc/source/user_guide/gotchas.rst
index a96c70405d859..07c856c96426d 100644
--- a/doc/source/user_guide/gotchas.rst
+++ b/doc/source/user_guide/gotchas.rst
@@ -21,12 +21,19 @@ when calling :meth:`~DataFrame.info`:
.. ipython:: python
- dtypes = ['int64', 'float64', 'datetime64[ns]', 'timedelta64[ns]',
- 'complex128', 'object', 'bool']
+ dtypes = [
+ "int64",
+ "float64",
+ "datetime64[ns]",
+ "timedelta64[ns]",
+ "complex128",
+ "object",
+ "bool",
+ ]
n = 5000
data = {t: np.random.randint(100, size=n).astype(t) for t in dtypes}
df = pd.DataFrame(data)
- df['categorical'] = df['object'].astype('category')
+ df["categorical"] = df["object"].astype("category")
df.info()
@@ -40,7 +47,7 @@ as it can be expensive to do this deeper introspection.
.. ipython:: python
- df.info(memory_usage='deep')
+ df.info(memory_usage="deep")
By default the display option is set to ``True`` but can be explicitly
overridden by passing the ``memory_usage`` argument when invoking ``df.info()``.
@@ -155,7 +162,7 @@ index, not membership among the values.
.. ipython:: python
- s = pd.Series(range(5), index=list('abcde'))
+ s = pd.Series(range(5), index=list("abcde"))
2 in s
'b' in s
@@ -206,11 +213,11 @@ arrays. For example:
.. ipython:: python
- s = pd.Series([1, 2, 3, 4, 5], index=list('abcde'))
+ s = pd.Series([1, 2, 3, 4, 5], index=list("abcde"))
s
s.dtype
- s2 = s.reindex(['a', 'b', 'c', 'f', 'u'])
+ s2 = s.reindex(["a", "b", "c", "f", "u"])
s2
s2.dtype
@@ -227,12 +234,11 @@ the nullable-integer extension dtypes provided by pandas
.. ipython:: python
- s_int = pd.Series([1, 2, 3, 4, 5], index=list('abcde'),
- dtype=pd.Int64Dtype())
+ s_int = pd.Series([1, 2, 3, 4, 5], index=list("abcde"), dtype=pd.Int64Dtype())
s_int
s_int.dtype
- s2_int = s_int.reindex(['a', 'b', 'c', 'f', 'u'])
+ s2_int = s_int.reindex(["a", "b", "c", "f", "u"])
s2_int
s2_int.dtype
@@ -334,7 +340,7 @@ constructors using something similar to the following:
.. ipython:: python
- x = np.array(list(range(10)), '>i4') # big endian
+ x = np.array(list(range(10)), ">i4") # big endian
newx = x.byteswap().newbyteorder() # force native byteorder
s = pd.Series(newx)
diff --git a/doc/source/user_guide/groupby.rst b/doc/source/user_guide/groupby.rst
index 52342de98de79..9696f14f03b56 100644
--- a/doc/source/user_guide/groupby.rst
+++ b/doc/source/user_guide/groupby.rst
@@ -68,19 +68,23 @@ object (more on what the GroupBy object is later), you may do the following:
.. ipython:: python
- df = pd.DataFrame([('bird', 'Falconiformes', 389.0),
- ('bird', 'Psittaciformes', 24.0),
- ('mammal', 'Carnivora', 80.2),
- ('mammal', 'Primates', np.nan),
- ('mammal', 'Carnivora', 58)],
- index=['falcon', 'parrot', 'lion', 'monkey', 'leopard'],
- columns=('class', 'order', 'max_speed'))
+ df = pd.DataFrame(
+ [
+ ("bird", "Falconiformes", 389.0),
+ ("bird", "Psittaciformes", 24.0),
+ ("mammal", "Carnivora", 80.2),
+ ("mammal", "Primates", np.nan),
+ ("mammal", "Carnivora", 58),
+ ],
+ index=["falcon", "parrot", "lion", "monkey", "leopard"],
+ columns=("class", "order", "max_speed"),
+ )
df
# default is axis=0
- grouped = df.groupby('class')
- grouped = df.groupby('order', axis='columns')
- grouped = df.groupby(['class', 'order'])
+ grouped = df.groupby("class")
+ grouped = df.groupby("order", axis="columns")
+ grouped = df.groupby(["class", "order"])
The mapping can be specified many different ways:
@@ -103,12 +107,14 @@ consider the following ``DataFrame``:
.. ipython:: python
- df = pd.DataFrame({'A': ['foo', 'bar', 'foo', 'bar',
- 'foo', 'bar', 'foo', 'foo'],
- 'B': ['one', 'one', 'two', 'three',
- 'two', 'two', 'one', 'three'],
- 'C': np.random.randn(8),
- 'D': np.random.randn(8)})
+ df = pd.DataFrame(
+ {
+ "A": ["foo", "bar", "foo", "bar", "foo", "bar", "foo", "foo"],
+ "B": ["one", "one", "two", "three", "two", "two", "one", "three"],
+ "C": np.random.randn(8),
+ "D": np.random.randn(8),
+ }
+ )
df
On a DataFrame, we obtain a GroupBy object by calling :meth:`~DataFrame.groupby`.
@@ -116,8 +122,8 @@ We could naturally group by either the ``A`` or ``B`` columns, or both:
.. ipython:: python
- grouped = df.groupby('A')
- grouped = df.groupby(['A', 'B'])
+ grouped = df.groupby("A")
+ grouped = df.groupby(["A", "B"])
.. versionadded:: 0.24
@@ -126,8 +132,8 @@ but the specified columns
.. ipython:: python
- df2 = df.set_index(['A', 'B'])
- grouped = df2.groupby(level=df2.index.names.difference(['B']))
+ df2 = df.set_index(["A", "B"])
+ grouped = df2.groupby(level=df2.index.names.difference(["B"]))
grouped.sum()
These will split the DataFrame on its index (rows). We could also split by the
@@ -181,9 +187,9 @@ By default the group keys are sorted during the ``groupby`` operation. You may h
.. ipython:: python
- df2 = pd.DataFrame({'X': ['B', 'B', 'A', 'A'], 'Y': [1, 2, 3, 4]})
- df2.groupby(['X']).sum()
- df2.groupby(['X'], sort=False).sum()
+ df2 = pd.DataFrame({"X": ["B", "B", "A", "A"], "Y": [1, 2, 3, 4]})
+ df2.groupby(["X"]).sum()
+ df2.groupby(["X"], sort=False).sum()
Note that ``groupby`` will preserve the order in which *observations* are sorted *within* each group.
@@ -191,10 +197,10 @@ For example, the groups created by ``groupby()`` below are in the order they app
.. ipython:: python
- df3 = pd.DataFrame({'X': ['A', 'B', 'A', 'B'], 'Y': [1, 4, 3, 2]})
- df3.groupby(['X']).get_group('A')
+ df3 = pd.DataFrame({"X": ["A", "B", "A", "B"], "Y": [1, 4, 3, 2]})
+ df3.groupby(["X"]).get_group("A")
- df3.groupby(['X']).get_group('B')
+ df3.groupby(["X"]).get_group("B")
.. _groupby.dropna:
@@ -236,7 +242,7 @@ above example we have:
.. ipython:: python
- df.groupby('A').groups
+ df.groupby("A").groups
df.groupby(get_letter_type, axis=1).groups
Calling the standard Python ``len`` function on the GroupBy object just returns
@@ -244,7 +250,7 @@ the length of the ``groups`` dict, so it is largely just a convenience:
.. ipython:: python
- grouped = df.groupby(['A', 'B'])
+ grouped = df.groupby(["A", "B"])
grouped.groups
len(grouped)
@@ -259,15 +265,14 @@ the length of the ``groups`` dict, so it is largely just a convenience:
n = 10
weight = np.random.normal(166, 20, size=n)
height = np.random.normal(60, 10, size=n)
- time = pd.date_range('1/1/2000', periods=n)
- gender = np.random.choice(['male', 'female'], size=n)
- df = pd.DataFrame({'height': height, 'weight': weight,
- 'gender': gender}, index=time)
+ time = pd.date_range("1/1/2000", periods=n)
+ gender = np.random.choice(["male", "female"], size=n)
+ df = pd.DataFrame({"height": height, "weight": weight, "gender": gender}, index=time)
.. ipython:: python
df
- gb = df.groupby('gender')
+ gb = df.groupby("gender")
.. ipython::
@@ -291,9 +296,11 @@ Let's create a Series with a two-level ``MultiIndex``.
.. ipython:: python
- arrays = [['bar', 'bar', 'baz', 'baz', 'foo', 'foo', 'qux', 'qux'],
- ['one', 'two', 'one', 'two', 'one', 'two', 'one', 'two']]
- index = pd.MultiIndex.from_arrays(arrays, names=['first', 'second'])
+ arrays = [
+ ["bar", "bar", "baz", "baz", "foo", "foo", "qux", "qux"],
+ ["one", "two", "one", "two", "one", "two", "one", "two"],
+ ]
+ index = pd.MultiIndex.from_arrays(arrays, names=["first", "second"])
s = pd.Series(np.random.randn(8), index=index)
s
@@ -309,7 +316,7 @@ number:
.. ipython:: python
- s.groupby(level='second').sum()
+ s.groupby(level="second").sum()
The aggregation functions such as ``sum`` will take the level parameter
directly. Additionally, the resulting index will be named according to the
@@ -317,30 +324,32 @@ chosen level:
.. ipython:: python
- s.sum(level='second')
+ s.sum(level="second")
Grouping with multiple levels is supported.
.. ipython:: python
:suppress:
- arrays = [['bar', 'bar', 'baz', 'baz', 'foo', 'foo', 'qux', 'qux'],
- ['doo', 'doo', 'bee', 'bee', 'bop', 'bop', 'bop', 'bop'],
- ['one', 'two', 'one', 'two', 'one', 'two', 'one', 'two']]
+ arrays = [
+ ["bar", "bar", "baz", "baz", "foo", "foo", "qux", "qux"],
+ ["doo", "doo", "bee", "bee", "bop", "bop", "bop", "bop"],
+ ["one", "two", "one", "two", "one", "two", "one", "two"],
+ ]
tuples = list(zip(*arrays))
- index = pd.MultiIndex.from_tuples(tuples, names=['first', 'second', 'third'])
+ index = pd.MultiIndex.from_tuples(tuples, names=["first", "second", "third"])
s = pd.Series(np.random.randn(8), index=index)
.. ipython:: python
s
- s.groupby(level=['first', 'second']).sum()
+ s.groupby(level=["first", "second"]).sum()
Index level names may be supplied as keys.
.. ipython:: python
- s.groupby(['first', 'second']).sum()
+ s.groupby(["first", "second"]).sum()
More on the ``sum`` function and aggregation later.
@@ -352,14 +361,14 @@ objects.
.. ipython:: python
- arrays = [['bar', 'bar', 'baz', 'baz', 'foo', 'foo', 'qux', 'qux'],
- ['one', 'two', 'one', 'two', 'one', 'two', 'one', 'two']]
+ arrays = [
+ ["bar", "bar", "baz", "baz", "foo", "foo", "qux", "qux"],
+ ["one", "two", "one", "two", "one", "two", "one", "two"],
+ ]
- index = pd.MultiIndex.from_arrays(arrays, names=['first', 'second'])
+ index = pd.MultiIndex.from_arrays(arrays, names=["first", "second"])
- df = pd.DataFrame({'A': [1, 1, 1, 1, 2, 2, 3, 3],
- 'B': np.arange(8)},
- index=index)
+ df = pd.DataFrame({"A": [1, 1, 1, 1, 2, 2, 3, 3], "B": np.arange(8)}, index=index)
df
@@ -368,19 +377,19 @@ the ``A`` column.
.. ipython:: python
- df.groupby([pd.Grouper(level=1), 'A']).sum()
+ df.groupby([pd.Grouper(level=1), "A"]).sum()
Index levels may also be specified by name.
.. ipython:: python
- df.groupby([pd.Grouper(level='second'), 'A']).sum()
+ df.groupby([pd.Grouper(level="second"), "A"]).sum()
Index level names may be specified as keys directly to ``groupby``.
.. ipython:: python
- df.groupby(['second', 'A']).sum()
+ df.groupby(["second", "A"]).sum()
DataFrame column selection in GroupBy
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
@@ -392,24 +401,26 @@ getting a column from a DataFrame, you can do:
.. ipython:: python
:suppress:
- df = pd.DataFrame({'A': ['foo', 'bar', 'foo', 'bar',
- 'foo', 'bar', 'foo', 'foo'],
- 'B': ['one', 'one', 'two', 'three',
- 'two', 'two', 'one', 'three'],
- 'C': np.random.randn(8),
- 'D': np.random.randn(8)})
+ df = pd.DataFrame(
+ {
+ "A": ["foo", "bar", "foo", "bar", "foo", "bar", "foo", "foo"],
+ "B": ["one", "one", "two", "three", "two", "two", "one", "three"],
+ "C": np.random.randn(8),
+ "D": np.random.randn(8),
+ }
+ )
.. ipython:: python
- grouped = df.groupby(['A'])
- grouped_C = grouped['C']
- grouped_D = grouped['D']
+ grouped = df.groupby(["A"])
+ grouped_C = grouped["C"]
+ grouped_D = grouped["D"]
This is mainly syntactic sugar for the alternative and much more verbose:
.. ipython:: python
- df['C'].groupby(df['A'])
+ df["C"].groupby(df["A"])
Additionally this method avoids recomputing the internal grouping information
derived from the passed key.
@@ -450,13 +461,13 @@ A single group can be selected using
.. ipython:: python
- grouped.get_group('bar')
+ grouped.get_group("bar")
Or for an object grouped on multiple columns:
.. ipython:: python
- df.groupby(['A', 'B']).get_group(('bar', 'one'))
+ df.groupby(["A", "B"]).get_group(("bar", "one"))
.. _groupby.aggregate:
@@ -474,10 +485,10 @@ An obvious one is aggregation via the
.. ipython:: python
- grouped = df.groupby('A')
+ grouped = df.groupby("A")
grouped.aggregate(np.sum)
- grouped = df.groupby(['A', 'B'])
+ grouped = df.groupby(["A", "B"])
grouped.aggregate(np.sum)
As you can see, the result of the aggregation will have the group names as the
@@ -487,17 +498,17 @@ changed by using the ``as_index`` option:
.. ipython:: python
- grouped = df.groupby(['A', 'B'], as_index=False)
+ grouped = df.groupby(["A", "B"], as_index=False)
grouped.aggregate(np.sum)
- df.groupby('A', as_index=False).sum()
+ df.groupby("A", as_index=False).sum()
Note that you could use the ``reset_index`` DataFrame function to achieve the
same result as the column names are stored in the resulting ``MultiIndex``:
.. ipython:: python
- df.groupby(['A', 'B']).sum().reset_index()
+ df.groupby(["A", "B"]).sum().reset_index()
Another simple aggregation example is to compute the size of each group.
This is included in GroupBy as the ``size`` method. It returns a Series whose
@@ -559,8 +570,8 @@ aggregation with, outputting a DataFrame:
.. ipython:: python
- grouped = df.groupby('A')
- grouped['C'].agg([np.sum, np.mean, np.std])
+ grouped = df.groupby("A")
+ grouped["C"].agg([np.sum, np.mean, np.std])
On a grouped ``DataFrame``, you can pass a list of functions to apply to each
column, which produces an aggregated result with a hierarchical index:
@@ -575,19 +586,21 @@ need to rename, then you can add in a chained operation for a ``Series`` like th
.. ipython:: python
- (grouped['C'].agg([np.sum, np.mean, np.std])
- .rename(columns={'sum': 'foo',
- 'mean': 'bar',
- 'std': 'baz'}))
+ (
+ grouped["C"]
+ .agg([np.sum, np.mean, np.std])
+ .rename(columns={"sum": "foo", "mean": "bar", "std": "baz"})
+ )
For a grouped ``DataFrame``, you can rename in a similar manner:
.. ipython:: python
- (grouped.agg([np.sum, np.mean, np.std])
- .rename(columns={'sum': 'foo',
- 'mean': 'bar',
- 'std': 'baz'}))
+ (
+ grouped.agg([np.sum, np.mean, np.std]).rename(
+ columns={"sum": "foo", "mean": "bar", "std": "baz"}
+ )
+ )
.. note::
@@ -598,7 +611,7 @@ For a grouped ``DataFrame``, you can rename in a similar manner:
.. ipython:: python
:okexcept:
- grouped['C'].agg(['sum', 'sum'])
+ grouped["C"].agg(["sum", "sum"])
Pandas *does* allow you to provide multiple lambdas. In this case, pandas
@@ -607,8 +620,7 @@ For a grouped ``DataFrame``, you can rename in a similar manner:
.. ipython:: python
- grouped['C'].agg([lambda x: x.max() - x.min(),
- lambda x: x.median() - x.mean()])
+ grouped["C"].agg([lambda x: x.max() - x.min(), lambda x: x.median() - x.mean()])
@@ -631,15 +643,19 @@ accepts the special syntax in :meth:`GroupBy.agg`, known as "named aggregation",
.. ipython:: python
- animals = pd.DataFrame({'kind': ['cat', 'dog', 'cat', 'dog'],
- 'height': [9.1, 6.0, 9.5, 34.0],
- 'weight': [7.9, 7.5, 9.9, 198.0]})
+ animals = pd.DataFrame(
+ {
+ "kind": ["cat", "dog", "cat", "dog"],
+ "height": [9.1, 6.0, 9.5, 34.0],
+ "weight": [7.9, 7.5, 9.9, 198.0],
+ }
+ )
animals
animals.groupby("kind").agg(
- min_height=pd.NamedAgg(column='height', aggfunc='min'),
- max_height=pd.NamedAgg(column='height', aggfunc='max'),
- average_weight=pd.NamedAgg(column='weight', aggfunc=np.mean),
+ min_height=pd.NamedAgg(column="height", aggfunc="min"),
+ max_height=pd.NamedAgg(column="height", aggfunc="max"),
+ average_weight=pd.NamedAgg(column="weight", aggfunc=np.mean),
)
@@ -648,9 +664,9 @@ accepts the special syntax in :meth:`GroupBy.agg`, known as "named aggregation",
.. ipython:: python
animals.groupby("kind").agg(
- min_height=('height', 'min'),
- max_height=('height', 'max'),
- average_weight=('weight', np.mean),
+ min_height=("height", "min"),
+ max_height=("height", "max"),
+ average_weight=("weight", np.mean),
)
@@ -659,9 +675,11 @@ and unpack the keyword arguments
.. ipython:: python
- animals.groupby("kind").agg(**{
- 'total weight': pd.NamedAgg(column='weight', aggfunc=sum),
- })
+ animals.groupby("kind").agg(
+ **{
+ "total weight": pd.NamedAgg(column="weight", aggfunc=sum),
+ }
+ )
Additional keyword arguments are not passed through to the aggregation functions. Only pairs
of ``(column, aggfunc)`` should be passed as ``**kwargs``. If your aggregation functions
@@ -680,8 +698,8 @@ no column selection, so the values are just the functions.
.. ipython:: python
animals.groupby("kind").height.agg(
- min_height='min',
- max_height='max',
+ min_height="min",
+ max_height="max",
)
Applying different functions to DataFrame columns
@@ -692,8 +710,7 @@ columns of a DataFrame:
.. ipython:: python
- grouped.agg({'C': np.sum,
- 'D': lambda x: np.std(x, ddof=1)})
+ grouped.agg({"C": np.sum, "D": lambda x: np.std(x, ddof=1)})
The function names can also be strings. In order for a string to be valid it
must be either implemented on GroupBy or available via :ref:`dispatching
@@ -701,7 +718,7 @@ must be either implemented on GroupBy or available via :ref:`dispatching
.. ipython:: python
- grouped.agg({'C': 'sum', 'D': 'std'})
+ grouped.agg({"C": "sum", "D": "std"})
.. _groupby.aggregate.cython:
@@ -713,8 +730,8 @@ optimized Cython implementations:
.. ipython:: python
- df.groupby('A').sum()
- df.groupby(['A', 'B']).mean()
+ df.groupby("A").sum()
+ df.groupby(["A", "B"]).mean()
Of course ``sum`` and ``mean`` are implemented on pandas objects, so the above
code would work even without the special versions via dispatching (see below).
@@ -743,15 +760,14 @@ For example, suppose we wished to standardize the data within each group:
.. ipython:: python
- index = pd.date_range('10/1/1999', periods=1100)
+ index = pd.date_range("10/1/1999", periods=1100)
ts = pd.Series(np.random.normal(0.5, 2, 1100), index)
ts = ts.rolling(window=100, min_periods=100).mean().dropna()
ts.head()
ts.tail()
- transformed = (ts.groupby(lambda x: x.year)
- .transform(lambda x: (x - x.mean()) / x.std()))
+ transformed = ts.groupby(lambda x: x.year).transform(lambda x: (x - x.mean()) / x.std())
We would expect the result to now have mean 0 and standard deviation 1 within
each group, which we can easily check:
@@ -772,7 +788,7 @@ We can also visually compare the original and transformed data sets.
.. ipython:: python
- compare = pd.DataFrame({'Original': ts, 'Transformed': transformed})
+ compare = pd.DataFrame({"Original": ts, "Transformed": transformed})
@savefig groupby_transform_plot.png
compare.plot()
@@ -788,8 +804,8 @@ Alternatively, the built-in methods could be used to produce the same outputs.
.. ipython:: python
- max = ts.groupby(lambda x: x.year).transform('max')
- min = ts.groupby(lambda x: x.year).transform('min')
+ max = ts.groupby(lambda x: x.year).transform("max")
+ min = ts.groupby(lambda x: x.year).transform("min")
max - min
@@ -798,7 +814,7 @@ Another common data transform is to replace missing data with the group mean.
.. ipython:: python
:suppress:
- cols = ['A', 'B', 'C']
+ cols = ["A", "B", "C"]
values = np.random.randn(1000, 3)
values[np.random.randint(0, 1000, 100), 0] = np.nan
values[np.random.randint(0, 1000, 50), 1] = np.nan
@@ -809,7 +825,7 @@ Another common data transform is to replace missing data with the group mean.
data_df
- countries = np.array(['US', 'UK', 'GR', 'JP'])
+ countries = np.array(["US", "UK", "GR", "JP"])
key = countries[np.random.randint(0, 4, 1000)]
grouped = data_df.groupby(key)
@@ -859,11 +875,10 @@ the column B based on the groups of column A.
.. ipython:: python
- df_re = pd.DataFrame({'A': [1] * 10 + [5] * 10,
- 'B': np.arange(20)})
+ df_re = pd.DataFrame({"A": [1] * 10 + [5] * 10, "B": np.arange(20)})
df_re
- df_re.groupby('A').rolling(4).B.mean()
+ df_re.groupby("A").rolling(4).B.mean()
The ``expanding()`` method will accumulate a given operation
@@ -872,7 +887,7 @@ group.
.. ipython:: python
- df_re.groupby('A').expanding().sum()
+ df_re.groupby("A").expanding().sum()
Suppose you want to use the ``resample()`` method to get a daily
@@ -881,13 +896,16 @@ missing values with the ``ffill()`` method.
.. ipython:: python
- df_re = pd.DataFrame({'date': pd.date_range(start='2016-01-01', periods=4,
- freq='W'),
- 'group': [1, 1, 2, 2],
- 'val': [5, 6, 7, 8]}).set_index('date')
+ df_re = pd.DataFrame(
+ {
+ "date": pd.date_range(start="2016-01-01", periods=4, freq="W"),
+ "group": [1, 1, 2, 2],
+ "val": [5, 6, 7, 8],
+ }
+ ).set_index("date")
df_re
- df_re.groupby('group').resample('1D').ffill()
+ df_re.groupby("group").resample("1D").ffill()
.. _groupby.filter:
@@ -911,8 +929,8 @@ with only a couple members.
.. ipython:: python
- dff = pd.DataFrame({'A': np.arange(8), 'B': list('aabbbbcc')})
- dff.groupby('B').filter(lambda x: len(x) > 2)
+ dff = pd.DataFrame({"A": np.arange(8), "B": list("aabbbbcc")})
+ dff.groupby("B").filter(lambda x: len(x) > 2)
Alternatively, instead of dropping the offending groups, we can return a
like-indexed objects where the groups that do not pass the filter are filled
@@ -920,14 +938,14 @@ with NaNs.
.. ipython:: python
- dff.groupby('B').filter(lambda x: len(x) > 2, dropna=False)
+ dff.groupby("B").filter(lambda x: len(x) > 2, dropna=False)
For DataFrames with multiple columns, filters should explicitly specify a column as the filter criterion.
.. ipython:: python
- dff['C'] = np.arange(8)
- dff.groupby('B').filter(lambda x: len(x['C']) > 2)
+ dff["C"] = np.arange(8)
+ dff.groupby("B").filter(lambda x: len(x["C"]) > 2)
.. note::
@@ -939,7 +957,7 @@ For DataFrames with multiple columns, filters should explicitly specify a column
.. ipython:: python
- dff.groupby('B').head(2)
+ dff.groupby("B").head(2)
.. _groupby.dispatch:
@@ -953,7 +971,7 @@ functions:
.. ipython:: python
- grouped = df.groupby('A')
+ grouped = df.groupby("A")
grouped.agg(lambda x: x.std())
But, it's rather verbose and can be untidy if you need to pass additional
@@ -973,12 +991,14 @@ next). This enables some operations to be carried out rather succinctly:
.. ipython:: python
- tsdf = pd.DataFrame(np.random.randn(1000, 3),
- index=pd.date_range('1/1/2000', periods=1000),
- columns=['A', 'B', 'C'])
+ tsdf = pd.DataFrame(
+ np.random.randn(1000, 3),
+ index=pd.date_range("1/1/2000", periods=1000),
+ columns=["A", "B", "C"],
+ )
tsdf.iloc[::2] = np.nan
grouped = tsdf.groupby(lambda x: x.year)
- grouped.fillna(method='pad')
+ grouped.fillna(method="pad")
In this example, we chopped the collection of time series into yearly chunks
then independently called :ref:`fillna <missing_data.fillna>` on the
@@ -989,7 +1009,7 @@ The ``nlargest`` and ``nsmallest`` methods work on ``Series`` style groupbys:
.. ipython:: python
s = pd.Series([9, 8, 7, 5, 19, 1, 4.2, 3.3])
- g = pd.Series(list('abababab'))
+ g = pd.Series(list("abababab"))
gb = s.groupby(g)
gb.nlargest(3)
gb.nsmallest(3)
@@ -1008,10 +1028,10 @@ for both ``aggregate`` and ``transform`` in many standard use cases. However,
.. ipython:: python
df
- grouped = df.groupby('A')
+ grouped = df.groupby("A")
# could also just call .describe()
- grouped['C'].apply(lambda x: x.describe())
+ grouped["C"].apply(lambda x: x.describe())
The dimension of the returned result can also change:
@@ -1032,7 +1052,8 @@ that is itself a series, and possibly upcast the result to a DataFrame:
.. ipython:: python
def f(x):
- return pd.Series([x, x ** 2], index=['x', 'x^2'])
+ return pd.Series([x, x ** 2], index=["x", "x^2"])
+
s = pd.Series(np.random.rand(5))
s
@@ -1133,7 +1154,7 @@ will be (silently) dropped. Thus, this does not pose any problems:
.. ipython:: python
- df.groupby('A').std()
+ df.groupby("A").std()
Note that ``df.groupby('A').colname.std().`` is more efficient than
``df.groupby('A').std().colname``, so if the result of an aggregation function
@@ -1151,23 +1172,29 @@ is only interesting over one column (here ``colname``), it may be filtered
.. ipython:: python
from decimal import Decimal
+
df_dec = pd.DataFrame(
- {'id': [1, 2, 1, 2],
- 'int_column': [1, 2, 3, 4],
- 'dec_column': [Decimal('0.50'), Decimal('0.15'),
- Decimal('0.25'), Decimal('0.40')]
- }
+ {
+ "id": [1, 2, 1, 2],
+ "int_column": [1, 2, 3, 4],
+ "dec_column": [
+ Decimal("0.50"),
+ Decimal("0.15"),
+ Decimal("0.25"),
+ Decimal("0.40"),
+ ],
+ }
)
# Decimal columns can be sum'd explicitly by themselves...
- df_dec.groupby(['id'])[['dec_column']].sum()
+ df_dec.groupby(["id"])[["dec_column"]].sum()
# ...but cannot be combined with standard data types or they will be excluded
- df_dec.groupby(['id'])[['int_column', 'dec_column']].sum()
+ df_dec.groupby(["id"])[["int_column", "dec_column"]].sum()
# Use .agg function to aggregate over standard and "nuisance" data types
# at the same time
- df_dec.groupby(['id']).agg({'int_column': 'sum', 'dec_column': 'sum'})
+ df_dec.groupby(["id"]).agg({"int_column": "sum", "dec_column": "sum"})
.. _groupby.observed:
@@ -1182,25 +1209,27 @@ Show all values:
.. ipython:: python
- pd.Series([1, 1, 1]).groupby(pd.Categorical(['a', 'a', 'a'],
- categories=['a', 'b']),
- observed=False).count()
+ pd.Series([1, 1, 1]).groupby(
+ pd.Categorical(["a", "a", "a"], categories=["a", "b"]), observed=False
+ ).count()
Show only the observed values:
.. ipython:: python
- pd.Series([1, 1, 1]).groupby(pd.Categorical(['a', 'a', 'a'],
- categories=['a', 'b']),
- observed=True).count()
+ pd.Series([1, 1, 1]).groupby(
+ pd.Categorical(["a", "a", "a"], categories=["a", "b"]), observed=True
+ ).count()
The returned dtype of the grouped will *always* include *all* of the categories that were grouped.
.. ipython:: python
- s = pd.Series([1, 1, 1]).groupby(pd.Categorical(['a', 'a', 'a'],
- categories=['a', 'b']),
- observed=False).count()
+ s = (
+ pd.Series([1, 1, 1])
+ .groupby(pd.Categorical(["a", "a", "a"], categories=["a", "b"]), observed=False)
+ .count()
+ )
s.index.dtype
.. _groupby.missing:
@@ -1224,7 +1253,7 @@ can be used as group keys. If so, the order of the levels will be preserved:
data = pd.Series(np.random.randn(100))
- factor = pd.qcut(data, [0, .25, .5, .75, 1.])
+ factor = pd.qcut(data, [0, 0.25, 0.5, 0.75, 1.0])
data.groupby(factor).mean()
@@ -1240,19 +1269,23 @@ use the ``pd.Grouper`` to provide this local control.
import datetime
- df = pd.DataFrame({'Branch': 'A A A A A A A B'.split(),
- 'Buyer': 'Carl Mark Carl Carl Joe Joe Joe Carl'.split(),
- 'Quantity': [1, 3, 5, 1, 8, 1, 9, 3],
- 'Date': [
- datetime.datetime(2013, 1, 1, 13, 0),
- datetime.datetime(2013, 1, 1, 13, 5),
- datetime.datetime(2013, 10, 1, 20, 0),
- datetime.datetime(2013, 10, 2, 10, 0),
- datetime.datetime(2013, 10, 1, 20, 0),
- datetime.datetime(2013, 10, 2, 10, 0),
- datetime.datetime(2013, 12, 2, 12, 0),
- datetime.datetime(2013, 12, 2, 14, 0)]
- })
+ df = pd.DataFrame(
+ {
+ "Branch": "A A A A A A A B".split(),
+ "Buyer": "Carl Mark Carl Carl Joe Joe Joe Carl".split(),
+ "Quantity": [1, 3, 5, 1, 8, 1, 9, 3],
+ "Date": [
+ datetime.datetime(2013, 1, 1, 13, 0),
+ datetime.datetime(2013, 1, 1, 13, 5),
+ datetime.datetime(2013, 10, 1, 20, 0),
+ datetime.datetime(2013, 10, 2, 10, 0),
+ datetime.datetime(2013, 10, 1, 20, 0),
+ datetime.datetime(2013, 10, 2, 10, 0),
+ datetime.datetime(2013, 12, 2, 12, 0),
+ datetime.datetime(2013, 12, 2, 14, 0),
+ ],
+ }
+ )
df
@@ -1260,18 +1293,18 @@ Groupby a specific column with the desired frequency. This is like resampling.
.. ipython:: python
- df.groupby([pd.Grouper(freq='1M', key='Date'), 'Buyer']).sum()
+ df.groupby([pd.Grouper(freq="1M", key="Date"), "Buyer"]).sum()
You have an ambiguous specification in that you have a named index and a column
that could be potential groupers.
.. ipython:: python
- df = df.set_index('Date')
- df['Date'] = df.index + pd.offsets.MonthEnd(2)
- df.groupby([pd.Grouper(freq='6M', key='Date'), 'Buyer']).sum()
+ df = df.set_index("Date")
+ df["Date"] = df.index + pd.offsets.MonthEnd(2)
+ df.groupby([pd.Grouper(freq="6M", key="Date"), "Buyer"]).sum()
- df.groupby([pd.Grouper(freq='6M', level='Date'), 'Buyer']).sum()
+ df.groupby([pd.Grouper(freq="6M", level="Date"), "Buyer"]).sum()
Taking the first rows of each group
@@ -1281,10 +1314,10 @@ Just like for a DataFrame or Series you can call head and tail on a groupby:
.. ipython:: python
- df = pd.DataFrame([[1, 2], [1, 4], [5, 6]], columns=['A', 'B'])
+ df = pd.DataFrame([[1, 2], [1, 4], [5, 6]], columns=["A", "B"])
df
- g = df.groupby('A')
+ g = df.groupby("A")
g.head(1)
g.tail(1)
@@ -1302,8 +1335,8 @@ will return a single row (or no row) per group if you pass an int for n:
.. ipython:: python
- df = pd.DataFrame([[1, np.nan], [1, 4], [5, 6]], columns=['A', 'B'])
- g = df.groupby('A')
+ df = pd.DataFrame([[1, np.nan], [1, 4], [5, 6]], columns=["A", "B"])
+ g = df.groupby("A")
g.nth(0)
g.nth(-1)
@@ -1314,21 +1347,21 @@ If you want to select the nth not-null item, use the ``dropna`` kwarg. For a Dat
.. ipython:: python
# nth(0) is the same as g.first()
- g.nth(0, dropna='any')
+ g.nth(0, dropna="any")
g.first()
# nth(-1) is the same as g.last()
- g.nth(-1, dropna='any') # NaNs denote group exhausted when using dropna
+ g.nth(-1, dropna="any") # NaNs denote group exhausted when using dropna
g.last()
- g.B.nth(0, dropna='all')
+ g.B.nth(0, dropna="all")
As with other methods, passing ``as_index=False``, will achieve a filtration, which returns the grouped row.
.. ipython:: python
- df = pd.DataFrame([[1, np.nan], [1, 4], [5, 6]], columns=['A', 'B'])
- g = df.groupby('A', as_index=False)
+ df = pd.DataFrame([[1, np.nan], [1, 4], [5, 6]], columns=["A", "B"])
+ g = df.groupby("A", as_index=False)
g.nth(0)
g.nth(-1)
@@ -1337,8 +1370,8 @@ You can also select multiple rows from each group by specifying multiple nth val
.. ipython:: python
- business_dates = pd.date_range(start='4/1/2014', end='6/30/2014', freq='B')
- df = pd.DataFrame(1, index=business_dates, columns=['a', 'b'])
+ business_dates = pd.date_range(start="4/1/2014", end="6/30/2014", freq="B")
+ df = pd.DataFrame(1, index=business_dates, columns=["a", "b"])
# get the first, 4th, and last date index for each month
df.groupby([df.index.year, df.index.month]).nth([0, 3, -1])
@@ -1350,12 +1383,12 @@ To see the order in which each row appears within its group, use the
.. ipython:: python
- dfg = pd.DataFrame(list('aaabba'), columns=['A'])
+ dfg = pd.DataFrame(list("aaabba"), columns=["A"])
dfg
- dfg.groupby('A').cumcount()
+ dfg.groupby("A").cumcount()
- dfg.groupby('A').cumcount(ascending=False)
+ dfg.groupby("A").cumcount(ascending=False)
.. _groupby.ngroup:
@@ -1374,12 +1407,12 @@ order they are first observed.
.. ipython:: python
- dfg = pd.DataFrame(list('aaabba'), columns=['A'])
+ dfg = pd.DataFrame(list("aaabba"), columns=["A"])
dfg
- dfg.groupby('A').ngroup()
+ dfg.groupby("A").ngroup()
- dfg.groupby('A').ngroup(ascending=False)
+ dfg.groupby("A").ngroup(ascending=False)
Plotting
~~~~~~~~
@@ -1392,8 +1425,8 @@ the values in column 1 where the group is "B" are 3 higher on average.
np.random.seed(1234)
df = pd.DataFrame(np.random.randn(50, 2))
- df['g'] = np.random.choice(['A', 'B'], size=50)
- df.loc[df['g'] == 'B', 1] += 3
+ df["g"] = np.random.choice(["A", "B"], size=50)
+ df.loc[df["g"] == "B", 1] += 3
We can easily visualize this with a boxplot:
@@ -1401,7 +1434,7 @@ We can easily visualize this with a boxplot:
:okwarning:
@savefig groupby_boxplot.png
- df.groupby('g').boxplot()
+ df.groupby("g").boxplot()
The result of calling ``boxplot`` is a dictionary whose keys are the values
of our grouping column ``g`` ("A" and "B"). The values of the resulting dictionary
@@ -1436,20 +1469,26 @@ code more readable. First we set the data:
.. ipython:: python
n = 1000
- df = pd.DataFrame({'Store': np.random.choice(['Store_1', 'Store_2'], n),
- 'Product': np.random.choice(['Product_1',
- 'Product_2'], n),
- 'Revenue': (np.random.random(n) * 50 + 10).round(2),
- 'Quantity': np.random.randint(1, 10, size=n)})
+ df = pd.DataFrame(
+ {
+ "Store": np.random.choice(["Store_1", "Store_2"], n),
+ "Product": np.random.choice(["Product_1", "Product_2"], n),
+ "Revenue": (np.random.random(n) * 50 + 10).round(2),
+ "Quantity": np.random.randint(1, 10, size=n),
+ }
+ )
df.head(2)
Now, to find prices per store/product, we can simply do:
.. ipython:: python
- (df.groupby(['Store', 'Product'])
- .pipe(lambda grp: grp.Revenue.sum() / grp.Quantity.sum())
- .unstack().round(2))
+ (
+ df.groupby(["Store", "Product"])
+ .pipe(lambda grp: grp.Revenue.sum() / grp.Quantity.sum())
+ .unstack()
+ .round(2)
+ )
Piping can also be expressive when you want to deliver a grouped object to some
arbitrary function, for example:
@@ -1459,7 +1498,8 @@ arbitrary function, for example:
def mean(groupby):
return groupby.mean()
- df.groupby(['Store', 'Product']).pipe(mean)
+
+ df.groupby(["Store", "Product"]).pipe(mean)
where ``mean`` takes a GroupBy object and finds the mean of the Revenue and Quantity
columns respectively for each Store-Product combination. The ``mean`` function can
@@ -1476,8 +1516,7 @@ Regroup columns of a DataFrame according to their sum, and sum the aggregated on
.. ipython:: python
- df = pd.DataFrame({'a': [1, 0, 0], 'b': [0, 1, 0],
- 'c': [1, 0, 0], 'd': [2, 3, 4]})
+ df = pd.DataFrame({"a": [1, 0, 0], "b": [0, 1, 0], "c": [1, 0, 0], "d": [2, 3, 4]})
df
df.groupby(df.sum(), axis=1).sum()
@@ -1536,16 +1575,22 @@ column index name will be used as the name of the inserted column:
.. ipython:: python
- df = pd.DataFrame({'a': [0, 0, 0, 0, 1, 1, 1, 1, 2, 2, 2, 2],
- 'b': [0, 0, 1, 1, 0, 0, 1, 1, 0, 0, 1, 1],
- 'c': [1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0],
- 'd': [0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 1]})
+ df = pd.DataFrame(
+ {
+ "a": [0, 0, 0, 0, 1, 1, 1, 1, 2, 2, 2, 2],
+ "b": [0, 0, 1, 1, 0, 0, 1, 1, 0, 0, 1, 1],
+ "c": [1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0],
+ "d": [0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 1],
+ }
+ )
+
def compute_metrics(x):
- result = {'b_sum': x['b'].sum(), 'c_mean': x['c'].mean()}
- return pd.Series(result, name='metrics')
+ result = {"b_sum": x["b"].sum(), "c_mean": x["c"].mean()}
+ return pd.Series(result, name="metrics")
+
- result = df.groupby('a').apply(compute_metrics)
+ result = df.groupby("a").apply(compute_metrics)
result
diff --git a/doc/source/user_guide/io.rst b/doc/source/user_guide/io.rst
index e483cebf71614..184894bbafe28 100644
--- a/doc/source/user_guide/io.rst
+++ b/doc/source/user_guide/io.rst
@@ -3310,10 +3310,10 @@ applications (CTRL-V on many operating systems). Here we illustrate writing a
.. code-block:: python
- >>> df = pd.DataFrame({'A': [1, 2, 3],
- ... 'B': [4, 5, 6],
- ... 'C': ['p', 'q', 'r']},
- ... index=['x', 'y', 'z'])
+ >>> df = pd.DataFrame(
+ ... {"A": [1, 2, 3], "B": [4, 5, 6], "C": ["p", "q", "r"]}, index=["x", "y", "z"]
+ ... )
+
>>> df
A B C
x 1 4 p
@@ -3607,8 +3607,8 @@ This format is specified by default when using ``put`` or ``to_hdf`` or by ``for
.. code-block:: python
- >>> pd.DataFrame(np.random.randn(10, 2)).to_hdf('test_fixed.h5', 'df')
- >>> pd.read_hdf('test_fixed.h5', 'df', where='index>5')
+ >>> pd.DataFrame(np.random.randn(10, 2)).to_hdf("test_fixed.h5", "df")
+ >>> pd.read_hdf("test_fixed.h5", "df", where="index>5")
TypeError: cannot pass a where specification when reading a fixed format.
this store must be selected in its entirety
diff --git a/doc/source/user_guide/missing_data.rst b/doc/source/user_guide/missing_data.rst
index 9294897686d46..3c97cc7da6edb 100644
--- a/doc/source/user_guide/missing_data.rst
+++ b/doc/source/user_guide/missing_data.rst
@@ -38,12 +38,15 @@ arise and we wish to also consider that "missing" or "not available" or "NA".
.. ipython:: python
- df = pd.DataFrame(np.random.randn(5, 3), index=['a', 'c', 'e', 'f', 'h'],
- columns=['one', 'two', 'three'])
- df['four'] = 'bar'
- df['five'] = df['one'] > 0
+ df = pd.DataFrame(
+ np.random.randn(5, 3),
+ index=["a", "c", "e", "f", "h"],
+ columns=["one", "two", "three"],
+ )
+ df["four"] = "bar"
+ df["five"] = df["one"] > 0
df
- df2 = df.reindex(['a', 'b', 'c', 'd', 'e', 'f', 'g', 'h'])
+ df2 = df.reindex(["a", "b", "c", "d", "e", "f", "g", "h"])
df2
To make detecting missing values easier (and across different array dtypes),
@@ -53,9 +56,9 @@ Series and DataFrame objects:
.. ipython:: python
- df2['one']
- pd.isna(df2['one'])
- df2['four'].notna()
+ df2["one"]
+ pd.isna(df2["one"])
+ df2["four"].notna()
df2.isna()
.. warning::
@@ -65,14 +68,14 @@ Series and DataFrame objects:
.. ipython:: python
- None == None # noqa: E711
+ None == None # noqa: E711
np.nan == np.nan
So as compared to above, a scalar equality comparison versus a ``None/np.nan`` doesn't provide useful information.
.. ipython:: python
- df2['one'] == np.nan
+ df2["one"] == np.nan
Integer dtypes and missing data
-------------------------------
@@ -101,9 +104,9 @@ pandas objects provide compatibility between ``NaT`` and ``NaN``.
.. ipython:: python
df2 = df.copy()
- df2['timestamp'] = pd.Timestamp('20120101')
+ df2["timestamp"] = pd.Timestamp("20120101")
df2
- df2.loc[['a', 'c', 'h'], ['one', 'timestamp']] = np.nan
+ df2.loc[["a", "c", "h"], ["one", "timestamp"]] = np.nan
df2
df2.dtypes.value_counts()
@@ -146,9 +149,9 @@ objects.
.. ipython:: python
:suppress:
- df = df2.loc[:, ['one', 'two', 'three']]
- a = df2.loc[df2.index[:5], ['one', 'two']].fillna(method='pad')
- b = df2.loc[df2.index[:5], ['one', 'two', 'three']]
+ df = df2.loc[:, ["one", "two", "three"]]
+ a = df2.loc[df2.index[:5], ["one", "two"]].fillna(method="pad")
+ b = df2.loc[df2.index[:5], ["one", "two", "three"]]
.. ipython:: python
@@ -168,7 +171,7 @@ account for missing data. For example:
.. ipython:: python
df
- df['one'].sum()
+ df["one"].sum()
df.mean(1)
df.cumsum()
df.cumsum(skipna=False)
@@ -210,7 +213,7 @@ with R, for example:
.. ipython:: python
df
- df.groupby('one').mean()
+ df.groupby("one").mean()
See the groupby section :ref:`here <groupby.missing>` for more information.
@@ -234,7 +237,7 @@ of ways, which we illustrate:
df2
df2.fillna(0)
- df2['one'].fillna('missing')
+ df2["one"].fillna("missing")
**Fill gaps forward or backward**
@@ -244,7 +247,7 @@ can propagate non-NA values forward or backward:
.. ipython:: python
df
- df.fillna(method='pad')
+ df.fillna(method="pad")
.. _missing_data.fillna.limit:
@@ -261,7 +264,7 @@ we can use the ``limit`` keyword:
.. ipython:: python
df
- df.fillna(method='pad', limit=1)
+ df.fillna(method="pad", limit=1)
To remind you, these are the available filling methods:
@@ -289,21 +292,21 @@ use case of this is to fill a DataFrame with the mean of that column.
.. ipython:: python
- dff = pd.DataFrame(np.random.randn(10, 3), columns=list('ABC'))
+ dff = pd.DataFrame(np.random.randn(10, 3), columns=list("ABC"))
dff.iloc[3:5, 0] = np.nan
dff.iloc[4:6, 1] = np.nan
dff.iloc[5:8, 2] = np.nan
dff
dff.fillna(dff.mean())
- dff.fillna(dff.mean()['B':'C'])
+ dff.fillna(dff.mean()["B":"C"])
Same result as above, but is aligning the 'fill' value which is
a Series in this case.
.. ipython:: python
- dff.where(pd.notna(dff), dff.mean(), axis='columns')
+ dff.where(pd.notna(dff), dff.mean(), axis="columns")
.. _missing_data.dropna:
@@ -317,15 +320,15 @@ data. To do this, use :meth:`~DataFrame.dropna`:
.. ipython:: python
:suppress:
- df['two'] = df['two'].fillna(0)
- df['three'] = df['three'].fillna(0)
+ df["two"] = df["two"].fillna(0)
+ df["three"] = df["three"].fillna(0)
.. ipython:: python
df
df.dropna(axis=0)
df.dropna(axis=1)
- df['one'].dropna()
+ df["one"].dropna()
An equivalent :meth:`~Series.dropna` is available for Series.
DataFrame.dropna has considerably more options than Series.dropna, which can be
@@ -343,7 +346,7 @@ that, by default, performs linear interpolation at missing data points.
:suppress:
np.random.seed(123456)
- idx = pd.date_range('1/1/2000', periods=100, freq='BM')
+ idx = pd.date_range("1/1/2000", periods=100, freq="BM")
ts = pd.Series(np.random.randn(100), index=idx)
ts[1:5] = np.nan
ts[20:30] = np.nan
@@ -376,28 +379,29 @@ Index aware interpolation is available via the ``method`` keyword:
ts2
ts2.interpolate()
- ts2.interpolate(method='time')
+ ts2.interpolate(method="time")
For a floating-point index, use ``method='values'``:
.. ipython:: python
:suppress:
- idx = [0., 1., 10.]
- ser = pd.Series([0., np.nan, 10.], idx)
+ idx = [0.0, 1.0, 10.0]
+ ser = pd.Series([0.0, np.nan, 10.0], idx)
.. ipython:: python
ser
ser.interpolate()
- ser.interpolate(method='values')
+ ser.interpolate(method="values")
You can also interpolate with a DataFrame:
.. ipython:: python
- df = pd.DataFrame({'A': [1, 2.1, np.nan, 4.7, 5.6, 6.8],
- 'B': [.25, np.nan, np.nan, 4, 12.2, 14.4]})
+ df = pd.DataFrame(
+ {"A": [1, 2.1, np.nan, 4.7, 5.6, 6.8], "B": [0.25, np.nan, np.nan, 4, 12.2, 14.4]}
+ )
df
df.interpolate()
@@ -418,20 +422,20 @@ The appropriate interpolation method will depend on the type of data you are wor
.. ipython:: python
- df.interpolate(method='barycentric')
+ df.interpolate(method="barycentric")
- df.interpolate(method='pchip')
+ df.interpolate(method="pchip")
- df.interpolate(method='akima')
+ df.interpolate(method="akima")
When interpolating via a polynomial or spline approximation, you must also specify
the degree or order of the approximation:
.. ipython:: python
- df.interpolate(method='spline', order=2)
+ df.interpolate(method="spline", order=2)
- df.interpolate(method='polynomial', order=2)
+ df.interpolate(method="polynomial", order=2)
Compare several methods:
@@ -439,10 +443,10 @@ Compare several methods:
np.random.seed(2)
- ser = pd.Series(np.arange(1, 10.1, .25) ** 2 + np.random.randn(37))
+ ser = pd.Series(np.arange(1, 10.1, 0.25) ** 2 + np.random.randn(37))
missing = np.array([4, 13, 14, 15, 16, 17, 18, 20, 29])
ser[missing] = np.nan
- methods = ['linear', 'quadratic', 'cubic']
+ methods = ["linear", "quadratic", "cubic"]
df = pd.DataFrame({m: ser.interpolate(method=m) for m in methods})
@savefig compare_interpolations.png
@@ -460,7 +464,7 @@ at the new values.
# interpolate at new_index
new_index = ser.index | pd.Index([49.25, 49.5, 49.75, 50.25, 50.5, 50.75])
- interp_s = ser.reindex(new_index).interpolate(method='pchip')
+ interp_s = ser.reindex(new_index).interpolate(method="pchip")
interp_s[49:51]
.. _scipy: https://www.scipy.org
@@ -478,8 +482,7 @@ filled since the last valid observation:
.. ipython:: python
- ser = pd.Series([np.nan, np.nan, 5, np.nan, np.nan,
- np.nan, 13, np.nan, np.nan])
+ ser = pd.Series([np.nan, np.nan, 5, np.nan, np.nan, np.nan, 13, np.nan, np.nan])
ser
# fill all consecutive values in a forward direction
@@ -494,13 +497,13 @@ By default, ``NaN`` values are filled in a ``forward`` direction. Use
.. ipython:: python
# fill one consecutive value backwards
- ser.interpolate(limit=1, limit_direction='backward')
+ ser.interpolate(limit=1, limit_direction="backward")
# fill one consecutive value in both directions
- ser.interpolate(limit=1, limit_direction='both')
+ ser.interpolate(limit=1, limit_direction="both")
# fill all consecutive values in both directions
- ser.interpolate(limit_direction='both')
+ ser.interpolate(limit_direction="both")
By default, ``NaN`` values are filled whether they are inside (surrounded by)
existing valid values, or outside existing valid values. The ``limit_area``
@@ -509,13 +512,13 @@ parameter restricts filling to either inside or outside values.
.. ipython:: python
# fill one consecutive inside value in both directions
- ser.interpolate(limit_direction='both', limit_area='inside', limit=1)
+ ser.interpolate(limit_direction="both", limit_area="inside", limit=1)
# fill all consecutive outside values backward
- ser.interpolate(limit_direction='backward', limit_area='outside')
+ ser.interpolate(limit_direction="backward", limit_area="outside")
# fill all consecutive outside values in both directions
- ser.interpolate(limit_direction='both', limit_area='outside')
+ ser.interpolate(limit_direction="both", limit_area="outside")
.. _missing_data.replace:
@@ -531,7 +534,7 @@ value:
.. ipython:: python
- ser = pd.Series([0., 1., 2., 3., 4.])
+ ser = pd.Series([0.0, 1.0, 2.0, 3.0, 4.0])
ser.replace(0, 5)
@@ -551,16 +554,16 @@ For a DataFrame, you can specify individual values by column:
.. ipython:: python
- df = pd.DataFrame({'a': [0, 1, 2, 3, 4], 'b': [5, 6, 7, 8, 9]})
+ df = pd.DataFrame({"a": [0, 1, 2, 3, 4], "b": [5, 6, 7, 8, 9]})
- df.replace({'a': 0, 'b': 5}, 100)
+ df.replace({"a": 0, "b": 5}, 100)
Instead of replacing with specified values, you can treat all given values as
missing and interpolate over them:
.. ipython:: python
- ser.replace([1, 2, 3], method='pad')
+ ser.replace([1, 2, 3], method="pad")
.. _missing_data.replace_expression:
@@ -581,67 +584,67 @@ Replace the '.' with ``NaN`` (str -> str):
.. ipython:: python
- d = {'a': list(range(4)), 'b': list('ab..'), 'c': ['a', 'b', np.nan, 'd']}
+ d = {"a": list(range(4)), "b": list("ab.."), "c": ["a", "b", np.nan, "d"]}
df = pd.DataFrame(d)
- df.replace('.', np.nan)
+ df.replace(".", np.nan)
Now do it with a regular expression that removes surrounding whitespace
(regex -> regex):
.. ipython:: python
- df.replace(r'\s*\.\s*', np.nan, regex=True)
+ df.replace(r"\s*\.\s*", np.nan, regex=True)
Replace a few different values (list -> list):
.. ipython:: python
- df.replace(['a', '.'], ['b', np.nan])
+ df.replace(["a", "."], ["b", np.nan])
list of regex -> list of regex:
.. ipython:: python
- df.replace([r'\.', r'(a)'], ['dot', r'\1stuff'], regex=True)
+ df.replace([r"\.", r"(a)"], ["dot", r"\1stuff"], regex=True)
Only search in column ``'b'`` (dict -> dict):
.. ipython:: python
- df.replace({'b': '.'}, {'b': np.nan})
+ df.replace({"b": "."}, {"b": np.nan})
Same as the previous example, but use a regular expression for
searching instead (dict of regex -> dict):
.. ipython:: python
- df.replace({'b': r'\s*\.\s*'}, {'b': np.nan}, regex=True)
+ df.replace({"b": r"\s*\.\s*"}, {"b": np.nan}, regex=True)
You can pass nested dictionaries of regular expressions that use ``regex=True``:
.. ipython:: python
- df.replace({'b': {'b': r''}}, regex=True)
+ df.replace({"b": {"b": r""}}, regex=True)
Alternatively, you can pass the nested dictionary like so:
.. ipython:: python
- df.replace(regex={'b': {r'\s*\.\s*': np.nan}})
+ df.replace(regex={"b": {r"\s*\.\s*": np.nan}})
You can also use the group of a regular expression match when replacing (dict
of regex -> dict of regex), this works for lists as well.
.. ipython:: python
- df.replace({'b': r'\s*(\.)\s*'}, {'b': r'\1ty'}, regex=True)
+ df.replace({"b": r"\s*(\.)\s*"}, {"b": r"\1ty"}, regex=True)
You can pass a list of regular expressions, of which those that match
will be replaced with a scalar (list of regex -> regex).
.. ipython:: python
- df.replace([r'\s*\.\s*', r'a|b'], np.nan, regex=True)
+ df.replace([r"\s*\.\s*", r"a|b"], np.nan, regex=True)
All of the regular expression examples can also be passed with the
``to_replace`` argument as the ``regex`` argument. In this case the ``value``
@@ -650,7 +653,7 @@ dictionary. The previous example, in this case, would then be:
.. ipython:: python
- df.replace(regex=[r'\s*\.\s*', r'a|b'], value=np.nan)
+ df.replace(regex=[r"\s*\.\s*", r"a|b"], value=np.nan)
This can be convenient if you do not want to pass ``regex=True`` every time you
want to use a regular expression.
@@ -676,7 +679,7 @@ Replacing more than one value is possible by passing a list.
.. ipython:: python
df00 = df.iloc[0, 0]
- df.replace([1.5, df00], [np.nan, 'a'])
+ df.replace([1.5, df00], [np.nan, "a"])
df[1].dtype
You can also operate on the DataFrame in place:
@@ -932,7 +935,7 @@ the first 10 columns.
.. ipython:: python
- bb = pd.read_csv('data/baseball.csv', index_col='id')
+ bb = pd.read_csv("data/baseball.csv", index_col="id")
bb[bb.columns[:10]].dtypes
.. ipython:: python
diff --git a/doc/source/user_guide/scale.rst b/doc/source/user_guide/scale.rst
index 206d8dd0f4739..f36f27269a996 100644
--- a/doc/source/user_guide/scale.rst
+++ b/doc/source/user_guide/scale.rst
@@ -72,7 +72,7 @@ Option 1 loads in all the data and then filters to what we need.
.. ipython:: python
- columns = ['id_0', 'name_0', 'x_0', 'y_0']
+ columns = ["id_0", "name_0", "x_0", "y_0"]
pd.read_parquet("timeseries_wide.parquet")[columns]
@@ -123,7 +123,7 @@ space-efficient integers to know which specific name is used in each row.
.. ipython:: python
ts2 = ts.copy()
- ts2['name'] = ts2['name'].astype('category')
+ ts2["name"] = ts2["name"].astype("category")
ts2.memory_usage(deep=True)
We can go a bit further and downcast the numeric columns to their smallest types
@@ -131,8 +131,8 @@ using :func:`pandas.to_numeric`.
.. ipython:: python
- ts2['id'] = pd.to_numeric(ts2['id'], downcast='unsigned')
- ts2[['x', 'y']] = ts2[['x', 'y']].apply(pd.to_numeric, downcast='float')
+ ts2["id"] = pd.to_numeric(ts2["id"], downcast="unsigned")
+ ts2[["x", "y"]] = ts2[["x", "y"]].apply(pd.to_numeric, downcast="float")
ts2.dtypes
.. ipython:: python
@@ -141,8 +141,7 @@ using :func:`pandas.to_numeric`.
.. ipython:: python
- reduction = (ts2.memory_usage(deep=True).sum()
- / ts.memory_usage(deep=True).sum())
+ reduction = ts2.memory_usage(deep=True).sum() / ts.memory_usage(deep=True).sum()
print(f"{reduction:0.2f}")
In all, we've reduced the in-memory footprint of this dataset to 1/5 of its
@@ -174,13 +173,13 @@ files. Each file in the directory represents a different year of the entire data
import pathlib
N = 12
- starts = [f'20{i:>02d}-01-01' for i in range(N)]
- ends = [f'20{i:>02d}-12-13' for i in range(N)]
+ starts = [f"20{i:>02d}-01-01" for i in range(N)]
+ ends = [f"20{i:>02d}-12-13" for i in range(N)]
pathlib.Path("data/timeseries").mkdir(exist_ok=True)
for i, (start, end) in enumerate(zip(starts, ends)):
- ts = _make_timeseries(start=start, end=end, freq='1T', seed=i)
+ ts = _make_timeseries(start=start, end=end, freq="1T", seed=i)
ts.to_parquet(f"data/timeseries/ts-{i:0>2d}.parquet")
@@ -215,7 +214,7 @@ work for arbitrary-sized datasets.
# Only one dataframe is in memory at a time...
df = pd.read_parquet(path)
# ... plus a small Series ``counts``, which is updated.
- counts = counts.add(df['name'].value_counts(), fill_value=0)
+ counts = counts.add(df["name"].value_counts(), fill_value=0)
counts.astype(int)
Some readers, like :meth:`pandas.read_csv`, offer parameters to control the
@@ -278,8 +277,8 @@ Rather than executing immediately, doing operations build up a **task graph**.
.. ipython:: python
ddf
- ddf['name']
- ddf['name'].value_counts()
+ ddf["name"]
+ ddf["name"].value_counts()
Each of these calls is instant because the result isn't being computed yet.
We're just building up a list of computations to do when someone needs the
@@ -291,7 +290,7 @@ To get the actual result you can call ``.compute()``.
.. ipython:: python
- %time ddf['name'].value_counts().compute()
+ %time ddf["name"].value_counts().compute()
At that point, you get back the same thing you'd get with pandas, in this case
a concrete pandas Series with the count of each ``name``.
@@ -324,7 +323,7 @@ a familiar groupby aggregation.
.. ipython:: python
- %time ddf.groupby('name')[['x', 'y']].mean().compute().head()
+ %time ddf.groupby("name")[["x", "y"]].mean().compute().head()
The grouping and aggregation is done out-of-core and in parallel.
@@ -336,8 +335,8 @@ we need to supply the divisions manually.
.. ipython:: python
N = 12
- starts = [f'20{i:>02d}-01-01' for i in range(N)]
- ends = [f'20{i:>02d}-12-13' for i in range(N)]
+ starts = [f"20{i:>02d}-01-01" for i in range(N)]
+ ends = [f"20{i:>02d}-12-13" for i in range(N)]
divisions = tuple(pd.to_datetime(starts)) + (pd.Timestamp(ends[-1]),)
ddf.divisions = divisions
@@ -347,7 +346,7 @@ Now we can do things like fast random access with ``.loc``.
.. ipython:: python
- ddf.loc['2002-01-01 12:01':'2002-01-01 12:05'].compute()
+ ddf.loc["2002-01-01 12:01":"2002-01-01 12:05"].compute()
Dask knows to just look in the 3rd partition for selecting values in 2002. It
doesn't need to look at any other data.
@@ -362,7 +361,7 @@ out of memory. At that point it's just a regular pandas object.
:okwarning:
@savefig dask_resample.png
- ddf[['x', 'y']].resample("1D").mean().cumsum().compute().plot()
+ ddf[["x", "y"]].resample("1D").mean().cumsum().compute().plot()
These Dask examples have all been done using multiple processes on a single
machine. Dask can be `deployed on a cluster
diff --git a/doc/source/user_guide/timeseries.rst b/doc/source/user_guide/timeseries.rst
index 61902b4a41b7c..11ec90085d9bf 100644
--- a/doc/source/user_guide/timeseries.rst
+++ b/doc/source/user_guide/timeseries.rst
@@ -19,42 +19,43 @@ Parsing time series information from various sources and formats
import datetime
- dti = pd.to_datetime(['1/1/2018', np.datetime64('2018-01-01'),
- datetime.datetime(2018, 1, 1)])
+ dti = pd.to_datetime(
+ ["1/1/2018", np.datetime64("2018-01-01"), datetime.datetime(2018, 1, 1)]
+ )
dti
Generate sequences of fixed-frequency dates and time spans
.. ipython:: python
- dti = pd.date_range('2018-01-01', periods=3, freq='H')
+ dti = pd.date_range("2018-01-01", periods=3, freq="H")
dti
Manipulating and converting date times with timezone information
.. ipython:: python
- dti = dti.tz_localize('UTC')
+ dti = dti.tz_localize("UTC")
dti
- dti.tz_convert('US/Pacific')
+ dti.tz_convert("US/Pacific")
Resampling or converting a time series to a particular frequency
.. ipython:: python
- idx = pd.date_range('2018-01-01', periods=5, freq='H')
+ idx = pd.date_range("2018-01-01", periods=5, freq="H")
ts = pd.Series(range(len(idx)), index=idx)
ts
- ts.resample('2H').mean()
+ ts.resample("2H").mean()
Performing date and time arithmetic with absolute or relative time increments
.. ipython:: python
- friday = pd.Timestamp('2018-01-05')
+ friday = pd.Timestamp("2018-01-05")
friday.day_name()
# Add 1 day
- saturday = friday + pd.Timedelta('1 day')
+ saturday = friday + pd.Timedelta("1 day")
saturday.day_name()
# Add 1 business day (Friday --> Monday)
monday = friday + pd.offsets.BDay()
@@ -90,13 +91,13 @@ so manipulations can be performed with respect to the time element.
.. ipython:: python
- pd.Series(range(3), index=pd.date_range('2000', freq='D', periods=3))
+ pd.Series(range(3), index=pd.date_range("2000", freq="D", periods=3))
However, :class:`Series` and :class:`DataFrame` can directly also support the time component as data itself.
.. ipython:: python
- pd.Series(pd.date_range('2000', freq='D', periods=3))
+ pd.Series(pd.date_range("2000", freq="D", periods=3))
:class:`Series` and :class:`DataFrame` have extended data type support and functionality for ``datetime``, ``timedelta``
and ``Period`` data when passed into those constructors. ``DateOffset``
@@ -104,9 +105,9 @@ data however will be stored as ``object`` data.
.. ipython:: python
- pd.Series(pd.period_range('1/1/2011', freq='M', periods=3))
+ pd.Series(pd.period_range("1/1/2011", freq="M", periods=3))
pd.Series([pd.DateOffset(1), pd.DateOffset(2)])
- pd.Series(pd.date_range('1/1/2011', freq='M', periods=3))
+ pd.Series(pd.date_range("1/1/2011", freq="M", periods=3))
Lastly, pandas represents null date times, time deltas, and time spans as ``NaT`` which
is useful for representing missing or null date like values and behaves similar
@@ -132,7 +133,7 @@ time.
.. ipython:: python
pd.Timestamp(datetime.datetime(2012, 5, 1))
- pd.Timestamp('2012-05-01')
+ pd.Timestamp("2012-05-01")
pd.Timestamp(2012, 5, 1)
However, in many cases it is more natural to associate things like change
@@ -143,9 +144,9 @@ For example:
.. ipython:: python
- pd.Period('2011-01')
+ pd.Period("2011-01")
- pd.Period('2012-05', freq='D')
+ pd.Period("2012-05", freq="D")
:class:`Timestamp` and :class:`Period` can serve as an index. Lists of
``Timestamp`` and ``Period`` are automatically coerced to :class:`DatetimeIndex`
@@ -153,9 +154,11 @@ and :class:`PeriodIndex` respectively.
.. ipython:: python
- dates = [pd.Timestamp('2012-05-01'),
- pd.Timestamp('2012-05-02'),
- pd.Timestamp('2012-05-03')]
+ dates = [
+ pd.Timestamp("2012-05-01"),
+ pd.Timestamp("2012-05-02"),
+ pd.Timestamp("2012-05-03"),
+ ]
ts = pd.Series(np.random.randn(3), dates)
type(ts.index)
@@ -163,7 +166,7 @@ and :class:`PeriodIndex` respectively.
ts
- periods = [pd.Period('2012-01'), pd.Period('2012-02'), pd.Period('2012-03')]
+ periods = [pd.Period("2012-01"), pd.Period("2012-02"), pd.Period("2012-03")]
ts = pd.Series(np.random.randn(3), periods)
@@ -193,18 +196,18 @@ is converted to a ``DatetimeIndex``:
.. ipython:: python
- pd.to_datetime(pd.Series(['Jul 31, 2009', '2010-01-10', None]))
+ pd.to_datetime(pd.Series(["Jul 31, 2009", "2010-01-10", None]))
- pd.to_datetime(['2005/11/23', '2010.12.31'])
+ pd.to_datetime(["2005/11/23", "2010.12.31"])
If you use dates which start with the day first (i.e. European style),
you can pass the ``dayfirst`` flag:
.. ipython:: python
- pd.to_datetime(['04-01-2012 10:00'], dayfirst=True)
+ pd.to_datetime(["04-01-2012 10:00"], dayfirst=True)
- pd.to_datetime(['14-01-2012', '01-14-2012'], dayfirst=True)
+ pd.to_datetime(["14-01-2012", "01-14-2012"], dayfirst=True)
.. warning::
@@ -218,22 +221,22 @@ options like ``dayfirst`` or ``format``, so use ``to_datetime`` if these are req
.. ipython:: python
- pd.to_datetime('2010/11/12')
+ pd.to_datetime("2010/11/12")
- pd.Timestamp('2010/11/12')
+ pd.Timestamp("2010/11/12")
You can also use the ``DatetimeIndex`` constructor directly:
.. ipython:: python
- pd.DatetimeIndex(['2018-01-01', '2018-01-03', '2018-01-05'])
+ pd.DatetimeIndex(["2018-01-01", "2018-01-03", "2018-01-05"])
The string 'infer' can be passed in order to set the frequency of the index as the
inferred frequency upon creation:
.. ipython:: python
- pd.DatetimeIndex(['2018-01-01', '2018-01-03', '2018-01-05'], freq='infer')
+ pd.DatetimeIndex(["2018-01-01", "2018-01-03", "2018-01-05"], freq="infer")
.. _timeseries.converting.format:
@@ -245,9 +248,9 @@ This could also potentially speed up the conversion considerably.
.. ipython:: python
- pd.to_datetime('2010/11/12', format='%Y/%m/%d')
+ pd.to_datetime("2010/11/12", format="%Y/%m/%d")
- pd.to_datetime('12-11-2010 00:00', format='%d-%m-%Y %H:%M')
+ pd.to_datetime("12-11-2010 00:00", format="%d-%m-%Y %H:%M")
For more information on the choices available when specifying the ``format``
option, see the Python `datetime documentation`_.
@@ -261,10 +264,9 @@ You can also pass a ``DataFrame`` of integer or string columns to assemble into
.. ipython:: python
- df = pd.DataFrame({'year': [2015, 2016],
- 'month': [2, 3],
- 'day': [4, 5],
- 'hour': [2, 3]})
+ df = pd.DataFrame(
+ {"year": [2015, 2016], "month": [2, 3], "day": [4, 5], "hour": [2, 3]}
+ )
pd.to_datetime(df)
@@ -272,7 +274,7 @@ You can pass only the columns that you need to assemble.
.. ipython:: python
- pd.to_datetime(df[['year', 'month', 'day']])
+ pd.to_datetime(df[["year", "month", "day"]])
``pd.to_datetime`` looks for standard designations of the datetime component in the column names, including:
@@ -293,13 +295,13 @@ Pass ``errors='ignore'`` to return the original input when unparsable:
.. ipython:: python
- pd.to_datetime(['2009/07/31', 'asd'], errors='ignore')
+ pd.to_datetime(["2009/07/31", "asd"], errors="ignore")
Pass ``errors='coerce'`` to convert unparsable data to ``NaT`` (not a time):
.. ipython:: python
- pd.to_datetime(['2009/07/31', 'asd'], errors='coerce')
+ pd.to_datetime(["2009/07/31", "asd"], errors="coerce")
.. _timeseries.converting.epoch:
@@ -315,11 +317,12 @@ which can be specified. These are computed from the starting point specified by
.. ipython:: python
- pd.to_datetime([1349720105, 1349806505, 1349892905,
- 1349979305, 1350065705], unit='s')
+ pd.to_datetime([1349720105, 1349806505, 1349892905, 1349979305, 1350065705], unit="s")
- pd.to_datetime([1349720105100, 1349720105200, 1349720105300,
- 1349720105400, 1349720105500], unit='ms')
+ pd.to_datetime(
+ [1349720105100, 1349720105200, 1349720105300, 1349720105400, 1349720105500],
+ unit="ms",
+ )
.. note::
@@ -336,8 +339,8 @@ as timezone-naive timestamps and then localize to the appropriate timezone:
.. ipython:: python
- pd.Timestamp(1262347200000000000).tz_localize('US/Pacific')
- pd.DatetimeIndex([1262347200000000000]).tz_localize('US/Pacific')
+ pd.Timestamp(1262347200000000000).tz_localize("US/Pacific")
+ pd.DatetimeIndex([1262347200000000000]).tz_localize("US/Pacific")
.. note::
@@ -353,8 +356,8 @@ as timezone-naive timestamps and then localize to the appropriate timezone:
.. ipython:: python
- pd.to_datetime([1490195805.433, 1490195805.433502912], unit='s')
- pd.to_datetime(1490195805433502912, unit='ns')
+ pd.to_datetime([1490195805.433, 1490195805.433502912], unit="s")
+ pd.to_datetime(1490195805433502912, unit="ns")
.. seealso::
@@ -369,7 +372,7 @@ To invert the operation from above, namely, to convert from a ``Timestamp`` to a
.. ipython:: python
- stamps = pd.date_range('2012-10-08 18:15:05', periods=4, freq='D')
+ stamps = pd.date_range("2012-10-08 18:15:05", periods=4, freq="D")
stamps
We subtract the epoch (midnight at January 1, 1970 UTC) and then floor divide by the
@@ -377,7 +380,7 @@ We subtract the epoch (midnight at January 1, 1970 UTC) and then floor divide by
.. ipython:: python
- (stamps - pd.Timestamp("1970-01-01")) // pd.Timedelta('1s')
+ (stamps - pd.Timestamp("1970-01-01")) // pd.Timedelta("1s")
.. _timeseries.origin:
@@ -389,14 +392,14 @@ of a ``DatetimeIndex``. For example, to use 1960-01-01 as the starting date:
.. ipython:: python
- pd.to_datetime([1, 2, 3], unit='D', origin=pd.Timestamp('1960-01-01'))
+ pd.to_datetime([1, 2, 3], unit="D", origin=pd.Timestamp("1960-01-01"))
The default is set at ``origin='unix'``, which defaults to ``1970-01-01 00:00:00``.
Commonly called 'unix epoch' or POSIX time.
.. ipython:: python
- pd.to_datetime([1, 2, 3], unit='D')
+ pd.to_datetime([1, 2, 3], unit="D")
.. _timeseries.daterange:
@@ -408,9 +411,11 @@ To generate an index with timestamps, you can use either the ``DatetimeIndex`` o
.. ipython:: python
- dates = [datetime.datetime(2012, 5, 1),
- datetime.datetime(2012, 5, 2),
- datetime.datetime(2012, 5, 3)]
+ dates = [
+ datetime.datetime(2012, 5, 1),
+ datetime.datetime(2012, 5, 2),
+ datetime.datetime(2012, 5, 3),
+ ]
# Note the frequency information
index = pd.DatetimeIndex(dates)
@@ -442,9 +447,9 @@ variety of :ref:`frequency aliases <timeseries.offset_aliases>`:
.. ipython:: python
- pd.date_range(start, periods=1000, freq='M')
+ pd.date_range(start, periods=1000, freq="M")
- pd.bdate_range(start, periods=250, freq='BQS')
+ pd.bdate_range(start, periods=250, freq="BQS")
``date_range`` and ``bdate_range`` make it easy to generate a range of dates
using various combinations of parameters like ``start``, ``end``, ``periods``,
@@ -453,9 +458,9 @@ of those specified will not be generated:
.. ipython:: python
- pd.date_range(start, end, freq='BM')
+ pd.date_range(start, end, freq="BM")
- pd.date_range(start, end, freq='W')
+ pd.date_range(start, end, freq="W")
pd.bdate_range(end=end, periods=20)
@@ -467,9 +472,9 @@ resulting ``DatetimeIndex``:
.. ipython:: python
- pd.date_range('2018-01-01', '2018-01-05', periods=5)
+ pd.date_range("2018-01-01", "2018-01-05", periods=5)
- pd.date_range('2018-01-01', '2018-01-05', periods=10)
+ pd.date_range("2018-01-01", "2018-01-05", periods=10)
.. _timeseries.custom-freq-ranges:
@@ -482,13 +487,13 @@ used if a custom frequency string is passed.
.. ipython:: python
- weekmask = 'Mon Wed Fri'
+ weekmask = "Mon Wed Fri"
holidays = [datetime.datetime(2011, 1, 5), datetime.datetime(2011, 3, 14)]
- pd.bdate_range(start, end, freq='C', weekmask=weekmask, holidays=holidays)
+ pd.bdate_range(start, end, freq="C", weekmask=weekmask, holidays=holidays)
- pd.bdate_range(start, end, freq='CBMS', weekmask=weekmask)
+ pd.bdate_range(start, end, freq="CBMS", weekmask=weekmask)
.. seealso::
@@ -545,7 +550,7 @@ intelligent functionality like selection, slicing, etc.
.. ipython:: python
- rng = pd.date_range(start, end, freq='BM')
+ rng = pd.date_range(start, end, freq="BM")
ts = pd.Series(np.random.randn(len(rng)), index=rng)
ts.index
ts[:5].index
@@ -560,20 +565,20 @@ Dates and strings that parse to timestamps can be passed as indexing parameters:
.. ipython:: python
- ts['1/31/2011']
+ ts["1/31/2011"]
ts[datetime.datetime(2011, 12, 25):]
- ts['10/31/2011':'12/31/2011']
+ ts["10/31/2011":"12/31/2011"]
To provide convenience for accessing longer time series, you can also pass in
the year or year and month as strings:
.. ipython:: python
- ts['2011']
+ ts["2011"]
- ts['2011-6']
+ ts["2011-6"]
This type of slicing will work on a ``DataFrame`` with a ``DatetimeIndex`` as well. Since the
partial string selection is a form of label slicing, the endpoints **will be** included. This
@@ -586,10 +591,13 @@ would include matching times on an included date:
.. ipython:: python
:okwarning:
- dft = pd.DataFrame(np.random.randn(100000, 1), columns=['A'],
- index=pd.date_range('20130101', periods=100000, freq='T'))
+ dft = pd.DataFrame(
+ np.random.randn(100000, 1),
+ columns=["A"],
+ index=pd.date_range("20130101", periods=100000, freq="T"),
+ )
dft
- dft['2013']
+ dft["2013"]
This starts on the very first time in the month, and includes the last date and
time for the month:
@@ -597,43 +605,45 @@ time for the month:
.. ipython:: python
:okwarning:
- dft['2013-1':'2013-2']
+ dft["2013-1":"2013-2"]
This specifies a stop time **that includes all of the times on the last day**:
.. ipython:: python
:okwarning:
- dft['2013-1':'2013-2-28']
+ dft["2013-1":"2013-2-28"]
This specifies an **exact** stop time (and is not the same as the above):
.. ipython:: python
:okwarning:
- dft['2013-1':'2013-2-28 00:00:00']
+ dft["2013-1":"2013-2-28 00:00:00"]
We are stopping on the included end-point as it is part of the index:
.. ipython:: python
:okwarning:
- dft['2013-1-15':'2013-1-15 12:30:00']
+ dft["2013-1-15":"2013-1-15 12:30:00"]
``DatetimeIndex`` partial string indexing also works on a ``DataFrame`` with a ``MultiIndex``:
.. ipython:: python
- dft2 = pd.DataFrame(np.random.randn(20, 1),
- columns=['A'],
- index=pd.MultiIndex.from_product(
- [pd.date_range('20130101', periods=10, freq='12H'),
- ['a', 'b']]))
+ dft2 = pd.DataFrame(
+ np.random.randn(20, 1),
+ columns=["A"],
+ index=pd.MultiIndex.from_product(
+ [pd.date_range("20130101", periods=10, freq="12H"), ["a", "b"]]
+ ),
+ )
dft2
- dft2.loc['2013-01-05']
+ dft2.loc["2013-01-05"]
idx = pd.IndexSlice
dft2 = dft2.swaplevel(0, 1).sort_index()
- dft2.loc[idx[:, '2013-01-05'], :]
+ dft2.loc[idx[:, "2013-01-05"], :]
.. versionadded:: 0.25.0
@@ -642,9 +652,9 @@ Slicing with string indexing also honors UTC offset.
.. ipython:: python
:okwarning:
- df = pd.DataFrame([0], index=pd.DatetimeIndex(['2019-01-01'], tz='US/Pacific'))
+ df = pd.DataFrame([0], index=pd.DatetimeIndex(["2019-01-01"], tz="US/Pacific"))
df
- df['2019-01-01 12:00:00+04:00':'2019-01-01 13:00:00+04:00']
+ df["2019-01-01 12:00:00+04:00":"2019-01-01 13:00:00+04:00"]
.. _timeseries.slice_vs_exact_match:
@@ -657,45 +667,48 @@ Consider a ``Series`` object with a minute resolution index:
.. ipython:: python
- series_minute = pd.Series([1, 2, 3],
- pd.DatetimeIndex(['2011-12-31 23:59:00',
- '2012-01-01 00:00:00',
- '2012-01-01 00:02:00']))
+ series_minute = pd.Series(
+ [1, 2, 3],
+ pd.DatetimeIndex(
+ ["2011-12-31 23:59:00", "2012-01-01 00:00:00", "2012-01-01 00:02:00"]
+ ),
+ )
series_minute.index.resolution
A timestamp string less accurate than a minute gives a ``Series`` object.
.. ipython:: python
- series_minute['2011-12-31 23']
+ series_minute["2011-12-31 23"]
A timestamp string with minute resolution (or more accurate) gives a scalar instead, i.e. it is not cast to a slice.
.. ipython:: python
- series_minute['2011-12-31 23:59']
- series_minute['2011-12-31 23:59:00']
+ series_minute["2011-12-31 23:59"]
+ series_minute["2011-12-31 23:59:00"]
If index resolution is second, then the minute-accurate timestamp gives a
``Series``.
.. ipython:: python
- series_second = pd.Series([1, 2, 3],
- pd.DatetimeIndex(['2011-12-31 23:59:59',
- '2012-01-01 00:00:00',
- '2012-01-01 00:00:01']))
+ series_second = pd.Series(
+ [1, 2, 3],
+ pd.DatetimeIndex(
+ ["2011-12-31 23:59:59", "2012-01-01 00:00:00", "2012-01-01 00:00:01"]
+ ),
+ )
series_second.index.resolution
- series_second['2011-12-31 23:59']
+ series_second["2011-12-31 23:59"]
If the timestamp string is treated as a slice, it can be used to index ``DataFrame`` with ``[]`` as well.
.. ipython:: python
:okwarning:
- dft_minute = pd.DataFrame({'a': [1, 2, 3], 'b': [4, 5, 6]},
- index=series_minute.index)
- dft_minute['2011-12-31 23']
+ dft_minute = pd.DataFrame({"a": [1, 2, 3], "b": [4, 5, 6]}, index=series_minute.index)
+ dft_minute["2011-12-31 23"]
.. warning::
@@ -706,16 +719,17 @@ If the timestamp string is treated as a slice, it can be used to index ``DataFra
.. ipython:: python
- dft_minute.loc['2011-12-31 23:59']
+ dft_minute.loc["2011-12-31 23:59"]
Note also that ``DatetimeIndex`` resolution cannot be less precise than day.
.. ipython:: python
- series_monthly = pd.Series([1, 2, 3],
- pd.DatetimeIndex(['2011-12', '2012-01', '2012-02']))
+ series_monthly = pd.Series(
+ [1, 2, 3], pd.DatetimeIndex(["2011-12", "2012-01", "2012-02"])
+ )
series_monthly.index.resolution
- series_monthly['2011-12'] # returns Series
+ series_monthly["2011-12"] # returns Series
Exact indexing
@@ -727,14 +741,15 @@ These ``Timestamp`` and ``datetime`` objects have exact ``hours, minutes,`` and
.. ipython:: python
- dft[datetime.datetime(2013, 1, 1):datetime.datetime(2013, 2, 28)]
+ dft[datetime.datetime(2013, 1, 1): datetime.datetime(2013, 2, 28)]
With no defaults.
.. ipython:: python
- dft[datetime.datetime(2013, 1, 1, 10, 12, 0):
- datetime.datetime(2013, 2, 28, 10, 12, 0)]
+ dft[
+ datetime.datetime(2013, 1, 1, 10, 12, 0): datetime.datetime(2013, 2, 28, 10, 12, 0)
+ ]
Truncating & fancy indexing
@@ -747,11 +762,11 @@ partially matching dates:
.. ipython:: python
- rng2 = pd.date_range('2011-01-01', '2012-01-01', freq='W')
+ rng2 = pd.date_range("2011-01-01", "2012-01-01", freq="W")
ts2 = pd.Series(np.random.randn(len(rng2)), index=rng2)
- ts2.truncate(before='2011-11', after='2011-12')
- ts2['2011-11':'2011-12']
+ ts2.truncate(before="2011-11", after="2011-12")
+ ts2["2011-11":"2011-12"]
Even complicated fancy indexing that breaks the ``DatetimeIndex`` frequency
regularity will result in a ``DatetimeIndex``, although frequency is lost:
@@ -807,7 +822,7 @@ You may obtain the year, week and day components of the ISO year from the ISO 86
.. ipython:: python
- idx = pd.date_range(start='2019-12-29', freq='D', periods=4)
+ idx = pd.date_range(start="2019-12-29", freq="D", periods=4)
idx.isocalendar()
idx.to_series().dt.isocalendar()
@@ -837,12 +852,12 @@ arithmetic operator (``+``) or the ``apply`` method can be used to perform the s
.. ipython:: python
# This particular day contains a day light savings time transition
- ts = pd.Timestamp('2016-10-30 00:00:00', tz='Europe/Helsinki')
+ ts = pd.Timestamp("2016-10-30 00:00:00", tz="Europe/Helsinki")
# Respects absolute time
ts + pd.Timedelta(days=1)
# Respects calendar time
ts + pd.DateOffset(days=1)
- friday = pd.Timestamp('2018-01-05')
+ friday = pd.Timestamp("2018-01-05")
friday.day_name()
# Add 2 business days (Friday --> Tuesday)
two_business_days = 2 * pd.offsets.BDay()
@@ -900,10 +915,10 @@ business offsets operate on the weekdays.
.. ipython:: python
- ts = pd.Timestamp('2018-01-06 00:00:00')
+ ts = pd.Timestamp("2018-01-06 00:00:00")
ts.day_name()
# BusinessHour's valid offset dates are Monday through Friday
- offset = pd.offsets.BusinessHour(start='09:00')
+ offset = pd.offsets.BusinessHour(start="09:00")
# Bring the date to the closest offset date (Monday)
offset.rollforward(ts)
# Date is brought to the closest offset date first and then the hour is added
@@ -916,12 +931,12 @@ in the operation).
.. ipython:: python
- ts = pd.Timestamp('2014-01-01 09:00')
+ ts = pd.Timestamp("2014-01-01 09:00")
day = pd.offsets.Day()
day.apply(ts)
day.apply(ts).normalize()
- ts = pd.Timestamp('2014-01-01 22:00')
+ ts = pd.Timestamp("2014-01-01 22:00")
hour = pd.offsets.Hour()
hour.apply(ts)
hour.apply(ts).normalize()
@@ -974,7 +989,7 @@ apply the offset to each element.
.. ipython:: python
- rng = pd.date_range('2012-01-01', '2012-01-03')
+ rng = pd.date_range("2012-01-01", "2012-01-03")
s = pd.Series(rng)
rng
rng + pd.DateOffset(months=2)
@@ -989,7 +1004,7 @@ used exactly like a ``Timedelta`` - see the
.. ipython:: python
s - pd.offsets.Day(2)
- td = s - pd.Series(pd.date_range('2011-12-29', '2011-12-31'))
+ td = s - pd.Series(pd.date_range("2011-12-29", "2011-12-31"))
td
td + pd.offsets.Minute(15)
@@ -1016,16 +1031,13 @@ As an interesting example, let's look at Egypt where a Friday-Saturday weekend i
.. ipython:: python
- weekmask_egypt = 'Sun Mon Tue Wed Thu'
+ weekmask_egypt = "Sun Mon Tue Wed Thu"
# They also observe International Workers' Day so let's
# add that for a couple of years
- holidays = ['2012-05-01',
- datetime.datetime(2013, 5, 1),
- np.datetime64('2014-05-01')]
- bday_egypt = pd.offsets.CustomBusinessDay(holidays=holidays,
- weekmask=weekmask_egypt)
+ holidays = ["2012-05-01", datetime.datetime(2013, 5, 1), np.datetime64("2014-05-01")]
+ bday_egypt = pd.offsets.CustomBusinessDay(holidays=holidays, weekmask=weekmask_egypt)
dt = datetime.datetime(2013, 4, 30)
dt + 2 * bday_egypt
@@ -1035,8 +1047,7 @@ Let's map to the weekday names:
dts = pd.date_range(dt, periods=5, freq=bday_egypt)
- pd.Series(dts.weekday, dts).map(
- pd.Series('Mon Tue Wed Thu Fri Sat Sun'.split()))
+ pd.Series(dts.weekday, dts).map(pd.Series("Mon Tue Wed Thu Fri Sat Sun".split()))
Holiday calendars can be used to provide the list of holidays. See the
:ref:`holiday calendar<timeseries.holiday>` section for more information.
@@ -1058,15 +1069,14 @@ in the usual way.
.. ipython:: python
- bmth_us = pd.offsets.CustomBusinessMonthBegin(
- calendar=USFederalHolidayCalendar())
+ bmth_us = pd.offsets.CustomBusinessMonthBegin(calendar=USFederalHolidayCalendar())
# Skip new years
dt = datetime.datetime(2013, 12, 17)
dt + bmth_us
# Define date index with custom offset
- pd.date_range(start='20100101', end='20120101', freq=bmth_us)
+ pd.date_range(start="20100101", end="20120101", freq=bmth_us)
.. note::
@@ -1097,23 +1107,23 @@ hours are added to the next business day.
bh
# 2014-08-01 is Friday
- pd.Timestamp('2014-08-01 10:00').weekday()
- pd.Timestamp('2014-08-01 10:00') + bh
+ pd.Timestamp("2014-08-01 10:00").weekday()
+ pd.Timestamp("2014-08-01 10:00") + bh
# Below example is the same as: pd.Timestamp('2014-08-01 09:00') + bh
- pd.Timestamp('2014-08-01 08:00') + bh
+ pd.Timestamp("2014-08-01 08:00") + bh
    # If the result is on the end time, move to the next business day
- pd.Timestamp('2014-08-01 16:00') + bh
+ pd.Timestamp("2014-08-01 16:00") + bh
    # Remainders are added to the next day
- pd.Timestamp('2014-08-01 16:30') + bh
+ pd.Timestamp("2014-08-01 16:30") + bh
# Adding 2 business hours
- pd.Timestamp('2014-08-01 10:00') + pd.offsets.BusinessHour(2)
+ pd.Timestamp("2014-08-01 10:00") + pd.offsets.BusinessHour(2)
# Subtracting 3 business hours
- pd.Timestamp('2014-08-01 10:00') + pd.offsets.BusinessHour(-3)
+ pd.Timestamp("2014-08-01 10:00") + pd.offsets.BusinessHour(-3)
You can also specify ``start`` and ``end`` time by keywords. The argument must
be a ``str`` with an ``hour:minute`` representation or a ``datetime.time``
@@ -1122,12 +1132,12 @@ results in ``ValueError``.
.. ipython:: python
- bh = pd.offsets.BusinessHour(start='11:00', end=datetime.time(20, 0))
+ bh = pd.offsets.BusinessHour(start="11:00", end=datetime.time(20, 0))
bh
- pd.Timestamp('2014-08-01 13:00') + bh
- pd.Timestamp('2014-08-01 09:00') + bh
- pd.Timestamp('2014-08-01 18:00') + bh
+ pd.Timestamp("2014-08-01 13:00") + bh
+ pd.Timestamp("2014-08-01 09:00") + bh
+ pd.Timestamp("2014-08-01 18:00") + bh
Passing ``start`` time later than ``end`` represents midnight business hour.
In this case, business hour exceeds midnight and overlap to the next day.
@@ -1135,19 +1145,19 @@ Valid business hours are distinguished by whether it started from valid ``Busine
.. ipython:: python
- bh = pd.offsets.BusinessHour(start='17:00', end='09:00')
+ bh = pd.offsets.BusinessHour(start="17:00", end="09:00")
bh
- pd.Timestamp('2014-08-01 17:00') + bh
- pd.Timestamp('2014-08-01 23:00') + bh
+ pd.Timestamp("2014-08-01 17:00") + bh
+ pd.Timestamp("2014-08-01 23:00") + bh
# Although 2014-08-02 is Saturday,
# it is valid because it starts from 08-01 (Friday).
- pd.Timestamp('2014-08-02 04:00') + bh
+ pd.Timestamp("2014-08-02 04:00") + bh
# Although 2014-08-04 is Monday,
# it is out of business hours because it starts from 08-03 (Sunday).
- pd.Timestamp('2014-08-04 04:00') + bh
+ pd.Timestamp("2014-08-04 04:00") + bh
Applying ``BusinessHour.rollforward`` and ``rollback`` to out of business hours results in
the next business hour start or previous day's end. Different from other offsets, ``BusinessHour.rollforward``
@@ -1160,19 +1170,19 @@ under the default business hours (9:00 - 17:00), there is no gap (0 minutes) bet
.. ipython:: python
# This adjusts a Timestamp to business hour edge
- pd.offsets.BusinessHour().rollback(pd.Timestamp('2014-08-02 15:00'))
- pd.offsets.BusinessHour().rollforward(pd.Timestamp('2014-08-02 15:00'))
+ pd.offsets.BusinessHour().rollback(pd.Timestamp("2014-08-02 15:00"))
+ pd.offsets.BusinessHour().rollforward(pd.Timestamp("2014-08-02 15:00"))
# It is the same as BusinessHour().apply(pd.Timestamp('2014-08-01 17:00')).
# And it is the same as BusinessHour().apply(pd.Timestamp('2014-08-04 09:00'))
- pd.offsets.BusinessHour().apply(pd.Timestamp('2014-08-02 15:00'))
+ pd.offsets.BusinessHour().apply(pd.Timestamp("2014-08-02 15:00"))
# BusinessDay results (for reference)
- pd.offsets.BusinessHour().rollforward(pd.Timestamp('2014-08-02'))
+ pd.offsets.BusinessHour().rollforward(pd.Timestamp("2014-08-02"))
# It is the same as BusinessDay().apply(pd.Timestamp('2014-08-01'))
    # The result is the same as rollforward because BusinessDay never overlaps.
- pd.offsets.BusinessHour().apply(pd.Timestamp('2014-08-02'))
+ pd.offsets.BusinessHour().apply(pd.Timestamp("2014-08-02"))
``BusinessHour`` regards Saturday and Sunday as holidays. To use arbitrary
holidays, you can use ``CustomBusinessHour`` offset, as explained in the
@@ -1190,6 +1200,7 @@ as ``BusinessHour`` except that it skips specified custom holidays.
.. ipython:: python
from pandas.tseries.holiday import USFederalHolidayCalendar
+
bhour_us = pd.offsets.CustomBusinessHour(calendar=USFederalHolidayCalendar())
# Friday before MLK Day
dt = datetime.datetime(2014, 1, 17, 15)
@@ -1203,8 +1214,7 @@ You can use keyword arguments supported by either ``BusinessHour`` and ``CustomB
.. ipython:: python
- bhour_mon = pd.offsets.CustomBusinessHour(start='10:00',
- weekmask='Tue Wed Thu Fri')
+ bhour_mon = pd.offsets.CustomBusinessHour(start="10:00", weekmask="Tue Wed Thu Fri")
# Monday is skipped because it's a holiday, business hour starts from 10:00
dt + bhour_mon * 2
@@ -1257,7 +1267,7 @@ most functions:
.. ipython:: python
- pd.date_range(start, periods=5, freq='B')
+ pd.date_range(start, periods=5, freq="B")
pd.date_range(start, periods=5, freq=pd.offsets.BDay())
@@ -1265,9 +1275,9 @@ You can combine together day and intraday offsets:
.. ipython:: python
- pd.date_range(start, periods=10, freq='2h20min')
+ pd.date_range(start, periods=10, freq="2h20min")
- pd.date_range(start, periods=10, freq='1D10U')
+ pd.date_range(start, periods=10, freq="1D10U")
Anchored offsets
~~~~~~~~~~~~~~~~
@@ -1326,39 +1336,39 @@ anchor point, and moved ``|n|-1`` additional steps forwards or backwards.
.. ipython:: python
- pd.Timestamp('2014-01-02') + pd.offsets.MonthBegin(n=1)
- pd.Timestamp('2014-01-02') + pd.offsets.MonthEnd(n=1)
+ pd.Timestamp("2014-01-02") + pd.offsets.MonthBegin(n=1)
+ pd.Timestamp("2014-01-02") + pd.offsets.MonthEnd(n=1)
- pd.Timestamp('2014-01-02') - pd.offsets.MonthBegin(n=1)
- pd.Timestamp('2014-01-02') - pd.offsets.MonthEnd(n=1)
+ pd.Timestamp("2014-01-02") - pd.offsets.MonthBegin(n=1)
+ pd.Timestamp("2014-01-02") - pd.offsets.MonthEnd(n=1)
- pd.Timestamp('2014-01-02') + pd.offsets.MonthBegin(n=4)
- pd.Timestamp('2014-01-02') - pd.offsets.MonthBegin(n=4)
+ pd.Timestamp("2014-01-02") + pd.offsets.MonthBegin(n=4)
+ pd.Timestamp("2014-01-02") - pd.offsets.MonthBegin(n=4)
If the given date *is* on an anchor point, it is moved ``|n|`` points forwards
or backwards.
.. ipython:: python
- pd.Timestamp('2014-01-01') + pd.offsets.MonthBegin(n=1)
- pd.Timestamp('2014-01-31') + pd.offsets.MonthEnd(n=1)
+ pd.Timestamp("2014-01-01") + pd.offsets.MonthBegin(n=1)
+ pd.Timestamp("2014-01-31") + pd.offsets.MonthEnd(n=1)
- pd.Timestamp('2014-01-01') - pd.offsets.MonthBegin(n=1)
- pd.Timestamp('2014-01-31') - pd.offsets.MonthEnd(n=1)
+ pd.Timestamp("2014-01-01") - pd.offsets.MonthBegin(n=1)
+ pd.Timestamp("2014-01-31") - pd.offsets.MonthEnd(n=1)
- pd.Timestamp('2014-01-01') + pd.offsets.MonthBegin(n=4)
- pd.Timestamp('2014-01-31') - pd.offsets.MonthBegin(n=4)
+ pd.Timestamp("2014-01-01") + pd.offsets.MonthBegin(n=4)
+ pd.Timestamp("2014-01-31") - pd.offsets.MonthBegin(n=4)
For the case when ``n=0``, the date is not moved if on an anchor point, otherwise
it is rolled forward to the next anchor point.
.. ipython:: python
- pd.Timestamp('2014-01-02') + pd.offsets.MonthBegin(n=0)
- pd.Timestamp('2014-01-02') + pd.offsets.MonthEnd(n=0)
+ pd.Timestamp("2014-01-02") + pd.offsets.MonthBegin(n=0)
+ pd.Timestamp("2014-01-02") + pd.offsets.MonthEnd(n=0)
- pd.Timestamp('2014-01-01') + pd.offsets.MonthBegin(n=0)
- pd.Timestamp('2014-01-31') + pd.offsets.MonthEnd(n=0)
+ pd.Timestamp("2014-01-01") + pd.offsets.MonthBegin(n=0)
+ pd.Timestamp("2014-01-31") + pd.offsets.MonthEnd(n=0)
.. _timeseries.holiday:
@@ -1394,14 +1404,22 @@ An example of how holidays and holiday calendars are defined:
.. ipython:: python
- from pandas.tseries.holiday import Holiday, USMemorialDay,\
- AbstractHolidayCalendar, nearest_workday, MO
+ from pandas.tseries.holiday import (
+ Holiday,
+ USMemorialDay,
+ AbstractHolidayCalendar,
+ nearest_workday,
+ MO,
+ )
+
+
class ExampleCalendar(AbstractHolidayCalendar):
rules = [
USMemorialDay,
- Holiday('July 4th', month=7, day=4, observance=nearest_workday),
- Holiday('Columbus Day', month=10, day=1,
- offset=pd.DateOffset(weekday=MO(2)))]
+ Holiday("July 4th", month=7, day=4, observance=nearest_workday),
+ Holiday("Columbus Day", month=10, day=1, offset=pd.DateOffset(weekday=MO(2))),
+ ]
+
cal = ExampleCalendar()
cal.holidays(datetime.datetime(2012, 1, 1), datetime.datetime(2012, 12, 31))
@@ -1417,8 +1435,9 @@ or ``Timestamp`` objects.
.. ipython:: python
- pd.date_range(start='7/1/2012', end='7/10/2012',
- freq=pd.offsets.CDay(calendar=cal)).to_pydatetime()
+ pd.date_range(
+ start="7/1/2012", end="7/10/2012", freq=pd.offsets.CDay(calendar=cal)
+ ).to_pydatetime()
offset = pd.offsets.CustomBusinessDay(calendar=cal)
datetime.datetime(2012, 5, 25) + offset
datetime.datetime(2012, 7, 3) + offset
@@ -1450,11 +1469,11 @@ or calendars with additional rules.
.. ipython:: python
- from pandas.tseries.holiday import get_calendar, HolidayCalendarFactory,\
- USLaborDay
- cal = get_calendar('ExampleCalendar')
+ from pandas.tseries.holiday import get_calendar, HolidayCalendarFactory, USLaborDay
+
+ cal = get_calendar("ExampleCalendar")
cal.rules
- new_cal = HolidayCalendarFactory('NewExampleCalendar', cal, USLaborDay)
+ new_cal = HolidayCalendarFactory("NewExampleCalendar", cal, USLaborDay)
new_cal.rules
.. _timeseries.advanced_datetime:
@@ -1484,9 +1503,9 @@ rather than changing the alignment of the data and the index:
.. ipython:: python
- ts.shift(5, freq='D')
+ ts.shift(5, freq="D")
ts.shift(5, freq=pd.offsets.BDay())
- ts.shift(5, freq='BM')
+ ts.shift(5, freq="BM")
Note that when ``freq`` is specified, the leading entry is no longer NaN
because the data is not being realigned.
@@ -1501,7 +1520,7 @@ calls ``reindex``.
.. ipython:: python
- dr = pd.date_range('1/1/2010', periods=3, freq=3 * pd.offsets.BDay())
+ dr = pd.date_range("1/1/2010", periods=3, freq=3 * pd.offsets.BDay())
ts = pd.Series(np.random.randn(3), index=dr)
ts
ts.asfreq(pd.offsets.BDay())
@@ -1511,7 +1530,7 @@ method for any gaps that may appear after the frequency conversion.
.. ipython:: python
- ts.asfreq(pd.offsets.BDay(), method='pad')
+ ts.asfreq(pd.offsets.BDay(), method="pad")
Filling forward / backward
~~~~~~~~~~~~~~~~~~~~~~~~~~
@@ -1552,11 +1571,11 @@ Basics
.. ipython:: python
- rng = pd.date_range('1/1/2012', periods=100, freq='S')
+ rng = pd.date_range("1/1/2012", periods=100, freq="S")
ts = pd.Series(np.random.randint(0, 500, len(rng)), index=rng)
- ts.resample('5Min').sum()
+ ts.resample("5Min").sum()
The ``resample`` function is very flexible and allows you to specify many
different parameters to control the frequency conversion and resampling
@@ -1568,11 +1587,11 @@ a method of the returned object, including ``sum``, ``mean``, ``std``, ``sem``,
.. ipython:: python
- ts.resample('5Min').mean()
+ ts.resample("5Min").mean()
- ts.resample('5Min').ohlc()
+ ts.resample("5Min").ohlc()
- ts.resample('5Min').max()
+ ts.resample("5Min").max()
For downsampling, ``closed`` can be set to 'left' or 'right' to specify which
@@ -1580,9 +1599,9 @@ end of the interval is closed:
.. ipython:: python
- ts.resample('5Min', closed='right').mean()
+ ts.resample("5Min", closed="right").mean()
- ts.resample('5Min', closed='left').mean()
+ ts.resample("5Min", closed="left").mean()
Parameters like ``label`` are used to manipulate the resulting labels.
``label`` specifies whether the result is labeled with the beginning or
@@ -1590,9 +1609,9 @@ the end of the interval.
.. ipython:: python
- ts.resample('5Min').mean() # by default label='left'
+ ts.resample("5Min").mean() # by default label='left'
- ts.resample('5Min', label='left').mean()
+ ts.resample("5Min", label="left").mean()
.. warning::
@@ -1606,12 +1625,12 @@ the end of the interval.
.. ipython:: python
- s = pd.date_range('2000-01-01', '2000-01-05').to_series()
+ s = pd.date_range("2000-01-01", "2000-01-05").to_series()
s.iloc[2] = pd.NaT
s.dt.day_name()
# default: label='left', closed='left'
- s.resample('B').last().dt.day_name()
+ s.resample("B").last().dt.day_name()
Notice how the value for Sunday got pulled back to the previous Friday.
To get the behavior where the value for Sunday is pushed to Monday, use
@@ -1619,7 +1638,7 @@ the end of the interval.
.. ipython:: python
- s.resample('B', label='right', closed='right').last().dt.day_name()
+ s.resample("B", label="right", closed="right").last().dt.day_name()
The ``axis`` parameter can be set to 0 or 1 and allows you to resample the
specified axis for a ``DataFrame``.
@@ -1642,11 +1661,11 @@ For upsampling, you can specify a way to upsample and the ``limit`` parameter to
# from secondly to every 250 milliseconds
- ts[:2].resample('250L').asfreq()
+ ts[:2].resample("250L").asfreq()
- ts[:2].resample('250L').ffill()
+ ts[:2].resample("250L").ffill()
- ts[:2].resample('250L').ffill(limit=2)
+ ts[:2].resample("250L").ffill(limit=2)
Sparse resampling
~~~~~~~~~~~~~~~~~
@@ -1662,14 +1681,14 @@ resample only the groups that are not all ``NaN``.
.. ipython:: python
- rng = pd.date_range('2014-1-1', periods=100, freq='D') + pd.Timedelta('1s')
+ rng = pd.date_range("2014-1-1", periods=100, freq="D") + pd.Timedelta("1s")
ts = pd.Series(range(100), index=rng)
If we want to resample to the full range of the series:
.. ipython:: python
- ts.resample('3T').sum()
+ ts.resample("3T").sum()
We can instead only resample those groups where we have points as follows:
@@ -1678,12 +1697,14 @@ We can instead only resample those groups where we have points as follows:
from functools import partial
from pandas.tseries.frequencies import to_offset
+
def round(t, freq):
# round a Timestamp to a specified freq
freq = to_offset(freq)
return pd.Timestamp((t.value // freq.delta.value) * freq.delta.value)
- ts.groupby(partial(round, freq='3T')).sum()
+
+ ts.groupby(partial(round, freq="3T")).sum()
.. _timeseries.aggregate:
@@ -1697,25 +1718,27 @@ Resampling a ``DataFrame``, the default will be to act on all columns with the s
.. ipython:: python
- df = pd.DataFrame(np.random.randn(1000, 3),
- index=pd.date_range('1/1/2012', freq='S', periods=1000),
- columns=['A', 'B', 'C'])
- r = df.resample('3T')
+ df = pd.DataFrame(
+ np.random.randn(1000, 3),
+ index=pd.date_range("1/1/2012", freq="S", periods=1000),
+ columns=["A", "B", "C"],
+ )
+ r = df.resample("3T")
r.mean()
We can select a specific column or columns using standard getitem.
.. ipython:: python
- r['A'].mean()
+ r["A"].mean()
- r[['A', 'B']].mean()
+ r[["A", "B"]].mean()
You can pass a list or dict of functions to do aggregation with, outputting a ``DataFrame``:
.. ipython:: python
- r['A'].agg([np.sum, np.mean, np.std])
+ r["A"].agg([np.sum, np.mean, np.std])
On a resampled ``DataFrame``, you can pass a list of functions to apply to each
column, which produces an aggregated result with a hierarchical index:
@@ -1730,21 +1753,20 @@ columns of a ``DataFrame``:
.. ipython:: python
:okexcept:
- r.agg({'A': np.sum,
- 'B': lambda x: np.std(x, ddof=1)})
+ r.agg({"A": np.sum, "B": lambda x: np.std(x, ddof=1)})
The function names can also be strings. In order for a string to be valid it
must be implemented on the resampled object:
.. ipython:: python
- r.agg({'A': 'sum', 'B': 'std'})
+ r.agg({"A": "sum", "B": "std"})
Furthermore, you can also specify multiple aggregation functions for each column separately.
.. ipython:: python
- r.agg({'A': ['sum', 'std'], 'B': ['mean', 'std']})
+ r.agg({"A": ["sum", "std"], "B": ["mean", "std"]})
If a ``DataFrame`` does not have a datetimelike index, but instead you want
@@ -1753,14 +1775,15 @@ to resample based on a datetimelike column in the frame, it can be passed to the
.. ipython:: python
- df = pd.DataFrame({'date': pd.date_range('2015-01-01', freq='W', periods=5),
- 'a': np.arange(5)},
- index=pd.MultiIndex.from_arrays([
- [1, 2, 3, 4, 5],
- pd.date_range('2015-01-01', freq='W', periods=5)],
- names=['v', 'd']))
+ df = pd.DataFrame(
+ {"date": pd.date_range("2015-01-01", freq="W", periods=5), "a": np.arange(5)},
+ index=pd.MultiIndex.from_arrays(
+ [[1, 2, 3, 4, 5], pd.date_range("2015-01-01", freq="W", periods=5)],
+ names=["v", "d"],
+ ),
+ )
df
- df.resample('M', on='date').sum()
+ df.resample("M", on="date").sum()
Similarly, if you instead want to resample by a datetimelike
level of ``MultiIndex``, its name or location can be passed to the
@@ -1768,7 +1791,7 @@ level of ``MultiIndex``, its name or location can be passed to the
.. ipython:: python
- df.resample('M', level='d').sum()
+ df.resample("M", level="d").sum()
.. _timeseries.iterating-label:
@@ -1782,14 +1805,18 @@ natural and functions similarly to :py:func:`itertools.groupby`:
small = pd.Series(
range(6),
- index=pd.to_datetime(['2017-01-01T00:00:00',
- '2017-01-01T00:30:00',
- '2017-01-01T00:31:00',
- '2017-01-01T01:00:00',
- '2017-01-01T03:00:00',
- '2017-01-01T03:05:00'])
+ index=pd.to_datetime(
+ [
+ "2017-01-01T00:00:00",
+ "2017-01-01T00:30:00",
+ "2017-01-01T00:31:00",
+ "2017-01-01T01:00:00",
+ "2017-01-01T03:00:00",
+ "2017-01-01T03:05:00",
+ ]
+ ),
)
- resampled = small.resample('H')
+ resampled = small.resample("H")
for name, group in resampled:
print("Group: ", name)
@@ -1811,9 +1838,9 @@ For example:
.. ipython:: python
- start, end = '2000-10-01 23:30:00', '2000-10-02 00:30:00'
- middle = '2000-10-02 00:00:00'
- rng = pd.date_range(start, end, freq='7min')
+ start, end = "2000-10-01 23:30:00", "2000-10-02 00:30:00"
+ middle = "2000-10-02 00:00:00"
+ rng = pd.date_range(start, end, freq="7min")
ts = pd.Series(np.arange(len(rng)) * 3, index=rng)
ts
@@ -1821,32 +1848,32 @@ Here we can see that, when using ``origin`` with its default value (``'start_day
.. ipython:: python
- ts.resample('17min', origin='start_day').sum()
- ts[middle:end].resample('17min', origin='start_day').sum()
+ ts.resample("17min", origin="start_day").sum()
+ ts[middle:end].resample("17min", origin="start_day").sum()
Here we can see that, when setting ``origin`` to ``'epoch'``, the results after ``'2000-10-02 00:00:00'`` are identical regardless of the start of the time series:
.. ipython:: python
- ts.resample('17min', origin='epoch').sum()
- ts[middle:end].resample('17min', origin='epoch').sum()
+ ts.resample("17min", origin="epoch").sum()
+ ts[middle:end].resample("17min", origin="epoch").sum()
If needed you can use a custom timestamp for ``origin``:
.. ipython:: python
- ts.resample('17min', origin='2001-01-01').sum()
- ts[middle:end].resample('17min', origin=pd.Timestamp('2001-01-01')).sum()
+ ts.resample("17min", origin="2001-01-01").sum()
+ ts[middle:end].resample("17min", origin=pd.Timestamp("2001-01-01")).sum()
If needed you can just adjust the bins with an ``offset`` Timedelta that would be added to the default ``origin``.
Those two examples are equivalent for this time series:
.. ipython:: python
- ts.resample('17min', origin='start').sum()
- ts.resample('17min', offset='23h30min').sum()
+ ts.resample("17min", origin="start").sum()
+ ts.resample("17min", offset="23h30min").sum()
Note the use of ``'start'`` for ``origin`` on the last example. In that case, ``origin`` will be set to the first value of the time series.
@@ -1869,37 +1896,37 @@ Because ``freq`` represents a span of ``Period``, it cannot be negative like "-3
.. ipython:: python
- pd.Period('2012', freq='A-DEC')
+ pd.Period("2012", freq="A-DEC")
- pd.Period('2012-1-1', freq='D')
+ pd.Period("2012-1-1", freq="D")
- pd.Period('2012-1-1 19:00', freq='H')
+ pd.Period("2012-1-1 19:00", freq="H")
- pd.Period('2012-1-1 19:00', freq='5H')
+ pd.Period("2012-1-1 19:00", freq="5H")
Adding and subtracting integers from periods shifts the period by its own
frequency. Arithmetic is not allowed between ``Period`` with different ``freq`` (span).
.. ipython:: python
- p = pd.Period('2012', freq='A-DEC')
+ p = pd.Period("2012", freq="A-DEC")
p + 1
p - 3
- p = pd.Period('2012-01', freq='2M')
+ p = pd.Period("2012-01", freq="2M")
p + 2
p - 1
@okexcept
- p == pd.Period('2012-01', freq='3M')
+ p == pd.Period("2012-01", freq="3M")
If ``Period`` freq is daily or higher (``D``, ``H``, ``T``, ``S``, ``L``, ``U``, ``N``), ``offsets`` and ``timedelta``-like can be added if the result can have the same freq. Otherwise, ``ValueError`` will be raised.
.. ipython:: python
- p = pd.Period('2014-07-01 09:00', freq='H')
+ p = pd.Period("2014-07-01 09:00", freq="H")
p + pd.offsets.Hour(2)
p + datetime.timedelta(minutes=120)
- p + np.timedelta64(7200, 's')
+ p + np.timedelta64(7200, "s")
.. code-block:: ipython
@@ -1912,7 +1939,7 @@ If ``Period`` has other frequencies, only the same ``offsets`` can be added. Oth
.. ipython:: python
- p = pd.Period('2014-07', freq='M')
+ p = pd.Period("2014-07", freq="M")
p + pd.offsets.MonthEnd(3)
.. code-block:: ipython
@@ -1927,7 +1954,7 @@ return the number of frequency units between them:
.. ipython:: python
- pd.Period('2012', freq='A-DEC') - pd.Period('2002', freq='A-DEC')
+ pd.Period("2012", freq="A-DEC") - pd.Period("2002", freq="A-DEC")
PeriodIndex and period_range
~~~~~~~~~~~~~~~~~~~~~~~~~~~~
@@ -1936,21 +1963,21 @@ which can be constructed using the ``period_range`` convenience function:
.. ipython:: python
- prng = pd.period_range('1/1/2011', '1/1/2012', freq='M')
+ prng = pd.period_range("1/1/2011", "1/1/2012", freq="M")
prng
The ``PeriodIndex`` constructor can also be used directly:
.. ipython:: python
- pd.PeriodIndex(['2011-1', '2011-2', '2011-3'], freq='M')
+ pd.PeriodIndex(["2011-1", "2011-2", "2011-3"], freq="M")
Passing multiplied frequency outputs a sequence of ``Period`` which
has multiplied span.
.. ipython:: python
- pd.period_range(start='2014-01', freq='3M', periods=4)
+ pd.period_range(start="2014-01", freq="3M", periods=4)
If ``start`` or ``end`` are ``Period`` objects, they will be used as anchor
endpoints for a ``PeriodIndex`` with frequency matching that of the
@@ -1958,8 +1985,9 @@ endpoints for a ``PeriodIndex`` with frequency matching that of the
.. ipython:: python
- pd.period_range(start=pd.Period('2017Q1', freq='Q'),
- end=pd.Period('2017Q2', freq='Q'), freq='M')
+ pd.period_range(
+ start=pd.Period("2017Q1", freq="Q"), end=pd.Period("2017Q2", freq="Q"), freq="M"
+ )
Just like ``DatetimeIndex``, a ``PeriodIndex`` can also be used to index pandas
objects:
@@ -1973,11 +2001,11 @@ objects:
.. ipython:: python
- idx = pd.period_range('2014-07-01 09:00', periods=5, freq='H')
+ idx = pd.period_range("2014-07-01 09:00", periods=5, freq="H")
idx
idx + pd.offsets.Hour(2)
- idx = pd.period_range('2014-07', periods=5, freq='M')
+ idx = pd.period_range("2014-07", periods=5, freq="M")
idx
idx + pd.offsets.MonthEnd(3)
@@ -1996,7 +2024,7 @@ The ``period`` dtype holds the ``freq`` attribute and is represented with
.. ipython:: python
- pi = pd.period_range('2016-01-01', periods=3, freq='M')
+ pi = pd.period_range("2016-01-01", periods=3, freq="M")
pi
pi.dtype
@@ -2007,15 +2035,15 @@ The ``period`` dtype can be used in ``.astype(...)``. It allows one to change th
.. ipython:: python
# change monthly freq to daily freq
- pi.astype('period[D]')
+ pi.astype("period[D]")
# convert to DatetimeIndex
- pi.astype('datetime64[ns]')
+ pi.astype("datetime64[ns]")
# convert to PeriodIndex
- dti = pd.date_range('2011-01-01', freq='M', periods=3)
+ dti = pd.date_range("2011-01-01", freq="M", periods=3)
dti
- dti.astype('period[M]')
+ dti.astype("period[M]")
PeriodIndex partial string indexing
@@ -2029,32 +2057,32 @@ You can pass in dates and strings to ``Series`` and ``DataFrame`` with ``PeriodI
.. ipython:: python
- ps['2011-01']
+ ps["2011-01"]
ps[datetime.datetime(2011, 12, 25):]
- ps['10/31/2011':'12/31/2011']
+ ps["10/31/2011":"12/31/2011"]
Passing a string representing a lower frequency than ``PeriodIndex`` returns partial sliced data.
.. ipython:: python
:okwarning:
- ps['2011']
+ ps["2011"]
- dfp = pd.DataFrame(np.random.randn(600, 1),
- columns=['A'],
- index=pd.period_range('2013-01-01 9:00',
- periods=600,
- freq='T'))
+ dfp = pd.DataFrame(
+ np.random.randn(600, 1),
+ columns=["A"],
+ index=pd.period_range("2013-01-01 9:00", periods=600, freq="T"),
+ )
dfp
- dfp['2013-01-01 10H']
+ dfp["2013-01-01 10H"]
As with ``DatetimeIndex``, the endpoints will be included in the result. The example below slices data starting from 10:00 to 11:59.
.. ipython:: python
- dfp['2013-01-01 10H':'2013-01-01 11H']
+ dfp["2013-01-01 10H":"2013-01-01 11H"]
Frequency conversion and resampling with PeriodIndex
@@ -2064,7 +2092,7 @@ method. Let's start with the fiscal year 2011, ending in December:
.. ipython:: python
- p = pd.Period('2011', freq='A-DEC')
+ p = pd.Period("2011", freq="A-DEC")
p
We can convert it to a monthly frequency. Using the ``how`` parameter, we can
@@ -2072,16 +2100,16 @@ specify whether to return the starting or ending month:
.. ipython:: python
- p.asfreq('M', how='start')
+ p.asfreq("M", how="start")
- p.asfreq('M', how='end')
+ p.asfreq("M", how="end")
The shorthands 's' and 'e' are provided for convenience:
.. ipython:: python
- p.asfreq('M', 's')
- p.asfreq('M', 'e')
+ p.asfreq("M", "s")
+ p.asfreq("M", "e")
Converting to a "super-period" (e.g., annual frequency is a super-period of
quarterly frequency) automatically returns the super-period that includes the
@@ -2089,9 +2117,9 @@ input period:
.. ipython:: python
- p = pd.Period('2011-12', freq='M')
+ p = pd.Period("2011-12", freq="M")
- p.asfreq('A-NOV')
+ p.asfreq("A-NOV")
Note that since we converted to an annual frequency that ends the year in
November, the monthly period of December 2011 is actually in the 2012 A-NOV
@@ -2110,21 +2138,21 @@ frequencies ``Q-JAN`` through ``Q-DEC``.
.. ipython:: python
- p = pd.Period('2012Q1', freq='Q-DEC')
+ p = pd.Period("2012Q1", freq="Q-DEC")
- p.asfreq('D', 's')
+ p.asfreq("D", "s")
- p.asfreq('D', 'e')
+ p.asfreq("D", "e")
``Q-MAR`` defines fiscal year end in March:
.. ipython:: python
- p = pd.Period('2011Q4', freq='Q-MAR')
+ p = pd.Period("2011Q4", freq="Q-MAR")
- p.asfreq('D', 's')
+ p.asfreq("D", "s")
- p.asfreq('D', 'e')
+ p.asfreq("D", "e")
.. _timeseries.interchange:
@@ -2136,7 +2164,7 @@ and vice-versa using ``to_timestamp``:
.. ipython:: python
- rng = pd.date_range('1/1/2012', periods=5, freq='M')
+ rng = pd.date_range("1/1/2012", periods=5, freq="M")
ts = pd.Series(np.random.randn(len(rng)), index=rng)
@@ -2153,7 +2181,7 @@ end of the period:
.. ipython:: python
- ps.to_timestamp('D', how='s')
+ ps.to_timestamp("D", how="s")
Converting between period and timestamp enables some convenient arithmetic
functions to be used. In the following example, we convert a quarterly
@@ -2162,11 +2190,11 @@ the quarter end:
.. ipython:: python
- prng = pd.period_range('1990Q1', '2000Q4', freq='Q-NOV')
+ prng = pd.period_range("1990Q1", "2000Q4", freq="Q-NOV")
ts = pd.Series(np.random.randn(len(prng)), prng)
- ts.index = (prng.asfreq('M', 'e') + 1).asfreq('H', 's') + 9
+ ts.index = (prng.asfreq("M", "e") + 1).asfreq("H", "s") + 9
ts.head()
@@ -2180,7 +2208,7 @@ then you can use a ``PeriodIndex`` and/or ``Series`` of ``Periods`` to do comput
.. ipython:: python
- span = pd.period_range('1215-01-01', '1381-01-01', freq='D')
+ span = pd.period_range("1215-01-01", "1381-01-01", freq="D")
span
To convert from an ``int64``-based YYYYMMDD representation:
@@ -2190,9 +2218,10 @@ To convert from an ``int64`` based YYYYMMDD representation.
s = pd.Series([20121231, 20141130, 99991231])
s
+
def conv(x):
- return pd.Period(year=x // 10000, month=x // 100 % 100,
- day=x % 100, freq='D')
+ return pd.Period(year=x // 10000, month=x // 100 % 100, day=x % 100, freq="D")
+
s.apply(conv)
s.apply(conv)[2]
@@ -2221,7 +2250,7 @@ By default, pandas objects are time zone unaware:
.. ipython:: python
- rng = pd.date_range('3/6/2012 00:00', periods=15, freq='D')
+ rng = pd.date_range("3/6/2012 00:00", periods=15, freq="D")
rng.tz is None
To localize these dates to a time zone (assign a particular time zone to a naive date),
@@ -2241,18 +2270,16 @@ To return ``dateutil`` time zone objects, append ``dateutil/`` before the string
import dateutil
# pytz
- rng_pytz = pd.date_range('3/6/2012 00:00', periods=3, freq='D',
- tz='Europe/London')
+ rng_pytz = pd.date_range("3/6/2012 00:00", periods=3, freq="D", tz="Europe/London")
rng_pytz.tz
# dateutil
- rng_dateutil = pd.date_range('3/6/2012 00:00', periods=3, freq='D')
- rng_dateutil = rng_dateutil.tz_localize('dateutil/Europe/London')
+ rng_dateutil = pd.date_range("3/6/2012 00:00", periods=3, freq="D")
+ rng_dateutil = rng_dateutil.tz_localize("dateutil/Europe/London")
rng_dateutil.tz
# dateutil - utc special case
- rng_utc = pd.date_range('3/6/2012 00:00', periods=3, freq='D',
- tz=dateutil.tz.tzutc())
+ rng_utc = pd.date_range("3/6/2012 00:00", periods=3, freq="D", tz=dateutil.tz.tzutc())
rng_utc.tz
.. versionadded:: 0.25.0
@@ -2260,8 +2287,7 @@ To return ``dateutil`` time zone objects, append ``dateutil/`` before the string
.. ipython:: python
# datetime.timezone
- rng_utc = pd.date_range('3/6/2012 00:00', periods=3, freq='D',
- tz=datetime.timezone.utc)
+ rng_utc = pd.date_range("3/6/2012 00:00", periods=3, freq="D", tz=datetime.timezone.utc)
rng_utc.tz
Note that the ``UTC`` time zone is a special case in ``dateutil`` and should be constructed explicitly
@@ -2273,15 +2299,14 @@ zones objects explicitly first.
import pytz
# pytz
- tz_pytz = pytz.timezone('Europe/London')
- rng_pytz = pd.date_range('3/6/2012 00:00', periods=3, freq='D')
+ tz_pytz = pytz.timezone("Europe/London")
+ rng_pytz = pd.date_range("3/6/2012 00:00", periods=3, freq="D")
rng_pytz = rng_pytz.tz_localize(tz_pytz)
rng_pytz.tz == tz_pytz
# dateutil
- tz_dateutil = dateutil.tz.gettz('Europe/London')
- rng_dateutil = pd.date_range('3/6/2012 00:00', periods=3, freq='D',
- tz=tz_dateutil)
+ tz_dateutil = dateutil.tz.gettz("Europe/London")
+ rng_dateutil = pd.date_range("3/6/2012 00:00", periods=3, freq="D", tz=tz_dateutil)
rng_dateutil.tz == tz_dateutil
To convert a time zone aware pandas object from one time zone to another,
@@ -2289,7 +2314,7 @@ you can use the ``tz_convert`` method.
.. ipython:: python
- rng_pytz.tz_convert('US/Eastern')
+ rng_pytz.tz_convert("US/Eastern")
.. note::
@@ -2301,9 +2326,9 @@ you can use the ``tz_convert`` method.
.. ipython:: python
- dti = pd.date_range('2019-01-01', periods=3, freq='D', tz='US/Pacific')
+ dti = pd.date_range("2019-01-01", periods=3, freq="D", tz="US/Pacific")
dti.tz
- ts = pd.Timestamp('2019-01-01', tz='US/Pacific')
+ ts = pd.Timestamp("2019-01-01", tz="US/Pacific")
ts.tz
.. warning::
@@ -2344,11 +2369,11 @@ you can use the ``tz_convert`` method.
.. ipython:: python
- d_2037 = '2037-03-31T010101'
- d_2038 = '2038-03-31T010101'
- DST = 'Europe/London'
- assert pd.Timestamp(d_2037, tz=DST) != pd.Timestamp(d_2037, tz='GMT')
- assert pd.Timestamp(d_2038, tz=DST) == pd.Timestamp(d_2038, tz='GMT')
+ d_2037 = "2037-03-31T010101"
+ d_2038 = "2038-03-31T010101"
+ DST = "Europe/London"
+ assert pd.Timestamp(d_2037, tz=DST) != pd.Timestamp(d_2037, tz="GMT")
+ assert pd.Timestamp(d_2038, tz=DST) == pd.Timestamp(d_2038, tz="GMT")
Under the hood, all timestamps are stored in UTC. Values from a time zone aware
:class:`DatetimeIndex` or :class:`Timestamp` will have their fields (day, hour, minute, etc.)
@@ -2357,8 +2382,8 @@ still considered to be equal even if they are in different time zones:
.. ipython:: python
- rng_eastern = rng_utc.tz_convert('US/Eastern')
- rng_berlin = rng_utc.tz_convert('Europe/Berlin')
+ rng_eastern = rng_utc.tz_convert("US/Eastern")
+ rng_berlin = rng_utc.tz_convert("Europe/Berlin")
rng_eastern[2]
rng_berlin[2]
@@ -2369,9 +2394,9 @@ Operations between :class:`Series` in different time zones will yield UTC
.. ipython:: python
- ts_utc = pd.Series(range(3), pd.date_range('20130101', periods=3, tz='UTC'))
- eastern = ts_utc.tz_convert('US/Eastern')
- berlin = ts_utc.tz_convert('Europe/Berlin')
+ ts_utc = pd.Series(range(3), pd.date_range("20130101", periods=3, tz="UTC"))
+ eastern = ts_utc.tz_convert("US/Eastern")
+ berlin = ts_utc.tz_convert("Europe/Berlin")
result = eastern + berlin
result
result.index
@@ -2382,14 +2407,13 @@ To remove time zone information, use ``tz_localize(None)`` or ``tz_convert(None)
.. ipython:: python
- didx = pd.date_range(start='2014-08-01 09:00', freq='H',
- periods=3, tz='US/Eastern')
+ didx = pd.date_range(start="2014-08-01 09:00", freq="H", periods=3, tz="US/Eastern")
didx
didx.tz_localize(None)
didx.tz_convert(None)
# tz_convert(None) is identical to tz_convert('UTC').tz_localize(None)
- didx.tz_convert('UTC').tz_localize(None)
+ didx.tz_convert("UTC").tz_localize(None)
.. _timeseries.fold:
@@ -2415,10 +2439,12 @@ control over how they are handled.
.. ipython:: python
- pd.Timestamp(datetime.datetime(2019, 10, 27, 1, 30, 0, 0),
- tz='dateutil/Europe/London', fold=0)
- pd.Timestamp(year=2019, month=10, day=27, hour=1, minute=30,
- tz='dateutil/Europe/London', fold=1)
+ pd.Timestamp(
+ datetime.datetime(2019, 10, 27, 1, 30, 0, 0), tz="dateutil/Europe/London", fold=0
+ )
+ pd.Timestamp(
+ year=2019, month=10, day=27, hour=1, minute=30, tz="dateutil/Europe/London", fold=1
+ )
.. _timeseries.timezone_ambiguous:
@@ -2436,8 +2462,9 @@ twice within one day ("clocks fall back"). The following options are available:
.. ipython:: python
- rng_hourly = pd.DatetimeIndex(['11/06/2011 00:00', '11/06/2011 01:00',
- '11/06/2011 01:00', '11/06/2011 02:00'])
+ rng_hourly = pd.DatetimeIndex(
+ ["11/06/2011 00:00", "11/06/2011 01:00", "11/06/2011 01:00", "11/06/2011 02:00"]
+ )
This will fail as there are ambiguous times (``'11/06/2011 01:00'``)
@@ -2450,9 +2477,9 @@ Handle these ambiguous times by specifying the following.
.. ipython:: python
- rng_hourly.tz_localize('US/Eastern', ambiguous='infer')
- rng_hourly.tz_localize('US/Eastern', ambiguous='NaT')
- rng_hourly.tz_localize('US/Eastern', ambiguous=[True, True, False, False])
+ rng_hourly.tz_localize("US/Eastern", ambiguous="infer")
+ rng_hourly.tz_localize("US/Eastern", ambiguous="NaT")
+ rng_hourly.tz_localize("US/Eastern", ambiguous=[True, True, False, False])
.. _timeseries.timezone_nonexistent:
@@ -2471,7 +2498,7 @@ can be controlled by the ``nonexistent`` argument. The following options are ava
.. ipython:: python
- dti = pd.date_range(start='2015-03-29 02:30:00', periods=3, freq='H')
+ dti = pd.date_range(start="2015-03-29 02:30:00", periods=3, freq="H")
# 2:30 is a nonexistent time
Localization of nonexistent times will raise an error by default.
@@ -2486,10 +2513,10 @@ Transform nonexistent times to ``NaT`` or shift the times.
.. ipython:: python
dti
- dti.tz_localize('Europe/Warsaw', nonexistent='shift_forward')
- dti.tz_localize('Europe/Warsaw', nonexistent='shift_backward')
- dti.tz_localize('Europe/Warsaw', nonexistent=pd.Timedelta(1, unit='H'))
- dti.tz_localize('Europe/Warsaw', nonexistent='NaT')
+ dti.tz_localize("Europe/Warsaw", nonexistent="shift_forward")
+ dti.tz_localize("Europe/Warsaw", nonexistent="shift_backward")
+ dti.tz_localize("Europe/Warsaw", nonexistent=pd.Timedelta(1, unit="H"))
+ dti.tz_localize("Europe/Warsaw", nonexistent="NaT")
.. _timeseries.timezone_series:
@@ -2502,7 +2529,7 @@ represented with a dtype of ``datetime64[ns]``.
.. ipython:: python
- s_naive = pd.Series(pd.date_range('20130101', periods=3))
+ s_naive = pd.Series(pd.date_range("20130101", periods=3))
s_naive
A :class:`Series` with a time zone **aware** values is
@@ -2510,7 +2537,7 @@ represented with a dtype of ``datetime64[ns, tz]`` where ``tz`` is the time zone
.. ipython:: python
- s_aware = pd.Series(pd.date_range('20130101', periods=3, tz='US/Eastern'))
+ s_aware = pd.Series(pd.date_range("20130101", periods=3, tz="US/Eastern"))
s_aware
Both of these :class:`Series` time zone information
@@ -2520,7 +2547,7 @@ For example, to localize and convert a naive stamp to time zone aware.
.. ipython:: python
- s_naive.dt.tz_localize('UTC').dt.tz_convert('US/Eastern')
+ s_naive.dt.tz_localize("UTC").dt.tz_convert("US/Eastern")
Time zone information can also be manipulated using the ``astype`` method.
This method can localize and convert time zone naive timestamps or
@@ -2529,13 +2556,13 @@ convert time zone aware timestamps.
.. ipython:: python
# localize and convert a naive time zone
- s_naive.astype('datetime64[ns, US/Eastern]')
+ s_naive.astype("datetime64[ns, US/Eastern]")
# make an aware tz naive
- s_aware.astype('datetime64[ns]')
+ s_aware.astype("datetime64[ns]")
# convert to a new time zone
- s_aware.astype('datetime64[ns, CET]')
+ s_aware.astype("datetime64[ns, CET]")
.. note::
@@ -2561,4 +2588,4 @@ convert time zone aware timestamps.
.. ipython:: python
- s_aware.to_numpy(dtype='datetime64[ns]')
+ s_aware.to_numpy(dtype="datetime64[ns]")
diff --git a/doc/source/user_guide/visualization.rst b/doc/source/user_guide/visualization.rst
index f41912445455d..46ab29a52747a 100644
--- a/doc/source/user_guide/visualization.rst
+++ b/doc/source/user_guide/visualization.rst
@@ -11,7 +11,8 @@ We use the standard convention for referencing the matplotlib API:
.. ipython:: python
import matplotlib.pyplot as plt
- plt.close('all')
+
+ plt.close("all")
We provide the basics in pandas to easily create decent looking plots.
See the :ref:`ecosystem <ecosystem.visualization>` section for visualization
@@ -39,8 +40,7 @@ The ``plot`` method on Series and DataFrame is just a simple wrapper around
.. ipython:: python
- ts = pd.Series(np.random.randn(1000),
- index=pd.date_range('1/1/2000', periods=1000))
+ ts = pd.Series(np.random.randn(1000), index=pd.date_range("1/1/2000", periods=1000))
ts = ts.cumsum()
@savefig series_plot_basic.png
@@ -54,18 +54,17 @@ On DataFrame, :meth:`~DataFrame.plot` is a convenience to plot all of the column
.. ipython:: python
:suppress:
- plt.close('all')
+ plt.close("all")
np.random.seed(123456)
.. ipython:: python
- df = pd.DataFrame(np.random.randn(1000, 4),
- index=ts.index, columns=list('ABCD'))
+ df = pd.DataFrame(np.random.randn(1000, 4), index=ts.index, columns=list("ABCD"))
df = df.cumsum()
plt.figure();
@savefig frame_plot_basic.png
- df.plot();
+ df.plot()
You can plot one column versus another using the ``x`` and ``y`` keywords in
:meth:`~DataFrame.plot`:
@@ -73,17 +72,17 @@ You can plot one column versus another using the ``x`` and ``y`` keywords in
.. ipython:: python
:suppress:
- plt.close('all')
+ plt.close("all")
plt.figure()
np.random.seed(123456)
.. ipython:: python
- df3 = pd.DataFrame(np.random.randn(1000, 2), columns=['B', 'C']).cumsum()
- df3['A'] = pd.Series(list(range(len(df))))
+ df3 = pd.DataFrame(np.random.randn(1000, 2), columns=["B", "C"]).cumsum()
+ df3["A"] = pd.Series(list(range(len(df))))
@savefig df_plot_xy.png
- df3.plot(x='A', y='B')
+ df3.plot(x="A", y="B")
.. note::
@@ -93,7 +92,7 @@ You can plot one column versus another using the ``x`` and ``y`` keywords in
.. ipython:: python
:suppress:
- plt.close('all')
+ plt.close("all")
.. _visualization.other:
@@ -120,7 +119,7 @@ For example, a bar plot can be created the following way:
plt.figure();
@savefig bar_plot_ex.png
- df.iloc[5].plot(kind='bar');
+ df.iloc[5].plot(kind="bar")
You can also create these other plots using the methods ``DataFrame.plot.<kind>`` instead of providing the ``kind`` keyword argument. This makes it easier to discover plot methods and the specific arguments they use:
@@ -164,7 +163,7 @@ For labeled, non-time series data, you may wish to produce a bar plot:
@savefig bar_plot_ex.png
df.iloc[5].plot.bar()
- plt.axhline(0, color='k');
+ plt.axhline(0, color="k")
Calling a DataFrame's :meth:`plot.bar() <DataFrame.plot.bar>` method produces a multiple
bar plot:
@@ -172,42 +171,42 @@ bar plot:
.. ipython:: python
:suppress:
- plt.close('all')
+ plt.close("all")
plt.figure()
np.random.seed(123456)
.. ipython:: python
- df2 = pd.DataFrame(np.random.rand(10, 4), columns=['a', 'b', 'c', 'd'])
+ df2 = pd.DataFrame(np.random.rand(10, 4), columns=["a", "b", "c", "d"])
@savefig bar_plot_multi_ex.png
- df2.plot.bar();
+ df2.plot.bar()
To produce a stacked bar plot, pass ``stacked=True``:
.. ipython:: python
:suppress:
- plt.close('all')
+ plt.close("all")
plt.figure()
.. ipython:: python
@savefig bar_plot_stacked_ex.png
- df2.plot.bar(stacked=True);
+ df2.plot.bar(stacked=True)
To get horizontal bar plots, use the ``barh`` method:
.. ipython:: python
:suppress:
- plt.close('all')
+ plt.close("all")
plt.figure()
.. ipython:: python
@savefig barh_plot_stacked_ex.png
- df2.plot.barh(stacked=True);
+ df2.plot.barh(stacked=True)
.. _visualization.hist:
@@ -218,8 +217,14 @@ Histograms can be drawn by using the :meth:`DataFrame.plot.hist` and :meth:`Seri
.. ipython:: python
- df4 = pd.DataFrame({'a': np.random.randn(1000) + 1, 'b': np.random.randn(1000),
- 'c': np.random.randn(1000) - 1}, columns=['a', 'b', 'c'])
+ df4 = pd.DataFrame(
+ {
+ "a": np.random.randn(1000) + 1,
+ "b": np.random.randn(1000),
+ "c": np.random.randn(1000) - 1,
+ },
+ columns=["a", "b", "c"],
+ )
plt.figure();
@@ -230,7 +235,7 @@ Histograms can be drawn by using the :meth:`DataFrame.plot.hist` and :meth:`Seri
.. ipython:: python
:suppress:
- plt.close('all')
+ plt.close("all")
A histogram can be stacked using ``stacked=True``. Bin size can be changed
using the ``bins`` keyword.
@@ -245,7 +250,7 @@ using the ``bins`` keyword.
.. ipython:: python
:suppress:
- plt.close('all')
+ plt.close("all")
You can pass other keywords supported by matplotlib ``hist``. For example,
horizontal and cumulative histograms can be drawn by
@@ -256,12 +261,12 @@ horizontal and cumulative histograms can be drawn by
plt.figure();
@savefig hist_new_kwargs.png
- df4['a'].plot.hist(orientation='horizontal', cumulative=True)
+ df4["a"].plot.hist(orientation="horizontal", cumulative=True)
.. ipython:: python
:suppress:
- plt.close('all')
+ plt.close("all")
See the :meth:`hist <matplotlib.axes.Axes.hist>` method and the
`matplotlib hist documentation <https://matplotlib.org/api/pyplot_api.html#matplotlib.pyplot.hist>`__ for more.
@@ -274,12 +279,12 @@ The existing interface ``DataFrame.hist`` to plot histogram still can be used.
plt.figure();
@savefig hist_plot_ex.png
- df['A'].diff().hist()
+ df["A"].diff().hist()
.. ipython:: python
:suppress:
- plt.close('all')
+ plt.close("all")
:meth:`DataFrame.hist` plots the histograms of the columns on multiple
subplots:
@@ -289,7 +294,7 @@ subplots:
plt.figure()
@savefig frame_hist_ex.png
- df.diff().hist(color='k', alpha=0.5, bins=50)
+ df.diff().hist(color="k", alpha=0.5, bins=50)
The ``by`` keyword can be specified to plot grouped histograms:
@@ -297,7 +302,7 @@ The ``by`` keyword can be specified to plot grouped histograms:
.. ipython:: python
:suppress:
- plt.close('all')
+ plt.close("all")
plt.figure()
np.random.seed(123456)
@@ -323,12 +328,12 @@ a uniform random variable on [0,1).
.. ipython:: python
:suppress:
- plt.close('all')
+ plt.close("all")
np.random.seed(123456)
.. ipython:: python
- df = pd.DataFrame(np.random.rand(10, 5), columns=['A', 'B', 'C', 'D', 'E'])
+ df = pd.DataFrame(np.random.rand(10, 5), columns=["A", "B", "C", "D", "E"])
@savefig box_plot_new.png
df.plot.box()
@@ -348,16 +353,20 @@ more complicated colorization, you can get each drawn artists by passing
.. ipython:: python
- color = {'boxes': 'DarkGreen', 'whiskers': 'DarkOrange',
- 'medians': 'DarkBlue', 'caps': 'Gray'}
+ color = {
+ "boxes": "DarkGreen",
+ "whiskers": "DarkOrange",
+ "medians": "DarkBlue",
+ "caps": "Gray",
+ }
@savefig box_new_colorize.png
- df.plot.box(color=color, sym='r+')
+ df.plot.box(color=color, sym="r+")
.. ipython:: python
:suppress:
- plt.close('all')
+ plt.close("all")
Also, you can pass other keywords supported by matplotlib ``boxplot``.
For example, horizontal and custom-positioned boxplot can be drawn by
@@ -378,7 +387,7 @@ The existing interface ``DataFrame.boxplot`` to plot boxplot still can be used.
.. ipython:: python
:suppress:
- plt.close('all')
+ plt.close("all")
np.random.seed(123456)
.. ipython:: python
@@ -396,19 +405,19 @@ groupings. For instance,
.. ipython:: python
:suppress:
- plt.close('all')
+ plt.close("all")
np.random.seed(123456)
.. ipython:: python
:okwarning:
- df = pd.DataFrame(np.random.rand(10, 2), columns=['Col1', 'Col2'])
- df['X'] = pd.Series(['A', 'A', 'A', 'A', 'A', 'B', 'B', 'B', 'B', 'B'])
+ df = pd.DataFrame(np.random.rand(10, 2), columns=["Col1", "Col2"])
+ df["X"] = pd.Series(["A", "A", "A", "A", "A", "B", "B", "B", "B", "B"])
- plt.figure();
+ plt.figure()
@savefig box_plot_ex2.png
- bp = df.boxplot(by='X')
+ bp = df.boxplot(by="X")
You can also pass a subset of columns to plot, as well as group by multiple
columns:
@@ -416,25 +425,25 @@ columns:
.. ipython:: python
:suppress:
- plt.close('all')
+ plt.close("all")
np.random.seed(123456)
.. ipython:: python
:okwarning:
- df = pd.DataFrame(np.random.rand(10, 3), columns=['Col1', 'Col2', 'Col3'])
- df['X'] = pd.Series(['A', 'A', 'A', 'A', 'A', 'B', 'B', 'B', 'B', 'B'])
- df['Y'] = pd.Series(['A', 'B', 'A', 'B', 'A', 'B', 'A', 'B', 'A', 'B'])
+ df = pd.DataFrame(np.random.rand(10, 3), columns=["Col1", "Col2", "Col3"])
+ df["X"] = pd.Series(["A", "A", "A", "A", "A", "B", "B", "B", "B", "B"])
+ df["Y"] = pd.Series(["A", "B", "A", "B", "A", "B", "A", "B", "A", "B"])
plt.figure();
@savefig box_plot_ex3.png
- bp = df.boxplot(column=['Col1', 'Col2'], by=['X', 'Y'])
+ bp = df.boxplot(column=["Col1", "Col2"], by=["X", "Y"])
.. ipython:: python
:suppress:
- plt.close('all')
+ plt.close("all")
.. _visualization.box.return:
@@ -462,16 +471,16 @@ keyword, will affect the output type as well:
np.random.seed(1234)
df_box = pd.DataFrame(np.random.randn(50, 2))
- df_box['g'] = np.random.choice(['A', 'B'], size=50)
- df_box.loc[df_box['g'] == 'B', 1] += 3
+ df_box["g"] = np.random.choice(["A", "B"], size=50)
+ df_box.loc[df_box["g"] == "B", 1] += 3
@savefig boxplot_groupby.png
- bp = df_box.boxplot(by='g')
+ bp = df_box.boxplot(by="g")
.. ipython:: python
:suppress:
- plt.close('all')
+ plt.close("all")
The subplots above are split by the numeric columns first, then the value of
the ``g`` column. Below the subplots are first split by the value of ``g``,
@@ -481,12 +490,12 @@ then by the numeric columns.
:okwarning:
@savefig groupby_boxplot_vis.png
- bp = df_box.groupby('g').boxplot()
+ bp = df_box.groupby("g").boxplot()
.. ipython:: python
:suppress:
- plt.close('all')
+ plt.close("all")
.. _visualization.area_plot:
@@ -506,23 +515,23 @@ When input data contains ``NaN``, it will be automatically filled by 0. If you w
.. ipython:: python
- df = pd.DataFrame(np.random.rand(10, 4), columns=['a', 'b', 'c', 'd'])
+ df = pd.DataFrame(np.random.rand(10, 4), columns=["a", "b", "c", "d"])
@savefig area_plot_stacked.png
- df.plot.area();
+ df.plot.area()
To produce an unstacked plot, pass ``stacked=False``. Alpha value is set to 0.5 unless otherwise specified:
.. ipython:: python
:suppress:
- plt.close('all')
+ plt.close("all")
plt.figure()
.. ipython:: python
@savefig area_plot_unstacked.png
- df.plot.area(stacked=False);
+ df.plot.area(stacked=False)
.. _visualization.scatter:
@@ -537,29 +546,29 @@ These can be specified by the ``x`` and ``y`` keywords.
:suppress:
np.random.seed(123456)
- plt.close('all')
+ plt.close("all")
plt.figure()
.. ipython:: python
- df = pd.DataFrame(np.random.rand(50, 4), columns=['a', 'b', 'c', 'd'])
+ df = pd.DataFrame(np.random.rand(50, 4), columns=["a", "b", "c", "d"])
@savefig scatter_plot.png
- df.plot.scatter(x='a', y='b');
+ df.plot.scatter(x="a", y="b")
To plot multiple column groups in a single axes, repeat ``plot`` method specifying target ``ax``.
It is recommended to specify ``color`` and ``label`` keywords to distinguish each group.
.. ipython:: python
- ax = df.plot.scatter(x='a', y='b', color='DarkBlue', label='Group 1');
+ ax = df.plot.scatter(x="a", y="b", color="DarkBlue", label="Group 1")
@savefig scatter_plot_repeated.png
- df.plot.scatter(x='c', y='d', color='DarkGreen', label='Group 2', ax=ax);
+ df.plot.scatter(x="c", y="d", color="DarkGreen", label="Group 2", ax=ax)
.. ipython:: python
:suppress:
- plt.close('all')
+ plt.close("all")
The keyword ``c`` may be given as the name of a column to provide colors for
each point:
@@ -567,13 +576,13 @@ each point:
.. ipython:: python
@savefig scatter_plot_colored.png
- df.plot.scatter(x='a', y='b', c='c', s=50);
+ df.plot.scatter(x="a", y="b", c="c", s=50)
.. ipython:: python
:suppress:
- plt.close('all')
+ plt.close("all")
You can pass other keywords supported by matplotlib
:meth:`scatter <matplotlib.axes.Axes.scatter>`. The example below shows a
@@ -582,12 +591,12 @@ bubble chart using a column of the ``DataFrame`` as the bubble size.
.. ipython:: python
@savefig scatter_plot_bubble.png
- df.plot.scatter(x='a', y='b', s=df['c'] * 200);
+ df.plot.scatter(x="a", y="b", s=df["c"] * 200)
.. ipython:: python
:suppress:
- plt.close('all')
+ plt.close("all")
See the :meth:`scatter <matplotlib.axes.Axes.scatter>` method and the
`matplotlib scatter documentation <https://matplotlib.org/api/pyplot_api.html#matplotlib.pyplot.scatter>`__ for more.
@@ -609,11 +618,11 @@ too dense to plot each point individually.
.. ipython:: python
- df = pd.DataFrame(np.random.randn(1000, 2), columns=['a', 'b'])
- df['b'] = df['b'] + np.arange(1000)
+ df = pd.DataFrame(np.random.randn(1000, 2), columns=["a", "b"])
+ df["b"] = df["b"] + np.arange(1000)
@savefig hexbin_plot.png
- df.plot.hexbin(x='a', y='b', gridsize=25)
+ df.plot.hexbin(x="a", y="b", gridsize=25)
A useful keyword argument is ``gridsize``; it controls the number of hexagons
@@ -631,23 +640,23 @@ given by column ``z``. The bins are aggregated with NumPy's ``max`` function.
.. ipython:: python
:suppress:
- plt.close('all')
+ plt.close("all")
plt.figure()
np.random.seed(123456)
.. ipython:: python
- df = pd.DataFrame(np.random.randn(1000, 2), columns=['a', 'b'])
- df['b'] = df['b'] = df['b'] + np.arange(1000)
- df['z'] = np.random.uniform(0, 3, 1000)
+ df = pd.DataFrame(np.random.randn(1000, 2), columns=["a", "b"])
+ df["b"] = df["b"] = df["b"] + np.arange(1000)
+ df["z"] = np.random.uniform(0, 3, 1000)
@savefig hexbin_plot_agg.png
- df.plot.hexbin(x='a', y='b', C='z', reduce_C_function=np.max, gridsize=25)
+ df.plot.hexbin(x="a", y="b", C="z", reduce_C_function=np.max, gridsize=25)
.. ipython:: python
:suppress:
- plt.close('all')
+ plt.close("all")
See the :meth:`hexbin <matplotlib.axes.Axes.hexbin>` method and the
`matplotlib hexbin documentation <https://matplotlib.org/api/pyplot_api.html#matplotlib.pyplot.hexbin>`__ for more.
@@ -670,8 +679,7 @@ A ``ValueError`` will be raised if there are any negative values in your data.
.. ipython:: python
:okwarning:
- series = pd.Series(3 * np.random.rand(4),
- index=['a', 'b', 'c', 'd'], name='series')
+ series = pd.Series(3 * np.random.rand(4), index=["a", "b", "c", "d"], name="series")
@savefig series_pie_plot.png
series.plot.pie(figsize=(6, 6))
@@ -679,7 +687,7 @@ A ``ValueError`` will be raised if there are any negative values in your data.
.. ipython:: python
:suppress:
- plt.close('all')
+ plt.close("all")
For pie plots it's best to use square figures, i.e. a figure aspect ratio 1.
You can create the figure with equal width and height, or force the aspect ratio
@@ -700,8 +708,9 @@ drawn in each pie plots by default; specify ``legend=False`` to hide it.
.. ipython:: python
- df = pd.DataFrame(3 * np.random.rand(4, 2),
- index=['a', 'b', 'c', 'd'], columns=['x', 'y'])
+ df = pd.DataFrame(
+ 3 * np.random.rand(4, 2), index=["a", "b", "c", "d"], columns=["x", "y"]
+ )
@savefig df_pie_plot.png
df.plot.pie(subplots=True, figsize=(8, 4))
@@ -709,7 +718,7 @@ drawn in each pie plots by default; specify ``legend=False`` to hide it.
.. ipython:: python
:suppress:
- plt.close('all')
+ plt.close("all")
You can use the ``labels`` and ``colors`` keywords to specify the labels and colors of each wedge.
@@ -731,21 +740,26 @@ Also, other keywords supported by :func:`matplotlib.pyplot.pie` can be used.
.. ipython:: python
@savefig series_pie_plot_options.png
- series.plot.pie(labels=['AA', 'BB', 'CC', 'DD'], colors=['r', 'g', 'b', 'c'],
- autopct='%.2f', fontsize=20, figsize=(6, 6))
+ series.plot.pie(
+ labels=["AA", "BB", "CC", "DD"],
+ colors=["r", "g", "b", "c"],
+ autopct="%.2f",
+ fontsize=20,
+ figsize=(6, 6),
+ )
If you pass values whose sum total is less than 1.0, matplotlib draws a semicircle.
.. ipython:: python
:suppress:
- plt.close('all')
+ plt.close("all")
plt.figure()
.. ipython:: python
:okwarning:
- series = pd.Series([0.1] * 4, index=['a', 'b', 'c', 'd'], name='series2')
+ series = pd.Series([0.1] * 4, index=["a", "b", "c", "d"], name="series2")
@savefig series_pie_plot_semi.png
series.plot.pie(figsize=(6, 6))
@@ -755,7 +769,7 @@ See the `matplotlib pie documentation <https://matplotlib.org/api/pyplot_api.htm
.. ipython:: python
:suppress:
- plt.close('all')
+ plt.close("all")
.. _visualization.missing_data:
@@ -819,15 +833,16 @@ You can create a scatter plot matrix using the
.. ipython:: python
from pandas.plotting import scatter_matrix
- df = pd.DataFrame(np.random.randn(1000, 4), columns=['a', 'b', 'c', 'd'])
+
+ df = pd.DataFrame(np.random.randn(1000, 4), columns=["a", "b", "c", "d"])
@savefig scatter_matrix_kde.png
- scatter_matrix(df, alpha=0.2, figsize=(6, 6), diagonal='kde');
+ scatter_matrix(df, alpha=0.2, figsize=(6, 6), diagonal="kde")
.. ipython:: python
:suppress:
- plt.close('all')
+ plt.close("all")
.. _visualization.kde:
@@ -852,7 +867,7 @@ You can create density plots using the :meth:`Series.plot.kde` and :meth:`DataFr
.. ipython:: python
:suppress:
- plt.close('all')
+ plt.close("all")
.. _visualization.andrews_curves:
@@ -872,12 +887,12 @@ of the same class will usually be closer together and form larger structures.
from pandas.plotting import andrews_curves
- data = pd.read_csv('data/iris.data')
+ data = pd.read_csv("data/iris.data")
plt.figure()
@savefig andrews_curves.png
- andrews_curves(data, 'Name')
+ andrews_curves(data, "Name")
.. _visualization.parallel_coordinates:
@@ -896,17 +911,17 @@ represents one data point. Points that tend to cluster will appear closer togeth
from pandas.plotting import parallel_coordinates
- data = pd.read_csv('data/iris.data')
+ data = pd.read_csv("data/iris.data")
plt.figure()
@savefig parallel_coordinates.png
- parallel_coordinates(data, 'Name')
+ parallel_coordinates(data, "Name")
.. ipython:: python
:suppress:
- plt.close('all')
+ plt.close("all")
.. _visualization.lag:
@@ -939,7 +954,7 @@ be passed, and when ``lag=1`` the plot is essentially ``data[:-1]`` vs.
.. ipython:: python
:suppress:
- plt.close('all')
+ plt.close("all")
.. _visualization.autocorrelation:
@@ -976,7 +991,7 @@ autocorrelation plots.
.. ipython:: python
:suppress:
- plt.close('all')
+ plt.close("all")
.. _visualization.bootstrap:
@@ -1001,12 +1016,12 @@ are what constitutes the bootstrap plot.
data = pd.Series(np.random.rand(1000))
@savefig bootstrap_plot.png
- bootstrap_plot(data, size=50, samples=500, color='grey')
+ bootstrap_plot(data, size=50, samples=500, color="grey")
.. ipython:: python
:suppress:
- plt.close('all')
+ plt.close("all")
.. _visualization.radviz:
@@ -1032,17 +1047,17 @@ for more information.
from pandas.plotting import radviz
- data = pd.read_csv('data/iris.data')
+ data = pd.read_csv("data/iris.data")
plt.figure()
@savefig radviz.png
- radviz(data, 'Name')
+ radviz(data, "Name")
.. ipython:: python
:suppress:
- plt.close('all')
+ plt.close("all")
.. _visualization.formatting:
@@ -1071,12 +1086,12 @@ layout and formatting of the returned plot:
plt.figure();
@savefig series_plot_basic2.png
- ts.plot(style='k--', label='Series');
+ ts.plot(style="k--", label="Series")
.. ipython:: python
:suppress:
- plt.close('all')
+ plt.close("all")
For each kind of plot (e.g. ``line``, ``bar``, ``scatter``) any additional arguments
keywords are passed along to the corresponding matplotlib function
@@ -1098,8 +1113,7 @@ shown by default.
.. ipython:: python
- df = pd.DataFrame(np.random.randn(1000, 4),
- index=ts.index, columns=list('ABCD'))
+ df = pd.DataFrame(np.random.randn(1000, 4), index=ts.index, columns=list("ABCD"))
df = df.cumsum()
@savefig frame_plot_basic_noleg.png
@@ -1108,7 +1122,7 @@ shown by default.
.. ipython:: python
:suppress:
- plt.close('all')
+ plt.close("all")
Controlling the labels
@@ -1135,7 +1149,7 @@ it empty for ylabel.
.. ipython:: python
:suppress:
- plt.close('all')
+ plt.close("all")
Scales
@@ -1151,8 +1165,7 @@ You may pass ``logy`` to get a log-scale Y axis.
.. ipython:: python
- ts = pd.Series(np.random.randn(1000),
- index=pd.date_range('1/1/2000', periods=1000))
+ ts = pd.Series(np.random.randn(1000), index=pd.date_range("1/1/2000", periods=1000))
ts = np.exp(ts.cumsum())
@savefig series_plot_logy.png
@@ -1161,7 +1174,7 @@ You may pass ``logy`` to get a log-scale Y axis.
.. ipython:: python
:suppress:
- plt.close('all')
+ plt.close("all")
See also the ``logx`` and ``loglog`` keyword arguments.
@@ -1177,15 +1190,15 @@ To plot data on a secondary y-axis, use the ``secondary_y`` keyword:
.. ipython:: python
- df['A'].plot()
+ df["A"].plot()
@savefig series_plot_secondary_y.png
- df['B'].plot(secondary_y=True, style='g')
+ df["B"].plot(secondary_y=True, style="g")
.. ipython:: python
:suppress:
- plt.close('all')
+ plt.close("all")
To plot some columns in a ``DataFrame``, give the column names to the ``secondary_y``
keyword:
@@ -1193,15 +1206,15 @@ keyword:
.. ipython:: python
plt.figure()
- ax = df.plot(secondary_y=['A', 'B'])
- ax.set_ylabel('CD scale')
+ ax = df.plot(secondary_y=["A", "B"])
+ ax.set_ylabel("CD scale")
@savefig frame_plot_secondary_y.png
- ax.right_ax.set_ylabel('AB scale')
+ ax.right_ax.set_ylabel("AB scale")
.. ipython:: python
:suppress:
- plt.close('all')
+ plt.close("all")
Note that the columns plotted on the secondary y-axis is automatically marked
with "(right)" in the legend. To turn off the automatic marking, use the
@@ -1212,12 +1225,12 @@ with "(right)" in the legend. To turn off the automatic marking, use the
plt.figure()
@savefig frame_plot_secondary_y_no_right.png
- df.plot(secondary_y=['A', 'B'], mark_right=False)
+ df.plot(secondary_y=["A", "B"], mark_right=False)
.. ipython:: python
:suppress:
- plt.close('all')
+ plt.close("all")
.. _plotting.formatters:
@@ -1249,12 +1262,12 @@ Here is the default behavior, notice how the x-axis tick labeling is performed:
plt.figure()
@savefig ser_plot_suppress.png
- df['A'].plot()
+ df["A"].plot()
.. ipython:: python
:suppress:
- plt.close('all')
+ plt.close("all")
Using the ``x_compat`` parameter, you can suppress this behavior:
@@ -1263,12 +1276,12 @@ Using the ``x_compat`` parameter, you can suppress this behavior:
plt.figure()
@savefig ser_plot_suppress_parm.png
- df['A'].plot(x_compat=True)
+ df["A"].plot(x_compat=True)
.. ipython:: python
:suppress:
- plt.close('all')
+ plt.close("all")
If you have more than one plot that needs to be suppressed, the ``use`` method
in ``pandas.plotting.plot_params`` can be used in a ``with`` statement:
@@ -1278,15 +1291,15 @@ in ``pandas.plotting.plot_params`` can be used in a ``with`` statement:
plt.figure()
@savefig ser_plot_suppress_context.png
- with pd.plotting.plot_params.use('x_compat', True):
- df['A'].plot(color='r')
- df['B'].plot(color='g')
- df['C'].plot(color='b')
+ with pd.plotting.plot_params.use("x_compat", True):
+ df["A"].plot(color="r")
+ df["B"].plot(color="g")
+ df["C"].plot(color="b")
.. ipython:: python
:suppress:
- plt.close('all')
+ plt.close("all")
Automatic date tick adjustment
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
@@ -1307,12 +1320,12 @@ with the ``subplots`` keyword:
.. ipython:: python
@savefig frame_plot_subplots.png
- df.plot(subplots=True, figsize=(6, 6));
+ df.plot(subplots=True, figsize=(6, 6))
.. ipython:: python
:suppress:
- plt.close('all')
+ plt.close("all")
Using layout and targeting multiple axes
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
@@ -1330,23 +1343,23 @@ or columns needed, given the other.
.. ipython:: python
@savefig frame_plot_subplots_layout.png
- df.plot(subplots=True, layout=(2, 3), figsize=(6, 6), sharex=False);
+ df.plot(subplots=True, layout=(2, 3), figsize=(6, 6), sharex=False)
.. ipython:: python
:suppress:
- plt.close('all')
+ plt.close("all")
The above example is identical to using:
.. ipython:: python
- df.plot(subplots=True, layout=(2, -1), figsize=(6, 6), sharex=False);
+ df.plot(subplots=True, layout=(2, -1), figsize=(6, 6), sharex=False)
.. ipython:: python
:suppress:
- plt.close('all')
+ plt.close("all")
The required number of columns (3) is inferred from the number of series to plot
and the given number of rows (2).
@@ -1366,15 +1379,14 @@ otherwise you will see a warning.
target1 = [axes[0][0], axes[1][1], axes[2][2], axes[3][3]]
target2 = [axes[3][0], axes[2][1], axes[1][2], axes[0][3]]
- df.plot(subplots=True, ax=target1, legend=False, sharex=False, sharey=False);
+ df.plot(subplots=True, ax=target1, legend=False, sharex=False, sharey=False)
@savefig frame_plot_subplots_multi_ax.png
- (-df).plot(subplots=True, ax=target2, legend=False,
- sharex=False, sharey=False);
+ (-df).plot(subplots=True, ax=target2, legend=False, sharex=False, sharey=False)
.. ipython:: python
:suppress:
- plt.close('all')
+ plt.close("all")
Another option is passing an ``ax`` argument to :meth:`Series.plot` to plot on a particular axis:
@@ -1382,37 +1394,35 @@ Another option is passing an ``ax`` argument to :meth:`Series.plot` to plot on a
:suppress:
np.random.seed(123456)
- ts = pd.Series(np.random.randn(1000),
- index=pd.date_range('1/1/2000', periods=1000))
+ ts = pd.Series(np.random.randn(1000), index=pd.date_range("1/1/2000", periods=1000))
ts = ts.cumsum()
- df = pd.DataFrame(np.random.randn(1000, 4), index=ts.index,
- columns=list('ABCD'))
+ df = pd.DataFrame(np.random.randn(1000, 4), index=ts.index, columns=list("ABCD"))
df = df.cumsum()
.. ipython:: python
:suppress:
- plt.close('all')
+ plt.close("all")
.. ipython:: python
fig, axes = plt.subplots(nrows=2, ncols=2)
plt.subplots_adjust(wspace=0.2, hspace=0.5)
- df['A'].plot(ax=axes[0, 0]);
- axes[0, 0].set_title('A');
- df['B'].plot(ax=axes[0, 1]);
- axes[0, 1].set_title('B');
- df['C'].plot(ax=axes[1, 0]);
- axes[1, 0].set_title('C');
- df['D'].plot(ax=axes[1, 1]);
+ df["A"].plot(ax=axes[0, 0])
+ axes[0, 0].set_title("A")
+ df["B"].plot(ax=axes[0, 1])
+ axes[0, 1].set_title("B")
+ df["C"].plot(ax=axes[1, 0])
+ axes[1, 0].set_title("C")
+ df["D"].plot(ax=axes[1, 1])
@savefig series_plot_multi.png
- axes[1, 1].set_title('D');
+ axes[1, 1].set_title("D")
.. ipython:: python
:suppress:
- plt.close('all')
+ plt.close("all")
.. _visualization.errorbars:
@@ -1434,17 +1444,21 @@ Here is an example of one way to easily plot group means with standard deviation
.. ipython:: python
# Generate the data
- ix3 = pd.MultiIndex.from_arrays([
- ['a', 'a', 'a', 'a', 'b', 'b', 'b', 'b'],
- ['foo', 'foo', 'bar', 'bar', 'foo', 'foo', 'bar', 'bar']],
- names=['letter', 'word'])
-
- df3 = pd.DataFrame({'data1': [3, 2, 4, 3, 2, 4, 3, 2],
- 'data2': [6, 5, 7, 5, 4, 5, 6, 5]}, index=ix3)
+ ix3 = pd.MultiIndex.from_arrays(
+ [
+ ["a", "a", "a", "a", "b", "b", "b", "b"],
+ ["foo", "foo", "bar", "bar", "foo", "foo", "bar", "bar"],
+ ],
+ names=["letter", "word"],
+ )
+
+ df3 = pd.DataFrame(
+ {"data1": [3, 2, 4, 3, 2, 4, 3, 2], "data2": [6, 5, 7, 5, 4, 5, 6, 5]}, index=ix3
+ )
# Group by index labels and take the means and standard deviations
# for each group
- gp3 = df3.groupby(level=('letter', 'word'))
+ gp3 = df3.groupby(level=("letter", "word"))
means = gp3.mean()
errors = gp3.std()
means
@@ -1458,7 +1472,7 @@ Here is an example of one way to easily plot group means with standard deviation
.. ipython:: python
:suppress:
- plt.close('all')
+ plt.close("all")
.. _visualization.table:
@@ -1475,7 +1489,7 @@ Plotting with matplotlib table is now supported in :meth:`DataFrame.plot` and :
.. ipython:: python
fig, ax = plt.subplots(1, 1, figsize=(7, 6.5))
- df = pd.DataFrame(np.random.rand(5, 3), columns=['a', 'b', 'c'])
+ df = pd.DataFrame(np.random.rand(5, 3), columns=["a", "b", "c"])
ax.xaxis.tick_top() # Display x-axis ticks on top.
@savefig line_plot_table_true.png
@@ -1484,7 +1498,7 @@ Plotting with matplotlib table is now supported in :meth:`DataFrame.plot` and :
.. ipython:: python
:suppress:
- plt.close('all')
+ plt.close("all")
Also, you can pass a different :class:`DataFrame` or :class:`Series` to the
``table`` keyword. The data will be drawn as displayed in print method
@@ -1502,7 +1516,7 @@ as seen in the example below.
.. ipython:: python
:suppress:
- plt.close('all')
+ plt.close("all")
There also exists a helper function ``pandas.plotting.table``, which creates a
table from :class:`DataFrame` or :class:`Series`, and adds it to an
@@ -1512,10 +1526,10 @@ matplotlib `table <https://matplotlib.org/api/axes_api.html#matplotlib.axes.Axes
.. ipython:: python
from pandas.plotting import table
+
fig, ax = plt.subplots(1, 1)
- table(ax, np.round(df.describe(), 2),
- loc='upper right', colWidths=[0.2, 0.2, 0.2])
+ table(ax, np.round(df.describe(), 2), loc="upper right", colWidths=[0.2, 0.2, 0.2])
@savefig line_plot_table_describe.png
df.plot(ax=ax, ylim=(0, 2), legend=None)
@@ -1523,7 +1537,7 @@ matplotlib `table <https://matplotlib.org/api/axes_api.html#matplotlib.axes.Axes
.. ipython:: python
:suppress:
- plt.close('all')
+ plt.close("all")
**Note**: You can get table instances on the axes using ``axes.tables`` property for further decorations. See the `matplotlib table documentation <https://matplotlib.org/api/axes_api.html#matplotlib.axes.Axes.table>`__ for more.
@@ -1560,12 +1574,12 @@ To use the cubehelix colormap, we can pass ``colormap='cubehelix'``.
plt.figure()
@savefig cubehelix.png
- df.plot(colormap='cubehelix')
+ df.plot(colormap="cubehelix")
.. ipython:: python
:suppress:
- plt.close('all')
+ plt.close("all")
Alternatively, we can pass the colormap itself:
@@ -1581,7 +1595,7 @@ Alternatively, we can pass the colormap itself:
.. ipython:: python
:suppress:
- plt.close('all')
+ plt.close("all")
Colormaps can also be used other plot types, like bar charts:
@@ -1598,12 +1612,12 @@ Colormaps can also be used other plot types, like bar charts:
plt.figure()
@savefig greens.png
- dd.plot.bar(colormap='Greens')
+ dd.plot.bar(colormap="Greens")
.. ipython:: python
:suppress:
- plt.close('all')
+ plt.close("all")
Parallel coordinates charts:
@@ -1612,12 +1626,12 @@ Parallel coordinates charts:
plt.figure()
@savefig parallel_gist_rainbow.png
- parallel_coordinates(data, 'Name', colormap='gist_rainbow')
+ parallel_coordinates(data, "Name", colormap="gist_rainbow")
.. ipython:: python
:suppress:
- plt.close('all')
+ plt.close("all")
Andrews curves charts:
@@ -1626,12 +1640,12 @@ Andrews curves charts:
plt.figure()
@savefig andrews_curve_winter.png
- andrews_curves(data, 'Name', colormap='winter')
+ andrews_curves(data, "Name", colormap="winter")
.. ipython:: python
:suppress:
- plt.close('all')
+ plt.close("all")
Plotting directly with matplotlib
---------------------------------
@@ -1655,23 +1669,24 @@ when plotting a large number of points.
.. ipython:: python
- price = pd.Series(np.random.randn(150).cumsum(),
- index=pd.date_range('2000-1-1', periods=150, freq='B'))
+ price = pd.Series(
+ np.random.randn(150).cumsum(),
+ index=pd.date_range("2000-1-1", periods=150, freq="B"),
+ )
ma = price.rolling(20).mean()
mstd = price.rolling(20).std()
plt.figure()
- plt.plot(price.index, price, 'k')
- plt.plot(ma.index, ma, 'b')
+ plt.plot(price.index, price, "k")
+ plt.plot(ma.index, ma, "b")
@savefig bollinger.png
- plt.fill_between(mstd.index, ma - 2 * mstd, ma + 2 * mstd,
- color='b', alpha=0.2)
+ plt.fill_between(mstd.index, ma - 2 * mstd, ma + 2 * mstd, color="b", alpha=0.2)
.. ipython:: python
:suppress:
- plt.close('all')
+ plt.close("all")
Plotting backends
-----------------
@@ -1685,21 +1700,21 @@ function. For example:
.. code-block:: python
- >>> Series([1, 2, 3]).plot(backend='backend.module')
+ >>> Series([1, 2, 3]).plot(backend="backend.module")
Alternatively, you can also set this option globally, do you don't need to specify
the keyword in each ``plot`` call. For example:
.. code-block:: python
- >>> pd.set_option('plotting.backend', 'backend.module')
+ >>> pd.set_option("plotting.backend", "backend.module")
>>> pd.Series([1, 2, 3]).plot()
Or:
.. code-block:: python
- >>> pd.options.plotting.backend = 'backend.module'
+ >>> pd.options.plotting.backend = "backend.module"
>>> pd.Series([1, 2, 3]).plot()
This would be more or less equivalent to:
| ref #36777
I tried to format development/extending.rst, please review
cc @dsaxton | https://api.github.com/repos/pandas-dev/pandas/pulls/36825 | 2020-10-03T06:31:44Z | 2020-10-03T14:46:51Z | null | 2020-10-03T14:48:26Z |
DOC: use black to fix code style in doc pandas-dev#36777 | diff --git a/doc/source/development/extending.rst b/doc/source/development/extending.rst
index c708ebb361ed1..6fabffc314fe5 100644
--- a/doc/source/development/extending.rst
+++ b/doc/source/development/extending.rst
@@ -34,7 +34,7 @@ decorate a class, providing the name of attribute to add. The class's
@staticmethod
def _validate(obj):
# verify there is a column latitude and a column longitude
- if 'latitude' not in obj.columns or 'longitude' not in obj.columns:
+ if "latitude" not in obj.columns or "longitude" not in obj.columns:
raise AttributeError("Must have 'latitude' and 'longitude'.")
@property
@@ -50,8 +50,9 @@ decorate a class, providing the name of attribute to add. The class's
Now users can access your methods using the ``geo`` namespace:
- >>> ds = pd.DataFrame({'longitude': np.linspace(0, 10),
- ... 'latitude': np.linspace(0, 20)})
+ >>> ds = pd.Dataframe(
+ ... {"longitude": np.linspace(0, 10), "latitude": np.linspace(0, 20)}
+ ... )
>>> ds.geo.center
(5.0, 10.0)
>>> ds.geo.plot()
@@ -271,6 +272,7 @@ included as a column in a pandas DataFrame):
def __arrow_array__(self, type=None):
# convert the underlying array values to a pyarrow Array
import pyarrow
+
return pyarrow.array(..., type=type)
The ``ExtensionDtype.__from_arrow__`` method then controls the conversion
@@ -377,7 +379,7 @@ Below example shows how to define ``SubclassedSeries`` and ``SubclassedDataFrame
>>> type(to_framed)
<class '__main__.SubclassedDataFrame'>
- >>> df = SubclassedDataFrame({'A': [1, 2, 3], 'B': [4, 5, 6], 'C': [7, 8, 9]})
+ >>> df = SubclassedDataFrame({"A": [1, 2, 3], "B": [4, 5, 6], "C": [7, 8, 9]})
>>> df
A B C
0 1 4 7
@@ -387,7 +389,7 @@ Below example shows how to define ``SubclassedSeries`` and ``SubclassedDataFrame
>>> type(df)
<class '__main__.SubclassedDataFrame'>
- >>> sliced1 = df[['A', 'B']]
+ >>> sliced1 = df[["A", "B"]]
>>> sliced1
A B
0 1 4
@@ -397,7 +399,7 @@ Below example shows how to define ``SubclassedSeries`` and ``SubclassedDataFrame
>>> type(sliced1)
<class '__main__.SubclassedDataFrame'>
- >>> sliced2 = df['A']
+ >>> sliced2 = df["A"]
>>> sliced2
0 1
1 2
@@ -422,11 +424,11 @@ Below is an example to define two original properties, "internal_cache" as a tem
class SubclassedDataFrame2(pd.DataFrame):
# temporary properties
- _internal_names = pd.DataFrame._internal_names + ['internal_cache']
+ _internal_names = pd.DataFrame._internal_names + ["internal_cache"]
_internal_names_set = set(_internal_names)
# normal properties
- _metadata = ['added_property']
+ _metadata = ["added_property"]
@property
def _constructor(self):
@@ -434,15 +436,15 @@ Below is an example to define two original properties, "internal_cache" as a tem
.. code-block:: python
- >>> df = SubclassedDataFrame2({'A': [1, 2, 3], 'B': [4, 5, 6], 'C': [7, 8, 9]})
+ >>> df = SubclassedDataFrame2({"A": [1, 2, 3], "B": [4, 5, 6], "C": [7, 8, 9]})
>>> df
A B C
0 1 4 7
1 2 5 8
2 3 6 9
- >>> df.internal_cache = 'cached'
- >>> df.added_property = 'property'
+ >>> df.internal_cache = "cached"
+ >>> df.added_property = "property"
>>> df.internal_cache
cached
@@ -450,11 +452,11 @@ Below is an example to define two original properties, "internal_cache" as a tem
property
# properties defined in _internal_names is reset after manipulation
- >>> df[['A', 'B']].internal_cache
+ >>> df[["A", "B"]].internal_cache
AttributeError: 'SubclassedDataFrame2' object has no attribute 'internal_cache'
# properties defined in _metadata are retained
- >>> df[['A', 'B']].added_property
+ >>> df[["A", "B"]].added_property
property
.. _extending.plotting-backends:
@@ -468,7 +470,7 @@ one based on Matplotlib. For example:
.. code-block:: python
- >>> pd.set_option('plotting.backend', 'backend.module')
+ >>> pd.set_option("plotting.backend", "backend.module")
>>> pd.Series([1, 2, 3]).plot()
This would be more or less equivalent to:
@@ -499,4 +501,4 @@ registers the default "matplotlib" backend as follows.
More information on how to implement a third-party plotting backend can be found at
-https://github.com/pandas-dev/pandas/blob/master/pandas/plotting/__init__.py#L1.
\ No newline at end of file
+https://github.com/pandas-dev/pandas/blob/master/pandas/plotting/__init__.py#L1.
diff --git a/doc/source/user_guide/duplicates.rst b/doc/source/user_guide/duplicates.rst
index b65822fab2b23..2993ca7799510 100644
--- a/doc/source/user_guide/duplicates.rst
+++ b/doc/source/user_guide/duplicates.rst
@@ -29,8 +29,8 @@ duplicates present. The output can't be determined, and so pandas raises.
.. ipython:: python
:okexcept:
- s1 = pd.Series([0, 1, 2], index=['a', 'b', 'b'])
- s1.reindex(['a', 'b', 'c'])
+ s1 = pd.Series([0, 1, 2], index=["a", "b", "b"])
+ s1.reindex(["a", "b", "c"])
Other methods, like indexing, can give very surprising results. Typically
indexing with a scalar will *reduce dimensionality*. Slicing a ``DataFrame``
@@ -39,30 +39,30 @@ return a scalar. But with duplicates, this isn't the case.
.. ipython:: python
- df1 = pd.DataFrame([[0, 1, 2], [3, 4, 5]], columns=['A', 'A', 'B'])
+ df1 = pd.DataFrame([[0, 1, 2], [3, 4, 5]], columns=["A", "A", "B"])
df1
We have duplicates in the columns. If we slice ``'B'``, we get back a ``Series``
.. ipython:: python
- df1['B'] # a series
+ df1["B"] # a series
But slicing ``'A'`` returns a ``DataFrame``
.. ipython:: python
- df1['A'] # a DataFrame
+ df1["A"] # a DataFrame
This applies to row labels as well
.. ipython:: python
- df2 = pd.DataFrame({"A": [0, 1, 2]}, index=['a', 'a', 'b'])
+ df2 = pd.DataFrame({"A": [0, 1, 2]}, index=["a", "a", "b"])
df2
- df2.loc['b', 'A'] # a scalar
- df2.loc['a', 'A'] # a Series
+ df2.loc["b", "A"] # a scalar
+ df2.loc["a", "A"] # a Series
Duplicate Label Detection
~~~~~~~~~~~~~~~~~~~~~~~~~
@@ -121,29 +121,24 @@ will be raised.
.. ipython:: python
:okexcept:
- pd.Series(
- [0, 1, 2],
- index=['a', 'b', 'b']
- ).set_flags(allows_duplicate_labels=False)
+ pd.Series([0, 1, 2], index=["a", "b", "b"]).set_flags(allows_duplicate_labels=False)
This applies to both row and column labels for a :class:`DataFrame`
.. ipython:: python
:okexcept:
- pd.DataFrame(
- [[0, 1, 2], [3, 4, 5]], columns=["A", "B", "C"],
- ).set_flags(allows_duplicate_labels=False)
+ pd.DataFrame([[0, 1, 2], [3, 4, 5]], columns=["A", "B", "C"],).set_flags(
+ allows_duplicate_labels=False
+ )
This attribute can be checked or set with :attr:`~DataFrame.flags.allows_duplicate_labels`,
which indicates whether that object can have duplicate labels.
.. ipython:: python
- df = (
- pd.DataFrame({"A": [0, 1, 2, 3]},
- index=['x', 'y', 'X', 'Y'])
- .set_flags(allows_duplicate_labels=False)
+ df = pd.DataFrame({"A": [0, 1, 2, 3]}, index=["x", "y", "X", "Y"]).set_flags(
+ allows_duplicate_labels=False
)
df
df.flags.allows_duplicate_labels
@@ -198,7 +193,7 @@ operations.
.. ipython:: python
:okexcept:
- s1 = pd.Series(0, index=['a', 'b']).set_flags(allows_duplicate_labels=False)
+ s1 = pd.Series(0, index=["a", "b"]).set_flags(allows_duplicate_labels=False)
s1
s1.head().rename({"a": "b"})
diff --git a/doc/source/user_guide/gotchas.rst b/doc/source/user_guide/gotchas.rst
index a96c70405d859..07c856c96426d 100644
--- a/doc/source/user_guide/gotchas.rst
+++ b/doc/source/user_guide/gotchas.rst
@@ -21,12 +21,19 @@ when calling :meth:`~DataFrame.info`:
.. ipython:: python
- dtypes = ['int64', 'float64', 'datetime64[ns]', 'timedelta64[ns]',
- 'complex128', 'object', 'bool']
+ dtypes = [
+ "int64",
+ "float64",
+ "datetime64[ns]",
+ "timedelta64[ns]",
+ "complex128",
+ "object",
+ "bool",
+ ]
n = 5000
data = {t: np.random.randint(100, size=n).astype(t) for t in dtypes}
df = pd.DataFrame(data)
- df['categorical'] = df['object'].astype('category')
+ df["categorical"] = df["object"].astype("category")
df.info()
@@ -40,7 +47,7 @@ as it can be expensive to do this deeper introspection.
.. ipython:: python
- df.info(memory_usage='deep')
+ df.info(memory_usage="deep")
By default the display option is set to ``True`` but can be explicitly
overridden by passing the ``memory_usage`` argument when invoking ``df.info()``.
@@ -155,7 +162,7 @@ index, not membership among the values.
.. ipython:: python
- s = pd.Series(range(5), index=list('abcde'))
+ s = pd.Series(range(5), index=list("abcde"))
2 in s
'b' in s
@@ -206,11 +213,11 @@ arrays. For example:
.. ipython:: python
- s = pd.Series([1, 2, 3, 4, 5], index=list('abcde'))
+ s = pd.Series([1, 2, 3, 4, 5], index=list("abcde"))
s
s.dtype
- s2 = s.reindex(['a', 'b', 'c', 'f', 'u'])
+ s2 = s.reindex(["a", "b", "c", "f", "u"])
s2
s2.dtype
@@ -227,12 +234,11 @@ the nullable-integer extension dtypes provided by pandas
.. ipython:: python
- s_int = pd.Series([1, 2, 3, 4, 5], index=list('abcde'),
- dtype=pd.Int64Dtype())
+ s_int = pd.Series([1, 2, 3, 4, 5], index=list("abcde"), dtype=pd.Int64Dtype())
s_int
s_int.dtype
- s2_int = s_int.reindex(['a', 'b', 'c', 'f', 'u'])
+ s2_int = s_int.reindex(["a", "b", "c", "f", "u"])
s2_int
s2_int.dtype
@@ -334,7 +340,7 @@ constructors using something similar to the following:
.. ipython:: python
- x = np.array(list(range(10)), '>i4') # big endian
+ x = np.array(list(range(10)), ">i4") # big endian
newx = x.byteswap().newbyteorder() # force native byteorder
s = pd.Series(newx)
diff --git a/doc/source/user_guide/scale.rst b/doc/source/user_guide/scale.rst
index 206d8dd0f4739..f36f27269a996 100644
--- a/doc/source/user_guide/scale.rst
+++ b/doc/source/user_guide/scale.rst
@@ -72,7 +72,7 @@ Option 1 loads in all the data and then filters to what we need.
.. ipython:: python
- columns = ['id_0', 'name_0', 'x_0', 'y_0']
+ columns = ["id_0", "name_0", "x_0", "y_0"]
pd.read_parquet("timeseries_wide.parquet")[columns]
@@ -123,7 +123,7 @@ space-efficient integers to know which specific name is used in each row.
.. ipython:: python
ts2 = ts.copy()
- ts2['name'] = ts2['name'].astype('category')
+ ts2["name"] = ts2["name"].astype("category")
ts2.memory_usage(deep=True)
We can go a bit further and downcast the numeric columns to their smallest types
@@ -131,8 +131,8 @@ using :func:`pandas.to_numeric`.
.. ipython:: python
- ts2['id'] = pd.to_numeric(ts2['id'], downcast='unsigned')
- ts2[['x', 'y']] = ts2[['x', 'y']].apply(pd.to_numeric, downcast='float')
+ ts2["id"] = pd.to_numeric(ts2["id"], downcast="unsigned")
+ ts2[["x", "y"]] = ts2[["x", "y"]].apply(pd.to_numeric, downcast="float")
ts2.dtypes
.. ipython:: python
@@ -141,8 +141,7 @@ using :func:`pandas.to_numeric`.
.. ipython:: python
- reduction = (ts2.memory_usage(deep=True).sum()
- / ts.memory_usage(deep=True).sum())
+ reduction = ts2.memory_usage(deep=True).sum() / ts.memory_usage(deep=True).sum()
print(f"{reduction:0.2f}")
In all, we've reduced the in-memory footprint of this dataset to 1/5 of its
@@ -174,13 +173,13 @@ files. Each file in the directory represents a different year of the entire data
import pathlib
N = 12
- starts = [f'20{i:>02d}-01-01' for i in range(N)]
- ends = [f'20{i:>02d}-12-13' for i in range(N)]
+ starts = [f"20{i:>02d}-01-01" for i in range(N)]
+ ends = [f"20{i:>02d}-12-13" for i in range(N)]
pathlib.Path("data/timeseries").mkdir(exist_ok=True)
for i, (start, end) in enumerate(zip(starts, ends)):
- ts = _make_timeseries(start=start, end=end, freq='1T', seed=i)
+ ts = _make_timeseries(start=start, end=end, freq="1T", seed=i)
ts.to_parquet(f"data/timeseries/ts-{i:0>2d}.parquet")
@@ -215,7 +214,7 @@ work for arbitrary-sized datasets.
# Only one dataframe is in memory at a time...
df = pd.read_parquet(path)
# ... plus a small Series ``counts``, which is updated.
- counts = counts.add(df['name'].value_counts(), fill_value=0)
+ counts = counts.add(df["name"].value_counts(), fill_value=0)
counts.astype(int)
Some readers, like :meth:`pandas.read_csv`, offer parameters to control the
@@ -278,8 +277,8 @@ Rather than executing immediately, doing operations build up a **task graph**.
.. ipython:: python
ddf
- ddf['name']
- ddf['name'].value_counts()
+ ddf["name"]
+ ddf["name"].value_counts()
Each of these calls is instant because the result isn't being computed yet.
We're just building up a list of computation to do when someone needs the
@@ -291,7 +290,7 @@ To get the actual result you can call ``.compute()``.
.. ipython:: python
- %time ddf['name'].value_counts().compute()
+ %time ddf["name"].value_counts().compute()
At that point, you get back the same thing you'd get with pandas, in this case
a concrete pandas Series with the count of each ``name``.
@@ -324,7 +323,7 @@ a familiar groupby aggregation.
.. ipython:: python
- %time ddf.groupby('name')[['x', 'y']].mean().compute().head()
+ %time ddf.groupby("name")[["x", "y"]].mean().compute().head()
The grouping and aggregation is done out-of-core and in parallel.
@@ -336,8 +335,8 @@ we need to supply the divisions manually.
.. ipython:: python
N = 12
- starts = [f'20{i:>02d}-01-01' for i in range(N)]
- ends = [f'20{i:>02d}-12-13' for i in range(N)]
+ starts = [f"20{i:>02d}-01-01" for i in range(N)]
+ ends = [f"20{i:>02d}-12-13" for i in range(N)]
divisions = tuple(pd.to_datetime(starts)) + (pd.Timestamp(ends[-1]),)
ddf.divisions = divisions
@@ -347,7 +346,7 @@ Now we can do things like fast random access with ``.loc``.
.. ipython:: python
- ddf.loc['2002-01-01 12:01':'2002-01-01 12:05'].compute()
+ ddf.loc["2002-01-01 12:01":"2002-01-01 12:05"].compute()
Dask knows to just look in the 3rd partition for selecting values in 2002. It
doesn't need to look at any other data.
@@ -362,7 +361,7 @@ out of memory. At that point it's just a regular pandas object.
:okwarning:
@savefig dask_resample.png
- ddf[['x', 'y']].resample("1D").mean().cumsum().compute().plot()
+ ddf[["x", "y"]].resample("1D").mean().cumsum().compute().plot()
These Dask examples have all be done using multiple processes on a single
machine. Dask can be `deployed on a cluster
| Partially addresses #36777.
Fixed the following files:
- `doc/source/development/extending.rst`
- `doc/source/user_guide/duplicates.rst`
- `doc/source/user_guide/gotchas.rst`
- `doc/source/user_guide/scale.rst` | https://api.github.com/repos/pandas-dev/pandas/pulls/36824 | 2020-10-03T04:34:51Z | 2020-10-03T14:35:03Z | 2020-10-03T14:35:03Z | 2020-10-06T12:21:29Z |
DOC: update code style for user guide for #36777 | diff --git a/doc/source/user_guide/groupby.rst b/doc/source/user_guide/groupby.rst
index 52342de98de79..9696f14f03b56 100644
--- a/doc/source/user_guide/groupby.rst
+++ b/doc/source/user_guide/groupby.rst
@@ -68,19 +68,23 @@ object (more on what the GroupBy object is later), you may do the following:
.. ipython:: python
- df = pd.DataFrame([('bird', 'Falconiformes', 389.0),
- ('bird', 'Psittaciformes', 24.0),
- ('mammal', 'Carnivora', 80.2),
- ('mammal', 'Primates', np.nan),
- ('mammal', 'Carnivora', 58)],
- index=['falcon', 'parrot', 'lion', 'monkey', 'leopard'],
- columns=('class', 'order', 'max_speed'))
+ df = pd.DataFrame(
+ [
+ ("bird", "Falconiformes", 389.0),
+ ("bird", "Psittaciformes", 24.0),
+ ("mammal", "Carnivora", 80.2),
+ ("mammal", "Primates", np.nan),
+ ("mammal", "Carnivora", 58),
+ ],
+ index=["falcon", "parrot", "lion", "monkey", "leopard"],
+ columns=("class", "order", "max_speed"),
+ )
df
# default is axis=0
- grouped = df.groupby('class')
- grouped = df.groupby('order', axis='columns')
- grouped = df.groupby(['class', 'order'])
+ grouped = df.groupby("class")
+ grouped = df.groupby("order", axis="columns")
+ grouped = df.groupby(["class", "order"])
The mapping can be specified many different ways:
@@ -103,12 +107,14 @@ consider the following ``DataFrame``:
.. ipython:: python
- df = pd.DataFrame({'A': ['foo', 'bar', 'foo', 'bar',
- 'foo', 'bar', 'foo', 'foo'],
- 'B': ['one', 'one', 'two', 'three',
- 'two', 'two', 'one', 'three'],
- 'C': np.random.randn(8),
- 'D': np.random.randn(8)})
+ df = pd.DataFrame(
+ {
+ "A": ["foo", "bar", "foo", "bar", "foo", "bar", "foo", "foo"],
+ "B": ["one", "one", "two", "three", "two", "two", "one", "three"],
+ "C": np.random.randn(8),
+ "D": np.random.randn(8),
+ }
+ )
df
On a DataFrame, we obtain a GroupBy object by calling :meth:`~DataFrame.groupby`.
@@ -116,8 +122,8 @@ We could naturally group by either the ``A`` or ``B`` columns, or both:
.. ipython:: python
- grouped = df.groupby('A')
- grouped = df.groupby(['A', 'B'])
+ grouped = df.groupby("A")
+ grouped = df.groupby(["A", "B"])
.. versionadded:: 0.24
@@ -126,8 +132,8 @@ but the specified columns
.. ipython:: python
- df2 = df.set_index(['A', 'B'])
- grouped = df2.groupby(level=df2.index.names.difference(['B']))
+ df2 = df.set_index(["A", "B"])
+ grouped = df2.groupby(level=df2.index.names.difference(["B"]))
grouped.sum()
These will split the DataFrame on its index (rows). We could also split by the
@@ -181,9 +187,9 @@ By default the group keys are sorted during the ``groupby`` operation. You may h
.. ipython:: python
- df2 = pd.DataFrame({'X': ['B', 'B', 'A', 'A'], 'Y': [1, 2, 3, 4]})
- df2.groupby(['X']).sum()
- df2.groupby(['X'], sort=False).sum()
+ df2 = pd.DataFrame({"X": ["B", "B", "A", "A"], "Y": [1, 2, 3, 4]})
+ df2.groupby(["X"]).sum()
+ df2.groupby(["X"], sort=False).sum()
Note that ``groupby`` will preserve the order in which *observations* are sorted *within* each group.
@@ -191,10 +197,10 @@ For example, the groups created by ``groupby()`` below are in the order they app
.. ipython:: python
- df3 = pd.DataFrame({'X': ['A', 'B', 'A', 'B'], 'Y': [1, 4, 3, 2]})
- df3.groupby(['X']).get_group('A')
+ df3 = pd.DataFrame({"X": ["A", "B", "A", "B"], "Y": [1, 4, 3, 2]})
+ df3.groupby(["X"]).get_group("A")
- df3.groupby(['X']).get_group('B')
+ df3.groupby(["X"]).get_group("B")
.. _groupby.dropna:
@@ -236,7 +242,7 @@ above example we have:
.. ipython:: python
- df.groupby('A').groups
+ df.groupby("A").groups
df.groupby(get_letter_type, axis=1).groups
Calling the standard Python ``len`` function on the GroupBy object just returns
@@ -244,7 +250,7 @@ the length of the ``groups`` dict, so it is largely just a convenience:
.. ipython:: python
- grouped = df.groupby(['A', 'B'])
+ grouped = df.groupby(["A", "B"])
grouped.groups
len(grouped)
@@ -259,15 +265,14 @@ the length of the ``groups`` dict, so it is largely just a convenience:
n = 10
weight = np.random.normal(166, 20, size=n)
height = np.random.normal(60, 10, size=n)
- time = pd.date_range('1/1/2000', periods=n)
- gender = np.random.choice(['male', 'female'], size=n)
- df = pd.DataFrame({'height': height, 'weight': weight,
- 'gender': gender}, index=time)
+ time = pd.date_range("1/1/2000", periods=n)
+ gender = np.random.choice(["male", "female"], size=n)
+ df = pd.DataFrame({"height": height, "weight": weight, "gender": gender}, index=time)
.. ipython:: python
df
- gb = df.groupby('gender')
+ gb = df.groupby("gender")
.. ipython::
@@ -291,9 +296,11 @@ Let's create a Series with a two-level ``MultiIndex``.
.. ipython:: python
- arrays = [['bar', 'bar', 'baz', 'baz', 'foo', 'foo', 'qux', 'qux'],
- ['one', 'two', 'one', 'two', 'one', 'two', 'one', 'two']]
- index = pd.MultiIndex.from_arrays(arrays, names=['first', 'second'])
+ arrays = [
+ ["bar", "bar", "baz", "baz", "foo", "foo", "qux", "qux"],
+ ["one", "two", "one", "two", "one", "two", "one", "two"],
+ ]
+ index = pd.MultiIndex.from_arrays(arrays, names=["first", "second"])
s = pd.Series(np.random.randn(8), index=index)
s
@@ -309,7 +316,7 @@ number:
.. ipython:: python
- s.groupby(level='second').sum()
+ s.groupby(level="second").sum()
The aggregation functions such as ``sum`` will take the level parameter
directly. Additionally, the resulting index will be named according to the
@@ -317,30 +324,32 @@ chosen level:
.. ipython:: python
- s.sum(level='second')
+ s.sum(level="second")
Grouping with multiple levels is supported.
.. ipython:: python
:suppress:
- arrays = [['bar', 'bar', 'baz', 'baz', 'foo', 'foo', 'qux', 'qux'],
- ['doo', 'doo', 'bee', 'bee', 'bop', 'bop', 'bop', 'bop'],
- ['one', 'two', 'one', 'two', 'one', 'two', 'one', 'two']]
+ arrays = [
+ ["bar", "bar", "baz", "baz", "foo", "foo", "qux", "qux"],
+ ["doo", "doo", "bee", "bee", "bop", "bop", "bop", "bop"],
+ ["one", "two", "one", "two", "one", "two", "one", "two"],
+ ]
tuples = list(zip(*arrays))
- index = pd.MultiIndex.from_tuples(tuples, names=['first', 'second', 'third'])
+ index = pd.MultiIndex.from_tuples(tuples, names=["first", "second", "third"])
s = pd.Series(np.random.randn(8), index=index)
.. ipython:: python
s
- s.groupby(level=['first', 'second']).sum()
+ s.groupby(level=["first", "second"]).sum()
Index level names may be supplied as keys.
.. ipython:: python
- s.groupby(['first', 'second']).sum()
+ s.groupby(["first", "second"]).sum()
More on the ``sum`` function and aggregation later.
@@ -352,14 +361,14 @@ objects.
.. ipython:: python
- arrays = [['bar', 'bar', 'baz', 'baz', 'foo', 'foo', 'qux', 'qux'],
- ['one', 'two', 'one', 'two', 'one', 'two', 'one', 'two']]
+ arrays = [
+ ["bar", "bar", "baz", "baz", "foo", "foo", "qux", "qux"],
+ ["one", "two", "one", "two", "one", "two", "one", "two"],
+ ]
- index = pd.MultiIndex.from_arrays(arrays, names=['first', 'second'])
+ index = pd.MultiIndex.from_arrays(arrays, names=["first", "second"])
- df = pd.DataFrame({'A': [1, 1, 1, 1, 2, 2, 3, 3],
- 'B': np.arange(8)},
- index=index)
+ df = pd.DataFrame({"A": [1, 1, 1, 1, 2, 2, 3, 3], "B": np.arange(8)}, index=index)
df
@@ -368,19 +377,19 @@ the ``A`` column.
.. ipython:: python
- df.groupby([pd.Grouper(level=1), 'A']).sum()
+ df.groupby([pd.Grouper(level=1), "A"]).sum()
Index levels may also be specified by name.
.. ipython:: python
- df.groupby([pd.Grouper(level='second'), 'A']).sum()
+ df.groupby([pd.Grouper(level="second"), "A"]).sum()
Index level names may be specified as keys directly to ``groupby``.
.. ipython:: python
- df.groupby(['second', 'A']).sum()
+ df.groupby(["second", "A"]).sum()
DataFrame column selection in GroupBy
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
@@ -392,24 +401,26 @@ getting a column from a DataFrame, you can do:
.. ipython:: python
:suppress:
- df = pd.DataFrame({'A': ['foo', 'bar', 'foo', 'bar',
- 'foo', 'bar', 'foo', 'foo'],
- 'B': ['one', 'one', 'two', 'three',
- 'two', 'two', 'one', 'three'],
- 'C': np.random.randn(8),
- 'D': np.random.randn(8)})
+ df = pd.DataFrame(
+ {
+ "A": ["foo", "bar", "foo", "bar", "foo", "bar", "foo", "foo"],
+ "B": ["one", "one", "two", "three", "two", "two", "one", "three"],
+ "C": np.random.randn(8),
+ "D": np.random.randn(8),
+ }
+ )
.. ipython:: python
- grouped = df.groupby(['A'])
- grouped_C = grouped['C']
- grouped_D = grouped['D']
+ grouped = df.groupby(["A"])
+ grouped_C = grouped["C"]
+ grouped_D = grouped["D"]
This is mainly syntactic sugar for the alternative and much more verbose:
.. ipython:: python
- df['C'].groupby(df['A'])
+ df["C"].groupby(df["A"])
Additionally this method avoids recomputing the internal grouping information
derived from the passed key.
@@ -450,13 +461,13 @@ A single group can be selected using
.. ipython:: python
- grouped.get_group('bar')
+ grouped.get_group("bar")
Or for an object grouped on multiple columns:
.. ipython:: python
- df.groupby(['A', 'B']).get_group(('bar', 'one'))
+ df.groupby(["A", "B"]).get_group(("bar", "one"))
.. _groupby.aggregate:
@@ -474,10 +485,10 @@ An obvious one is aggregation via the
.. ipython:: python
- grouped = df.groupby('A')
+ grouped = df.groupby("A")
grouped.aggregate(np.sum)
- grouped = df.groupby(['A', 'B'])
+ grouped = df.groupby(["A", "B"])
grouped.aggregate(np.sum)
As you can see, the result of the aggregation will have the group names as the
@@ -487,17 +498,17 @@ changed by using the ``as_index`` option:
.. ipython:: python
- grouped = df.groupby(['A', 'B'], as_index=False)
+ grouped = df.groupby(["A", "B"], as_index=False)
grouped.aggregate(np.sum)
- df.groupby('A', as_index=False).sum()
+ df.groupby("A", as_index=False).sum()
Note that you could use the ``reset_index`` DataFrame function to achieve the
same result as the column names are stored in the resulting ``MultiIndex``:
.. ipython:: python
- df.groupby(['A', 'B']).sum().reset_index()
+ df.groupby(["A", "B"]).sum().reset_index()
Another simple aggregation example is to compute the size of each group.
This is included in GroupBy as the ``size`` method. It returns a Series whose
@@ -559,8 +570,8 @@ aggregation with, outputting a DataFrame:
.. ipython:: python
- grouped = df.groupby('A')
- grouped['C'].agg([np.sum, np.mean, np.std])
+ grouped = df.groupby("A")
+ grouped["C"].agg([np.sum, np.mean, np.std])
On a grouped ``DataFrame``, you can pass a list of functions to apply to each
column, which produces an aggregated result with a hierarchical index:
@@ -575,19 +586,21 @@ need to rename, then you can add in a chained operation for a ``Series`` like th
.. ipython:: python
- (grouped['C'].agg([np.sum, np.mean, np.std])
- .rename(columns={'sum': 'foo',
- 'mean': 'bar',
- 'std': 'baz'}))
+ (
+ grouped["C"]
+ .agg([np.sum, np.mean, np.std])
+ .rename(columns={"sum": "foo", "mean": "bar", "std": "baz"})
+ )
For a grouped ``DataFrame``, you can rename in a similar manner:
.. ipython:: python
- (grouped.agg([np.sum, np.mean, np.std])
- .rename(columns={'sum': 'foo',
- 'mean': 'bar',
- 'std': 'baz'}))
+ (
+ grouped.agg([np.sum, np.mean, np.std]).rename(
+ columns={"sum": "foo", "mean": "bar", "std": "baz"}
+ )
+ )
.. note::
@@ -598,7 +611,7 @@ For a grouped ``DataFrame``, you can rename in a similar manner:
.. ipython:: python
:okexcept:
- grouped['C'].agg(['sum', 'sum'])
+ grouped["C"].agg(["sum", "sum"])
Pandas *does* allow you to provide multiple lambdas. In this case, pandas
@@ -607,8 +620,7 @@ For a grouped ``DataFrame``, you can rename in a similar manner:
.. ipython:: python
- grouped['C'].agg([lambda x: x.max() - x.min(),
- lambda x: x.median() - x.mean()])
+ grouped["C"].agg([lambda x: x.max() - x.min(), lambda x: x.median() - x.mean()])
@@ -631,15 +643,19 @@ accepts the special syntax in :meth:`GroupBy.agg`, known as "named aggregation",
.. ipython:: python
- animals = pd.DataFrame({'kind': ['cat', 'dog', 'cat', 'dog'],
- 'height': [9.1, 6.0, 9.5, 34.0],
- 'weight': [7.9, 7.5, 9.9, 198.0]})
+ animals = pd.DataFrame(
+ {
+ "kind": ["cat", "dog", "cat", "dog"],
+ "height": [9.1, 6.0, 9.5, 34.0],
+ "weight": [7.9, 7.5, 9.9, 198.0],
+ }
+ )
animals
animals.groupby("kind").agg(
- min_height=pd.NamedAgg(column='height', aggfunc='min'),
- max_height=pd.NamedAgg(column='height', aggfunc='max'),
- average_weight=pd.NamedAgg(column='weight', aggfunc=np.mean),
+ min_height=pd.NamedAgg(column="height", aggfunc="min"),
+ max_height=pd.NamedAgg(column="height", aggfunc="max"),
+ average_weight=pd.NamedAgg(column="weight", aggfunc=np.mean),
)
@@ -648,9 +664,9 @@ accepts the special syntax in :meth:`GroupBy.agg`, known as "named aggregation",
.. ipython:: python
animals.groupby("kind").agg(
- min_height=('height', 'min'),
- max_height=('height', 'max'),
- average_weight=('weight', np.mean),
+ min_height=("height", "min"),
+ max_height=("height", "max"),
+ average_weight=("weight", np.mean),
)
@@ -659,9 +675,11 @@ and unpack the keyword arguments
.. ipython:: python
- animals.groupby("kind").agg(**{
- 'total weight': pd.NamedAgg(column='weight', aggfunc=sum),
- })
+ animals.groupby("kind").agg(
+ **{
+ "total weight": pd.NamedAgg(column="weight", aggfunc=sum),
+ }
+ )
Additional keyword arguments are not passed through to the aggregation functions. Only pairs
of ``(column, aggfunc)`` should be passed as ``**kwargs``. If your aggregation functions
@@ -680,8 +698,8 @@ no column selection, so the values are just the functions.
.. ipython:: python
animals.groupby("kind").height.agg(
- min_height='min',
- max_height='max',
+ min_height="min",
+ max_height="max",
)
Applying different functions to DataFrame columns
@@ -692,8 +710,7 @@ columns of a DataFrame:
.. ipython:: python
- grouped.agg({'C': np.sum,
- 'D': lambda x: np.std(x, ddof=1)})
+ grouped.agg({"C": np.sum, "D": lambda x: np.std(x, ddof=1)})
The function names can also be strings. In order for a string to be valid it
must be either implemented on GroupBy or available via :ref:`dispatching
@@ -701,7 +718,7 @@ must be either implemented on GroupBy or available via :ref:`dispatching
.. ipython:: python
- grouped.agg({'C': 'sum', 'D': 'std'})
+ grouped.agg({"C": "sum", "D": "std"})
.. _groupby.aggregate.cython:
@@ -713,8 +730,8 @@ optimized Cython implementations:
.. ipython:: python
- df.groupby('A').sum()
- df.groupby(['A', 'B']).mean()
+ df.groupby("A").sum()
+ df.groupby(["A", "B"]).mean()
Of course ``sum`` and ``mean`` are implemented on pandas objects, so the above
code would work even without the special versions via dispatching (see below).
@@ -743,15 +760,14 @@ For example, suppose we wished to standardize the data within each group:
.. ipython:: python
- index = pd.date_range('10/1/1999', periods=1100)
+ index = pd.date_range("10/1/1999", periods=1100)
ts = pd.Series(np.random.normal(0.5, 2, 1100), index)
ts = ts.rolling(window=100, min_periods=100).mean().dropna()
ts.head()
ts.tail()
- transformed = (ts.groupby(lambda x: x.year)
- .transform(lambda x: (x - x.mean()) / x.std()))
+ transformed = ts.groupby(lambda x: x.year).transform(lambda x: (x - x.mean()) / x.std())
We would expect the result to now have mean 0 and standard deviation 1 within
each group, which we can easily check:
@@ -772,7 +788,7 @@ We can also visually compare the original and transformed data sets.
.. ipython:: python
- compare = pd.DataFrame({'Original': ts, 'Transformed': transformed})
+ compare = pd.DataFrame({"Original": ts, "Transformed": transformed})
@savefig groupby_transform_plot.png
compare.plot()
@@ -788,8 +804,8 @@ Alternatively, the built-in methods could be used to produce the same outputs.
.. ipython:: python
- max = ts.groupby(lambda x: x.year).transform('max')
- min = ts.groupby(lambda x: x.year).transform('min')
+ max = ts.groupby(lambda x: x.year).transform("max")
+ min = ts.groupby(lambda x: x.year).transform("min")
max - min
@@ -798,7 +814,7 @@ Another common data transform is to replace missing data with the group mean.
.. ipython:: python
:suppress:
- cols = ['A', 'B', 'C']
+ cols = ["A", "B", "C"]
values = np.random.randn(1000, 3)
values[np.random.randint(0, 1000, 100), 0] = np.nan
values[np.random.randint(0, 1000, 50), 1] = np.nan
@@ -809,7 +825,7 @@ Another common data transform is to replace missing data with the group mean.
data_df
- countries = np.array(['US', 'UK', 'GR', 'JP'])
+ countries = np.array(["US", "UK", "GR", "JP"])
key = countries[np.random.randint(0, 4, 1000)]
grouped = data_df.groupby(key)
@@ -859,11 +875,10 @@ the column B based on the groups of column A.
.. ipython:: python
- df_re = pd.DataFrame({'A': [1] * 10 + [5] * 10,
- 'B': np.arange(20)})
+ df_re = pd.DataFrame({"A": [1] * 10 + [5] * 10, "B": np.arange(20)})
df_re
- df_re.groupby('A').rolling(4).B.mean()
+ df_re.groupby("A").rolling(4).B.mean()
The ``expanding()`` method will accumulate a given operation
@@ -872,7 +887,7 @@ group.
.. ipython:: python
- df_re.groupby('A').expanding().sum()
+ df_re.groupby("A").expanding().sum()
Suppose you want to use the ``resample()`` method to get a daily
@@ -881,13 +896,16 @@ missing values with the ``ffill()`` method.
.. ipython:: python
- df_re = pd.DataFrame({'date': pd.date_range(start='2016-01-01', periods=4,
- freq='W'),
- 'group': [1, 1, 2, 2],
- 'val': [5, 6, 7, 8]}).set_index('date')
+ df_re = pd.DataFrame(
+ {
+ "date": pd.date_range(start="2016-01-01", periods=4, freq="W"),
+ "group": [1, 1, 2, 2],
+ "val": [5, 6, 7, 8],
+ }
+ ).set_index("date")
df_re
- df_re.groupby('group').resample('1D').ffill()
+ df_re.groupby("group").resample("1D").ffill()
.. _groupby.filter:
@@ -911,8 +929,8 @@ with only a couple members.
.. ipython:: python
- dff = pd.DataFrame({'A': np.arange(8), 'B': list('aabbbbcc')})
- dff.groupby('B').filter(lambda x: len(x) > 2)
+ dff = pd.DataFrame({"A": np.arange(8), "B": list("aabbbbcc")})
+ dff.groupby("B").filter(lambda x: len(x) > 2)
Alternatively, instead of dropping the offending groups, we can return a
like-indexed object where the groups that do not pass the filter are filled
@@ -920,14 +938,14 @@ with NaNs.
.. ipython:: python
- dff.groupby('B').filter(lambda x: len(x) > 2, dropna=False)
+ dff.groupby("B").filter(lambda x: len(x) > 2, dropna=False)
For DataFrames with multiple columns, filters should explicitly specify a column as the filter criterion.
.. ipython:: python
- dff['C'] = np.arange(8)
- dff.groupby('B').filter(lambda x: len(x['C']) > 2)
+ dff["C"] = np.arange(8)
+ dff.groupby("B").filter(lambda x: len(x["C"]) > 2)
.. note::
@@ -939,7 +957,7 @@ For DataFrames with multiple columns, filters should explicitly specify a column
.. ipython:: python
- dff.groupby('B').head(2)
+ dff.groupby("B").head(2)
.. _groupby.dispatch:
@@ -953,7 +971,7 @@ functions:
.. ipython:: python
- grouped = df.groupby('A')
+ grouped = df.groupby("A")
grouped.agg(lambda x: x.std())
But, it's rather verbose and can be untidy if you need to pass additional
@@ -973,12 +991,14 @@ next). This enables some operations to be carried out rather succinctly:
.. ipython:: python
- tsdf = pd.DataFrame(np.random.randn(1000, 3),
- index=pd.date_range('1/1/2000', periods=1000),
- columns=['A', 'B', 'C'])
+ tsdf = pd.DataFrame(
+ np.random.randn(1000, 3),
+ index=pd.date_range("1/1/2000", periods=1000),
+ columns=["A", "B", "C"],
+ )
tsdf.iloc[::2] = np.nan
grouped = tsdf.groupby(lambda x: x.year)
- grouped.fillna(method='pad')
+ grouped.fillna(method="pad")
In this example, we chopped the collection of time series into yearly chunks
then independently called :ref:`fillna <missing_data.fillna>` on the
@@ -989,7 +1009,7 @@ The ``nlargest`` and ``nsmallest`` methods work on ``Series`` style groupbys:
.. ipython:: python
s = pd.Series([9, 8, 7, 5, 19, 1, 4.2, 3.3])
- g = pd.Series(list('abababab'))
+ g = pd.Series(list("abababab"))
gb = s.groupby(g)
gb.nlargest(3)
gb.nsmallest(3)
@@ -1008,10 +1028,10 @@ for both ``aggregate`` and ``transform`` in many standard use cases. However,
.. ipython:: python
df
- grouped = df.groupby('A')
+ grouped = df.groupby("A")
# could also just call .describe()
- grouped['C'].apply(lambda x: x.describe())
+ grouped["C"].apply(lambda x: x.describe())
The dimension of the returned result can also change:
@@ -1032,7 +1052,8 @@ that is itself a series, and possibly upcast the result to a DataFrame:
.. ipython:: python
def f(x):
- return pd.Series([x, x ** 2], index=['x', 'x^2'])
+ return pd.Series([x, x ** 2], index=["x", "x^2"])
+
s = pd.Series(np.random.rand(5))
s
@@ -1133,7 +1154,7 @@ will be (silently) dropped. Thus, this does not pose any problems:
.. ipython:: python
- df.groupby('A').std()
+ df.groupby("A").std()
Note that ``df.groupby('A').colname.std()`` is more efficient than
``df.groupby('A').std().colname``, so if the result of an aggregation function
@@ -1151,23 +1172,29 @@ is only interesting over one column (here ``colname``), it may be filtered
.. ipython:: python
from decimal import Decimal
+
df_dec = pd.DataFrame(
- {'id': [1, 2, 1, 2],
- 'int_column': [1, 2, 3, 4],
- 'dec_column': [Decimal('0.50'), Decimal('0.15'),
- Decimal('0.25'), Decimal('0.40')]
- }
+ {
+ "id": [1, 2, 1, 2],
+ "int_column": [1, 2, 3, 4],
+ "dec_column": [
+ Decimal("0.50"),
+ Decimal("0.15"),
+ Decimal("0.25"),
+ Decimal("0.40"),
+ ],
+ }
)
    # Decimal columns can be summed explicitly by themselves...
- df_dec.groupby(['id'])[['dec_column']].sum()
+ df_dec.groupby(["id"])[["dec_column"]].sum()
# ...but cannot be combined with standard data types or they will be excluded
- df_dec.groupby(['id'])[['int_column', 'dec_column']].sum()
+ df_dec.groupby(["id"])[["int_column", "dec_column"]].sum()
# Use .agg function to aggregate over standard and "nuisance" data types
# at the same time
- df_dec.groupby(['id']).agg({'int_column': 'sum', 'dec_column': 'sum'})
+ df_dec.groupby(["id"]).agg({"int_column": "sum", "dec_column": "sum"})
.. _groupby.observed:
@@ -1182,25 +1209,27 @@ Show all values:
.. ipython:: python
- pd.Series([1, 1, 1]).groupby(pd.Categorical(['a', 'a', 'a'],
- categories=['a', 'b']),
- observed=False).count()
+ pd.Series([1, 1, 1]).groupby(
+ pd.Categorical(["a", "a", "a"], categories=["a", "b"]), observed=False
+ ).count()
Show only the observed values:
.. ipython:: python
- pd.Series([1, 1, 1]).groupby(pd.Categorical(['a', 'a', 'a'],
- categories=['a', 'b']),
- observed=True).count()
+ pd.Series([1, 1, 1]).groupby(
+ pd.Categorical(["a", "a", "a"], categories=["a", "b"]), observed=True
+ ).count()
The returned dtype of the grouped will *always* include *all* of the categories that were grouped.
.. ipython:: python
- s = pd.Series([1, 1, 1]).groupby(pd.Categorical(['a', 'a', 'a'],
- categories=['a', 'b']),
- observed=False).count()
+ s = (
+ pd.Series([1, 1, 1])
+ .groupby(pd.Categorical(["a", "a", "a"], categories=["a", "b"]), observed=False)
+ .count()
+ )
s.index.dtype
.. _groupby.missing:
@@ -1224,7 +1253,7 @@ can be used as group keys. If so, the order of the levels will be preserved:
data = pd.Series(np.random.randn(100))
- factor = pd.qcut(data, [0, .25, .5, .75, 1.])
+ factor = pd.qcut(data, [0, 0.25, 0.5, 0.75, 1.0])
data.groupby(factor).mean()
@@ -1240,19 +1269,23 @@ use the ``pd.Grouper`` to provide this local control.
import datetime
- df = pd.DataFrame({'Branch': 'A A A A A A A B'.split(),
- 'Buyer': 'Carl Mark Carl Carl Joe Joe Joe Carl'.split(),
- 'Quantity': [1, 3, 5, 1, 8, 1, 9, 3],
- 'Date': [
- datetime.datetime(2013, 1, 1, 13, 0),
- datetime.datetime(2013, 1, 1, 13, 5),
- datetime.datetime(2013, 10, 1, 20, 0),
- datetime.datetime(2013, 10, 2, 10, 0),
- datetime.datetime(2013, 10, 1, 20, 0),
- datetime.datetime(2013, 10, 2, 10, 0),
- datetime.datetime(2013, 12, 2, 12, 0),
- datetime.datetime(2013, 12, 2, 14, 0)]
- })
+ df = pd.DataFrame(
+ {
+ "Branch": "A A A A A A A B".split(),
+ "Buyer": "Carl Mark Carl Carl Joe Joe Joe Carl".split(),
+ "Quantity": [1, 3, 5, 1, 8, 1, 9, 3],
+ "Date": [
+ datetime.datetime(2013, 1, 1, 13, 0),
+ datetime.datetime(2013, 1, 1, 13, 5),
+ datetime.datetime(2013, 10, 1, 20, 0),
+ datetime.datetime(2013, 10, 2, 10, 0),
+ datetime.datetime(2013, 10, 1, 20, 0),
+ datetime.datetime(2013, 10, 2, 10, 0),
+ datetime.datetime(2013, 12, 2, 12, 0),
+ datetime.datetime(2013, 12, 2, 14, 0),
+ ],
+ }
+ )
df
@@ -1260,18 +1293,18 @@ Groupby a specific column with the desired frequency. This is like resampling.
.. ipython:: python
- df.groupby([pd.Grouper(freq='1M', key='Date'), 'Buyer']).sum()
+ df.groupby([pd.Grouper(freq="1M", key="Date"), "Buyer"]).sum()
You have an ambiguous specification in that you have a named index and a column
that could be potential groupers.
.. ipython:: python
- df = df.set_index('Date')
- df['Date'] = df.index + pd.offsets.MonthEnd(2)
- df.groupby([pd.Grouper(freq='6M', key='Date'), 'Buyer']).sum()
+ df = df.set_index("Date")
+ df["Date"] = df.index + pd.offsets.MonthEnd(2)
+ df.groupby([pd.Grouper(freq="6M", key="Date"), "Buyer"]).sum()
- df.groupby([pd.Grouper(freq='6M', level='Date'), 'Buyer']).sum()
+ df.groupby([pd.Grouper(freq="6M", level="Date"), "Buyer"]).sum()
Taking the first rows of each group
@@ -1281,10 +1314,10 @@ Just like for a DataFrame or Series you can call head and tail on a groupby:
.. ipython:: python
- df = pd.DataFrame([[1, 2], [1, 4], [5, 6]], columns=['A', 'B'])
+ df = pd.DataFrame([[1, 2], [1, 4], [5, 6]], columns=["A", "B"])
df
- g = df.groupby('A')
+ g = df.groupby("A")
g.head(1)
g.tail(1)
@@ -1302,8 +1335,8 @@ will return a single row (or no row) per group if you pass an int for n:
.. ipython:: python
- df = pd.DataFrame([[1, np.nan], [1, 4], [5, 6]], columns=['A', 'B'])
- g = df.groupby('A')
+ df = pd.DataFrame([[1, np.nan], [1, 4], [5, 6]], columns=["A", "B"])
+ g = df.groupby("A")
g.nth(0)
g.nth(-1)
@@ -1314,21 +1347,21 @@ If you want to select the nth not-null item, use the ``dropna`` kwarg. For a Dat
.. ipython:: python
# nth(0) is the same as g.first()
- g.nth(0, dropna='any')
+ g.nth(0, dropna="any")
g.first()
# nth(-1) is the same as g.last()
- g.nth(-1, dropna='any') # NaNs denote group exhausted when using dropna
+ g.nth(-1, dropna="any") # NaNs denote group exhausted when using dropna
g.last()
- g.B.nth(0, dropna='all')
+ g.B.nth(0, dropna="all")
As with other methods, passing ``as_index=False``, will achieve a filtration, which returns the grouped row.
.. ipython:: python
- df = pd.DataFrame([[1, np.nan], [1, 4], [5, 6]], columns=['A', 'B'])
- g = df.groupby('A', as_index=False)
+ df = pd.DataFrame([[1, np.nan], [1, 4], [5, 6]], columns=["A", "B"])
+ g = df.groupby("A", as_index=False)
g.nth(0)
g.nth(-1)
@@ -1337,8 +1370,8 @@ You can also select multiple rows from each group by specifying multiple nth val
.. ipython:: python
- business_dates = pd.date_range(start='4/1/2014', end='6/30/2014', freq='B')
- df = pd.DataFrame(1, index=business_dates, columns=['a', 'b'])
+ business_dates = pd.date_range(start="4/1/2014", end="6/30/2014", freq="B")
+ df = pd.DataFrame(1, index=business_dates, columns=["a", "b"])
# get the first, 4th, and last date index for each month
df.groupby([df.index.year, df.index.month]).nth([0, 3, -1])
@@ -1350,12 +1383,12 @@ To see the order in which each row appears within its group, use the
.. ipython:: python
- dfg = pd.DataFrame(list('aaabba'), columns=['A'])
+ dfg = pd.DataFrame(list("aaabba"), columns=["A"])
dfg
- dfg.groupby('A').cumcount()
+ dfg.groupby("A").cumcount()
- dfg.groupby('A').cumcount(ascending=False)
+ dfg.groupby("A").cumcount(ascending=False)
.. _groupby.ngroup:
@@ -1374,12 +1407,12 @@ order they are first observed.
.. ipython:: python
- dfg = pd.DataFrame(list('aaabba'), columns=['A'])
+ dfg = pd.DataFrame(list("aaabba"), columns=["A"])
dfg
- dfg.groupby('A').ngroup()
+ dfg.groupby("A").ngroup()
- dfg.groupby('A').ngroup(ascending=False)
+ dfg.groupby("A").ngroup(ascending=False)
Plotting
~~~~~~~~
@@ -1392,8 +1425,8 @@ the values in column 1 where the group is "B" are 3 higher on average.
np.random.seed(1234)
df = pd.DataFrame(np.random.randn(50, 2))
- df['g'] = np.random.choice(['A', 'B'], size=50)
- df.loc[df['g'] == 'B', 1] += 3
+ df["g"] = np.random.choice(["A", "B"], size=50)
+ df.loc[df["g"] == "B", 1] += 3
We can easily visualize this with a boxplot:
@@ -1401,7 +1434,7 @@ We can easily visualize this with a boxplot:
:okwarning:
@savefig groupby_boxplot.png
- df.groupby('g').boxplot()
+ df.groupby("g").boxplot()
The result of calling ``boxplot`` is a dictionary whose keys are the values
of our grouping column ``g`` ("A" and "B"). The values of the resulting dictionary
@@ -1436,20 +1469,26 @@ code more readable. First we set the data:
.. ipython:: python
n = 1000
- df = pd.DataFrame({'Store': np.random.choice(['Store_1', 'Store_2'], n),
- 'Product': np.random.choice(['Product_1',
- 'Product_2'], n),
- 'Revenue': (np.random.random(n) * 50 + 10).round(2),
- 'Quantity': np.random.randint(1, 10, size=n)})
+ df = pd.DataFrame(
+ {
+ "Store": np.random.choice(["Store_1", "Store_2"], n),
+ "Product": np.random.choice(["Product_1", "Product_2"], n),
+ "Revenue": (np.random.random(n) * 50 + 10).round(2),
+ "Quantity": np.random.randint(1, 10, size=n),
+ }
+ )
df.head(2)
Now, to find prices per store/product, we can simply do:
.. ipython:: python
- (df.groupby(['Store', 'Product'])
- .pipe(lambda grp: grp.Revenue.sum() / grp.Quantity.sum())
- .unstack().round(2))
+ (
+ df.groupby(["Store", "Product"])
+ .pipe(lambda grp: grp.Revenue.sum() / grp.Quantity.sum())
+ .unstack()
+ .round(2)
+ )
Piping can also be expressive when you want to deliver a grouped object to some
arbitrary function, for example:
@@ -1459,7 +1498,8 @@ arbitrary function, for example:
def mean(groupby):
return groupby.mean()
- df.groupby(['Store', 'Product']).pipe(mean)
+
+ df.groupby(["Store", "Product"]).pipe(mean)
where ``mean`` takes a GroupBy object and finds the mean of the Revenue and Quantity
columns respectively for each Store-Product combination. The ``mean`` function can
@@ -1476,8 +1516,7 @@ Regroup columns of a DataFrame according to their sum, and sum the aggregated on
.. ipython:: python
- df = pd.DataFrame({'a': [1, 0, 0], 'b': [0, 1, 0],
- 'c': [1, 0, 0], 'd': [2, 3, 4]})
+ df = pd.DataFrame({"a": [1, 0, 0], "b": [0, 1, 0], "c": [1, 0, 0], "d": [2, 3, 4]})
df
df.groupby(df.sum(), axis=1).sum()
@@ -1536,16 +1575,22 @@ column index name will be used as the name of the inserted column:
.. ipython:: python
- df = pd.DataFrame({'a': [0, 0, 0, 0, 1, 1, 1, 1, 2, 2, 2, 2],
- 'b': [0, 0, 1, 1, 0, 0, 1, 1, 0, 0, 1, 1],
- 'c': [1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0],
- 'd': [0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 1]})
+ df = pd.DataFrame(
+ {
+ "a": [0, 0, 0, 0, 1, 1, 1, 1, 2, 2, 2, 2],
+ "b": [0, 0, 1, 1, 0, 0, 1, 1, 0, 0, 1, 1],
+ "c": [1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0],
+ "d": [0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 1],
+ }
+ )
+
def compute_metrics(x):
- result = {'b_sum': x['b'].sum(), 'c_mean': x['c'].mean()}
- return pd.Series(result, name='metrics')
+ result = {"b_sum": x["b"].sum(), "c_mean": x["c"].mean()}
+ return pd.Series(result, name="metrics")
+
- result = df.groupby('a').apply(compute_metrics)
+ result = df.groupby("a").apply(compute_metrics)
result
diff --git a/doc/source/user_guide/io.rst b/doc/source/user_guide/io.rst
index e483cebf71614..184894bbafe28 100644
--- a/doc/source/user_guide/io.rst
+++ b/doc/source/user_guide/io.rst
@@ -3310,10 +3310,10 @@ applications (CTRL-V on many operating systems). Here we illustrate writing a
.. code-block:: python
- >>> df = pd.DataFrame({'A': [1, 2, 3],
- ... 'B': [4, 5, 6],
- ... 'C': ['p', 'q', 'r']},
- ... index=['x', 'y', 'z'])
+ >>> df = pd.DataFrame(
+ ... {"A": [1, 2, 3], "B": [4, 5, 6], "C": ["p", "q", "r"]}, index=["x", "y", "z"]
+ ... )
+
>>> df
A B C
x 1 4 p
@@ -3607,8 +3607,8 @@ This format is specified by default when using ``put`` or ``to_hdf`` or by ``for
.. code-block:: python
- >>> pd.DataFrame(np.random.randn(10, 2)).to_hdf('test_fixed.h5', 'df')
- >>> pd.read_hdf('test_fixed.h5', 'df', where='index>5')
+ >>> pd.DataFrame(np.random.randn(10, 2)).to_hdf("test_fixed.h5", "df")
+ >>> pd.read_hdf("test_fixed.h5", "df", where="index>5")
TypeError: cannot pass a where specification when reading a fixed format.
this store must be selected in its entirety
diff --git a/doc/source/user_guide/missing_data.rst b/doc/source/user_guide/missing_data.rst
index 9294897686d46..3c97cc7da6edb 100644
--- a/doc/source/user_guide/missing_data.rst
+++ b/doc/source/user_guide/missing_data.rst
@@ -38,12 +38,15 @@ arise and we wish to also consider that "missing" or "not available" or "NA".
.. ipython:: python
- df = pd.DataFrame(np.random.randn(5, 3), index=['a', 'c', 'e', 'f', 'h'],
- columns=['one', 'two', 'three'])
- df['four'] = 'bar'
- df['five'] = df['one'] > 0
+ df = pd.DataFrame(
+ np.random.randn(5, 3),
+ index=["a", "c", "e", "f", "h"],
+ columns=["one", "two", "three"],
+ )
+ df["four"] = "bar"
+ df["five"] = df["one"] > 0
df
- df2 = df.reindex(['a', 'b', 'c', 'd', 'e', 'f', 'g', 'h'])
+ df2 = df.reindex(["a", "b", "c", "d", "e", "f", "g", "h"])
df2
To make detecting missing values easier (and across different array dtypes),
@@ -53,9 +56,9 @@ Series and DataFrame objects:
.. ipython:: python
- df2['one']
- pd.isna(df2['one'])
- df2['four'].notna()
+ df2["one"]
+ pd.isna(df2["one"])
+ df2["four"].notna()
df2.isna()
.. warning::
@@ -65,14 +68,14 @@ Series and DataFrame objects:
.. ipython:: python
- None == None # noqa: E711
+ None == None # noqa: E711
np.nan == np.nan
So as compared to above, a scalar equality comparison versus a ``None/np.nan`` doesn't provide useful information.
.. ipython:: python
- df2['one'] == np.nan
+ df2["one"] == np.nan
Integer dtypes and missing data
-------------------------------
@@ -101,9 +104,9 @@ pandas objects provide compatibility between ``NaT`` and ``NaN``.
.. ipython:: python
df2 = df.copy()
- df2['timestamp'] = pd.Timestamp('20120101')
+ df2["timestamp"] = pd.Timestamp("20120101")
df2
- df2.loc[['a', 'c', 'h'], ['one', 'timestamp']] = np.nan
+ df2.loc[["a", "c", "h"], ["one", "timestamp"]] = np.nan
df2
df2.dtypes.value_counts()
@@ -146,9 +149,9 @@ objects.
.. ipython:: python
:suppress:
- df = df2.loc[:, ['one', 'two', 'three']]
- a = df2.loc[df2.index[:5], ['one', 'two']].fillna(method='pad')
- b = df2.loc[df2.index[:5], ['one', 'two', 'three']]
+ df = df2.loc[:, ["one", "two", "three"]]
+ a = df2.loc[df2.index[:5], ["one", "two"]].fillna(method="pad")
+ b = df2.loc[df2.index[:5], ["one", "two", "three"]]
.. ipython:: python
@@ -168,7 +171,7 @@ account for missing data. For example:
.. ipython:: python
df
- df['one'].sum()
+ df["one"].sum()
df.mean(1)
df.cumsum()
df.cumsum(skipna=False)
@@ -210,7 +213,7 @@ with R, for example:
.. ipython:: python
df
- df.groupby('one').mean()
+ df.groupby("one").mean()
See the groupby section :ref:`here <groupby.missing>` for more information.
@@ -234,7 +237,7 @@ of ways, which we illustrate:
df2
df2.fillna(0)
- df2['one'].fillna('missing')
+ df2["one"].fillna("missing")
**Fill gaps forward or backward**
@@ -244,7 +247,7 @@ can propagate non-NA values forward or backward:
.. ipython:: python
df
- df.fillna(method='pad')
+ df.fillna(method="pad")
.. _missing_data.fillna.limit:
@@ -261,7 +264,7 @@ we can use the ``limit`` keyword:
.. ipython:: python
df
- df.fillna(method='pad', limit=1)
+ df.fillna(method="pad", limit=1)
To remind you, these are the available filling methods:
@@ -289,21 +292,21 @@ use case of this is to fill a DataFrame with the mean of that column.
.. ipython:: python
- dff = pd.DataFrame(np.random.randn(10, 3), columns=list('ABC'))
+ dff = pd.DataFrame(np.random.randn(10, 3), columns=list("ABC"))
dff.iloc[3:5, 0] = np.nan
dff.iloc[4:6, 1] = np.nan
dff.iloc[5:8, 2] = np.nan
dff
dff.fillna(dff.mean())
- dff.fillna(dff.mean()['B':'C'])
+ dff.fillna(dff.mean()["B":"C"])
Same result as above, but is aligning the 'fill' value which is
a Series in this case.
.. ipython:: python
- dff.where(pd.notna(dff), dff.mean(), axis='columns')
+ dff.where(pd.notna(dff), dff.mean(), axis="columns")
.. _missing_data.dropna:
@@ -317,15 +320,15 @@ data. To do this, use :meth:`~DataFrame.dropna`:
.. ipython:: python
:suppress:
- df['two'] = df['two'].fillna(0)
- df['three'] = df['three'].fillna(0)
+ df["two"] = df["two"].fillna(0)
+ df["three"] = df["three"].fillna(0)
.. ipython:: python
df
df.dropna(axis=0)
df.dropna(axis=1)
- df['one'].dropna()
+ df["one"].dropna()
An equivalent :meth:`~Series.dropna` is available for Series.
DataFrame.dropna has considerably more options than Series.dropna, which can be
@@ -343,7 +346,7 @@ that, by default, performs linear interpolation at missing data points.
:suppress:
np.random.seed(123456)
- idx = pd.date_range('1/1/2000', periods=100, freq='BM')
+ idx = pd.date_range("1/1/2000", periods=100, freq="BM")
ts = pd.Series(np.random.randn(100), index=idx)
ts[1:5] = np.nan
ts[20:30] = np.nan
@@ -376,28 +379,29 @@ Index aware interpolation is available via the ``method`` keyword:
ts2
ts2.interpolate()
- ts2.interpolate(method='time')
+ ts2.interpolate(method="time")
For a floating-point index, use ``method='values'``:
.. ipython:: python
:suppress:
- idx = [0., 1., 10.]
- ser = pd.Series([0., np.nan, 10.], idx)
+ idx = [0.0, 1.0, 10.0]
+ ser = pd.Series([0.0, np.nan, 10.0], idx)
.. ipython:: python
ser
ser.interpolate()
- ser.interpolate(method='values')
+ ser.interpolate(method="values")
You can also interpolate with a DataFrame:
.. ipython:: python
- df = pd.DataFrame({'A': [1, 2.1, np.nan, 4.7, 5.6, 6.8],
- 'B': [.25, np.nan, np.nan, 4, 12.2, 14.4]})
+ df = pd.DataFrame(
+ {"A": [1, 2.1, np.nan, 4.7, 5.6, 6.8], "B": [0.25, np.nan, np.nan, 4, 12.2, 14.4]}
+ )
df
df.interpolate()
@@ -418,20 +422,20 @@ The appropriate interpolation method will depend on the type of data you are wor
.. ipython:: python
- df.interpolate(method='barycentric')
+ df.interpolate(method="barycentric")
- df.interpolate(method='pchip')
+ df.interpolate(method="pchip")
- df.interpolate(method='akima')
+ df.interpolate(method="akima")
When interpolating via a polynomial or spline approximation, you must also specify
the degree or order of the approximation:
.. ipython:: python
- df.interpolate(method='spline', order=2)
+ df.interpolate(method="spline", order=2)
- df.interpolate(method='polynomial', order=2)
+ df.interpolate(method="polynomial", order=2)
Compare several methods:
@@ -439,10 +443,10 @@ Compare several methods:
np.random.seed(2)
- ser = pd.Series(np.arange(1, 10.1, .25) ** 2 + np.random.randn(37))
+ ser = pd.Series(np.arange(1, 10.1, 0.25) ** 2 + np.random.randn(37))
missing = np.array([4, 13, 14, 15, 16, 17, 18, 20, 29])
ser[missing] = np.nan
- methods = ['linear', 'quadratic', 'cubic']
+ methods = ["linear", "quadratic", "cubic"]
df = pd.DataFrame({m: ser.interpolate(method=m) for m in methods})
@savefig compare_interpolations.png
@@ -460,7 +464,7 @@ at the new values.
# interpolate at new_index
new_index = ser.index | pd.Index([49.25, 49.5, 49.75, 50.25, 50.5, 50.75])
- interp_s = ser.reindex(new_index).interpolate(method='pchip')
+ interp_s = ser.reindex(new_index).interpolate(method="pchip")
interp_s[49:51]
.. _scipy: https://www.scipy.org
@@ -478,8 +482,7 @@ filled since the last valid observation:
.. ipython:: python
- ser = pd.Series([np.nan, np.nan, 5, np.nan, np.nan,
- np.nan, 13, np.nan, np.nan])
+ ser = pd.Series([np.nan, np.nan, 5, np.nan, np.nan, np.nan, 13, np.nan, np.nan])
ser
# fill all consecutive values in a forward direction
@@ -494,13 +497,13 @@ By default, ``NaN`` values are filled in a ``forward`` direction. Use
.. ipython:: python
# fill one consecutive value backwards
- ser.interpolate(limit=1, limit_direction='backward')
+ ser.interpolate(limit=1, limit_direction="backward")
# fill one consecutive value in both directions
- ser.interpolate(limit=1, limit_direction='both')
+ ser.interpolate(limit=1, limit_direction="both")
# fill all consecutive values in both directions
- ser.interpolate(limit_direction='both')
+ ser.interpolate(limit_direction="both")
By default, ``NaN`` values are filled whether they are inside (surrounded by)
existing valid values, or outside existing valid values. The ``limit_area``
@@ -509,13 +512,13 @@ parameter restricts filling to either inside or outside values.
.. ipython:: python
# fill one consecutive inside value in both directions
- ser.interpolate(limit_direction='both', limit_area='inside', limit=1)
+ ser.interpolate(limit_direction="both", limit_area="inside", limit=1)
# fill all consecutive outside values backward
- ser.interpolate(limit_direction='backward', limit_area='outside')
+ ser.interpolate(limit_direction="backward", limit_area="outside")
# fill all consecutive outside values in both directions
- ser.interpolate(limit_direction='both', limit_area='outside')
+ ser.interpolate(limit_direction="both", limit_area="outside")
.. _missing_data.replace:
@@ -531,7 +534,7 @@ value:
.. ipython:: python
- ser = pd.Series([0., 1., 2., 3., 4.])
+ ser = pd.Series([0.0, 1.0, 2.0, 3.0, 4.0])
ser.replace(0, 5)
@@ -551,16 +554,16 @@ For a DataFrame, you can specify individual values by column:
.. ipython:: python
- df = pd.DataFrame({'a': [0, 1, 2, 3, 4], 'b': [5, 6, 7, 8, 9]})
+ df = pd.DataFrame({"a": [0, 1, 2, 3, 4], "b": [5, 6, 7, 8, 9]})
- df.replace({'a': 0, 'b': 5}, 100)
+ df.replace({"a": 0, "b": 5}, 100)
Instead of replacing with specified values, you can treat all given values as
missing and interpolate over them:
.. ipython:: python
- ser.replace([1, 2, 3], method='pad')
+ ser.replace([1, 2, 3], method="pad")
.. _missing_data.replace_expression:
@@ -581,67 +584,67 @@ Replace the '.' with ``NaN`` (str -> str):
.. ipython:: python
- d = {'a': list(range(4)), 'b': list('ab..'), 'c': ['a', 'b', np.nan, 'd']}
+ d = {"a": list(range(4)), "b": list("ab.."), "c": ["a", "b", np.nan, "d"]}
df = pd.DataFrame(d)
- df.replace('.', np.nan)
+ df.replace(".", np.nan)
Now do it with a regular expression that removes surrounding whitespace
(regex -> regex):
.. ipython:: python
- df.replace(r'\s*\.\s*', np.nan, regex=True)
+ df.replace(r"\s*\.\s*", np.nan, regex=True)
Replace a few different values (list -> list):
.. ipython:: python
- df.replace(['a', '.'], ['b', np.nan])
+ df.replace(["a", "."], ["b", np.nan])
list of regex -> list of regex:
.. ipython:: python
- df.replace([r'\.', r'(a)'], ['dot', r'\1stuff'], regex=True)
+ df.replace([r"\.", r"(a)"], ["dot", r"\1stuff"], regex=True)
Only search in column ``'b'`` (dict -> dict):
.. ipython:: python
- df.replace({'b': '.'}, {'b': np.nan})
+ df.replace({"b": "."}, {"b": np.nan})
Same as the previous example, but use a regular expression for
searching instead (dict of regex -> dict):
.. ipython:: python
- df.replace({'b': r'\s*\.\s*'}, {'b': np.nan}, regex=True)
+ df.replace({"b": r"\s*\.\s*"}, {"b": np.nan}, regex=True)
You can pass nested dictionaries of regular expressions that use ``regex=True``:
.. ipython:: python
- df.replace({'b': {'b': r''}}, regex=True)
+ df.replace({"b": {"b": r""}}, regex=True)
Alternatively, you can pass the nested dictionary like so:
.. ipython:: python
- df.replace(regex={'b': {r'\s*\.\s*': np.nan}})
+ df.replace(regex={"b": {r"\s*\.\s*": np.nan}})
You can also use the group of a regular expression match when replacing (dict
of regex -> dict of regex), this works for lists as well.
.. ipython:: python
- df.replace({'b': r'\s*(\.)\s*'}, {'b': r'\1ty'}, regex=True)
+ df.replace({"b": r"\s*(\.)\s*"}, {"b": r"\1ty"}, regex=True)
You can pass a list of regular expressions, of which those that match
will be replaced with a scalar (list of regex -> regex).
.. ipython:: python
- df.replace([r'\s*\.\s*', r'a|b'], np.nan, regex=True)
+ df.replace([r"\s*\.\s*", r"a|b"], np.nan, regex=True)
All of the regular expression examples can also be passed with the
``to_replace`` argument as the ``regex`` argument. In this case the ``value``
@@ -650,7 +653,7 @@ dictionary. The previous example, in this case, would then be:
.. ipython:: python
- df.replace(regex=[r'\s*\.\s*', r'a|b'], value=np.nan)
+ df.replace(regex=[r"\s*\.\s*", r"a|b"], value=np.nan)
This can be convenient if you do not want to pass ``regex=True`` every time you
want to use a regular expression.
@@ -676,7 +679,7 @@ Replacing more than one value is possible by passing a list.
.. ipython:: python
df00 = df.iloc[0, 0]
- df.replace([1.5, df00], [np.nan, 'a'])
+ df.replace([1.5, df00], [np.nan, "a"])
df[1].dtype
You can also operate on the DataFrame in place:
@@ -932,7 +935,7 @@ the first 10 columns.
.. ipython:: python
- bb = pd.read_csv('data/baseball.csv', index_col='id')
+ bb = pd.read_csv("data/baseball.csv", index_col="id")
bb[bb.columns[:10]].dtypes
.. ipython:: python
diff --git a/doc/source/user_guide/scale.rst b/doc/source/user_guide/scale.rst
index 206d8dd0f4739..f36f27269a996 100644
--- a/doc/source/user_guide/scale.rst
+++ b/doc/source/user_guide/scale.rst
@@ -72,7 +72,7 @@ Option 1 loads in all the data and then filters to what we need.
.. ipython:: python
- columns = ['id_0', 'name_0', 'x_0', 'y_0']
+ columns = ["id_0", "name_0", "x_0", "y_0"]
pd.read_parquet("timeseries_wide.parquet")[columns]
@@ -123,7 +123,7 @@ space-efficient integers to know which specific name is used in each row.
.. ipython:: python
ts2 = ts.copy()
- ts2['name'] = ts2['name'].astype('category')
+ ts2["name"] = ts2["name"].astype("category")
ts2.memory_usage(deep=True)
We can go a bit further and downcast the numeric columns to their smallest types
@@ -131,8 +131,8 @@ using :func:`pandas.to_numeric`.
.. ipython:: python
- ts2['id'] = pd.to_numeric(ts2['id'], downcast='unsigned')
- ts2[['x', 'y']] = ts2[['x', 'y']].apply(pd.to_numeric, downcast='float')
+ ts2["id"] = pd.to_numeric(ts2["id"], downcast="unsigned")
+ ts2[["x", "y"]] = ts2[["x", "y"]].apply(pd.to_numeric, downcast="float")
ts2.dtypes
.. ipython:: python
@@ -141,8 +141,7 @@ using :func:`pandas.to_numeric`.
.. ipython:: python
- reduction = (ts2.memory_usage(deep=True).sum()
- / ts.memory_usage(deep=True).sum())
+ reduction = ts2.memory_usage(deep=True).sum() / ts.memory_usage(deep=True).sum()
print(f"{reduction:0.2f}")
In all, we've reduced the in-memory footprint of this dataset to 1/5 of its
@@ -174,13 +173,13 @@ files. Each file in the directory represents a different year of the entire data
import pathlib
N = 12
- starts = [f'20{i:>02d}-01-01' for i in range(N)]
- ends = [f'20{i:>02d}-12-13' for i in range(N)]
+ starts = [f"20{i:>02d}-01-01" for i in range(N)]
+ ends = [f"20{i:>02d}-12-13" for i in range(N)]
pathlib.Path("data/timeseries").mkdir(exist_ok=True)
for i, (start, end) in enumerate(zip(starts, ends)):
- ts = _make_timeseries(start=start, end=end, freq='1T', seed=i)
+ ts = _make_timeseries(start=start, end=end, freq="1T", seed=i)
ts.to_parquet(f"data/timeseries/ts-{i:0>2d}.parquet")
@@ -215,7 +214,7 @@ work for arbitrary-sized datasets.
# Only one dataframe is in memory at a time...
df = pd.read_parquet(path)
# ... plus a small Series ``counts``, which is updated.
- counts = counts.add(df['name'].value_counts(), fill_value=0)
+ counts = counts.add(df["name"].value_counts(), fill_value=0)
counts.astype(int)
Some readers, like :meth:`pandas.read_csv`, offer parameters to control the
@@ -278,8 +277,8 @@ Rather than executing immediately, doing operations build up a **task graph**.
.. ipython:: python
ddf
- ddf['name']
- ddf['name'].value_counts()
+ ddf["name"]
+ ddf["name"].value_counts()
Each of these calls is instant because the result isn't being computed yet.
We're just building up a list of computation to do when someone needs the
@@ -291,7 +290,7 @@ To get the actual result you can call ``.compute()``.
.. ipython:: python
- %time ddf['name'].value_counts().compute()
+ %time ddf["name"].value_counts().compute()
At that point, you get back the same thing you'd get with pandas, in this case
a concrete pandas Series with the count of each ``name``.
@@ -324,7 +323,7 @@ a familiar groupby aggregation.
.. ipython:: python
- %time ddf.groupby('name')[['x', 'y']].mean().compute().head()
+ %time ddf.groupby("name")[["x", "y"]].mean().compute().head()
The grouping and aggregation is done out-of-core and in parallel.
@@ -336,8 +335,8 @@ we need to supply the divisions manually.
.. ipython:: python
N = 12
- starts = [f'20{i:>02d}-01-01' for i in range(N)]
- ends = [f'20{i:>02d}-12-13' for i in range(N)]
+ starts = [f"20{i:>02d}-01-01" for i in range(N)]
+ ends = [f"20{i:>02d}-12-13" for i in range(N)]
divisions = tuple(pd.to_datetime(starts)) + (pd.Timestamp(ends[-1]),)
ddf.divisions = divisions
@@ -347,7 +346,7 @@ Now we can do things like fast random access with ``.loc``.
.. ipython:: python
- ddf.loc['2002-01-01 12:01':'2002-01-01 12:05'].compute()
+ ddf.loc["2002-01-01 12:01":"2002-01-01 12:05"].compute()
Dask knows to just look in the 3rd partition for selecting values in 2002. It
doesn't need to look at any other data.
@@ -362,7 +361,7 @@ out of memory. At that point it's just a regular pandas object.
:okwarning:
@savefig dask_resample.png
- ddf[['x', 'y']].resample("1D").mean().cumsum().compute().plot()
+ ddf[["x", "y"]].resample("1D").mean().cumsum().compute().plot()
These Dask examples have all be done using multiple processes on a single
machine. Dask can be `deployed on a cluster
diff --git a/doc/source/user_guide/timeseries.rst b/doc/source/user_guide/timeseries.rst
index 61902b4a41b7c..11ec90085d9bf 100644
--- a/doc/source/user_guide/timeseries.rst
+++ b/doc/source/user_guide/timeseries.rst
@@ -19,42 +19,43 @@ Parsing time series information from various sources and formats
import datetime
- dti = pd.to_datetime(['1/1/2018', np.datetime64('2018-01-01'),
- datetime.datetime(2018, 1, 1)])
+ dti = pd.to_datetime(
+ ["1/1/2018", np.datetime64("2018-01-01"), datetime.datetime(2018, 1, 1)]
+ )
dti
Generate sequences of fixed-frequency dates and time spans
.. ipython:: python
- dti = pd.date_range('2018-01-01', periods=3, freq='H')
+ dti = pd.date_range("2018-01-01", periods=3, freq="H")
dti
Manipulating and converting date times with timezone information
.. ipython:: python
- dti = dti.tz_localize('UTC')
+ dti = dti.tz_localize("UTC")
dti
- dti.tz_convert('US/Pacific')
+ dti.tz_convert("US/Pacific")
Resampling or converting a time series to a particular frequency
.. ipython:: python
- idx = pd.date_range('2018-01-01', periods=5, freq='H')
+ idx = pd.date_range("2018-01-01", periods=5, freq="H")
ts = pd.Series(range(len(idx)), index=idx)
ts
- ts.resample('2H').mean()
+ ts.resample("2H").mean()
Performing date and time arithmetic with absolute or relative time increments
.. ipython:: python
- friday = pd.Timestamp('2018-01-05')
+ friday = pd.Timestamp("2018-01-05")
friday.day_name()
# Add 1 day
- saturday = friday + pd.Timedelta('1 day')
+ saturday = friday + pd.Timedelta("1 day")
saturday.day_name()
# Add 1 business day (Friday --> Monday)
monday = friday + pd.offsets.BDay()
@@ -90,13 +91,13 @@ so manipulations can be performed with respect to the time element.
.. ipython:: python
- pd.Series(range(3), index=pd.date_range('2000', freq='D', periods=3))
+ pd.Series(range(3), index=pd.date_range("2000", freq="D", periods=3))
However, :class:`Series` and :class:`DataFrame` can directly also support the time component as data itself.
.. ipython:: python
- pd.Series(pd.date_range('2000', freq='D', periods=3))
+ pd.Series(pd.date_range("2000", freq="D", periods=3))
:class:`Series` and :class:`DataFrame` have extended data type support and functionality for ``datetime``, ``timedelta``
and ``Period`` data when passed into those constructors. ``DateOffset``
@@ -104,9 +105,9 @@ data however will be stored as ``object`` data.
.. ipython:: python
- pd.Series(pd.period_range('1/1/2011', freq='M', periods=3))
+ pd.Series(pd.period_range("1/1/2011", freq="M", periods=3))
pd.Series([pd.DateOffset(1), pd.DateOffset(2)])
- pd.Series(pd.date_range('1/1/2011', freq='M', periods=3))
+ pd.Series(pd.date_range("1/1/2011", freq="M", periods=3))
Lastly, pandas represents null date times, time deltas, and time spans as ``NaT`` which
is useful for representing missing or null date like values and behaves similar
@@ -132,7 +133,7 @@ time.
.. ipython:: python
pd.Timestamp(datetime.datetime(2012, 5, 1))
- pd.Timestamp('2012-05-01')
+ pd.Timestamp("2012-05-01")
pd.Timestamp(2012, 5, 1)
However, in many cases it is more natural to associate things like change
@@ -143,9 +144,9 @@ For example:
.. ipython:: python
- pd.Period('2011-01')
+ pd.Period("2011-01")
- pd.Period('2012-05', freq='D')
+ pd.Period("2012-05", freq="D")
:class:`Timestamp` and :class:`Period` can serve as an index. Lists of
``Timestamp`` and ``Period`` are automatically coerced to :class:`DatetimeIndex`
@@ -153,9 +154,11 @@ and :class:`PeriodIndex` respectively.
.. ipython:: python
- dates = [pd.Timestamp('2012-05-01'),
- pd.Timestamp('2012-05-02'),
- pd.Timestamp('2012-05-03')]
+ dates = [
+ pd.Timestamp("2012-05-01"),
+ pd.Timestamp("2012-05-02"),
+ pd.Timestamp("2012-05-03"),
+ ]
ts = pd.Series(np.random.randn(3), dates)
type(ts.index)
@@ -163,7 +166,7 @@ and :class:`PeriodIndex` respectively.
ts
- periods = [pd.Period('2012-01'), pd.Period('2012-02'), pd.Period('2012-03')]
+ periods = [pd.Period("2012-01"), pd.Period("2012-02"), pd.Period("2012-03")]
ts = pd.Series(np.random.randn(3), periods)
@@ -193,18 +196,18 @@ is converted to a ``DatetimeIndex``:
.. ipython:: python
- pd.to_datetime(pd.Series(['Jul 31, 2009', '2010-01-10', None]))
+ pd.to_datetime(pd.Series(["Jul 31, 2009", "2010-01-10", None]))
- pd.to_datetime(['2005/11/23', '2010.12.31'])
+ pd.to_datetime(["2005/11/23", "2010.12.31"])
If you use dates which start with the day first (i.e. European style),
you can pass the ``dayfirst`` flag:
.. ipython:: python
- pd.to_datetime(['04-01-2012 10:00'], dayfirst=True)
+ pd.to_datetime(["04-01-2012 10:00"], dayfirst=True)
- pd.to_datetime(['14-01-2012', '01-14-2012'], dayfirst=True)
+ pd.to_datetime(["14-01-2012", "01-14-2012"], dayfirst=True)
.. warning::
@@ -218,22 +221,22 @@ options like ``dayfirst`` or ``format``, so use ``to_datetime`` if these are req
.. ipython:: python
- pd.to_datetime('2010/11/12')
+ pd.to_datetime("2010/11/12")
- pd.Timestamp('2010/11/12')
+ pd.Timestamp("2010/11/12")
You can also use the ``DatetimeIndex`` constructor directly:
.. ipython:: python
- pd.DatetimeIndex(['2018-01-01', '2018-01-03', '2018-01-05'])
+ pd.DatetimeIndex(["2018-01-01", "2018-01-03", "2018-01-05"])
The string 'infer' can be passed in order to set the frequency of the index as the
inferred frequency upon creation:
.. ipython:: python
- pd.DatetimeIndex(['2018-01-01', '2018-01-03', '2018-01-05'], freq='infer')
+ pd.DatetimeIndex(["2018-01-01", "2018-01-03", "2018-01-05"], freq="infer")
.. _timeseries.converting.format:
@@ -245,9 +248,9 @@ This could also potentially speed up the conversion considerably.
.. ipython:: python
- pd.to_datetime('2010/11/12', format='%Y/%m/%d')
+ pd.to_datetime("2010/11/12", format="%Y/%m/%d")
- pd.to_datetime('12-11-2010 00:00', format='%d-%m-%Y %H:%M')
+ pd.to_datetime("12-11-2010 00:00", format="%d-%m-%Y %H:%M")
For more information on the choices available when specifying the ``format``
option, see the Python `datetime documentation`_.
@@ -261,10 +264,9 @@ You can also pass a ``DataFrame`` of integer or string columns to assemble into
.. ipython:: python
- df = pd.DataFrame({'year': [2015, 2016],
- 'month': [2, 3],
- 'day': [4, 5],
- 'hour': [2, 3]})
+ df = pd.DataFrame(
+ {"year": [2015, 2016], "month": [2, 3], "day": [4, 5], "hour": [2, 3]}
+ )
pd.to_datetime(df)
@@ -272,7 +274,7 @@ You can pass only the columns that you need to assemble.
.. ipython:: python
- pd.to_datetime(df[['year', 'month', 'day']])
+ pd.to_datetime(df[["year", "month", "day"]])
``pd.to_datetime`` looks for standard designations of the datetime component in the column names, including:
@@ -293,13 +295,13 @@ Pass ``errors='ignore'`` to return the original input when unparsable:
.. ipython:: python
- pd.to_datetime(['2009/07/31', 'asd'], errors='ignore')
+ pd.to_datetime(["2009/07/31", "asd"], errors="ignore")
Pass ``errors='coerce'`` to convert unparsable data to ``NaT`` (not a time):
.. ipython:: python
- pd.to_datetime(['2009/07/31', 'asd'], errors='coerce')
+ pd.to_datetime(["2009/07/31", "asd"], errors="coerce")
.. _timeseries.converting.epoch:
@@ -315,11 +317,12 @@ which can be specified. These are computed from the starting point specified by
.. ipython:: python
- pd.to_datetime([1349720105, 1349806505, 1349892905,
- 1349979305, 1350065705], unit='s')
+ pd.to_datetime([1349720105, 1349806505, 1349892905, 1349979305, 1350065705], unit="s")
- pd.to_datetime([1349720105100, 1349720105200, 1349720105300,
- 1349720105400, 1349720105500], unit='ms')
+ pd.to_datetime(
+ [1349720105100, 1349720105200, 1349720105300, 1349720105400, 1349720105500],
+ unit="ms",
+ )
.. note::
@@ -336,8 +339,8 @@ as timezone-naive timestamps and then localize to the appropriate timezone:
.. ipython:: python
- pd.Timestamp(1262347200000000000).tz_localize('US/Pacific')
- pd.DatetimeIndex([1262347200000000000]).tz_localize('US/Pacific')
+ pd.Timestamp(1262347200000000000).tz_localize("US/Pacific")
+ pd.DatetimeIndex([1262347200000000000]).tz_localize("US/Pacific")
.. note::
@@ -353,8 +356,8 @@ as timezone-naive timestamps and then localize to the appropriate timezone:
.. ipython:: python
- pd.to_datetime([1490195805.433, 1490195805.433502912], unit='s')
- pd.to_datetime(1490195805433502912, unit='ns')
+ pd.to_datetime([1490195805.433, 1490195805.433502912], unit="s")
+ pd.to_datetime(1490195805433502912, unit="ns")
.. seealso::
@@ -369,7 +372,7 @@ To invert the operation from above, namely, to convert from a ``Timestamp`` to a
.. ipython:: python
- stamps = pd.date_range('2012-10-08 18:15:05', periods=4, freq='D')
+ stamps = pd.date_range("2012-10-08 18:15:05", periods=4, freq="D")
stamps
We subtract the epoch (midnight at January 1, 1970 UTC) and then floor divide by the
@@ -377,7 +380,7 @@ We subtract the epoch (midnight at January 1, 1970 UTC) and then floor divide by
.. ipython:: python
- (stamps - pd.Timestamp("1970-01-01")) // pd.Timedelta('1s')
+ (stamps - pd.Timestamp("1970-01-01")) // pd.Timedelta("1s")
.. _timeseries.origin:
@@ -389,14 +392,14 @@ of a ``DatetimeIndex``. For example, to use 1960-01-01 as the starting date:
.. ipython:: python
- pd.to_datetime([1, 2, 3], unit='D', origin=pd.Timestamp('1960-01-01'))
+ pd.to_datetime([1, 2, 3], unit="D", origin=pd.Timestamp("1960-01-01"))
The default is set at ``origin='unix'``, which defaults to ``1970-01-01 00:00:00``.
Commonly called 'unix epoch' or POSIX time.
.. ipython:: python
- pd.to_datetime([1, 2, 3], unit='D')
+ pd.to_datetime([1, 2, 3], unit="D")
.. _timeseries.daterange:
@@ -408,9 +411,11 @@ To generate an index with timestamps, you can use either the ``DatetimeIndex`` o
.. ipython:: python
- dates = [datetime.datetime(2012, 5, 1),
- datetime.datetime(2012, 5, 2),
- datetime.datetime(2012, 5, 3)]
+ dates = [
+ datetime.datetime(2012, 5, 1),
+ datetime.datetime(2012, 5, 2),
+ datetime.datetime(2012, 5, 3),
+ ]
# Note the frequency information
index = pd.DatetimeIndex(dates)
@@ -442,9 +447,9 @@ variety of :ref:`frequency aliases <timeseries.offset_aliases>`:
.. ipython:: python
- pd.date_range(start, periods=1000, freq='M')
+ pd.date_range(start, periods=1000, freq="M")
- pd.bdate_range(start, periods=250, freq='BQS')
+ pd.bdate_range(start, periods=250, freq="BQS")
``date_range`` and ``bdate_range`` make it easy to generate a range of dates
using various combinations of parameters like ``start``, ``end``, ``periods``,
@@ -453,9 +458,9 @@ of those specified will not be generated:
.. ipython:: python
- pd.date_range(start, end, freq='BM')
+ pd.date_range(start, end, freq="BM")
- pd.date_range(start, end, freq='W')
+ pd.date_range(start, end, freq="W")
pd.bdate_range(end=end, periods=20)
@@ -467,9 +472,9 @@ resulting ``DatetimeIndex``:
.. ipython:: python
- pd.date_range('2018-01-01', '2018-01-05', periods=5)
+ pd.date_range("2018-01-01", "2018-01-05", periods=5)
- pd.date_range('2018-01-01', '2018-01-05', periods=10)
+ pd.date_range("2018-01-01", "2018-01-05", periods=10)
.. _timeseries.custom-freq-ranges:
@@ -482,13 +487,13 @@ used if a custom frequency string is passed.
.. ipython:: python
- weekmask = 'Mon Wed Fri'
+ weekmask = "Mon Wed Fri"
holidays = [datetime.datetime(2011, 1, 5), datetime.datetime(2011, 3, 14)]
- pd.bdate_range(start, end, freq='C', weekmask=weekmask, holidays=holidays)
+ pd.bdate_range(start, end, freq="C", weekmask=weekmask, holidays=holidays)
- pd.bdate_range(start, end, freq='CBMS', weekmask=weekmask)
+ pd.bdate_range(start, end, freq="CBMS", weekmask=weekmask)
.. seealso::
@@ -545,7 +550,7 @@ intelligent functionality like selection, slicing, etc.
.. ipython:: python
- rng = pd.date_range(start, end, freq='BM')
+ rng = pd.date_range(start, end, freq="BM")
ts = pd.Series(np.random.randn(len(rng)), index=rng)
ts.index
ts[:5].index
@@ -560,20 +565,20 @@ Dates and strings that parse to timestamps can be passed as indexing parameters:
.. ipython:: python
- ts['1/31/2011']
+ ts["1/31/2011"]
ts[datetime.datetime(2011, 12, 25):]
- ts['10/31/2011':'12/31/2011']
+ ts["10/31/2011":"12/31/2011"]
To provide convenience for accessing longer time series, you can also pass in
the year or year and month as strings:
.. ipython:: python
- ts['2011']
+ ts["2011"]
- ts['2011-6']
+ ts["2011-6"]
This type of slicing will work on a ``DataFrame`` with a ``DatetimeIndex`` as well. Since the
partial string selection is a form of label slicing, the endpoints **will be** included. This
@@ -586,10 +591,13 @@ would include matching times on an included date:
.. ipython:: python
:okwarning:
- dft = pd.DataFrame(np.random.randn(100000, 1), columns=['A'],
- index=pd.date_range('20130101', periods=100000, freq='T'))
+ dft = pd.DataFrame(
+ np.random.randn(100000, 1),
+ columns=["A"],
+ index=pd.date_range("20130101", periods=100000, freq="T"),
+ )
dft
- dft['2013']
+ dft["2013"]
This starts on the very first time in the month, and includes the last date and
time for the month:
@@ -597,43 +605,45 @@ time for the month:
.. ipython:: python
:okwarning:
- dft['2013-1':'2013-2']
+ dft["2013-1":"2013-2"]
This specifies a stop time **that includes all of the times on the last day**:
.. ipython:: python
:okwarning:
- dft['2013-1':'2013-2-28']
+ dft["2013-1":"2013-2-28"]
This specifies an **exact** stop time (and is not the same as the above):
.. ipython:: python
:okwarning:
- dft['2013-1':'2013-2-28 00:00:00']
+ dft["2013-1":"2013-2-28 00:00:00"]
We are stopping on the included end-point as it is part of the index:
.. ipython:: python
:okwarning:
- dft['2013-1-15':'2013-1-15 12:30:00']
+ dft["2013-1-15":"2013-1-15 12:30:00"]
``DatetimeIndex`` partial string indexing also works on a ``DataFrame`` with a ``MultiIndex``:
.. ipython:: python
- dft2 = pd.DataFrame(np.random.randn(20, 1),
- columns=['A'],
- index=pd.MultiIndex.from_product(
- [pd.date_range('20130101', periods=10, freq='12H'),
- ['a', 'b']]))
+ dft2 = pd.DataFrame(
+ np.random.randn(20, 1),
+ columns=["A"],
+ index=pd.MultiIndex.from_product(
+ [pd.date_range("20130101", periods=10, freq="12H"), ["a", "b"]]
+ ),
+ )
dft2
- dft2.loc['2013-01-05']
+ dft2.loc["2013-01-05"]
idx = pd.IndexSlice
dft2 = dft2.swaplevel(0, 1).sort_index()
- dft2.loc[idx[:, '2013-01-05'], :]
+ dft2.loc[idx[:, "2013-01-05"], :]
.. versionadded:: 0.25.0
@@ -642,9 +652,9 @@ Slicing with string indexing also honors UTC offset.
.. ipython:: python
:okwarning:
- df = pd.DataFrame([0], index=pd.DatetimeIndex(['2019-01-01'], tz='US/Pacific'))
+ df = pd.DataFrame([0], index=pd.DatetimeIndex(["2019-01-01"], tz="US/Pacific"))
df
- df['2019-01-01 12:00:00+04:00':'2019-01-01 13:00:00+04:00']
+ df["2019-01-01 12:00:00+04:00":"2019-01-01 13:00:00+04:00"]
.. _timeseries.slice_vs_exact_match:
@@ -657,45 +667,48 @@ Consider a ``Series`` object with a minute resolution index:
.. ipython:: python
- series_minute = pd.Series([1, 2, 3],
- pd.DatetimeIndex(['2011-12-31 23:59:00',
- '2012-01-01 00:00:00',
- '2012-01-01 00:02:00']))
+ series_minute = pd.Series(
+ [1, 2, 3],
+ pd.DatetimeIndex(
+ ["2011-12-31 23:59:00", "2012-01-01 00:00:00", "2012-01-01 00:02:00"]
+ ),
+ )
series_minute.index.resolution
A timestamp string less accurate than a minute gives a ``Series`` object.
.. ipython:: python
- series_minute['2011-12-31 23']
+ series_minute["2011-12-31 23"]
A timestamp string with minute resolution (or more accurate), gives a scalar instead, i.e. it is not casted to a slice.
.. ipython:: python
- series_minute['2011-12-31 23:59']
- series_minute['2011-12-31 23:59:00']
+ series_minute["2011-12-31 23:59"]
+ series_minute["2011-12-31 23:59:00"]
If index resolution is second, then the minute-accurate timestamp gives a
``Series``.
.. ipython:: python
- series_second = pd.Series([1, 2, 3],
- pd.DatetimeIndex(['2011-12-31 23:59:59',
- '2012-01-01 00:00:00',
- '2012-01-01 00:00:01']))
+ series_second = pd.Series(
+ [1, 2, 3],
+ pd.DatetimeIndex(
+ ["2011-12-31 23:59:59", "2012-01-01 00:00:00", "2012-01-01 00:00:01"]
+ ),
+ )
series_second.index.resolution
- series_second['2011-12-31 23:59']
+ series_second["2011-12-31 23:59"]
If the timestamp string is treated as a slice, it can be used to index ``DataFrame`` with ``[]`` as well.
.. ipython:: python
:okwarning:
- dft_minute = pd.DataFrame({'a': [1, 2, 3], 'b': [4, 5, 6]},
- index=series_minute.index)
- dft_minute['2011-12-31 23']
+ dft_minute = pd.DataFrame({"a": [1, 2, 3], "b": [4, 5, 6]}, index=series_minute.index)
+ dft_minute["2011-12-31 23"]
.. warning::
@@ -706,16 +719,17 @@ If the timestamp string is treated as a slice, it can be used to index ``DataFra
.. ipython:: python
- dft_minute.loc['2011-12-31 23:59']
+ dft_minute.loc["2011-12-31 23:59"]
Note also that ``DatetimeIndex`` resolution cannot be less precise than day.
.. ipython:: python
- series_monthly = pd.Series([1, 2, 3],
- pd.DatetimeIndex(['2011-12', '2012-01', '2012-02']))
+ series_monthly = pd.Series(
+ [1, 2, 3], pd.DatetimeIndex(["2011-12", "2012-01", "2012-02"])
+ )
series_monthly.index.resolution
- series_monthly['2011-12'] # returns Series
+ series_monthly["2011-12"] # returns Series
Exact indexing
@@ -727,14 +741,15 @@ These ``Timestamp`` and ``datetime`` objects have exact ``hours, minutes,`` and
.. ipython:: python
- dft[datetime.datetime(2013, 1, 1):datetime.datetime(2013, 2, 28)]
+ dft[datetime.datetime(2013, 1, 1): datetime.datetime(2013, 2, 28)]
With no defaults.
.. ipython:: python
- dft[datetime.datetime(2013, 1, 1, 10, 12, 0):
- datetime.datetime(2013, 2, 28, 10, 12, 0)]
+ dft[
+ datetime.datetime(2013, 1, 1, 10, 12, 0): datetime.datetime(2013, 2, 28, 10, 12, 0)
+ ]
Truncating & fancy indexing
@@ -747,11 +762,11 @@ partially matching dates:
.. ipython:: python
- rng2 = pd.date_range('2011-01-01', '2012-01-01', freq='W')
+ rng2 = pd.date_range("2011-01-01", "2012-01-01", freq="W")
ts2 = pd.Series(np.random.randn(len(rng2)), index=rng2)
- ts2.truncate(before='2011-11', after='2011-12')
- ts2['2011-11':'2011-12']
+ ts2.truncate(before="2011-11", after="2011-12")
+ ts2["2011-11":"2011-12"]
Even complicated fancy indexing that breaks the ``DatetimeIndex`` frequency
regularity will result in a ``DatetimeIndex``, although frequency is lost:
@@ -807,7 +822,7 @@ You may obtain the year, week and day components of the ISO year from the ISO 86
.. ipython:: python
- idx = pd.date_range(start='2019-12-29', freq='D', periods=4)
+ idx = pd.date_range(start="2019-12-29", freq="D", periods=4)
idx.isocalendar()
idx.to_series().dt.isocalendar()
@@ -837,12 +852,12 @@ arithmetic operator (``+``) or the ``apply`` method can be used to perform the s
.. ipython:: python
# This particular day contains a day light savings time transition
- ts = pd.Timestamp('2016-10-30 00:00:00', tz='Europe/Helsinki')
+ ts = pd.Timestamp("2016-10-30 00:00:00", tz="Europe/Helsinki")
# Respects absolute time
ts + pd.Timedelta(days=1)
# Respects calendar time
ts + pd.DateOffset(days=1)
- friday = pd.Timestamp('2018-01-05')
+ friday = pd.Timestamp("2018-01-05")
friday.day_name()
# Add 2 business days (Friday --> Tuesday)
two_business_days = 2 * pd.offsets.BDay()
@@ -900,10 +915,10 @@ business offsets operate on the weekdays.
.. ipython:: python
- ts = pd.Timestamp('2018-01-06 00:00:00')
+ ts = pd.Timestamp("2018-01-06 00:00:00")
ts.day_name()
# BusinessHour's valid offset dates are Monday through Friday
- offset = pd.offsets.BusinessHour(start='09:00')
+ offset = pd.offsets.BusinessHour(start="09:00")
# Bring the date to the closest offset date (Monday)
offset.rollforward(ts)
# Date is brought to the closest offset date first and then the hour is added
@@ -916,12 +931,12 @@ in the operation).
.. ipython:: python
- ts = pd.Timestamp('2014-01-01 09:00')
+ ts = pd.Timestamp("2014-01-01 09:00")
day = pd.offsets.Day()
day.apply(ts)
day.apply(ts).normalize()
- ts = pd.Timestamp('2014-01-01 22:00')
+ ts = pd.Timestamp("2014-01-01 22:00")
hour = pd.offsets.Hour()
hour.apply(ts)
hour.apply(ts).normalize()
@@ -974,7 +989,7 @@ apply the offset to each element.
.. ipython:: python
- rng = pd.date_range('2012-01-01', '2012-01-03')
+ rng = pd.date_range("2012-01-01", "2012-01-03")
s = pd.Series(rng)
rng
rng + pd.DateOffset(months=2)
@@ -989,7 +1004,7 @@ used exactly like a ``Timedelta`` - see the
.. ipython:: python
s - pd.offsets.Day(2)
- td = s - pd.Series(pd.date_range('2011-12-29', '2011-12-31'))
+ td = s - pd.Series(pd.date_range("2011-12-29", "2011-12-31"))
td
td + pd.offsets.Minute(15)
@@ -1016,16 +1031,13 @@ As an interesting example, let's look at Egypt where a Friday-Saturday weekend i
.. ipython:: python
- weekmask_egypt = 'Sun Mon Tue Wed Thu'
+ weekmask_egypt = "Sun Mon Tue Wed Thu"
# They also observe International Workers' Day so let's
# add that for a couple of years
- holidays = ['2012-05-01',
- datetime.datetime(2013, 5, 1),
- np.datetime64('2014-05-01')]
- bday_egypt = pd.offsets.CustomBusinessDay(holidays=holidays,
- weekmask=weekmask_egypt)
+ holidays = ["2012-05-01", datetime.datetime(2013, 5, 1), np.datetime64("2014-05-01")]
+ bday_egypt = pd.offsets.CustomBusinessDay(holidays=holidays, weekmask=weekmask_egypt)
dt = datetime.datetime(2013, 4, 30)
dt + 2 * bday_egypt
@@ -1035,8 +1047,7 @@ Let's map to the weekday names:
dts = pd.date_range(dt, periods=5, freq=bday_egypt)
- pd.Series(dts.weekday, dts).map(
- pd.Series('Mon Tue Wed Thu Fri Sat Sun'.split()))
+ pd.Series(dts.weekday, dts).map(pd.Series("Mon Tue Wed Thu Fri Sat Sun".split()))
Holiday calendars can be used to provide the list of holidays. See the
:ref:`holiday calendar<timeseries.holiday>` section for more information.
@@ -1058,15 +1069,14 @@ in the usual way.
.. ipython:: python
- bmth_us = pd.offsets.CustomBusinessMonthBegin(
- calendar=USFederalHolidayCalendar())
+ bmth_us = pd.offsets.CustomBusinessMonthBegin(calendar=USFederalHolidayCalendar())
# Skip new years
dt = datetime.datetime(2013, 12, 17)
dt + bmth_us
# Define date index with custom offset
- pd.date_range(start='20100101', end='20120101', freq=bmth_us)
+ pd.date_range(start="20100101", end="20120101", freq=bmth_us)
.. note::
@@ -1097,23 +1107,23 @@ hours are added to the next business day.
bh
# 2014-08-01 is Friday
- pd.Timestamp('2014-08-01 10:00').weekday()
- pd.Timestamp('2014-08-01 10:00') + bh
+ pd.Timestamp("2014-08-01 10:00").weekday()
+ pd.Timestamp("2014-08-01 10:00") + bh
# Below example is the same as: pd.Timestamp('2014-08-01 09:00') + bh
- pd.Timestamp('2014-08-01 08:00') + bh
+ pd.Timestamp("2014-08-01 08:00") + bh
 # If the result is on the end time, move to the next business day
- pd.Timestamp('2014-08-01 16:00') + bh
+ pd.Timestamp("2014-08-01 16:00") + bh
 # Remaining hours are added to the next day
- pd.Timestamp('2014-08-01 16:30') + bh
+ pd.Timestamp("2014-08-01 16:30") + bh
# Adding 2 business hours
- pd.Timestamp('2014-08-01 10:00') + pd.offsets.BusinessHour(2)
+ pd.Timestamp("2014-08-01 10:00") + pd.offsets.BusinessHour(2)
# Subtracting 3 business hours
- pd.Timestamp('2014-08-01 10:00') + pd.offsets.BusinessHour(-3)
+ pd.Timestamp("2014-08-01 10:00") + pd.offsets.BusinessHour(-3)
You can also specify ``start`` and ``end`` time by keywords. The argument must
be a ``str`` with an ``hour:minute`` representation or a ``datetime.time``
@@ -1122,12 +1132,12 @@ results in ``ValueError``.
.. ipython:: python
- bh = pd.offsets.BusinessHour(start='11:00', end=datetime.time(20, 0))
+ bh = pd.offsets.BusinessHour(start="11:00", end=datetime.time(20, 0))
bh
- pd.Timestamp('2014-08-01 13:00') + bh
- pd.Timestamp('2014-08-01 09:00') + bh
- pd.Timestamp('2014-08-01 18:00') + bh
+ pd.Timestamp("2014-08-01 13:00") + bh
+ pd.Timestamp("2014-08-01 09:00") + bh
+ pd.Timestamp("2014-08-01 18:00") + bh
Passing ``start`` time later than ``end`` represents midnight business hour.
In this case, business hour exceeds midnight and overlap to the next day.
@@ -1135,19 +1145,19 @@ Valid business hours are distinguished by whether it started from valid ``Busine
.. ipython:: python
- bh = pd.offsets.BusinessHour(start='17:00', end='09:00')
+ bh = pd.offsets.BusinessHour(start="17:00", end="09:00")
bh
- pd.Timestamp('2014-08-01 17:00') + bh
- pd.Timestamp('2014-08-01 23:00') + bh
+ pd.Timestamp("2014-08-01 17:00") + bh
+ pd.Timestamp("2014-08-01 23:00") + bh
# Although 2014-08-02 is Saturday,
# it is valid because it starts from 08-01 (Friday).
- pd.Timestamp('2014-08-02 04:00') + bh
+ pd.Timestamp("2014-08-02 04:00") + bh
# Although 2014-08-04 is Monday,
# it is out of business hours because it starts from 08-03 (Sunday).
- pd.Timestamp('2014-08-04 04:00') + bh
+ pd.Timestamp("2014-08-04 04:00") + bh
Applying ``BusinessHour.rollforward`` and ``rollback`` to out of business hours results in
the next business hour start or previous day's end. Different from other offsets, ``BusinessHour.rollforward``
@@ -1160,19 +1170,19 @@ under the default business hours (9:00 - 17:00), there is no gap (0 minutes) bet
.. ipython:: python
# This adjusts a Timestamp to business hour edge
- pd.offsets.BusinessHour().rollback(pd.Timestamp('2014-08-02 15:00'))
- pd.offsets.BusinessHour().rollforward(pd.Timestamp('2014-08-02 15:00'))
+ pd.offsets.BusinessHour().rollback(pd.Timestamp("2014-08-02 15:00"))
+ pd.offsets.BusinessHour().rollforward(pd.Timestamp("2014-08-02 15:00"))
# It is the same as BusinessHour().apply(pd.Timestamp('2014-08-01 17:00')).
# And it is the same as BusinessHour().apply(pd.Timestamp('2014-08-04 09:00'))
- pd.offsets.BusinessHour().apply(pd.Timestamp('2014-08-02 15:00'))
+ pd.offsets.BusinessHour().apply(pd.Timestamp("2014-08-02 15:00"))
# BusinessDay results (for reference)
- pd.offsets.BusinessHour().rollforward(pd.Timestamp('2014-08-02'))
+ pd.offsets.BusinessHour().rollforward(pd.Timestamp("2014-08-02"))
# It is the same as BusinessDay().apply(pd.Timestamp('2014-08-01'))
 # The result is the same as rollforward because BusinessDay never overlaps.
- pd.offsets.BusinessHour().apply(pd.Timestamp('2014-08-02'))
+ pd.offsets.BusinessHour().apply(pd.Timestamp("2014-08-02"))
``BusinessHour`` regards Saturday and Sunday as holidays. To use arbitrary
holidays, you can use ``CustomBusinessHour`` offset, as explained in the
@@ -1190,6 +1200,7 @@ as ``BusinessHour`` except that it skips specified custom holidays.
.. ipython:: python
from pandas.tseries.holiday import USFederalHolidayCalendar
+
bhour_us = pd.offsets.CustomBusinessHour(calendar=USFederalHolidayCalendar())
# Friday before MLK Day
dt = datetime.datetime(2014, 1, 17, 15)
@@ -1203,8 +1214,7 @@ You can use keyword arguments supported by either ``BusinessHour`` and ``CustomB
.. ipython:: python
- bhour_mon = pd.offsets.CustomBusinessHour(start='10:00',
- weekmask='Tue Wed Thu Fri')
+ bhour_mon = pd.offsets.CustomBusinessHour(start="10:00", weekmask="Tue Wed Thu Fri")
# Monday is skipped because it's a holiday, business hour starts from 10:00
dt + bhour_mon * 2
@@ -1257,7 +1267,7 @@ most functions:
.. ipython:: python
- pd.date_range(start, periods=5, freq='B')
+ pd.date_range(start, periods=5, freq="B")
pd.date_range(start, periods=5, freq=pd.offsets.BDay())
@@ -1265,9 +1275,9 @@ You can combine together day and intraday offsets:
.. ipython:: python
- pd.date_range(start, periods=10, freq='2h20min')
+ pd.date_range(start, periods=10, freq="2h20min")
- pd.date_range(start, periods=10, freq='1D10U')
+ pd.date_range(start, periods=10, freq="1D10U")
Anchored offsets
~~~~~~~~~~~~~~~~
@@ -1326,39 +1336,39 @@ anchor point, and moved ``|n|-1`` additional steps forwards or backwards.
.. ipython:: python
- pd.Timestamp('2014-01-02') + pd.offsets.MonthBegin(n=1)
- pd.Timestamp('2014-01-02') + pd.offsets.MonthEnd(n=1)
+ pd.Timestamp("2014-01-02") + pd.offsets.MonthBegin(n=1)
+ pd.Timestamp("2014-01-02") + pd.offsets.MonthEnd(n=1)
- pd.Timestamp('2014-01-02') - pd.offsets.MonthBegin(n=1)
- pd.Timestamp('2014-01-02') - pd.offsets.MonthEnd(n=1)
+ pd.Timestamp("2014-01-02") - pd.offsets.MonthBegin(n=1)
+ pd.Timestamp("2014-01-02") - pd.offsets.MonthEnd(n=1)
- pd.Timestamp('2014-01-02') + pd.offsets.MonthBegin(n=4)
- pd.Timestamp('2014-01-02') - pd.offsets.MonthBegin(n=4)
+ pd.Timestamp("2014-01-02") + pd.offsets.MonthBegin(n=4)
+ pd.Timestamp("2014-01-02") - pd.offsets.MonthBegin(n=4)
If the given date *is* on an anchor point, it is moved ``|n|`` points forwards
or backwards.
.. ipython:: python
- pd.Timestamp('2014-01-01') + pd.offsets.MonthBegin(n=1)
- pd.Timestamp('2014-01-31') + pd.offsets.MonthEnd(n=1)
+ pd.Timestamp("2014-01-01") + pd.offsets.MonthBegin(n=1)
+ pd.Timestamp("2014-01-31") + pd.offsets.MonthEnd(n=1)
- pd.Timestamp('2014-01-01') - pd.offsets.MonthBegin(n=1)
- pd.Timestamp('2014-01-31') - pd.offsets.MonthEnd(n=1)
+ pd.Timestamp("2014-01-01") - pd.offsets.MonthBegin(n=1)
+ pd.Timestamp("2014-01-31") - pd.offsets.MonthEnd(n=1)
- pd.Timestamp('2014-01-01') + pd.offsets.MonthBegin(n=4)
- pd.Timestamp('2014-01-31') - pd.offsets.MonthBegin(n=4)
+ pd.Timestamp("2014-01-01") + pd.offsets.MonthBegin(n=4)
+ pd.Timestamp("2014-01-31") - pd.offsets.MonthBegin(n=4)
For the case when ``n=0``, the date is not moved if on an anchor point, otherwise
it is rolled forward to the next anchor point.
.. ipython:: python
- pd.Timestamp('2014-01-02') + pd.offsets.MonthBegin(n=0)
- pd.Timestamp('2014-01-02') + pd.offsets.MonthEnd(n=0)
+ pd.Timestamp("2014-01-02") + pd.offsets.MonthBegin(n=0)
+ pd.Timestamp("2014-01-02") + pd.offsets.MonthEnd(n=0)
- pd.Timestamp('2014-01-01') + pd.offsets.MonthBegin(n=0)
- pd.Timestamp('2014-01-31') + pd.offsets.MonthEnd(n=0)
+ pd.Timestamp("2014-01-01") + pd.offsets.MonthBegin(n=0)
+ pd.Timestamp("2014-01-31") + pd.offsets.MonthEnd(n=0)
.. _timeseries.holiday:
@@ -1394,14 +1404,22 @@ An example of how holidays and holiday calendars are defined:
.. ipython:: python
- from pandas.tseries.holiday import Holiday, USMemorialDay,\
- AbstractHolidayCalendar, nearest_workday, MO
+ from pandas.tseries.holiday import (
+ Holiday,
+ USMemorialDay,
+ AbstractHolidayCalendar,
+ nearest_workday,
+ MO,
+ )
+
+
class ExampleCalendar(AbstractHolidayCalendar):
rules = [
USMemorialDay,
- Holiday('July 4th', month=7, day=4, observance=nearest_workday),
- Holiday('Columbus Day', month=10, day=1,
- offset=pd.DateOffset(weekday=MO(2)))]
+ Holiday("July 4th", month=7, day=4, observance=nearest_workday),
+ Holiday("Columbus Day", month=10, day=1, offset=pd.DateOffset(weekday=MO(2))),
+ ]
+
cal = ExampleCalendar()
cal.holidays(datetime.datetime(2012, 1, 1), datetime.datetime(2012, 12, 31))
@@ -1417,8 +1435,9 @@ or ``Timestamp`` objects.
.. ipython:: python
- pd.date_range(start='7/1/2012', end='7/10/2012',
- freq=pd.offsets.CDay(calendar=cal)).to_pydatetime()
+ pd.date_range(
+ start="7/1/2012", end="7/10/2012", freq=pd.offsets.CDay(calendar=cal)
+ ).to_pydatetime()
offset = pd.offsets.CustomBusinessDay(calendar=cal)
datetime.datetime(2012, 5, 25) + offset
datetime.datetime(2012, 7, 3) + offset
@@ -1450,11 +1469,11 @@ or calendars with additional rules.
.. ipython:: python
- from pandas.tseries.holiday import get_calendar, HolidayCalendarFactory,\
- USLaborDay
- cal = get_calendar('ExampleCalendar')
+ from pandas.tseries.holiday import get_calendar, HolidayCalendarFactory, USLaborDay
+
+ cal = get_calendar("ExampleCalendar")
cal.rules
- new_cal = HolidayCalendarFactory('NewExampleCalendar', cal, USLaborDay)
+ new_cal = HolidayCalendarFactory("NewExampleCalendar", cal, USLaborDay)
new_cal.rules
.. _timeseries.advanced_datetime:
@@ -1484,9 +1503,9 @@ rather than changing the alignment of the data and the index:
.. ipython:: python
- ts.shift(5, freq='D')
+ ts.shift(5, freq="D")
ts.shift(5, freq=pd.offsets.BDay())
- ts.shift(5, freq='BM')
+ ts.shift(5, freq="BM")
Note that when ``freq`` is specified, the leading entry is no longer NaN
because the data is not being realigned.
@@ -1501,7 +1520,7 @@ calls ``reindex``.
.. ipython:: python
- dr = pd.date_range('1/1/2010', periods=3, freq=3 * pd.offsets.BDay())
+ dr = pd.date_range("1/1/2010", periods=3, freq=3 * pd.offsets.BDay())
ts = pd.Series(np.random.randn(3), index=dr)
ts
ts.asfreq(pd.offsets.BDay())
@@ -1511,7 +1530,7 @@ method for any gaps that may appear after the frequency conversion.
.. ipython:: python
- ts.asfreq(pd.offsets.BDay(), method='pad')
+ ts.asfreq(pd.offsets.BDay(), method="pad")
Filling forward / backward
~~~~~~~~~~~~~~~~~~~~~~~~~~
@@ -1552,11 +1571,11 @@ Basics
.. ipython:: python
- rng = pd.date_range('1/1/2012', periods=100, freq='S')
+ rng = pd.date_range("1/1/2012", periods=100, freq="S")
ts = pd.Series(np.random.randint(0, 500, len(rng)), index=rng)
- ts.resample('5Min').sum()
+ ts.resample("5Min").sum()
The ``resample`` function is very flexible and allows you to specify many
different parameters to control the frequency conversion and resampling
@@ -1568,11 +1587,11 @@ a method of the returned object, including ``sum``, ``mean``, ``std``, ``sem``,
.. ipython:: python
- ts.resample('5Min').mean()
+ ts.resample("5Min").mean()
- ts.resample('5Min').ohlc()
+ ts.resample("5Min").ohlc()
- ts.resample('5Min').max()
+ ts.resample("5Min").max()
For downsampling, ``closed`` can be set to 'left' or 'right' to specify which
@@ -1580,9 +1599,9 @@ end of the interval is closed:
.. ipython:: python
- ts.resample('5Min', closed='right').mean()
+ ts.resample("5Min", closed="right").mean()
- ts.resample('5Min', closed='left').mean()
+ ts.resample("5Min", closed="left").mean()
Parameters like ``label`` are used to manipulate the resulting labels.
``label`` specifies whether the result is labeled with the beginning or
@@ -1590,9 +1609,9 @@ the end of the interval.
.. ipython:: python
- ts.resample('5Min').mean() # by default label='left'
+ ts.resample("5Min").mean() # by default label='left'
- ts.resample('5Min', label='left').mean()
+ ts.resample("5Min", label="left").mean()
.. warning::
@@ -1606,12 +1625,12 @@ the end of the interval.
.. ipython:: python
- s = pd.date_range('2000-01-01', '2000-01-05').to_series()
+ s = pd.date_range("2000-01-01", "2000-01-05").to_series()
s.iloc[2] = pd.NaT
s.dt.day_name()
# default: label='left', closed='left'
- s.resample('B').last().dt.day_name()
+ s.resample("B").last().dt.day_name()
Notice how the value for Sunday got pulled back to the previous Friday.
To get the behavior where the value for Sunday is pushed to Monday, use
@@ -1619,7 +1638,7 @@ the end of the interval.
.. ipython:: python
- s.resample('B', label='right', closed='right').last().dt.day_name()
+ s.resample("B", label="right", closed="right").last().dt.day_name()
The ``axis`` parameter can be set to 0 or 1 and allows you to resample the
specified axis for a ``DataFrame``.
@@ -1642,11 +1661,11 @@ For upsampling, you can specify a way to upsample and the ``limit`` parameter to
# from secondly to every 250 milliseconds
- ts[:2].resample('250L').asfreq()
+ ts[:2].resample("250L").asfreq()
- ts[:2].resample('250L').ffill()
+ ts[:2].resample("250L").ffill()
- ts[:2].resample('250L').ffill(limit=2)
+ ts[:2].resample("250L").ffill(limit=2)
Sparse resampling
~~~~~~~~~~~~~~~~~
@@ -1662,14 +1681,14 @@ resample only the groups that are not all ``NaN``.
.. ipython:: python
- rng = pd.date_range('2014-1-1', periods=100, freq='D') + pd.Timedelta('1s')
+ rng = pd.date_range("2014-1-1", periods=100, freq="D") + pd.Timedelta("1s")
ts = pd.Series(range(100), index=rng)
If we want to resample to the full range of the series:
.. ipython:: python
- ts.resample('3T').sum()
+ ts.resample("3T").sum()
We can instead only resample those groups where we have points as follows:
@@ -1678,12 +1697,14 @@ We can instead only resample those groups where we have points as follows:
from functools import partial
from pandas.tseries.frequencies import to_offset
+
def round(t, freq):
# round a Timestamp to a specified freq
freq = to_offset(freq)
return pd.Timestamp((t.value // freq.delta.value) * freq.delta.value)
- ts.groupby(partial(round, freq='3T')).sum()
+
+ ts.groupby(partial(round, freq="3T")).sum()
.. _timeseries.aggregate:
@@ -1697,25 +1718,27 @@ Resampling a ``DataFrame``, the default will be to act on all columns with the s
.. ipython:: python
- df = pd.DataFrame(np.random.randn(1000, 3),
- index=pd.date_range('1/1/2012', freq='S', periods=1000),
- columns=['A', 'B', 'C'])
- r = df.resample('3T')
+ df = pd.DataFrame(
+ np.random.randn(1000, 3),
+ index=pd.date_range("1/1/2012", freq="S", periods=1000),
+ columns=["A", "B", "C"],
+ )
+ r = df.resample("3T")
r.mean()
We can select a specific column or columns using standard getitem.
.. ipython:: python
- r['A'].mean()
+ r["A"].mean()
- r[['A', 'B']].mean()
+ r[["A", "B"]].mean()
You can pass a list or dict of functions to do aggregation with, outputting a ``DataFrame``:
.. ipython:: python
- r['A'].agg([np.sum, np.mean, np.std])
+ r["A"].agg([np.sum, np.mean, np.std])
On a resampled ``DataFrame``, you can pass a list of functions to apply to each
column, which produces an aggregated result with a hierarchical index:
@@ -1730,21 +1753,20 @@ columns of a ``DataFrame``:
.. ipython:: python
:okexcept:
- r.agg({'A': np.sum,
- 'B': lambda x: np.std(x, ddof=1)})
+ r.agg({"A": np.sum, "B": lambda x: np.std(x, ddof=1)})
The function names can also be strings. In order for a string to be valid it
must be implemented on the resampled object:
.. ipython:: python
- r.agg({'A': 'sum', 'B': 'std'})
+ r.agg({"A": "sum", "B": "std"})
Furthermore, you can also specify multiple aggregation functions for each column separately.
.. ipython:: python
- r.agg({'A': ['sum', 'std'], 'B': ['mean', 'std']})
+ r.agg({"A": ["sum", "std"], "B": ["mean", "std"]})
If a ``DataFrame`` does not have a datetimelike index, but instead you want
@@ -1753,14 +1775,15 @@ to resample based on datetimelike column in the frame, it can passed to the
.. ipython:: python
- df = pd.DataFrame({'date': pd.date_range('2015-01-01', freq='W', periods=5),
- 'a': np.arange(5)},
- index=pd.MultiIndex.from_arrays([
- [1, 2, 3, 4, 5],
- pd.date_range('2015-01-01', freq='W', periods=5)],
- names=['v', 'd']))
+ df = pd.DataFrame(
+ {"date": pd.date_range("2015-01-01", freq="W", periods=5), "a": np.arange(5)},
+ index=pd.MultiIndex.from_arrays(
+ [[1, 2, 3, 4, 5], pd.date_range("2015-01-01", freq="W", periods=5)],
+ names=["v", "d"],
+ ),
+ )
df
- df.resample('M', on='date').sum()
+ df.resample("M", on="date").sum()
Similarly, if you instead want to resample by a datetimelike
level of ``MultiIndex``, its name or location can be passed to the
@@ -1768,7 +1791,7 @@ level of ``MultiIndex``, its name or location can be passed to the
.. ipython:: python
- df.resample('M', level='d').sum()
+ df.resample("M", level="d").sum()
.. _timeseries.iterating-label:
@@ -1782,14 +1805,18 @@ natural and functions similarly to :py:func:`itertools.groupby`:
small = pd.Series(
range(6),
- index=pd.to_datetime(['2017-01-01T00:00:00',
- '2017-01-01T00:30:00',
- '2017-01-01T00:31:00',
- '2017-01-01T01:00:00',
- '2017-01-01T03:00:00',
- '2017-01-01T03:05:00'])
+ index=pd.to_datetime(
+ [
+ "2017-01-01T00:00:00",
+ "2017-01-01T00:30:00",
+ "2017-01-01T00:31:00",
+ "2017-01-01T01:00:00",
+ "2017-01-01T03:00:00",
+ "2017-01-01T03:05:00",
+ ]
+ ),
)
- resampled = small.resample('H')
+ resampled = small.resample("H")
for name, group in resampled:
print("Group: ", name)
@@ -1811,9 +1838,9 @@ For example:
.. ipython:: python
- start, end = '2000-10-01 23:30:00', '2000-10-02 00:30:00'
- middle = '2000-10-02 00:00:00'
- rng = pd.date_range(start, end, freq='7min')
+ start, end = "2000-10-01 23:30:00", "2000-10-02 00:30:00"
+ middle = "2000-10-02 00:00:00"
+ rng = pd.date_range(start, end, freq="7min")
ts = pd.Series(np.arange(len(rng)) * 3, index=rng)
ts
@@ -1821,32 +1848,32 @@ Here we can see that, when using ``origin`` with its default value (``'start_day
.. ipython:: python
- ts.resample('17min', origin='start_day').sum()
- ts[middle:end].resample('17min', origin='start_day').sum()
+ ts.resample("17min", origin="start_day").sum()
+ ts[middle:end].resample("17min", origin="start_day").sum()
Here we can see that, when setting ``origin`` to ``'epoch'``, the results after ``'2000-10-02 00:00:00'`` are identical regardless of the start of the time series:
.. ipython:: python
- ts.resample('17min', origin='epoch').sum()
- ts[middle:end].resample('17min', origin='epoch').sum()
+ ts.resample("17min", origin="epoch").sum()
+ ts[middle:end].resample("17min", origin="epoch").sum()
If needed you can use a custom timestamp for ``origin``:
.. ipython:: python
- ts.resample('17min', origin='2001-01-01').sum()
- ts[middle:end].resample('17min', origin=pd.Timestamp('2001-01-01')).sum()
+ ts.resample("17min", origin="2001-01-01").sum()
+ ts[middle:end].resample("17min", origin=pd.Timestamp("2001-01-01")).sum()
If needed you can just adjust the bins with an ``offset`` Timedelta that would be added to the default ``origin``.
Those two examples are equivalent for this time series:
.. ipython:: python
- ts.resample('17min', origin='start').sum()
- ts.resample('17min', offset='23h30min').sum()
+ ts.resample("17min", origin="start").sum()
+ ts.resample("17min", offset="23h30min").sum()
Note the use of ``'start'`` for ``origin`` on the last example. In that case, ``origin`` will be set to the first value of the timeseries.
@@ -1869,37 +1896,37 @@ Because ``freq`` represents a span of ``Period``, it cannot be negative like "-3
.. ipython:: python
- pd.Period('2012', freq='A-DEC')
+ pd.Period("2012", freq="A-DEC")
- pd.Period('2012-1-1', freq='D')
+ pd.Period("2012-1-1", freq="D")
- pd.Period('2012-1-1 19:00', freq='H')
+ pd.Period("2012-1-1 19:00", freq="H")
- pd.Period('2012-1-1 19:00', freq='5H')
+ pd.Period("2012-1-1 19:00", freq="5H")
Adding and subtracting integers from periods shifts the period by its own
frequency. Arithmetic is not allowed between ``Period`` with different ``freq`` (span).
.. ipython:: python
- p = pd.Period('2012', freq='A-DEC')
+ p = pd.Period("2012", freq="A-DEC")
p + 1
p - 3
- p = pd.Period('2012-01', freq='2M')
+ p = pd.Period("2012-01", freq="2M")
p + 2
p - 1
@okexcept
- p == pd.Period('2012-01', freq='3M')
+ p == pd.Period("2012-01", freq="3M")
If ``Period`` freq is daily or higher (``D``, ``H``, ``T``, ``S``, ``L``, ``U``, ``N``), ``offsets`` and ``timedelta``-like can be added if the result can have the same freq. Otherwise, ``ValueError`` will be raised.
.. ipython:: python
- p = pd.Period('2014-07-01 09:00', freq='H')
+ p = pd.Period("2014-07-01 09:00", freq="H")
p + pd.offsets.Hour(2)
p + datetime.timedelta(minutes=120)
- p + np.timedelta64(7200, 's')
+ p + np.timedelta64(7200, "s")
.. code-block:: ipython
@@ -1912,7 +1939,7 @@ If ``Period`` has other frequencies, only the same ``offsets`` can be added. Oth
.. ipython:: python
- p = pd.Period('2014-07', freq='M')
+ p = pd.Period("2014-07", freq="M")
p + pd.offsets.MonthEnd(3)
.. code-block:: ipython
@@ -1927,7 +1954,7 @@ return the number of frequency units between them:
.. ipython:: python
- pd.Period('2012', freq='A-DEC') - pd.Period('2002', freq='A-DEC')
+ pd.Period("2012", freq="A-DEC") - pd.Period("2002", freq="A-DEC")
PeriodIndex and period_range
~~~~~~~~~~~~~~~~~~~~~~~~~~~~
@@ -1936,21 +1963,21 @@ which can be constructed using the ``period_range`` convenience function:
.. ipython:: python
- prng = pd.period_range('1/1/2011', '1/1/2012', freq='M')
+ prng = pd.period_range("1/1/2011", "1/1/2012", freq="M")
prng
The ``PeriodIndex`` constructor can also be used directly:
.. ipython:: python
- pd.PeriodIndex(['2011-1', '2011-2', '2011-3'], freq='M')
+ pd.PeriodIndex(["2011-1", "2011-2", "2011-3"], freq="M")
Passing multiplied frequency outputs a sequence of ``Period`` which
has multiplied span.
.. ipython:: python
- pd.period_range(start='2014-01', freq='3M', periods=4)
+ pd.period_range(start="2014-01", freq="3M", periods=4)
If ``start`` or ``end`` are ``Period`` objects, they will be used as anchor
endpoints for a ``PeriodIndex`` with frequency matching that of the
@@ -1958,8 +1985,9 @@ endpoints for a ``PeriodIndex`` with frequency matching that of the
.. ipython:: python
- pd.period_range(start=pd.Period('2017Q1', freq='Q'),
- end=pd.Period('2017Q2', freq='Q'), freq='M')
+ pd.period_range(
+ start=pd.Period("2017Q1", freq="Q"), end=pd.Period("2017Q2", freq="Q"), freq="M"
+ )
Just like ``DatetimeIndex``, a ``PeriodIndex`` can also be used to index pandas
objects:
@@ -1973,11 +2001,11 @@ objects:
.. ipython:: python
- idx = pd.period_range('2014-07-01 09:00', periods=5, freq='H')
+ idx = pd.period_range("2014-07-01 09:00", periods=5, freq="H")
idx
idx + pd.offsets.Hour(2)
- idx = pd.period_range('2014-07', periods=5, freq='M')
+ idx = pd.period_range("2014-07", periods=5, freq="M")
idx
idx + pd.offsets.MonthEnd(3)
@@ -1996,7 +2024,7 @@ The ``period`` dtype holds the ``freq`` attribute and is represented with
.. ipython:: python
- pi = pd.period_range('2016-01-01', periods=3, freq='M')
+ pi = pd.period_range("2016-01-01", periods=3, freq="M")
pi
pi.dtype
@@ -2007,15 +2035,15 @@ The ``period`` dtype can be used in ``.astype(...)``. It allows one to change th
.. ipython:: python
# change monthly freq to daily freq
- pi.astype('period[D]')
+ pi.astype("period[D]")
# convert to DatetimeIndex
- pi.astype('datetime64[ns]')
+ pi.astype("datetime64[ns]")
# convert to PeriodIndex
- dti = pd.date_range('2011-01-01', freq='M', periods=3)
+ dti = pd.date_range("2011-01-01", freq="M", periods=3)
dti
- dti.astype('period[M]')
+ dti.astype("period[M]")
PeriodIndex partial string indexing
@@ -2029,32 +2057,32 @@ You can pass in dates and strings to ``Series`` and ``DataFrame`` with ``PeriodI
.. ipython:: python
- ps['2011-01']
+ ps["2011-01"]
ps[datetime.datetime(2011, 12, 25):]
- ps['10/31/2011':'12/31/2011']
+ ps["10/31/2011":"12/31/2011"]
Passing a string representing a lower frequency than ``PeriodIndex`` returns partial sliced data.
.. ipython:: python
:okwarning:
- ps['2011']
+ ps["2011"]
- dfp = pd.DataFrame(np.random.randn(600, 1),
- columns=['A'],
- index=pd.period_range('2013-01-01 9:00',
- periods=600,
- freq='T'))
+ dfp = pd.DataFrame(
+ np.random.randn(600, 1),
+ columns=["A"],
+ index=pd.period_range("2013-01-01 9:00", periods=600, freq="T"),
+ )
dfp
- dfp['2013-01-01 10H']
+ dfp["2013-01-01 10H"]
As with ``DatetimeIndex``, the endpoints will be included in the result. The example below slices data starting from 10:00 to 11:59.
.. ipython:: python
- dfp['2013-01-01 10H':'2013-01-01 11H']
+ dfp["2013-01-01 10H":"2013-01-01 11H"]
Frequency conversion and resampling with PeriodIndex
@@ -2064,7 +2092,7 @@ method. Let's start with the fiscal year 2011, ending in December:
.. ipython:: python
- p = pd.Period('2011', freq='A-DEC')
+ p = pd.Period("2011", freq="A-DEC")
p
We can convert it to a monthly frequency. Using the ``how`` parameter, we can
@@ -2072,16 +2100,16 @@ specify whether to return the starting or ending month:
.. ipython:: python
- p.asfreq('M', how='start')
+ p.asfreq("M", how="start")
- p.asfreq('M', how='end')
+ p.asfreq("M", how="end")
The shorthands 's' and 'e' are provided for convenience:
.. ipython:: python
- p.asfreq('M', 's')
- p.asfreq('M', 'e')
+ p.asfreq("M", "s")
+ p.asfreq("M", "e")
Converting to a "super-period" (e.g., annual frequency is a super-period of
quarterly frequency) automatically returns the super-period that includes the
@@ -2089,9 +2117,9 @@ input period:
.. ipython:: python
- p = pd.Period('2011-12', freq='M')
+ p = pd.Period("2011-12", freq="M")
- p.asfreq('A-NOV')
+ p.asfreq("A-NOV")
Note that since we converted to an annual frequency that ends the year in
November, the monthly period of December 2011 is actually in the 2012 A-NOV
@@ -2110,21 +2138,21 @@ frequencies ``Q-JAN`` through ``Q-DEC``.
.. ipython:: python
- p = pd.Period('2012Q1', freq='Q-DEC')
+ p = pd.Period("2012Q1", freq="Q-DEC")
- p.asfreq('D', 's')
+ p.asfreq("D", "s")
- p.asfreq('D', 'e')
+ p.asfreq("D", "e")
``Q-MAR`` defines fiscal year end in March:
.. ipython:: python
- p = pd.Period('2011Q4', freq='Q-MAR')
+ p = pd.Period("2011Q4", freq="Q-MAR")
- p.asfreq('D', 's')
+ p.asfreq("D", "s")
- p.asfreq('D', 'e')
+ p.asfreq("D", "e")
.. _timeseries.interchange:
@@ -2136,7 +2164,7 @@ and vice-versa using ``to_timestamp``:
.. ipython:: python
- rng = pd.date_range('1/1/2012', periods=5, freq='M')
+ rng = pd.date_range("1/1/2012", periods=5, freq="M")
ts = pd.Series(np.random.randn(len(rng)), index=rng)
@@ -2153,7 +2181,7 @@ end of the period:
.. ipython:: python
- ps.to_timestamp('D', how='s')
+ ps.to_timestamp("D", how="s")
Converting between period and timestamp enables some convenient arithmetic
functions to be used. In the following example, we convert a quarterly
@@ -2162,11 +2190,11 @@ the quarter end:
.. ipython:: python
- prng = pd.period_range('1990Q1', '2000Q4', freq='Q-NOV')
+ prng = pd.period_range("1990Q1", "2000Q4", freq="Q-NOV")
ts = pd.Series(np.random.randn(len(prng)), prng)
- ts.index = (prng.asfreq('M', 'e') + 1).asfreq('H', 's') + 9
+ ts.index = (prng.asfreq("M", "e") + 1).asfreq("H", "s") + 9
ts.head()
@@ -2180,7 +2208,7 @@ then you can use a ``PeriodIndex`` and/or ``Series`` of ``Periods`` to do comput
.. ipython:: python
- span = pd.period_range('1215-01-01', '1381-01-01', freq='D')
+ span = pd.period_range("1215-01-01", "1381-01-01", freq="D")
span
To convert from an ``int64`` based YYYYMMDD representation.
@@ -2190,9 +2218,10 @@ To convert from an ``int64`` based YYYYMMDD representation.
s = pd.Series([20121231, 20141130, 99991231])
s
+
def conv(x):
- return pd.Period(year=x // 10000, month=x // 100 % 100,
- day=x % 100, freq='D')
+ return pd.Period(year=x // 10000, month=x // 100 % 100, day=x % 100, freq="D")
+
s.apply(conv)
s.apply(conv)[2]
@@ -2221,7 +2250,7 @@ By default, pandas objects are time zone unaware:
.. ipython:: python
- rng = pd.date_range('3/6/2012 00:00', periods=15, freq='D')
+ rng = pd.date_range("3/6/2012 00:00", periods=15, freq="D")
rng.tz is None
To localize these dates to a time zone (assign a particular time zone to a naive date),
@@ -2241,18 +2270,16 @@ To return ``dateutil`` time zone objects, append ``dateutil/`` before the string
import dateutil
# pytz
- rng_pytz = pd.date_range('3/6/2012 00:00', periods=3, freq='D',
- tz='Europe/London')
+ rng_pytz = pd.date_range("3/6/2012 00:00", periods=3, freq="D", tz="Europe/London")
rng_pytz.tz
# dateutil
- rng_dateutil = pd.date_range('3/6/2012 00:00', periods=3, freq='D')
- rng_dateutil = rng_dateutil.tz_localize('dateutil/Europe/London')
+ rng_dateutil = pd.date_range("3/6/2012 00:00", periods=3, freq="D")
+ rng_dateutil = rng_dateutil.tz_localize("dateutil/Europe/London")
rng_dateutil.tz
# dateutil - utc special case
- rng_utc = pd.date_range('3/6/2012 00:00', periods=3, freq='D',
- tz=dateutil.tz.tzutc())
+ rng_utc = pd.date_range("3/6/2012 00:00", periods=3, freq="D", tz=dateutil.tz.tzutc())
rng_utc.tz
.. versionadded:: 0.25.0
@@ -2260,8 +2287,7 @@ To return ``dateutil`` time zone objects, append ``dateutil/`` before the string
.. ipython:: python
# datetime.timezone
- rng_utc = pd.date_range('3/6/2012 00:00', periods=3, freq='D',
- tz=datetime.timezone.utc)
+ rng_utc = pd.date_range("3/6/2012 00:00", periods=3, freq="D", tz=datetime.timezone.utc)
rng_utc.tz
Note that the ``UTC`` time zone is a special case in ``dateutil`` and should be constructed explicitly
@@ -2273,15 +2299,14 @@ zones objects explicitly first.
import pytz
# pytz
- tz_pytz = pytz.timezone('Europe/London')
- rng_pytz = pd.date_range('3/6/2012 00:00', periods=3, freq='D')
+ tz_pytz = pytz.timezone("Europe/London")
+ rng_pytz = pd.date_range("3/6/2012 00:00", periods=3, freq="D")
rng_pytz = rng_pytz.tz_localize(tz_pytz)
rng_pytz.tz == tz_pytz
# dateutil
- tz_dateutil = dateutil.tz.gettz('Europe/London')
- rng_dateutil = pd.date_range('3/6/2012 00:00', periods=3, freq='D',
- tz=tz_dateutil)
+ tz_dateutil = dateutil.tz.gettz("Europe/London")
+ rng_dateutil = pd.date_range("3/6/2012 00:00", periods=3, freq="D", tz=tz_dateutil)
rng_dateutil.tz == tz_dateutil
To convert a time zone aware pandas object from one time zone to another,
@@ -2289,7 +2314,7 @@ you can use the ``tz_convert`` method.
.. ipython:: python
- rng_pytz.tz_convert('US/Eastern')
+ rng_pytz.tz_convert("US/Eastern")
.. note::
@@ -2301,9 +2326,9 @@ you can use the ``tz_convert`` method.
.. ipython:: python
- dti = pd.date_range('2019-01-01', periods=3, freq='D', tz='US/Pacific')
+ dti = pd.date_range("2019-01-01", periods=3, freq="D", tz="US/Pacific")
dti.tz
- ts = pd.Timestamp('2019-01-01', tz='US/Pacific')
+ ts = pd.Timestamp("2019-01-01", tz="US/Pacific")
ts.tz
.. warning::
@@ -2344,11 +2369,11 @@ you can use the ``tz_convert`` method.
.. ipython:: python
- d_2037 = '2037-03-31T010101'
- d_2038 = '2038-03-31T010101'
- DST = 'Europe/London'
- assert pd.Timestamp(d_2037, tz=DST) != pd.Timestamp(d_2037, tz='GMT')
- assert pd.Timestamp(d_2038, tz=DST) == pd.Timestamp(d_2038, tz='GMT')
+ d_2037 = "2037-03-31T010101"
+ d_2038 = "2038-03-31T010101"
+ DST = "Europe/London"
+ assert pd.Timestamp(d_2037, tz=DST) != pd.Timestamp(d_2037, tz="GMT")
+ assert pd.Timestamp(d_2038, tz=DST) == pd.Timestamp(d_2038, tz="GMT")
Under the hood, all timestamps are stored in UTC. Values from a time zone aware
:class:`DatetimeIndex` or :class:`Timestamp` will have their fields (day, hour, minute, etc.)
@@ -2357,8 +2382,8 @@ still considered to be equal even if they are in different time zones:
.. ipython:: python
- rng_eastern = rng_utc.tz_convert('US/Eastern')
- rng_berlin = rng_utc.tz_convert('Europe/Berlin')
+ rng_eastern = rng_utc.tz_convert("US/Eastern")
+ rng_berlin = rng_utc.tz_convert("Europe/Berlin")
rng_eastern[2]
rng_berlin[2]
@@ -2369,9 +2394,9 @@ Operations between :class:`Series` in different time zones will yield UTC
.. ipython:: python
- ts_utc = pd.Series(range(3), pd.date_range('20130101', periods=3, tz='UTC'))
- eastern = ts_utc.tz_convert('US/Eastern')
- berlin = ts_utc.tz_convert('Europe/Berlin')
+ ts_utc = pd.Series(range(3), pd.date_range("20130101", periods=3, tz="UTC"))
+ eastern = ts_utc.tz_convert("US/Eastern")
+ berlin = ts_utc.tz_convert("Europe/Berlin")
result = eastern + berlin
result
result.index
@@ -2382,14 +2407,13 @@ To remove time zone information, use ``tz_localize(None)`` or ``tz_convert(None)
.. ipython:: python
- didx = pd.date_range(start='2014-08-01 09:00', freq='H',
- periods=3, tz='US/Eastern')
+ didx = pd.date_range(start="2014-08-01 09:00", freq="H", periods=3, tz="US/Eastern")
didx
didx.tz_localize(None)
didx.tz_convert(None)
# tz_convert(None) is identical to tz_convert('UTC').tz_localize(None)
- didx.tz_convert('UTC').tz_localize(None)
+ didx.tz_convert("UTC").tz_localize(None)
.. _timeseries.fold:
@@ -2415,10 +2439,12 @@ control over how they are handled.
.. ipython:: python
- pd.Timestamp(datetime.datetime(2019, 10, 27, 1, 30, 0, 0),
- tz='dateutil/Europe/London', fold=0)
- pd.Timestamp(year=2019, month=10, day=27, hour=1, minute=30,
- tz='dateutil/Europe/London', fold=1)
+ pd.Timestamp(
+ datetime.datetime(2019, 10, 27, 1, 30, 0, 0), tz="dateutil/Europe/London", fold=0
+ )
+ pd.Timestamp(
+ year=2019, month=10, day=27, hour=1, minute=30, tz="dateutil/Europe/London", fold=1
+ )
.. _timeseries.timezone_ambiguous:
@@ -2436,8 +2462,9 @@ twice within one day ("clocks fall back"). The following options are available:
.. ipython:: python
- rng_hourly = pd.DatetimeIndex(['11/06/2011 00:00', '11/06/2011 01:00',
- '11/06/2011 01:00', '11/06/2011 02:00'])
+ rng_hourly = pd.DatetimeIndex(
+ ["11/06/2011 00:00", "11/06/2011 01:00", "11/06/2011 01:00", "11/06/2011 02:00"]
+ )
This will fail as there are ambiguous times (``'11/06/2011 01:00'``)
@@ -2450,9 +2477,9 @@ Handle these ambiguous times by specifying the following.
.. ipython:: python
- rng_hourly.tz_localize('US/Eastern', ambiguous='infer')
- rng_hourly.tz_localize('US/Eastern', ambiguous='NaT')
- rng_hourly.tz_localize('US/Eastern', ambiguous=[True, True, False, False])
+ rng_hourly.tz_localize("US/Eastern", ambiguous="infer")
+ rng_hourly.tz_localize("US/Eastern", ambiguous="NaT")
+ rng_hourly.tz_localize("US/Eastern", ambiguous=[True, True, False, False])
.. _timeseries.timezone_nonexistent:
@@ -2471,7 +2498,7 @@ can be controlled by the ``nonexistent`` argument. The following options are ava
.. ipython:: python
- dti = pd.date_range(start='2015-03-29 02:30:00', periods=3, freq='H')
+ dti = pd.date_range(start="2015-03-29 02:30:00", periods=3, freq="H")
# 2:30 is a nonexistent time
Localization of nonexistent times will raise an error by default.
@@ -2486,10 +2513,10 @@ Transform nonexistent times to ``NaT`` or shift the times.
.. ipython:: python
dti
- dti.tz_localize('Europe/Warsaw', nonexistent='shift_forward')
- dti.tz_localize('Europe/Warsaw', nonexistent='shift_backward')
- dti.tz_localize('Europe/Warsaw', nonexistent=pd.Timedelta(1, unit='H'))
- dti.tz_localize('Europe/Warsaw', nonexistent='NaT')
+ dti.tz_localize("Europe/Warsaw", nonexistent="shift_forward")
+ dti.tz_localize("Europe/Warsaw", nonexistent="shift_backward")
+ dti.tz_localize("Europe/Warsaw", nonexistent=pd.Timedelta(1, unit="H"))
+ dti.tz_localize("Europe/Warsaw", nonexistent="NaT")
.. _timeseries.timezone_series:
@@ -2502,7 +2529,7 @@ represented with a dtype of ``datetime64[ns]``.
.. ipython:: python
- s_naive = pd.Series(pd.date_range('20130101', periods=3))
+ s_naive = pd.Series(pd.date_range("20130101", periods=3))
s_naive
A :class:`Series` with a time zone **aware** values is
@@ -2510,7 +2537,7 @@ represented with a dtype of ``datetime64[ns, tz]`` where ``tz`` is the time zone
.. ipython:: python
- s_aware = pd.Series(pd.date_range('20130101', periods=3, tz='US/Eastern'))
+ s_aware = pd.Series(pd.date_range("20130101", periods=3, tz="US/Eastern"))
s_aware
Both of these :class:`Series` time zone information
@@ -2520,7 +2547,7 @@ For example, to localize and convert a naive stamp to time zone aware.
.. ipython:: python
- s_naive.dt.tz_localize('UTC').dt.tz_convert('US/Eastern')
+ s_naive.dt.tz_localize("UTC").dt.tz_convert("US/Eastern")
Time zone information can also be manipulated using the ``astype`` method.
This method can localize and convert time zone naive timestamps or
@@ -2529,13 +2556,13 @@ convert time zone aware timestamps.
.. ipython:: python
# localize and convert a naive time zone
- s_naive.astype('datetime64[ns, US/Eastern]')
+ s_naive.astype("datetime64[ns, US/Eastern]")
# make an aware tz naive
- s_aware.astype('datetime64[ns]')
+ s_aware.astype("datetime64[ns]")
# convert to a new time zone
- s_aware.astype('datetime64[ns, CET]')
+ s_aware.astype("datetime64[ns, CET]")
.. note::
@@ -2561,4 +2588,4 @@ convert time zone aware timestamps.
.. ipython:: python
- s_aware.to_numpy(dtype='datetime64[ns]')
+ s_aware.to_numpy(dtype="datetime64[ns]")
| Addresses part of #36777
Ran blacken-tools and checked for warnings from flake8-rst for 5 additional doc files. | https://api.github.com/repos/pandas-dev/pandas/pulls/36823 | 2020-10-03T00:03:01Z | 2020-10-03T14:30:49Z | 2020-10-03T14:30:49Z | 2020-10-03T15:00:39Z |
BUG: Raise ValueError with nan in timeaware windows | diff --git a/doc/source/whatsnew/v1.2.0.rst b/doc/source/whatsnew/v1.2.0.rst
index d9d1ce797dd62..339cda4db48fb 100644
--- a/doc/source/whatsnew/v1.2.0.rst
+++ b/doc/source/whatsnew/v1.2.0.rst
@@ -460,6 +460,7 @@ Groupby/resample/rolling
- Bug in :meth:`DataFrameGroupBy.ffill` and :meth:`DataFrameGroupBy.bfill` where a ``NaN`` group would return filled values instead of ``NaN`` when ``dropna=True`` (:issue:`34725`)
- Bug in :meth:`RollingGroupby.count` where a ``ValueError`` was raised when specifying the ``closed`` parameter (:issue:`35869`)
- Bug in :meth:`DataFrame.groupby.rolling` returning wrong values with partial centered window (:issue:`36040`).
+- Bug in :meth:`DataFrameGroupBy.rolling` returned wrong values with timeaware window containing ``NaN``. Raises ``ValueError`` because windows are not monotonic now (:issue:`34617`)
Reshaping
^^^^^^^^^
diff --git a/pandas/core/window/rolling.py b/pandas/core/window/rolling.py
index cc0927437ad1d..fdeb91d37724d 100644
--- a/pandas/core/window/rolling.py
+++ b/pandas/core/window/rolling.py
@@ -2085,10 +2085,13 @@ def _validate_monotonic(self):
Validate monotonic (increasing or decreasing).
"""
if not (self._on.is_monotonic_increasing or self._on.is_monotonic_decreasing):
- formatted = self.on
- if self.on is None:
- formatted = "index"
- raise ValueError(f"{formatted} must be monotonic")
+ self._raise_monotonic_error()
+
+ def _raise_monotonic_error(self):
+ formatted = self.on
+ if self.on is None:
+ formatted = "index"
+ raise ValueError(f"{formatted} must be monotonic")
def _validate_freq(self):
"""
@@ -2323,3 +2326,12 @@ def _get_window_indexer(self, window: int) -> GroupbyIndexer:
indexer_kwargs=indexer_kwargs,
)
return window_indexer
+
+ def _validate_monotonic(self):
+ """
+ Validate that on is monotonic;
+ in this case we have to check only for nans, because
+ monotonicy was already validated at a higher level.
+ """
+ if self._on.hasnans:
+ self._raise_monotonic_error()
diff --git a/pandas/tests/window/test_grouper.py b/pandas/tests/window/test_grouper.py
index 034f941462bb5..27dd6a1591317 100644
--- a/pandas/tests/window/test_grouper.py
+++ b/pandas/tests/window/test_grouper.py
@@ -549,3 +549,20 @@ def test_groupby_rolling_sem(self, func, kwargs):
),
)
tm.assert_frame_equal(result, expected)
+
+ @pytest.mark.parametrize(
+ ("rollings", "key"), [({"on": "a"}, "a"), ({"on": None}, "index")]
+ )
+ def test_groupby_rolling_nans_in_index(self, rollings, key):
+ # GH: 34617
+ df = pd.DataFrame(
+ {
+ "a": pd.to_datetime(["2020-06-01 12:00", "2020-06-01 14:00", np.nan]),
+ "b": [1, 2, 3],
+ "c": [1, 1, 1],
+ }
+ )
+ if key == "index":
+ df = df.set_index("a")
+ with pytest.raises(ValueError, match=f"{key} must be monotonic"):
+ df.groupby("c").rolling("60min", **rollings)
| - [x] closes #34617
- [x] tests added / passed
- [x] passes `black pandas`
- [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [x] whatsnew entry
Raising a ValueError when NaN is in timeaware window.
cc @mroeschke | https://api.github.com/repos/pandas-dev/pandas/pulls/36822 | 2020-10-02T23:47:22Z | 2020-10-15T00:28:32Z | 2020-10-15T00:28:31Z | 2020-10-15T18:54:02Z |
DOC: update code style for development doc and user guide #36777 | diff --git a/doc/source/development/extending.rst b/doc/source/development/extending.rst
index c708ebb361ed1..46960140d3a8c 100644
--- a/doc/source/development/extending.rst
+++ b/doc/source/development/extending.rst
@@ -34,7 +34,7 @@ decorate a class, providing the name of attribute to add. The class's
@staticmethod
def _validate(obj):
# verify there is a column latitude and a column longitude
- if 'latitude' not in obj.columns or 'longitude' not in obj.columns:
+ if "latitude" not in obj.columns or "longitude" not in obj.columns:
raise AttributeError("Must have 'latitude' and 'longitude'.")
@property
@@ -176,6 +176,7 @@ your ``MyExtensionArray`` class, as follows:
from pandas.api.extensions import ExtensionArray, ExtensionScalarOpsMixin
+
class MyExtensionArray(ExtensionArray, ExtensionScalarOpsMixin):
pass
@@ -271,6 +272,7 @@ included as a column in a pandas DataFrame):
def __arrow_array__(self, type=None):
# convert the underlying array values to a pyarrow Array
import pyarrow
+
return pyarrow.array(..., type=type)
The ``ExtensionDtype.__from_arrow__`` method then controls the conversion
@@ -347,7 +349,6 @@ Below example shows how to define ``SubclassedSeries`` and ``SubclassedDataFrame
.. code-block:: python
class SubclassedSeries(pd.Series):
-
@property
def _constructor(self):
return SubclassedSeries
@@ -358,7 +359,6 @@ Below example shows how to define ``SubclassedSeries`` and ``SubclassedDataFrame
class SubclassedDataFrame(pd.DataFrame):
-
@property
def _constructor(self):
return SubclassedDataFrame
@@ -377,7 +377,7 @@ Below example shows how to define ``SubclassedSeries`` and ``SubclassedDataFrame
>>> type(to_framed)
<class '__main__.SubclassedDataFrame'>
- >>> df = SubclassedDataFrame({'A': [1, 2, 3], 'B': [4, 5, 6], 'C': [7, 8, 9]})
+ >>> df = SubclassedDataFrame({"A": [1, 2, 3], "B": [4, 5, 6], "C": [7, 8, 9]})
>>> df
A B C
0 1 4 7
@@ -387,7 +387,7 @@ Below example shows how to define ``SubclassedSeries`` and ``SubclassedDataFrame
>>> type(df)
<class '__main__.SubclassedDataFrame'>
- >>> sliced1 = df[['A', 'B']]
+ >>> sliced1 = df[["A", "B"]]
>>> sliced1
A B
0 1 4
@@ -397,7 +397,7 @@ Below example shows how to define ``SubclassedSeries`` and ``SubclassedDataFrame
>>> type(sliced1)
<class '__main__.SubclassedDataFrame'>
- >>> sliced2 = df['A']
+ >>> sliced2 = df["A"]
>>> sliced2
0 1
1 2
@@ -422,11 +422,11 @@ Below is an example to define two original properties, "internal_cache" as a tem
class SubclassedDataFrame2(pd.DataFrame):
# temporary properties
- _internal_names = pd.DataFrame._internal_names + ['internal_cache']
+ _internal_names = pd.DataFrame._internal_names + ["internal_cache"]
_internal_names_set = set(_internal_names)
# normal properties
- _metadata = ['added_property']
+ _metadata = ["added_property"]
@property
def _constructor(self):
@@ -434,15 +434,15 @@ Below is an example to define two original properties, "internal_cache" as a tem
.. code-block:: python
- >>> df = SubclassedDataFrame2({'A': [1, 2, 3], 'B': [4, 5, 6], 'C': [7, 8, 9]})
+ >>> df = SubclassedDataFrame2({"A": [1, 2, 3], "B": [4, 5, 6], "C": [7, 8, 9]})
>>> df
A B C
0 1 4 7
1 2 5 8
2 3 6 9
- >>> df.internal_cache = 'cached'
- >>> df.added_property = 'property'
+ >>> df.internal_cache = "cached"
+ >>> df.added_property = "property"
>>> df.internal_cache
cached
@@ -450,11 +450,11 @@ Below is an example to define two original properties, "internal_cache" as a tem
property
# properties defined in _internal_names is reset after manipulation
- >>> df[['A', 'B']].internal_cache
+ >>> df[["A", "B"]].internal_cache
AttributeError: 'SubclassedDataFrame2' object has no attribute 'internal_cache'
# properties defined in _metadata are retained
- >>> df[['A', 'B']].added_property
+ >>> df[["A", "B"]].added_property
property
.. _extending.plotting-backends:
@@ -468,7 +468,7 @@ one based on Matplotlib. For example:
.. code-block:: python
- >>> pd.set_option('plotting.backend', 'backend.module')
+ >>> pd.set_option("plotting.backend", "backend.module")
>>> pd.Series([1, 2, 3]).plot()
This would be more or less equivalent to:
diff --git a/doc/source/user_guide/computation.rst b/doc/source/user_guide/computation.rst
index e7edda90610b5..2f6ac6b06d85e 100644
--- a/doc/source/user_guide/computation.rst
+++ b/doc/source/user_guide/computation.rst
@@ -63,8 +63,7 @@ series in the DataFrame, also excluding NA/null values.
.. ipython:: python
- frame = pd.DataFrame(np.random.randn(1000, 5),
- columns=['a', 'b', 'c', 'd', 'e'])
+ frame = pd.DataFrame(np.random.randn(1000, 5), columns=["a", "b", "c", "d", "e"])
frame.cov()
``DataFrame.cov`` also supports an optional ``min_periods`` keyword that
@@ -73,9 +72,9 @@ in order to have a valid result.
.. ipython:: python
- frame = pd.DataFrame(np.random.randn(20, 3), columns=['a', 'b', 'c'])
- frame.loc[frame.index[:5], 'a'] = np.nan
- frame.loc[frame.index[5:10], 'b'] = np.nan
+ frame = pd.DataFrame(np.random.randn(20, 3), columns=["a", "b", "c"])
+ frame.loc[frame.index[:5], "a"] = np.nan
+ frame.loc[frame.index[5:10], "b"] = np.nan
frame.cov()
@@ -116,13 +115,12 @@ Wikipedia has articles covering the above correlation coefficients:
.. ipython:: python
- frame = pd.DataFrame(np.random.randn(1000, 5),
- columns=['a', 'b', 'c', 'd', 'e'])
+ frame = pd.DataFrame(np.random.randn(1000, 5), columns=["a", "b", "c", "d", "e"])
frame.iloc[::2] = np.nan
# Series with Series
- frame['a'].corr(frame['b'])
- frame['a'].corr(frame['b'], method='spearman')
+ frame["a"].corr(frame["b"])
+ frame["a"].corr(frame["b"], method="spearman")
# Pairwise correlation of DataFrame columns
frame.corr()
@@ -134,9 +132,9 @@ Like ``cov``, ``corr`` also supports the optional ``min_periods`` keyword:
.. ipython:: python
- frame = pd.DataFrame(np.random.randn(20, 3), columns=['a', 'b', 'c'])
- frame.loc[frame.index[:5], 'a'] = np.nan
- frame.loc[frame.index[5:10], 'b'] = np.nan
+ frame = pd.DataFrame(np.random.randn(20, 3), columns=["a", "b", "c"])
+ frame.loc[frame.index[:5], "a"] = np.nan
+ frame.loc[frame.index[5:10], "b"] = np.nan
frame.corr()
@@ -154,8 +152,8 @@ compute the correlation based on histogram intersection:
# histogram intersection
def histogram_intersection(a, b):
- return np.minimum(np.true_divide(a, a.sum()),
- np.true_divide(b, b.sum())).sum()
+ return np.minimum(np.true_divide(a, a.sum()), np.true_divide(b, b.sum())).sum()
+
frame.corr(method=histogram_intersection)
@@ -165,8 +163,8 @@ DataFrame objects.
.. ipython:: python
- index = ['a', 'b', 'c', 'd', 'e']
- columns = ['one', 'two', 'three', 'four']
+ index = ["a", "b", "c", "d", "e"]
+ columns = ["one", "two", "three", "four"]
df1 = pd.DataFrame(np.random.randn(5, 4), index=index, columns=columns)
df2 = pd.DataFrame(np.random.randn(4, 4), index=index[:4], columns=columns)
df1.corrwith(df2)
@@ -182,8 +180,8 @@ assigned the mean of the ranks (by default) for the group:
.. ipython:: python
- s = pd.Series(np.random.randn(5), index=list('abcde'))
- s['d'] = s['b'] # so there's a tie
+ s = pd.Series(np.random.randn(5), index=list("abcde"))
+ s["d"] = s["b"] # so there's a tie
s.rank()
:meth:`~DataFrame.rank` is also a DataFrame method and can rank either the rows
@@ -243,8 +241,7 @@ objects, :class:`~pandas.core.window.Rolling`, :class:`~pandas.core.window.Expan
.. ipython:: python
- s = pd.Series(np.random.randn(1000),
- index=pd.date_range('1/1/2000', periods=1000))
+ s = pd.Series(np.random.randn(1000), index=pd.date_range("1/1/2000", periods=1000))
s = s.cumsum()
s
@@ -279,24 +276,26 @@ We can then call methods on these ``rolling`` objects. These return like-indexed
.. ipython:: python
- s.plot(style='k--')
+ s.plot(style="k--")
@savefig rolling_mean_ex.png
- r.mean().plot(style='k')
+ r.mean().plot(style="k")
.. ipython:: python
:suppress:
- plt.close('all')
+ plt.close("all")
They can also be applied to DataFrame objects. This is really just syntactic
sugar for applying the moving window operator to all of the DataFrame's columns:
.. ipython:: python
- df = pd.DataFrame(np.random.randn(1000, 4),
- index=pd.date_range('1/1/2000', periods=1000),
- columns=['A', 'B', 'C', 'D'])
+ df = pd.DataFrame(
+ np.random.randn(1000, 4),
+ index=pd.date_range("1/1/2000", periods=1000),
+ columns=["A", "B", "C", "D"],
+ )
df = df.cumsum()
@savefig rolling_mean_frame.png
@@ -368,7 +367,7 @@ compute the mean absolute deviation on a rolling basis:
return np.fabs(x - x.mean()).mean()
@savefig rolling_apply_ex.png
- s.rolling(window=60).apply(mad, raw=True).plot(style='k')
+ s.rolling(window=60).apply(mad, raw=True).plot(style="k")
Using the Numba engine
~~~~~~~~~~~~~~~~~~~~~~
@@ -453,23 +452,22 @@ The list of recognized types are the `scipy.signal window functions
.. ipython:: python
- ser = pd.Series(np.random.randn(10),
- index=pd.date_range('1/1/2000', periods=10))
+ ser = pd.Series(np.random.randn(10), index=pd.date_range("1/1/2000", periods=10))
- ser.rolling(window=5, win_type='triang').mean()
+ ser.rolling(window=5, win_type="triang").mean()
Note that the ``boxcar`` window is equivalent to :meth:`~Rolling.mean`.
.. ipython:: python
- ser.rolling(window=5, win_type='boxcar').mean()
+ ser.rolling(window=5, win_type="boxcar").mean()
ser.rolling(window=5).mean()
For some windowing functions, additional parameters must be specified:
.. ipython:: python
- ser.rolling(window=5, win_type='gaussian').mean(std=0.1)
+ ser.rolling(window=5, win_type="gaussian").mean(std=0.1)
.. _stats.moments.normalization:
@@ -498,10 +496,10 @@ This can be particularly useful for a non-regular time frequency index.
.. ipython:: python
- dft = pd.DataFrame({'B': [0, 1, 2, np.nan, 4]},
- index=pd.date_range('20130101 09:00:00',
- periods=5,
- freq='s'))
+ dft = pd.DataFrame(
+ {"B": [0, 1, 2, np.nan, 4]},
+ index=pd.date_range("20130101 09:00:00", periods=5, freq="s"),
+ )
dft
This is a regular frequency index. Using an integer window parameter works to roll along the window frequency.
@@ -515,20 +513,26 @@ Specifying an offset allows a more intuitive specification of the rolling freque
.. ipython:: python
- dft.rolling('2s').sum()
+ dft.rolling("2s").sum()
Using a non-regular, but still monotonic index, rolling with an integer window does not impart any special calculation.
.. ipython:: python
- dft = pd.DataFrame({'B': [0, 1, 2, np.nan, 4]},
- index=pd.Index([pd.Timestamp('20130101 09:00:00'),
- pd.Timestamp('20130101 09:00:02'),
- pd.Timestamp('20130101 09:00:03'),
- pd.Timestamp('20130101 09:00:05'),
- pd.Timestamp('20130101 09:00:06')],
- name='foo'))
+ dft = pd.DataFrame(
+ {"B": [0, 1, 2, np.nan, 4]},
+ index=pd.Index(
+ [
+ pd.Timestamp("20130101 09:00:00"),
+ pd.Timestamp("20130101 09:00:02"),
+ pd.Timestamp("20130101 09:00:03"),
+ pd.Timestamp("20130101 09:00:05"),
+ pd.Timestamp("20130101 09:00:06"),
+ ],
+ name="foo",
+ ),
+ )
dft
dft.rolling(2).sum()
@@ -537,7 +541,7 @@ Using the time-specification generates variable windows for this sparse data.
.. ipython:: python
- dft.rolling('2s').sum()
+ dft.rolling("2s").sum()
Furthermore, we now allow an optional ``on`` parameter to specify a column (rather than the
default of the index) in a DataFrame.
@@ -546,7 +550,7 @@ default of the index) in a DataFrame.
dft = dft.reset_index()
dft
- dft.rolling('2s', on='foo').sum()
+ dft.rolling("2s", on="foo").sum()
.. _stats.custom_rolling_window:
@@ -569,7 +573,7 @@ For example, if we have the following ``DataFrame``:
use_expanding = [True, False, True, False, True]
use_expanding
- df = pd.DataFrame({'values': range(5)})
+ df = pd.DataFrame({"values": range(5)})
df
and we want to use an expanding window where ``use_expanding`` is ``True`` otherwise a window of size
@@ -615,7 +619,8 @@ rolling operations over a non-fixed offset like a ``BusinessDay``.
.. ipython:: python
from pandas.api.indexers import VariableOffsetWindowIndexer
- df = pd.DataFrame(range(10), index=pd.date_range('2020', periods=10))
+
+ df = pd.DataFrame(range(10), index=pd.date_range("2020", periods=10))
offset = pd.offsets.BDay(1)
indexer = VariableOffsetWindowIndexer(index=df.index, offset=offset)
df
@@ -657,17 +662,21 @@ from present information back to past information. This allows the rolling windo
.. ipython:: python
- df = pd.DataFrame({'x': 1},
- index=[pd.Timestamp('20130101 09:00:01'),
- pd.Timestamp('20130101 09:00:02'),
- pd.Timestamp('20130101 09:00:03'),
- pd.Timestamp('20130101 09:00:04'),
- pd.Timestamp('20130101 09:00:06')])
-
- df["right"] = df.rolling('2s', closed='right').x.sum() # default
- df["both"] = df.rolling('2s', closed='both').x.sum()
- df["left"] = df.rolling('2s', closed='left').x.sum()
- df["neither"] = df.rolling('2s', closed='neither').x.sum()
+ df = pd.DataFrame(
+ {"x": 1},
+ index=[
+ pd.Timestamp("20130101 09:00:01"),
+ pd.Timestamp("20130101 09:00:02"),
+ pd.Timestamp("20130101 09:00:03"),
+ pd.Timestamp("20130101 09:00:04"),
+ pd.Timestamp("20130101 09:00:06"),
+ ],
+ )
+
+ df["right"] = df.rolling("2s", closed="right").x.sum() # default
+ df["both"] = df.rolling("2s", closed="both").x.sum()
+ df["left"] = df.rolling("2s", closed="left").x.sum()
+ df["neither"] = df.rolling("2s", closed="neither").x.sum()
df
@@ -745,13 +754,15 @@ For example:
.. ipython:: python
- df = pd.DataFrame(np.random.randn(1000, 4),
- index=pd.date_range('1/1/2000', periods=1000),
- columns=['A', 'B', 'C', 'D'])
+ df = pd.DataFrame(
+ np.random.randn(1000, 4),
+ index=pd.date_range("1/1/2000", periods=1000),
+ columns=["A", "B", "C", "D"],
+ )
df = df.cumsum()
df2 = df[:20]
- df2.rolling(window=5).corr(df2['B'])
+ df2.rolling(window=5).corr(df2["B"])
.. _stats.moments.corr_pairwise:
@@ -776,14 +787,13 @@ can even be omitted:
.. ipython:: python
- covs = (df[['B', 'C', 'D']].rolling(window=50)
- .cov(df[['A', 'B', 'C']], pairwise=True))
- covs.loc['2002-09-22':]
+ covs = df[["B", "C", "D"]].rolling(window=50).cov(df[["A", "B", "C"]], pairwise=True)
+ covs.loc["2002-09-22":]
.. ipython:: python
correls = df.rolling(window=50).corr()
- correls.loc['2002-09-22':]
+ correls.loc["2002-09-22":]
You can efficiently retrieve the time series of correlations between two
columns by reshaping and indexing:
@@ -791,12 +801,12 @@ columns by reshaping and indexing:
.. ipython:: python
:suppress:
- plt.close('all')
+ plt.close("all")
.. ipython:: python
@savefig rolling_corr_pairwise_ex.png
- correls.unstack(1)[('A', 'C')].plot()
+ correls.unstack(1)[("A", "C")].plot()
.. _stats.aggregate:
@@ -810,9 +820,11 @@ perform multiple computations on the data. These operations are similar to the :
.. ipython:: python
- dfa = pd.DataFrame(np.random.randn(1000, 3),
- index=pd.date_range('1/1/2000', periods=1000),
- columns=['A', 'B', 'C'])
+ dfa = pd.DataFrame(
+ np.random.randn(1000, 3),
+ index=pd.date_range("1/1/2000", periods=1000),
+ columns=["A", "B", "C"],
+ )
r = dfa.rolling(window=60, min_periods=1)
r
@@ -823,9 +835,9 @@ Series (or multiple Series) via standard ``__getitem__``.
r.aggregate(np.sum)
- r['A'].aggregate(np.sum)
+ r["A"].aggregate(np.sum)
- r[['A', 'B']].aggregate(np.sum)
+ r[["A", "B"]].aggregate(np.sum)
As you can see, the result of the aggregation will have the selected columns, or all
columns if none are selected.
@@ -840,7 +852,7 @@ aggregation with, outputting a DataFrame:
.. ipython:: python
- r['A'].agg([np.sum, np.mean, np.std])
+ r["A"].agg([np.sum, np.mean, np.std])
On a windowed DataFrame, you can pass a list of functions to apply to each
column, which produces an aggregated result with a hierarchical index:
@@ -860,20 +872,20 @@ columns of a ``DataFrame``:
.. ipython:: python
- r.agg({'A': np.sum, 'B': lambda x: np.std(x, ddof=1)})
+ r.agg({"A": np.sum, "B": lambda x: np.std(x, ddof=1)})
The function names can also be strings. In order for a string to be valid it
must be implemented on the windowed object
.. ipython:: python
- r.agg({'A': 'sum', 'B': 'std'})
+ r.agg({"A": "sum", "B": "std"})
Furthermore you can pass a nested dict to indicate different aggregations on different columns.
.. ipython:: python
- r.agg({'A': ['sum', 'std'], 'B': ['mean', 'std']})
+ r.agg({"A": ["sum", "std"], "B": ["mean", "std"]})
.. _stats.moments.expanding:
@@ -967,7 +979,7 @@ all accept are:
sn.expanding().sum()
sn.cumsum()
- sn.cumsum().fillna(method='ffill')
+ sn.cumsum().fillna(method="ffill")
An expanding window statistic will be more stable (and less responsive) than
@@ -978,14 +990,14 @@ relative impact of an individual data point. As an example, here is the
.. ipython:: python
:suppress:
- plt.close('all')
+ plt.close("all")
.. ipython:: python
- s.plot(style='k--')
+ s.plot(style="k--")
@savefig expanding_mean_frame.png
- s.expanding().mean().plot(style='k')
+ s.expanding().mean().plot(style="k")
.. _stats.moments.exponentially_weighted:
@@ -1115,10 +1127,10 @@ of ``times``.
.. ipython:: python
- df = pd.DataFrame({'B': [0, 1, 2, np.nan, 4]})
+ df = pd.DataFrame({"B": [0, 1, 2, np.nan, 4]})
df
- times = ['2020-01-01', '2020-01-03', '2020-01-10', '2020-01-15', '2020-01-17']
- df.ewm(halflife='4 days', times=pd.DatetimeIndex(times)).mean()
+ times = ["2020-01-01", "2020-01-03", "2020-01-10", "2020-01-15", "2020-01-17"]
+ df.ewm(halflife="4 days", times=pd.DatetimeIndex(times)).mean()
The following formula is used to compute exponentially weighted mean with an input vector of times:
@@ -1130,10 +1142,10 @@ Here is an example for a univariate time series:
.. ipython:: python
- s.plot(style='k--')
+ s.plot(style="k--")
@savefig ewma_ex.png
- s.ewm(span=20).mean().plot(style='k')
+ s.ewm(span=20).mean().plot(style="k")
ExponentialMovingWindow has a ``min_periods`` argument, which has the same
meaning it does for all the ``.expanding`` and ``.rolling`` methods:
diff --git a/doc/source/user_guide/dsintro.rst b/doc/source/user_guide/dsintro.rst
index c27c73d439a0c..d698b316d321e 100644
--- a/doc/source/user_guide/dsintro.rst
+++ b/doc/source/user_guide/dsintro.rst
@@ -51,7 +51,7 @@ index is passed, one will be created having values ``[0, ..., len(data) - 1]``.
.. ipython:: python
- s = pd.Series(np.random.randn(5), index=['a', 'b', 'c', 'd', 'e'])
+ s = pd.Series(np.random.randn(5), index=["a", "b", "c", "d", "e"])
s
s.index
@@ -71,7 +71,7 @@ Series can be instantiated from dicts:
.. ipython:: python
- d = {'b': 1, 'a': 0, 'c': 2}
+ d = {"b": 1, "a": 0, "c": 2}
pd.Series(d)
.. note::
@@ -92,9 +92,9 @@ index will be pulled out.
.. ipython:: python
- d = {'a': 0., 'b': 1., 'c': 2.}
+ d = {"a": 0.0, "b": 1.0, "c": 2.0}
pd.Series(d)
- pd.Series(d, index=['b', 'c', 'd', 'a'])
+ pd.Series(d, index=["b", "c", "d", "a"])
.. note::
@@ -107,7 +107,7 @@ provided. The value will be repeated to match the length of **index**.
.. ipython:: python
- pd.Series(5., index=['a', 'b', 'c', 'd', 'e'])
+ pd.Series(5.0, index=["a", "b", "c", "d", "e"])
Series is ndarray-like
~~~~~~~~~~~~~~~~~~~~~~
@@ -173,26 +173,26 @@ label:
.. ipython:: python
- s['a']
- s['e'] = 12.
+ s["a"]
+ s["e"] = 12.0
s
- 'e' in s
- 'f' in s
+ "e" in s
+ "f" in s
If a label is not contained, an exception is raised:
.. code-block:: python
- >>> s['f']
+ >>> s["f"]
KeyError: 'f'
Using the ``get`` method, a missing label will return None or specified default:
.. ipython:: python
- s.get('f')
+ s.get("f")
- s.get('f', np.nan)
+ s.get("f", np.nan)
See also the :ref:`section on attribute access<indexing.attribute_access>`.
@@ -244,7 +244,7 @@ Series can also have a ``name`` attribute:
.. ipython:: python
- s = pd.Series(np.random.randn(5), name='something')
+ s = pd.Series(np.random.randn(5), name="something")
s
s.name
@@ -306,13 +306,15 @@ keys.
.. ipython:: python
- d = {'one': pd.Series([1., 2., 3.], index=['a', 'b', 'c']),
- 'two': pd.Series([1., 2., 3., 4.], index=['a', 'b', 'c', 'd'])}
+ d = {
+ "one": pd.Series([1.0, 2.0, 3.0], index=["a", "b", "c"]),
+ "two": pd.Series([1.0, 2.0, 3.0, 4.0], index=["a", "b", "c", "d"]),
+ }
df = pd.DataFrame(d)
df
- pd.DataFrame(d, index=['d', 'b', 'a'])
- pd.DataFrame(d, index=['d', 'b', 'a'], columns=['two', 'three'])
+ pd.DataFrame(d, index=["d", "b", "a"])
+ pd.DataFrame(d, index=["d", "b", "a"], columns=["two", "three"])
The row and column labels can be accessed respectively by accessing the
**index** and **columns** attributes:
@@ -336,10 +338,9 @@ result will be ``range(n)``, where ``n`` is the array length.
.. ipython:: python
- d = {'one': [1., 2., 3., 4.],
- 'two': [4., 3., 2., 1.]}
+ d = {"one": [1.0, 2.0, 3.0, 4.0], "two": [4.0, 3.0, 2.0, 1.0]}
pd.DataFrame(d)
- pd.DataFrame(d, index=['a', 'b', 'c', 'd'])
+ pd.DataFrame(d, index=["a", "b", "c", "d"])
From structured or record array
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
@@ -348,12 +349,12 @@ This case is handled identically to a dict of arrays.
.. ipython:: python
- data = np.zeros((2, ), dtype=[('A', 'i4'), ('B', 'f4'), ('C', 'a10')])
- data[:] = [(1, 2., 'Hello'), (2, 3., "World")]
+ data = np.zeros((2,), dtype=[("A", "i4"), ("B", "f4"), ("C", "a10")])
+ data[:] = [(1, 2.0, "Hello"), (2, 3.0, "World")]
pd.DataFrame(data)
- pd.DataFrame(data, index=['first', 'second'])
- pd.DataFrame(data, columns=['C', 'A', 'B'])
+ pd.DataFrame(data, index=["first", "second"])
+ pd.DataFrame(data, columns=["C", "A", "B"])
.. note::
@@ -367,10 +368,10 @@ From a list of dicts
.. ipython:: python
- data2 = [{'a': 1, 'b': 2}, {'a': 5, 'b': 10, 'c': 20}]
+ data2 = [{"a": 1, "b": 2}, {"a": 5, "b": 10, "c": 20}]
pd.DataFrame(data2)
- pd.DataFrame(data2, index=['first', 'second'])
- pd.DataFrame(data2, columns=['a', 'b'])
+ pd.DataFrame(data2, index=["first", "second"])
+ pd.DataFrame(data2, columns=["a", "b"])
.. _basics.dataframe.from_dict_of_tuples:
@@ -382,11 +383,15 @@ dictionary.
.. ipython:: python
- pd.DataFrame({('a', 'b'): {('A', 'B'): 1, ('A', 'C'): 2},
- ('a', 'a'): {('A', 'C'): 3, ('A', 'B'): 4},
- ('a', 'c'): {('A', 'B'): 5, ('A', 'C'): 6},
- ('b', 'a'): {('A', 'C'): 7, ('A', 'B'): 8},
- ('b', 'b'): {('A', 'D'): 9, ('A', 'B'): 10}})
+ pd.DataFrame(
+ {
+ ("a", "b"): {("A", "B"): 1, ("A", "C"): 2},
+ ("a", "a"): {("A", "C"): 3, ("A", "B"): 4},
+ ("a", "c"): {("A", "B"): 5, ("A", "C"): 6},
+ ("b", "a"): {("A", "C"): 7, ("A", "B"): 8},
+ ("b", "b"): {("A", "D"): 9, ("A", "B"): 10},
+ }
+ )
.. _basics.dataframe.from_series:
@@ -414,11 +419,11 @@ first ``namedtuple``, a ``ValueError`` is raised.
from collections import namedtuple
- Point = namedtuple('Point', 'x y')
+ Point = namedtuple("Point", "x y")
pd.DataFrame([Point(0, 0), Point(0, 3), (2, 3)])
- Point3D = namedtuple('Point3D', 'x y z')
+ Point3D = namedtuple("Point3D", "x y z")
pd.DataFrame([Point3D(0, 0, 0), Point3D(0, 3, 5), Point(2, 3)])
@@ -468,15 +473,18 @@ set to ``'index'`` in order to use the dict keys as row labels.
.. ipython:: python
- pd.DataFrame.from_dict(dict([('A', [1, 2, 3]), ('B', [4, 5, 6])]))
+ pd.DataFrame.from_dict(dict([("A", [1, 2, 3]), ("B", [4, 5, 6])]))
If you pass ``orient='index'``, the keys will be the row labels. In this
case, you can also pass the desired column names:
.. ipython:: python
- pd.DataFrame.from_dict(dict([('A', [1, 2, 3]), ('B', [4, 5, 6])]),
- orient='index', columns=['one', 'two', 'three'])
+ pd.DataFrame.from_dict(
+ dict([("A", [1, 2, 3]), ("B", [4, 5, 6])]),
+ orient="index",
+ columns=["one", "two", "three"],
+ )
.. _basics.dataframe.from_records:
@@ -490,7 +498,7 @@ dtype. For example:
.. ipython:: python
data
- pd.DataFrame.from_records(data, index='C')
+ pd.DataFrame.from_records(data, index="C")
.. _basics.dataframe.sel_add_del:
@@ -503,17 +511,17 @@ the analogous dict operations:
.. ipython:: python
- df['one']
- df['three'] = df['one'] * df['two']
- df['flag'] = df['one'] > 2
+ df["one"]
+ df["three"] = df["one"] * df["two"]
+ df["flag"] = df["one"] > 2
df
Columns can be deleted or popped like with a dict:
.. ipython:: python
- del df['two']
- three = df.pop('three')
+ del df["two"]
+ three = df.pop("three")
df
When inserting a scalar value, it will naturally be propagated to fill the
@@ -521,7 +529,7 @@ column:
.. ipython:: python
- df['foo'] = 'bar'
+ df["foo"] = "bar"
df
When inserting a Series that does not have the same index as the DataFrame, it
@@ -529,7 +537,7 @@ will be conformed to the DataFrame's index:
.. ipython:: python
- df['one_trunc'] = df['one'][:2]
+ df["one_trunc"] = df["one"][:2]
df
You can insert raw ndarrays but their length must match the length of the
@@ -540,7 +548,7 @@ available to insert at a particular location in the columns:
.. ipython:: python
- df.insert(1, 'bar', df['one'])
+ df.insert(1, "bar", df["one"])
df
.. _dsintro.chained_assignment:
@@ -556,17 +564,16 @@ derived from existing columns.
.. ipython:: python
- iris = pd.read_csv('data/iris.data')
+ iris = pd.read_csv("data/iris.data")
iris.head()
- (iris.assign(sepal_ratio=iris['SepalWidth'] / iris['SepalLength'])
- .head())
+ iris.assign(sepal_ratio=iris["SepalWidth"] / iris["SepalLength"]).head()
In the example above, we inserted a precomputed value. We can also pass in
a function of one argument to be evaluated on the DataFrame being assigned to.
.. ipython:: python
- iris.assign(sepal_ratio=lambda x: (x['SepalWidth'] / x['SepalLength'])).head()
+ iris.assign(sepal_ratio=lambda x: (x["SepalWidth"] / x["SepalLength"])).head()
``assign`` **always** returns a copy of the data, leaving the original
DataFrame untouched.
@@ -580,10 +587,14 @@ greater than 5, calculate the ratio, and plot:
.. ipython:: python
@savefig basics_assign.png
- (iris.query('SepalLength > 5')
- .assign(SepalRatio=lambda x: x.SepalWidth / x.SepalLength,
- PetalRatio=lambda x: x.PetalWidth / x.PetalLength)
- .plot(kind='scatter', x='SepalRatio', y='PetalRatio'))
+ (
+ iris.query("SepalLength > 5")
+ .assign(
+ SepalRatio=lambda x: x.SepalWidth / x.SepalLength,
+ PetalRatio=lambda x: x.PetalWidth / x.PetalLength,
+ )
+ .plot(kind="scatter", x="SepalRatio", y="PetalRatio")
+ )
Since a function is passed in, the function is computed on the DataFrame
being assigned to. Importantly, this is the DataFrame that's been filtered
@@ -603,10 +614,8 @@ to a column created earlier in the same :meth:`~DataFrame.assign`.
.. ipython:: python
- dfa = pd.DataFrame({"A": [1, 2, 3],
- "B": [4, 5, 6]})
- dfa.assign(C=lambda x: x['A'] + x['B'],
- D=lambda x: x['A'] + x['C'])
+ dfa = pd.DataFrame({"A": [1, 2, 3], "B": [4, 5, 6]})
+ dfa.assign(C=lambda x: x["A"] + x["B"], D=lambda x: x["A"] + x["C"])
In the second expression, ``x['C']`` will refer to the newly created column,
that's equal to ``dfa['A'] + dfa['B']``.
@@ -631,7 +640,7 @@ DataFrame:
.. ipython:: python
- df.loc['b']
+ df.loc["b"]
df.iloc[2]
For a more exhaustive treatment of sophisticated label-based indexing and
@@ -650,8 +659,8 @@ union of the column and row labels.
.. ipython:: python
- df = pd.DataFrame(np.random.randn(10, 4), columns=['A', 'B', 'C', 'D'])
- df2 = pd.DataFrame(np.random.randn(7, 3), columns=['A', 'B', 'C'])
+ df = pd.DataFrame(np.random.randn(10, 4), columns=["A", "B", "C", "D"])
+ df2 = pd.DataFrame(np.random.randn(7, 3), columns=["A", "B", "C"])
df + df2
When doing an operation between DataFrame and Series, the default behavior is
@@ -680,8 +689,8 @@ Boolean operators work as well:
.. ipython:: python
- df1 = pd.DataFrame({'a': [1, 0, 1], 'b': [0, 1, 1]}, dtype=bool)
- df2 = pd.DataFrame({'a': [0, 1, 1], 'b': [1, 1, 0]}, dtype=bool)
+ df1 = pd.DataFrame({"a": [1, 0, 1], "b": [0, 1, 1]}, dtype=bool)
+ df2 = pd.DataFrame({"a": [0, 1, 1], "b": [1, 1, 0]}, dtype=bool)
df1 & df2
df1 | df2
df1 ^ df2
@@ -737,8 +746,8 @@ on two :class:`Series` with differently ordered labels will align before the ope
.. ipython:: python
- ser1 = pd.Series([1, 2, 3], index=['a', 'b', 'c'])
- ser2 = pd.Series([1, 3, 5], index=['b', 'a', 'c'])
+ ser1 = pd.Series([1, 2, 3], index=["a", "b", "c"])
+ ser2 = pd.Series([1, 3, 5], index=["b", "a", "c"])
ser1
ser2
np.remainder(ser1, ser2)
@@ -748,7 +757,7 @@ with missing values.
.. ipython:: python
- ser3 = pd.Series([2, 4, 6], index=['b', 'c', 'd'])
+ ser3 = pd.Series([2, 4, 6], index=["b", "c", "d"])
ser3
np.remainder(ser1, ser3)
@@ -778,11 +787,11 @@ R package):
:suppress:
# force a summary to be printed
- pd.set_option('display.max_rows', 5)
+ pd.set_option("display.max_rows", 5)
.. ipython:: python
- baseball = pd.read_csv('data/baseball.csv')
+ baseball = pd.read_csv("data/baseball.csv")
print(baseball)
baseball.info()
@@ -791,7 +800,7 @@ R package):
:okwarning:
# restore GlobalPrintConfig
- pd.reset_option(r'^display\.')
+ pd.reset_option(r"^display\.")
However, using ``to_string`` will return a string representation of the
DataFrame in tabular form, though it won't always fit the console width:
@@ -812,7 +821,7 @@ option:
.. ipython:: python
- pd.set_option('display.width', 40) # default is 80
+ pd.set_option("display.width", 40) # default is 80
pd.DataFrame(np.random.randn(3, 12))
@@ -820,21 +829,25 @@ You can adjust the max width of the individual columns by setting ``display.max_
.. ipython:: python
- datafile = {'filename': ['filename_01', 'filename_02'],
- 'path': ["media/user_name/storage/folder_01/filename_01",
- "media/user_name/storage/folder_02/filename_02"]}
+ datafile = {
+ "filename": ["filename_01", "filename_02"],
+ "path": [
+ "media/user_name/storage/folder_01/filename_01",
+ "media/user_name/storage/folder_02/filename_02",
+ ],
+ }
- pd.set_option('display.max_colwidth', 30)
+ pd.set_option("display.max_colwidth", 30)
pd.DataFrame(datafile)
- pd.set_option('display.max_colwidth', 100)
+ pd.set_option("display.max_colwidth", 100)
pd.DataFrame(datafile)
.. ipython:: python
:suppress:
- pd.reset_option('display.width')
- pd.reset_option('display.max_colwidth')
+ pd.reset_option("display.width")
+ pd.reset_option("display.max_colwidth")
You can also disable this feature via the ``expand_frame_repr`` option.
This will print the table in one block.
@@ -847,8 +860,7 @@ accessed like an attribute:
.. ipython:: python
- df = pd.DataFrame({'foo1': np.random.randn(5),
- 'foo2': np.random.randn(5)})
+ df = pd.DataFrame({"foo1": np.random.randn(5), "foo2": np.random.randn(5)})
df
df.foo1
diff --git a/doc/source/user_guide/visualization.rst b/doc/source/user_guide/visualization.rst
index f41912445455d..46ab29a52747a 100644
--- a/doc/source/user_guide/visualization.rst
+++ b/doc/source/user_guide/visualization.rst
@@ -11,7 +11,8 @@ We use the standard convention for referencing the matplotlib API:
.. ipython:: python
import matplotlib.pyplot as plt
- plt.close('all')
+
+ plt.close("all")
We provide the basics in pandas to easily create decent looking plots.
See the :ref:`ecosystem <ecosystem.visualization>` section for visualization
@@ -39,8 +40,7 @@ The ``plot`` method on Series and DataFrame is just a simple wrapper around
.. ipython:: python
- ts = pd.Series(np.random.randn(1000),
- index=pd.date_range('1/1/2000', periods=1000))
+ ts = pd.Series(np.random.randn(1000), index=pd.date_range("1/1/2000", periods=1000))
ts = ts.cumsum()
@savefig series_plot_basic.png
@@ -54,18 +54,17 @@ On DataFrame, :meth:`~DataFrame.plot` is a convenience to plot all of the column
.. ipython:: python
:suppress:
- plt.close('all')
+ plt.close("all")
np.random.seed(123456)
.. ipython:: python
- df = pd.DataFrame(np.random.randn(1000, 4),
- index=ts.index, columns=list('ABCD'))
+ df = pd.DataFrame(np.random.randn(1000, 4), index=ts.index, columns=list("ABCD"))
df = df.cumsum()
plt.figure();
@savefig frame_plot_basic.png
- df.plot();
+ df.plot()
You can plot one column versus another using the ``x`` and ``y`` keywords in
:meth:`~DataFrame.plot`:
@@ -73,17 +72,17 @@ You can plot one column versus another using the ``x`` and ``y`` keywords in
.. ipython:: python
:suppress:
- plt.close('all')
+ plt.close("all")
plt.figure()
np.random.seed(123456)
.. ipython:: python
- df3 = pd.DataFrame(np.random.randn(1000, 2), columns=['B', 'C']).cumsum()
- df3['A'] = pd.Series(list(range(len(df))))
+ df3 = pd.DataFrame(np.random.randn(1000, 2), columns=["B", "C"]).cumsum()
+ df3["A"] = pd.Series(list(range(len(df))))
@savefig df_plot_xy.png
- df3.plot(x='A', y='B')
+ df3.plot(x="A", y="B")
.. note::
@@ -93,7 +92,7 @@ You can plot one column versus another using the ``x`` and ``y`` keywords in
.. ipython:: python
:suppress:
- plt.close('all')
+ plt.close("all")
.. _visualization.other:
@@ -120,7 +119,7 @@ For example, a bar plot can be created the following way:
plt.figure();
@savefig bar_plot_ex.png
- df.iloc[5].plot(kind='bar');
+ df.iloc[5].plot(kind="bar")
You can also create these other plots using the methods ``DataFrame.plot.<kind>`` instead of providing the ``kind`` keyword argument. This makes it easier to discover plot methods and the specific arguments they use:
@@ -164,7 +163,7 @@ For labeled, non-time series data, you may wish to produce a bar plot:
@savefig bar_plot_ex.png
df.iloc[5].plot.bar()
- plt.axhline(0, color='k');
+ plt.axhline(0, color="k")
Calling a DataFrame's :meth:`plot.bar() <DataFrame.plot.bar>` method produces a multiple
bar plot:
@@ -172,42 +171,42 @@ bar plot:
.. ipython:: python
:suppress:
- plt.close('all')
+ plt.close("all")
plt.figure()
np.random.seed(123456)
.. ipython:: python
- df2 = pd.DataFrame(np.random.rand(10, 4), columns=['a', 'b', 'c', 'd'])
+ df2 = pd.DataFrame(np.random.rand(10, 4), columns=["a", "b", "c", "d"])
@savefig bar_plot_multi_ex.png
- df2.plot.bar();
+ df2.plot.bar()
To produce a stacked bar plot, pass ``stacked=True``:
.. ipython:: python
:suppress:
- plt.close('all')
+ plt.close("all")
plt.figure()
.. ipython:: python
@savefig bar_plot_stacked_ex.png
- df2.plot.bar(stacked=True);
+ df2.plot.bar(stacked=True)
To get horizontal bar plots, use the ``barh`` method:
.. ipython:: python
:suppress:
- plt.close('all')
+ plt.close("all")
plt.figure()
.. ipython:: python
@savefig barh_plot_stacked_ex.png
- df2.plot.barh(stacked=True);
+ df2.plot.barh(stacked=True)
.. _visualization.hist:
@@ -218,8 +217,14 @@ Histograms can be drawn by using the :meth:`DataFrame.plot.hist` and :meth:`Seri
.. ipython:: python
- df4 = pd.DataFrame({'a': np.random.randn(1000) + 1, 'b': np.random.randn(1000),
- 'c': np.random.randn(1000) - 1}, columns=['a', 'b', 'c'])
+ df4 = pd.DataFrame(
+ {
+ "a": np.random.randn(1000) + 1,
+ "b": np.random.randn(1000),
+ "c": np.random.randn(1000) - 1,
+ },
+ columns=["a", "b", "c"],
+ )
plt.figure();
@@ -230,7 +235,7 @@ Histograms can be drawn by using the :meth:`DataFrame.plot.hist` and :meth:`Seri
.. ipython:: python
:suppress:
- plt.close('all')
+ plt.close("all")
A histogram can be stacked using ``stacked=True``. Bin size can be changed
using the ``bins`` keyword.
@@ -245,7 +250,7 @@ using the ``bins`` keyword.
.. ipython:: python
:suppress:
- plt.close('all')
+ plt.close("all")
You can pass other keywords supported by matplotlib ``hist``. For example,
horizontal and cumulative histograms can be drawn by
@@ -256,12 +261,12 @@ horizontal and cumulative histograms can be drawn by
plt.figure();
@savefig hist_new_kwargs.png
- df4['a'].plot.hist(orientation='horizontal', cumulative=True)
+ df4["a"].plot.hist(orientation="horizontal", cumulative=True)
.. ipython:: python
:suppress:
- plt.close('all')
+ plt.close("all")
See the :meth:`hist <matplotlib.axes.Axes.hist>` method and the
`matplotlib hist documentation <https://matplotlib.org/api/pyplot_api.html#matplotlib.pyplot.hist>`__ for more.
@@ -274,12 +279,12 @@ The existing interface ``DataFrame.hist`` to plot histogram still can be used.
plt.figure();
@savefig hist_plot_ex.png
- df['A'].diff().hist()
+ df["A"].diff().hist()
.. ipython:: python
:suppress:
- plt.close('all')
+ plt.close("all")
:meth:`DataFrame.hist` plots the histograms of the columns on multiple
subplots:
@@ -289,7 +294,7 @@ subplots:
plt.figure()
@savefig frame_hist_ex.png
- df.diff().hist(color='k', alpha=0.5, bins=50)
+ df.diff().hist(color="k", alpha=0.5, bins=50)
The ``by`` keyword can be specified to plot grouped histograms:
@@ -297,7 +302,7 @@ The ``by`` keyword can be specified to plot grouped histograms:
.. ipython:: python
:suppress:
- plt.close('all')
+ plt.close("all")
plt.figure()
np.random.seed(123456)
@@ -323,12 +328,12 @@ a uniform random variable on [0,1).
.. ipython:: python
:suppress:
- plt.close('all')
+ plt.close("all")
np.random.seed(123456)
.. ipython:: python
- df = pd.DataFrame(np.random.rand(10, 5), columns=['A', 'B', 'C', 'D', 'E'])
+ df = pd.DataFrame(np.random.rand(10, 5), columns=["A", "B", "C", "D", "E"])
@savefig box_plot_new.png
df.plot.box()
@@ -348,16 +353,20 @@ more complicated colorization, you can get each drawn artists by passing
.. ipython:: python
- color = {'boxes': 'DarkGreen', 'whiskers': 'DarkOrange',
- 'medians': 'DarkBlue', 'caps': 'Gray'}
+ color = {
+ "boxes": "DarkGreen",
+ "whiskers": "DarkOrange",
+ "medians": "DarkBlue",
+ "caps": "Gray",
+ }
@savefig box_new_colorize.png
- df.plot.box(color=color, sym='r+')
+ df.plot.box(color=color, sym="r+")
.. ipython:: python
:suppress:
- plt.close('all')
+ plt.close("all")
Also, you can pass other keywords supported by matplotlib ``boxplot``.
For example, horizontal and custom-positioned boxplot can be drawn by
@@ -378,7 +387,7 @@ The existing interface ``DataFrame.boxplot`` to plot boxplot still can be used.
.. ipython:: python
:suppress:
- plt.close('all')
+ plt.close("all")
np.random.seed(123456)
.. ipython:: python
@@ -396,19 +405,19 @@ groupings. For instance,
.. ipython:: python
:suppress:
- plt.close('all')
+ plt.close("all")
np.random.seed(123456)
.. ipython:: python
:okwarning:
- df = pd.DataFrame(np.random.rand(10, 2), columns=['Col1', 'Col2'])
- df['X'] = pd.Series(['A', 'A', 'A', 'A', 'A', 'B', 'B', 'B', 'B', 'B'])
+ df = pd.DataFrame(np.random.rand(10, 2), columns=["Col1", "Col2"])
+ df["X"] = pd.Series(["A", "A", "A", "A", "A", "B", "B", "B", "B", "B"])
- plt.figure();
+ plt.figure()
@savefig box_plot_ex2.png
- bp = df.boxplot(by='X')
+ bp = df.boxplot(by="X")
You can also pass a subset of columns to plot, as well as group by multiple
columns:
@@ -416,25 +425,25 @@ columns:
.. ipython:: python
:suppress:
- plt.close('all')
+ plt.close("all")
np.random.seed(123456)
.. ipython:: python
:okwarning:
- df = pd.DataFrame(np.random.rand(10, 3), columns=['Col1', 'Col2', 'Col3'])
- df['X'] = pd.Series(['A', 'A', 'A', 'A', 'A', 'B', 'B', 'B', 'B', 'B'])
- df['Y'] = pd.Series(['A', 'B', 'A', 'B', 'A', 'B', 'A', 'B', 'A', 'B'])
+ df = pd.DataFrame(np.random.rand(10, 3), columns=["Col1", "Col2", "Col3"])
+ df["X"] = pd.Series(["A", "A", "A", "A", "A", "B", "B", "B", "B", "B"])
+ df["Y"] = pd.Series(["A", "B", "A", "B", "A", "B", "A", "B", "A", "B"])
plt.figure();
@savefig box_plot_ex3.png
- bp = df.boxplot(column=['Col1', 'Col2'], by=['X', 'Y'])
+ bp = df.boxplot(column=["Col1", "Col2"], by=["X", "Y"])
.. ipython:: python
:suppress:
- plt.close('all')
+ plt.close("all")
.. _visualization.box.return:
@@ -462,16 +471,16 @@ keyword, will affect the output type as well:
np.random.seed(1234)
df_box = pd.DataFrame(np.random.randn(50, 2))
- df_box['g'] = np.random.choice(['A', 'B'], size=50)
- df_box.loc[df_box['g'] == 'B', 1] += 3
+ df_box["g"] = np.random.choice(["A", "B"], size=50)
+ df_box.loc[df_box["g"] == "B", 1] += 3
@savefig boxplot_groupby.png
- bp = df_box.boxplot(by='g')
+ bp = df_box.boxplot(by="g")
.. ipython:: python
:suppress:
- plt.close('all')
+ plt.close("all")
The subplots above are split by the numeric columns first, then the value of
the ``g`` column. Below the subplots are first split by the value of ``g``,
@@ -481,12 +490,12 @@ then by the numeric columns.
:okwarning:
@savefig groupby_boxplot_vis.png
- bp = df_box.groupby('g').boxplot()
+ bp = df_box.groupby("g").boxplot()
.. ipython:: python
:suppress:
- plt.close('all')
+ plt.close("all")
.. _visualization.area_plot:
@@ -506,23 +515,23 @@ When input data contains ``NaN``, it will be automatically filled by 0. If you w
.. ipython:: python
- df = pd.DataFrame(np.random.rand(10, 4), columns=['a', 'b', 'c', 'd'])
+ df = pd.DataFrame(np.random.rand(10, 4), columns=["a", "b", "c", "d"])
@savefig area_plot_stacked.png
- df.plot.area();
+ df.plot.area()
To produce an unstacked plot, pass ``stacked=False``. Alpha value is set to 0.5 unless otherwise specified:
.. ipython:: python
:suppress:
- plt.close('all')
+ plt.close("all")
plt.figure()
.. ipython:: python
@savefig area_plot_unstacked.png
- df.plot.area(stacked=False);
+ df.plot.area(stacked=False)
.. _visualization.scatter:
@@ -537,29 +546,29 @@ These can be specified by the ``x`` and ``y`` keywords.
:suppress:
np.random.seed(123456)
- plt.close('all')
+ plt.close("all")
plt.figure()
.. ipython:: python
- df = pd.DataFrame(np.random.rand(50, 4), columns=['a', 'b', 'c', 'd'])
+ df = pd.DataFrame(np.random.rand(50, 4), columns=["a", "b", "c", "d"])
@savefig scatter_plot.png
- df.plot.scatter(x='a', y='b');
+ df.plot.scatter(x="a", y="b")
To plot multiple column groups in a single axes, repeat ``plot`` method specifying target ``ax``.
It is recommended to specify ``color`` and ``label`` keywords to distinguish each group.
.. ipython:: python
- ax = df.plot.scatter(x='a', y='b', color='DarkBlue', label='Group 1');
+ ax = df.plot.scatter(x="a", y="b", color="DarkBlue", label="Group 1")
@savefig scatter_plot_repeated.png
- df.plot.scatter(x='c', y='d', color='DarkGreen', label='Group 2', ax=ax);
+ df.plot.scatter(x="c", y="d", color="DarkGreen", label="Group 2", ax=ax)
.. ipython:: python
:suppress:
- plt.close('all')
+ plt.close("all")
The keyword ``c`` may be given as the name of a column to provide colors for
each point:
@@ -567,13 +576,13 @@ each point:
.. ipython:: python
@savefig scatter_plot_colored.png
- df.plot.scatter(x='a', y='b', c='c', s=50);
+ df.plot.scatter(x="a", y="b", c="c", s=50)
.. ipython:: python
:suppress:
- plt.close('all')
+ plt.close("all")
You can pass other keywords supported by matplotlib
:meth:`scatter <matplotlib.axes.Axes.scatter>`. The example below shows a
@@ -582,12 +591,12 @@ bubble chart using a column of the ``DataFrame`` as the bubble size.
.. ipython:: python
@savefig scatter_plot_bubble.png
- df.plot.scatter(x='a', y='b', s=df['c'] * 200);
+ df.plot.scatter(x="a", y="b", s=df["c"] * 200)
.. ipython:: python
:suppress:
- plt.close('all')
+ plt.close("all")
See the :meth:`scatter <matplotlib.axes.Axes.scatter>` method and the
`matplotlib scatter documentation <https://matplotlib.org/api/pyplot_api.html#matplotlib.pyplot.scatter>`__ for more.
@@ -609,11 +618,11 @@ too dense to plot each point individually.
.. ipython:: python
- df = pd.DataFrame(np.random.randn(1000, 2), columns=['a', 'b'])
- df['b'] = df['b'] + np.arange(1000)
+ df = pd.DataFrame(np.random.randn(1000, 2), columns=["a", "b"])
+ df["b"] = df["b"] + np.arange(1000)
@savefig hexbin_plot.png
- df.plot.hexbin(x='a', y='b', gridsize=25)
+ df.plot.hexbin(x="a", y="b", gridsize=25)
A useful keyword argument is ``gridsize``; it controls the number of hexagons
@@ -631,23 +640,23 @@ given by column ``z``. The bins are aggregated with NumPy's ``max`` function.
.. ipython:: python
:suppress:
- plt.close('all')
+ plt.close("all")
plt.figure()
np.random.seed(123456)
.. ipython:: python
- df = pd.DataFrame(np.random.randn(1000, 2), columns=['a', 'b'])
- df['b'] = df['b'] = df['b'] + np.arange(1000)
- df['z'] = np.random.uniform(0, 3, 1000)
+ df = pd.DataFrame(np.random.randn(1000, 2), columns=["a", "b"])
+    df["b"] = df["b"] + np.arange(1000)
+ df["z"] = np.random.uniform(0, 3, 1000)
@savefig hexbin_plot_agg.png
- df.plot.hexbin(x='a', y='b', C='z', reduce_C_function=np.max, gridsize=25)
+ df.plot.hexbin(x="a", y="b", C="z", reduce_C_function=np.max, gridsize=25)
.. ipython:: python
:suppress:
- plt.close('all')
+ plt.close("all")
See the :meth:`hexbin <matplotlib.axes.Axes.hexbin>` method and the
`matplotlib hexbin documentation <https://matplotlib.org/api/pyplot_api.html#matplotlib.pyplot.hexbin>`__ for more.
@@ -670,8 +679,7 @@ A ``ValueError`` will be raised if there are any negative values in your data.
.. ipython:: python
:okwarning:
- series = pd.Series(3 * np.random.rand(4),
- index=['a', 'b', 'c', 'd'], name='series')
+ series = pd.Series(3 * np.random.rand(4), index=["a", "b", "c", "d"], name="series")
@savefig series_pie_plot.png
series.plot.pie(figsize=(6, 6))
@@ -679,7 +687,7 @@ A ``ValueError`` will be raised if there are any negative values in your data.
.. ipython:: python
:suppress:
- plt.close('all')
+ plt.close("all")
For pie plots it's best to use square figures, i.e. a figure aspect ratio 1.
You can create the figure with equal width and height, or force the aspect ratio
@@ -700,8 +708,9 @@ drawn in each pie plots by default; specify ``legend=False`` to hide it.
.. ipython:: python
- df = pd.DataFrame(3 * np.random.rand(4, 2),
- index=['a', 'b', 'c', 'd'], columns=['x', 'y'])
+ df = pd.DataFrame(
+ 3 * np.random.rand(4, 2), index=["a", "b", "c", "d"], columns=["x", "y"]
+ )
@savefig df_pie_plot.png
df.plot.pie(subplots=True, figsize=(8, 4))
@@ -709,7 +718,7 @@ drawn in each pie plots by default; specify ``legend=False`` to hide it.
.. ipython:: python
:suppress:
- plt.close('all')
+ plt.close("all")
You can use the ``labels`` and ``colors`` keywords to specify the labels and colors of each wedge.
@@ -731,21 +740,26 @@ Also, other keywords supported by :func:`matplotlib.pyplot.pie` can be used.
.. ipython:: python
@savefig series_pie_plot_options.png
- series.plot.pie(labels=['AA', 'BB', 'CC', 'DD'], colors=['r', 'g', 'b', 'c'],
- autopct='%.2f', fontsize=20, figsize=(6, 6))
+ series.plot.pie(
+ labels=["AA", "BB", "CC", "DD"],
+ colors=["r", "g", "b", "c"],
+ autopct="%.2f",
+ fontsize=20,
+ figsize=(6, 6),
+ )
If you pass values whose sum total is less than 1.0, matplotlib draws a semicircle.
.. ipython:: python
:suppress:
- plt.close('all')
+ plt.close("all")
plt.figure()
.. ipython:: python
:okwarning:
- series = pd.Series([0.1] * 4, index=['a', 'b', 'c', 'd'], name='series2')
+ series = pd.Series([0.1] * 4, index=["a", "b", "c", "d"], name="series2")
@savefig series_pie_plot_semi.png
series.plot.pie(figsize=(6, 6))
@@ -755,7 +769,7 @@ See the `matplotlib pie documentation <https://matplotlib.org/api/pyplot_api.htm
.. ipython:: python
:suppress:
- plt.close('all')
+ plt.close("all")
.. _visualization.missing_data:
@@ -819,15 +833,16 @@ You can create a scatter plot matrix using the
.. ipython:: python
from pandas.plotting import scatter_matrix
- df = pd.DataFrame(np.random.randn(1000, 4), columns=['a', 'b', 'c', 'd'])
+
+ df = pd.DataFrame(np.random.randn(1000, 4), columns=["a", "b", "c", "d"])
@savefig scatter_matrix_kde.png
- scatter_matrix(df, alpha=0.2, figsize=(6, 6), diagonal='kde');
+ scatter_matrix(df, alpha=0.2, figsize=(6, 6), diagonal="kde")
.. ipython:: python
:suppress:
- plt.close('all')
+ plt.close("all")
.. _visualization.kde:
@@ -852,7 +867,7 @@ You can create density plots using the :meth:`Series.plot.kde` and :meth:`DataFr
.. ipython:: python
:suppress:
- plt.close('all')
+ plt.close("all")
.. _visualization.andrews_curves:
@@ -872,12 +887,12 @@ of the same class will usually be closer together and form larger structures.
from pandas.plotting import andrews_curves
- data = pd.read_csv('data/iris.data')
+ data = pd.read_csv("data/iris.data")
plt.figure()
@savefig andrews_curves.png
- andrews_curves(data, 'Name')
+ andrews_curves(data, "Name")
.. _visualization.parallel_coordinates:
@@ -896,17 +911,17 @@ represents one data point. Points that tend to cluster will appear closer togeth
from pandas.plotting import parallel_coordinates
- data = pd.read_csv('data/iris.data')
+ data = pd.read_csv("data/iris.data")
plt.figure()
@savefig parallel_coordinates.png
- parallel_coordinates(data, 'Name')
+ parallel_coordinates(data, "Name")
.. ipython:: python
:suppress:
- plt.close('all')
+ plt.close("all")
.. _visualization.lag:
@@ -939,7 +954,7 @@ be passed, and when ``lag=1`` the plot is essentially ``data[:-1]`` vs.
.. ipython:: python
:suppress:
- plt.close('all')
+ plt.close("all")
.. _visualization.autocorrelation:
@@ -976,7 +991,7 @@ autocorrelation plots.
.. ipython:: python
:suppress:
- plt.close('all')
+ plt.close("all")
.. _visualization.bootstrap:
@@ -1001,12 +1016,12 @@ are what constitutes the bootstrap plot.
data = pd.Series(np.random.rand(1000))
@savefig bootstrap_plot.png
- bootstrap_plot(data, size=50, samples=500, color='grey')
+ bootstrap_plot(data, size=50, samples=500, color="grey")
.. ipython:: python
:suppress:
- plt.close('all')
+ plt.close("all")
.. _visualization.radviz:
@@ -1032,17 +1047,17 @@ for more information.
from pandas.plotting import radviz
- data = pd.read_csv('data/iris.data')
+ data = pd.read_csv("data/iris.data")
plt.figure()
@savefig radviz.png
- radviz(data, 'Name')
+ radviz(data, "Name")
.. ipython:: python
:suppress:
- plt.close('all')
+ plt.close("all")
.. _visualization.formatting:
@@ -1071,12 +1086,12 @@ layout and formatting of the returned plot:
plt.figure();
@savefig series_plot_basic2.png
- ts.plot(style='k--', label='Series');
+ ts.plot(style="k--", label="Series")
.. ipython:: python
:suppress:
- plt.close('all')
+ plt.close("all")
For each kind of plot (e.g. ``line``, ``bar``, ``scatter``) any additional arguments
keywords are passed along to the corresponding matplotlib function
@@ -1098,8 +1113,7 @@ shown by default.
.. ipython:: python
- df = pd.DataFrame(np.random.randn(1000, 4),
- index=ts.index, columns=list('ABCD'))
+ df = pd.DataFrame(np.random.randn(1000, 4), index=ts.index, columns=list("ABCD"))
df = df.cumsum()
@savefig frame_plot_basic_noleg.png
@@ -1108,7 +1122,7 @@ shown by default.
.. ipython:: python
:suppress:
- plt.close('all')
+ plt.close("all")
Controlling the labels
@@ -1135,7 +1149,7 @@ it empty for ylabel.
.. ipython:: python
:suppress:
- plt.close('all')
+ plt.close("all")
Scales
@@ -1151,8 +1165,7 @@ You may pass ``logy`` to get a log-scale Y axis.
.. ipython:: python
- ts = pd.Series(np.random.randn(1000),
- index=pd.date_range('1/1/2000', periods=1000))
+ ts = pd.Series(np.random.randn(1000), index=pd.date_range("1/1/2000", periods=1000))
ts = np.exp(ts.cumsum())
@savefig series_plot_logy.png
@@ -1161,7 +1174,7 @@ You may pass ``logy`` to get a log-scale Y axis.
.. ipython:: python
:suppress:
- plt.close('all')
+ plt.close("all")
See also the ``logx`` and ``loglog`` keyword arguments.
@@ -1177,15 +1190,15 @@ To plot data on a secondary y-axis, use the ``secondary_y`` keyword:
.. ipython:: python
- df['A'].plot()
+ df["A"].plot()
@savefig series_plot_secondary_y.png
- df['B'].plot(secondary_y=True, style='g')
+ df["B"].plot(secondary_y=True, style="g")
.. ipython:: python
:suppress:
- plt.close('all')
+ plt.close("all")
To plot some columns in a ``DataFrame``, give the column names to the ``secondary_y``
keyword:
@@ -1193,15 +1206,15 @@ keyword:
.. ipython:: python
plt.figure()
- ax = df.plot(secondary_y=['A', 'B'])
- ax.set_ylabel('CD scale')
+ ax = df.plot(secondary_y=["A", "B"])
+ ax.set_ylabel("CD scale")
@savefig frame_plot_secondary_y.png
- ax.right_ax.set_ylabel('AB scale')
+ ax.right_ax.set_ylabel("AB scale")
.. ipython:: python
:suppress:
- plt.close('all')
+ plt.close("all")
Note that the columns plotted on the secondary y-axis are automatically marked
with "(right)" in the legend. To turn off the automatic marking, use the
@@ -1212,12 +1225,12 @@ with "(right)" in the legend. To turn off the automatic marking, use the
plt.figure()
@savefig frame_plot_secondary_y_no_right.png
- df.plot(secondary_y=['A', 'B'], mark_right=False)
+ df.plot(secondary_y=["A", "B"], mark_right=False)
.. ipython:: python
:suppress:
- plt.close('all')
+ plt.close("all")
.. _plotting.formatters:
@@ -1249,12 +1262,12 @@ Here is the default behavior, notice how the x-axis tick labeling is performed:
plt.figure()
@savefig ser_plot_suppress.png
- df['A'].plot()
+ df["A"].plot()
.. ipython:: python
:suppress:
- plt.close('all')
+ plt.close("all")
Using the ``x_compat`` parameter, you can suppress this behavior:
@@ -1263,12 +1276,12 @@ Using the ``x_compat`` parameter, you can suppress this behavior:
plt.figure()
@savefig ser_plot_suppress_parm.png
- df['A'].plot(x_compat=True)
+ df["A"].plot(x_compat=True)
.. ipython:: python
:suppress:
- plt.close('all')
+ plt.close("all")
If you have more than one plot that needs to be suppressed, the ``use`` method
in ``pandas.plotting.plot_params`` can be used in a ``with`` statement:
@@ -1278,15 +1291,15 @@ in ``pandas.plotting.plot_params`` can be used in a ``with`` statement:
plt.figure()
@savefig ser_plot_suppress_context.png
- with pd.plotting.plot_params.use('x_compat', True):
- df['A'].plot(color='r')
- df['B'].plot(color='g')
- df['C'].plot(color='b')
+ with pd.plotting.plot_params.use("x_compat", True):
+ df["A"].plot(color="r")
+ df["B"].plot(color="g")
+ df["C"].plot(color="b")
.. ipython:: python
:suppress:
- plt.close('all')
+ plt.close("all")
Automatic date tick adjustment
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
@@ -1307,12 +1320,12 @@ with the ``subplots`` keyword:
.. ipython:: python
@savefig frame_plot_subplots.png
- df.plot(subplots=True, figsize=(6, 6));
+ df.plot(subplots=True, figsize=(6, 6))
.. ipython:: python
:suppress:
- plt.close('all')
+ plt.close("all")
Using layout and targeting multiple axes
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
@@ -1330,23 +1343,23 @@ or columns needed, given the other.
.. ipython:: python
@savefig frame_plot_subplots_layout.png
- df.plot(subplots=True, layout=(2, 3), figsize=(6, 6), sharex=False);
+ df.plot(subplots=True, layout=(2, 3), figsize=(6, 6), sharex=False)
.. ipython:: python
:suppress:
- plt.close('all')
+ plt.close("all")
The above example is identical to using:
.. ipython:: python
- df.plot(subplots=True, layout=(2, -1), figsize=(6, 6), sharex=False);
+ df.plot(subplots=True, layout=(2, -1), figsize=(6, 6), sharex=False)
.. ipython:: python
:suppress:
- plt.close('all')
+ plt.close("all")
The required number of columns (3) is inferred from the number of series to plot
and the given number of rows (2).
@@ -1366,15 +1379,14 @@ otherwise you will see a warning.
target1 = [axes[0][0], axes[1][1], axes[2][2], axes[3][3]]
target2 = [axes[3][0], axes[2][1], axes[1][2], axes[0][3]]
- df.plot(subplots=True, ax=target1, legend=False, sharex=False, sharey=False);
+ df.plot(subplots=True, ax=target1, legend=False, sharex=False, sharey=False)
@savefig frame_plot_subplots_multi_ax.png
- (-df).plot(subplots=True, ax=target2, legend=False,
- sharex=False, sharey=False);
+ (-df).plot(subplots=True, ax=target2, legend=False, sharex=False, sharey=False)
.. ipython:: python
:suppress:
- plt.close('all')
+ plt.close("all")
Another option is passing an ``ax`` argument to :meth:`Series.plot` to plot on a particular axis:
@@ -1382,37 +1394,35 @@ Another option is passing an ``ax`` argument to :meth:`Series.plot` to plot on a
:suppress:
np.random.seed(123456)
- ts = pd.Series(np.random.randn(1000),
- index=pd.date_range('1/1/2000', periods=1000))
+ ts = pd.Series(np.random.randn(1000), index=pd.date_range("1/1/2000", periods=1000))
ts = ts.cumsum()
- df = pd.DataFrame(np.random.randn(1000, 4), index=ts.index,
- columns=list('ABCD'))
+ df = pd.DataFrame(np.random.randn(1000, 4), index=ts.index, columns=list("ABCD"))
df = df.cumsum()
.. ipython:: python
:suppress:
- plt.close('all')
+ plt.close("all")
.. ipython:: python
fig, axes = plt.subplots(nrows=2, ncols=2)
plt.subplots_adjust(wspace=0.2, hspace=0.5)
- df['A'].plot(ax=axes[0, 0]);
- axes[0, 0].set_title('A');
- df['B'].plot(ax=axes[0, 1]);
- axes[0, 1].set_title('B');
- df['C'].plot(ax=axes[1, 0]);
- axes[1, 0].set_title('C');
- df['D'].plot(ax=axes[1, 1]);
+ df["A"].plot(ax=axes[0, 0])
+ axes[0, 0].set_title("A")
+ df["B"].plot(ax=axes[0, 1])
+ axes[0, 1].set_title("B")
+ df["C"].plot(ax=axes[1, 0])
+ axes[1, 0].set_title("C")
+ df["D"].plot(ax=axes[1, 1])
@savefig series_plot_multi.png
- axes[1, 1].set_title('D');
+ axes[1, 1].set_title("D")
.. ipython:: python
:suppress:
- plt.close('all')
+ plt.close("all")
.. _visualization.errorbars:
@@ -1434,17 +1444,21 @@ Here is an example of one way to easily plot group means with standard deviation
.. ipython:: python
# Generate the data
- ix3 = pd.MultiIndex.from_arrays([
- ['a', 'a', 'a', 'a', 'b', 'b', 'b', 'b'],
- ['foo', 'foo', 'bar', 'bar', 'foo', 'foo', 'bar', 'bar']],
- names=['letter', 'word'])
-
- df3 = pd.DataFrame({'data1': [3, 2, 4, 3, 2, 4, 3, 2],
- 'data2': [6, 5, 7, 5, 4, 5, 6, 5]}, index=ix3)
+ ix3 = pd.MultiIndex.from_arrays(
+ [
+ ["a", "a", "a", "a", "b", "b", "b", "b"],
+ ["foo", "foo", "bar", "bar", "foo", "foo", "bar", "bar"],
+ ],
+ names=["letter", "word"],
+ )
+
+ df3 = pd.DataFrame(
+ {"data1": [3, 2, 4, 3, 2, 4, 3, 2], "data2": [6, 5, 7, 5, 4, 5, 6, 5]}, index=ix3
+ )
# Group by index labels and take the means and standard deviations
# for each group
- gp3 = df3.groupby(level=('letter', 'word'))
+ gp3 = df3.groupby(level=("letter", "word"))
means = gp3.mean()
errors = gp3.std()
means
@@ -1458,7 +1472,7 @@ Here is an example of one way to easily plot group means with standard deviation
.. ipython:: python
:suppress:
- plt.close('all')
+ plt.close("all")
.. _visualization.table:
@@ -1475,7 +1489,7 @@ Plotting with matplotlib table is now supported in :meth:`DataFrame.plot` and :
.. ipython:: python
fig, ax = plt.subplots(1, 1, figsize=(7, 6.5))
- df = pd.DataFrame(np.random.rand(5, 3), columns=['a', 'b', 'c'])
+ df = pd.DataFrame(np.random.rand(5, 3), columns=["a", "b", "c"])
ax.xaxis.tick_top() # Display x-axis ticks on top.
@savefig line_plot_table_true.png
@@ -1484,7 +1498,7 @@ Plotting with matplotlib table is now supported in :meth:`DataFrame.plot` and :
.. ipython:: python
:suppress:
- plt.close('all')
+ plt.close("all")
Also, you can pass a different :class:`DataFrame` or :class:`Series` to the
``table`` keyword. The data will be drawn as displayed in print method
@@ -1502,7 +1516,7 @@ as seen in the example below.
.. ipython:: python
:suppress:
- plt.close('all')
+ plt.close("all")
There also exists a helper function ``pandas.plotting.table``, which creates a
table from :class:`DataFrame` or :class:`Series`, and adds it to an
@@ -1512,10 +1526,10 @@ matplotlib `table <https://matplotlib.org/api/axes_api.html#matplotlib.axes.Axes
.. ipython:: python
from pandas.plotting import table
+
fig, ax = plt.subplots(1, 1)
- table(ax, np.round(df.describe(), 2),
- loc='upper right', colWidths=[0.2, 0.2, 0.2])
+ table(ax, np.round(df.describe(), 2), loc="upper right", colWidths=[0.2, 0.2, 0.2])
@savefig line_plot_table_describe.png
df.plot(ax=ax, ylim=(0, 2), legend=None)
@@ -1523,7 +1537,7 @@ matplotlib `table <https://matplotlib.org/api/axes_api.html#matplotlib.axes.Axes
.. ipython:: python
:suppress:
- plt.close('all')
+ plt.close("all")
**Note**: You can get table instances on the axes using ``axes.tables`` property for further decorations. See the `matplotlib table documentation <https://matplotlib.org/api/axes_api.html#matplotlib.axes.Axes.table>`__ for more.
@@ -1560,12 +1574,12 @@ To use the cubehelix colormap, we can pass ``colormap='cubehelix'``.
plt.figure()
@savefig cubehelix.png
- df.plot(colormap='cubehelix')
+ df.plot(colormap="cubehelix")
.. ipython:: python
:suppress:
- plt.close('all')
+ plt.close("all")
Alternatively, we can pass the colormap itself:
@@ -1581,7 +1595,7 @@ Alternatively, we can pass the colormap itself:
.. ipython:: python
:suppress:
- plt.close('all')
+ plt.close("all")
Colormaps can also be used other plot types, like bar charts:
@@ -1598,12 +1612,12 @@ Colormaps can also be used other plot types, like bar charts:
plt.figure()
@savefig greens.png
- dd.plot.bar(colormap='Greens')
+ dd.plot.bar(colormap="Greens")
.. ipython:: python
:suppress:
- plt.close('all')
+ plt.close("all")
Parallel coordinates charts:
@@ -1612,12 +1626,12 @@ Parallel coordinates charts:
plt.figure()
@savefig parallel_gist_rainbow.png
- parallel_coordinates(data, 'Name', colormap='gist_rainbow')
+ parallel_coordinates(data, "Name", colormap="gist_rainbow")
.. ipython:: python
:suppress:
- plt.close('all')
+ plt.close("all")
Andrews curves charts:
@@ -1626,12 +1640,12 @@ Andrews curves charts:
plt.figure()
@savefig andrews_curve_winter.png
- andrews_curves(data, 'Name', colormap='winter')
+ andrews_curves(data, "Name", colormap="winter")
.. ipython:: python
:suppress:
- plt.close('all')
+ plt.close("all")
Plotting directly with matplotlib
---------------------------------
@@ -1655,23 +1669,24 @@ when plotting a large number of points.
.. ipython:: python
- price = pd.Series(np.random.randn(150).cumsum(),
- index=pd.date_range('2000-1-1', periods=150, freq='B'))
+ price = pd.Series(
+ np.random.randn(150).cumsum(),
+ index=pd.date_range("2000-1-1", periods=150, freq="B"),
+ )
ma = price.rolling(20).mean()
mstd = price.rolling(20).std()
plt.figure()
- plt.plot(price.index, price, 'k')
- plt.plot(ma.index, ma, 'b')
+ plt.plot(price.index, price, "k")
+ plt.plot(ma.index, ma, "b")
@savefig bollinger.png
- plt.fill_between(mstd.index, ma - 2 * mstd, ma + 2 * mstd,
- color='b', alpha=0.2)
+ plt.fill_between(mstd.index, ma - 2 * mstd, ma + 2 * mstd, color="b", alpha=0.2)
.. ipython:: python
:suppress:
- plt.close('all')
+ plt.close("all")
Plotting backends
-----------------
@@ -1685,21 +1700,21 @@ function. For example:
.. code-block:: python
- >>> Series([1, 2, 3]).plot(backend='backend.module')
+ >>> Series([1, 2, 3]).plot(backend="backend.module")
Alternatively, you can also set this option globally, do you don't need to specify
the keyword in each ``plot`` call. For example:
.. code-block:: python
- >>> pd.set_option('plotting.backend', 'backend.module')
+ >>> pd.set_option("plotting.backend", "backend.module")
>>> pd.Series([1, 2, 3]).plot()
Or:
.. code-block:: python
- >>> pd.options.plotting.backend = 'backend.module'
+ >>> pd.options.plotting.backend = "backend.module"
>>> pd.Series([1, 2, 3]).plot()
This would be more or less equivalent to:
| Addresses part of #36777
Ran blacken-tools and checked for warnings from flake8-rst for 4 additional doc files. | https://api.github.com/repos/pandas-dev/pandas/pulls/36821 | 2020-10-02T23:39:23Z | 2020-10-03T14:25:29Z | 2020-10-03T14:25:29Z | 2020-10-03T15:00:04Z |
Issue36124 display of int enums | diff --git a/pandas/_libs/lib.pyx b/pandas/_libs/lib.pyx
index 0b0334d52c1e9..599667a093290 100644
--- a/pandas/_libs/lib.pyx
+++ b/pandas/_libs/lib.pyx
@@ -2115,7 +2115,8 @@ def maybe_convert_numeric(ndarray[object] values, set na_values,
def maybe_convert_objects(ndarray[object] objects, bint try_float=False,
bint safe=False, bint convert_datetime=False,
bint convert_timedelta=False,
- bint convert_to_nullable_integer=False):
+ bint convert_to_nullable_integer=False,
+ bint convert_intenum=False):
"""
Type inference function-- convert object array to proper dtype
@@ -2217,6 +2218,9 @@ def maybe_convert_objects(ndarray[object] objects, bint try_float=False,
seen.object_ = True
break
elif util.is_integer_object(val):
+ if getattr(val, 'name', None) is not None and not convert_intenum:
+ seen.object_ = True
+ break
seen.int_ = True
floats[i] = <float64_t>val
complexes[i] = <double complex>val
diff --git a/pandas/_libs/tslibs/util.pxd b/pandas/_libs/tslibs/util.pxd
index 16d801f69df05..7cc51fea62afd 100644
--- a/pandas/_libs/tslibs/util.pxd
+++ b/pandas/_libs/tslibs/util.pxd
@@ -69,7 +69,7 @@ cdef inline bint is_integer_object(object obj) nogil:
Notes
-----
- This counts np.timedelta64 objects as integers.
+ This counts np.timedelta64 and IntEnums objects as integers.
"""
return (not PyBool_Check(obj) and PyArray_IsIntegerScalar(obj)
and not is_timedelta64_object(obj))
diff --git a/pandas/tests/dtypes/test_inference.py b/pandas/tests/dtypes/test_inference.py
index 438a22c99a4eb..ef5ef8d7a7d1c 100644
--- a/pandas/tests/dtypes/test_inference.py
+++ b/pandas/tests/dtypes/test_inference.py
@@ -7,6 +7,7 @@
from collections import namedtuple
from datetime import date, datetime, time, timedelta
from decimal import Decimal
+from enum import IntEnum
from fractions import Fraction
from io import StringIO
from numbers import Number
@@ -589,6 +590,24 @@ def test_maybe_convert_objects_bool_nan(self):
out = lib.maybe_convert_objects(ind.values, safe=1)
tm.assert_numpy_array_equal(out, exp)
+ def test_maybe_convert_objects_intenum(self):
+ class Colors(IntEnum):
+ red = 1
+ blue = 2
+
+ ind = Index([Colors.red, Colors.blue], dtype=object)
+ expected = np.array([Colors.red, Colors.blue], dtype=object)
+ result = lib.maybe_convert_objects(ind.values)
+
+ # by default, we should not convert IntEnums to ints
+ tm.assert_numpy_array_equal(result, expected)
+
+ expected = np.array([1, 2], dtype=np.int64)
+ result = lib.maybe_convert_objects(ind.values, convert_intenum=True)
+
+ # still coverts to int if convert_intenum set to True
+ tm.assert_numpy_array_equal(result, expected)
+
def test_mixed_dtypes_remain_object_array(self):
# GH14956
array = np.array([datetime(2015, 1, 1, tzinfo=pytz.utc), 1], dtype=object)
| - [x] closes #36124
- [x] tests added / passed
- [x] passes `black pandas`
- [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [ ] whatsnew entry
| https://api.github.com/repos/pandas-dev/pandas/pulls/36820 | 2020-10-02T23:33:30Z | 2021-04-11T00:51:02Z | null | 2022-08-31T14:30:50Z |
Backport PR #36675 on branch 1.1.x (REGR: Series.loc with a MultiIndex containing Timestamp raises InvalidIndexError) | diff --git a/doc/source/whatsnew/v1.1.3.rst b/doc/source/whatsnew/v1.1.3.rst
index 15777abcb8084..acf1dafc59885 100644
--- a/doc/source/whatsnew/v1.1.3.rst
+++ b/doc/source/whatsnew/v1.1.3.rst
@@ -37,6 +37,7 @@ Fixed regressions
- Fixed regression in modulo of :class:`Index`, :class:`Series` and :class:`DataFrame` using ``numexpr`` using C not Python semantics (:issue:`36047`, :issue:`36526`)
- Fixed regression in :meth:`read_excel` with ``engine="odf"`` caused ``UnboundLocalError`` in some cases where cells had nested child nodes (:issue:`36122`, :issue:`35802`)
- Fixed regression in :meth:`DataFrame.replace` inconsistent replace when using a float in the replace method (:issue:`35376`)
+- Fixed regression in :meth:`Series.loc` on a :class:`Series` with a :class:`MultiIndex` containing :class:`Timestamp` raising ``InvalidIndexError`` (:issue:`35858`)
- Fixed regression in :class:`DataFrame` and :class:`Series` comparisons between numeric arrays and strings (:issue:`35700`, :issue:`36377`)
- Fixed regression in :meth:`DataFrame.apply` with ``raw=True`` and user-function returning string (:issue:`35940`)
- Fixed regression when setting empty :class:`DataFrame` column to a :class:`Series` in preserving name of index in frame (:issue:`36527`)
diff --git a/pandas/core/indexing.py b/pandas/core/indexing.py
index 04d1dbceb3342..5a24addf46d93 100644
--- a/pandas/core/indexing.py
+++ b/pandas/core/indexing.py
@@ -1064,7 +1064,7 @@ def _handle_lowerdim_multi_index_axis0(self, tup: Tuple):
try:
# fast path for series or for tup devoid of slices
return self._get_label(tup, axis=axis)
- except TypeError:
+ except (TypeError, InvalidIndexError):
# slices are unhashable
pass
except KeyError as ek:
diff --git a/pandas/tests/indexing/multiindex/test_loc.py b/pandas/tests/indexing/multiindex/test_loc.py
index 63983f45d7832..95a23a9bcf63b 100644
--- a/pandas/tests/indexing/multiindex/test_loc.py
+++ b/pandas/tests/indexing/multiindex/test_loc.py
@@ -493,6 +493,16 @@ def test_loc_datetime_mask_slicing():
tm.assert_series_equal(result, expected)
+def test_loc_datetime_series_tuple_slicing():
+ # https://github.com/pandas-dev/pandas/issues/35858
+ date = pd.Timestamp("2000")
+ ser = pd.Series(
+ 1, index=pd.MultiIndex.from_tuples([("a", date)], names=["a", "b"]), name="c",
+ )
+ result = ser.loc[:, [date]]
+ tm.assert_series_equal(result, ser)
+
+
def test_loc_with_mi_indexer():
# https://github.com/pandas-dev/pandas/issues/35351
df = DataFrame(
| Backport PR #36675: REGR: Series.loc with a MultiIndex containing Timestamp raises InvalidIndexError | https://api.github.com/repos/pandas-dev/pandas/pulls/36818 | 2020-10-02T23:02:59Z | 2020-10-03T11:50:23Z | 2020-10-03T11:50:23Z | 2020-10-03T11:50:24Z |
DOC: update code style for remaining intro tutorial docs for #36777 | diff --git a/doc/source/getting_started/intro_tutorials/01_table_oriented.rst b/doc/source/getting_started/intro_tutorials/01_table_oriented.rst
index dc9bec2284aab..e8e0fef271a74 100644
--- a/doc/source/getting_started/intro_tutorials/01_table_oriented.rst
+++ b/doc/source/getting_started/intro_tutorials/01_table_oriented.rst
@@ -41,12 +41,16 @@ I want to store passenger data of the Titanic. For a number of passengers, I kno
.. ipython:: python
- df = pd.DataFrame({
- "Name": ["Braund, Mr. Owen Harris",
- "Allen, Mr. William Henry",
- "Bonnell, Miss. Elizabeth"],
- "Age": [22, 35, 58],
- "Sex": ["male", "male", "female"]}
+ df = pd.DataFrame(
+ {
+ "Name": [
+ "Braund, Mr. Owen Harris",
+ "Allen, Mr. William Henry",
+ "Bonnell, Miss. Elizabeth",
+ ],
+ "Age": [22, 35, 58],
+ "Sex": ["male", "male", "female"],
+ }
)
df
diff --git a/doc/source/getting_started/intro_tutorials/02_read_write.rst b/doc/source/getting_started/intro_tutorials/02_read_write.rst
index c6c6bfefc4303..c9b6a12904311 100644
--- a/doc/source/getting_started/intro_tutorials/02_read_write.rst
+++ b/doc/source/getting_started/intro_tutorials/02_read_write.rst
@@ -138,7 +138,7 @@ My colleague requested the Titanic data as a spreadsheet.
.. ipython:: python
- titanic.to_excel('titanic.xlsx', sheet_name='passengers', index=False)
+ titanic.to_excel("titanic.xlsx", sheet_name="passengers", index=False)
Whereas ``read_*`` functions are used to read data to pandas, the
``to_*`` methods are used to store data. The :meth:`~DataFrame.to_excel` method stores
@@ -156,7 +156,7 @@ The equivalent read function :meth:`~DataFrame.read_excel` will reload the data
.. ipython:: python
- titanic = pd.read_excel('titanic.xlsx', sheet_name='passengers')
+ titanic = pd.read_excel("titanic.xlsx", sheet_name="passengers")
.. ipython:: python
@@ -166,7 +166,8 @@ The equivalent read function :meth:`~DataFrame.read_excel` will reload the data
:suppress:
import os
- os.remove('titanic.xlsx')
+
+ os.remove("titanic.xlsx")
.. raw:: html
diff --git a/doc/source/getting_started/intro_tutorials/04_plotting.rst b/doc/source/getting_started/intro_tutorials/04_plotting.rst
index f3d99ee56359a..ae33a6e1fcd9e 100644
--- a/doc/source/getting_started/intro_tutorials/04_plotting.rst
+++ b/doc/source/getting_started/intro_tutorials/04_plotting.rst
@@ -40,8 +40,7 @@ in respectively Paris, Antwerp and London.
.. ipython:: python
- air_quality = pd.read_csv("data/air_quality_no2.csv",
- index_col=0, parse_dates=True)
+ air_quality = pd.read_csv("data/air_quality_no2.csv", index_col=0, parse_dates=True)
air_quality.head()
.. note::
@@ -112,9 +111,7 @@ I want to visually compare the :math:`N0_2` values measured in London versus Par
.. ipython:: python
@savefig 04_airqual_scatter.png
- air_quality.plot.scatter(x="station_london",
- y="station_paris",
- alpha=0.5)
+ air_quality.plot.scatter(x="station_london", y="station_paris", alpha=0.5)
.. raw:: html
@@ -127,8 +124,11 @@ standard Python to get an overview of the available plot methods:
.. ipython:: python
- [method_name for method_name in dir(air_quality.plot)
- if not method_name.startswith("_")]
+ [
+ method_name
+ for method_name in dir(air_quality.plot)
+ if not method_name.startswith("_")
+ ]
.. note::
In many development environments as well as ipython and
@@ -196,17 +196,18 @@ I want to further customize, extend or save the resulting plot.
.. ipython:: python
- fig, axs = plt.subplots(figsize=(12, 4));
- air_quality.plot.area(ax=axs);
+ fig, axs = plt.subplots(figsize=(12, 4))
+ air_quality.plot.area(ax=axs)
@savefig 04_airqual_customized.png
- axs.set_ylabel("NO$_2$ concentration");
+ axs.set_ylabel("NO$_2$ concentration")
fig.savefig("no2_concentrations.png")
.. ipython:: python
:suppress:
import os
- os.remove('no2_concentrations.png')
+
+ os.remove("no2_concentrations.png")
.. raw:: html
diff --git a/doc/source/getting_started/intro_tutorials/05_add_columns.rst b/doc/source/getting_started/intro_tutorials/05_add_columns.rst
index d4f6a8d6bb4a2..a99c2c49585c5 100644
--- a/doc/source/getting_started/intro_tutorials/05_add_columns.rst
+++ b/doc/source/getting_started/intro_tutorials/05_add_columns.rst
@@ -39,8 +39,7 @@ in respectively Paris, Antwerp and London.
.. ipython:: python
- air_quality = pd.read_csv("data/air_quality_no2.csv",
- index_col=0, parse_dates=True)
+ air_quality = pd.read_csv("data/air_quality_no2.csv", index_col=0, parse_dates=True)
air_quality.head()
.. raw:: html
@@ -95,8 +94,9 @@ I want to check the ratio of the values in Paris versus Antwerp and save the res
.. ipython:: python
- air_quality["ratio_paris_antwerp"] = \
+ air_quality["ratio_paris_antwerp"] = (
air_quality["station_paris"] / air_quality["station_antwerp"]
+ )
air_quality.head()
The calculation is again element-wise, so the ``/`` is applied *for the
@@ -122,9 +122,12 @@ I want to rename the data columns to the corresponding station identifiers used
.. ipython:: python
air_quality_renamed = air_quality.rename(
- columns={"station_antwerp": "BETR801",
- "station_paris": "FR04014",
- "station_london": "London Westminster"})
+ columns={
+ "station_antwerp": "BETR801",
+ "station_paris": "FR04014",
+ "station_london": "London Westminster",
+ }
+ )
.. ipython:: python
| Addresses part of #36777
Ran blacken-tools and checked for warnings from flake8-rst for the remaining 5 intro tutorials. Note that tutorial 3 met the formatting requirements as is, so there is no change there. | https://api.github.com/repos/pandas-dev/pandas/pulls/36817 | 2020-10-02T20:28:20Z | 2020-10-02T21:58:11Z | 2020-10-02T21:58:11Z | 2020-10-02T22:39:47Z |
Rolling on DataFrameGroupBy duplicated index column when part of the grouping cols is from index | diff --git a/doc/source/whatsnew/v1.2.0.rst b/doc/source/whatsnew/v1.2.0.rst
index 016e8d90e7d21..b5fca1ec1dc98 100644
--- a/doc/source/whatsnew/v1.2.0.rst
+++ b/doc/source/whatsnew/v1.2.0.rst
@@ -404,6 +404,7 @@ Groupby/resample/rolling
- Bug in :meth:`DataFrame.groupby` does not always maintain column index name for ``any``, ``all``, ``bfill``, ``ffill``, ``shift`` (:issue:`29764`)
- Bug in :meth:`DataFrameGroupBy.apply` raising error with ``np.nan`` group(s) when ``dropna=False`` (:issue:`35889`)
- Bug in :meth:`Rolling.sum()` returned wrong values when dtypes where mixed between float and integer and axis was equal to one (:issue:`20649`, :issue:`35596`)
+- Bug in :meth:`DataFrameGroupBy.rolling` duplicates index columns when a part of the grouping columns was from the index (:issue:`36794`)
Reshaping
^^^^^^^^^
diff --git a/pandas/core/window/rolling.py b/pandas/core/window/rolling.py
index 6ab42dda865e7..eebb3a2c81f86 100644
--- a/pandas/core/window/rolling.py
+++ b/pandas/core/window/rolling.py
@@ -2209,6 +2209,20 @@ def _apply(
grouped_index_name = [*grouped_object_index.names]
groupby_keys = [grouping.name for grouping in self._groupby.grouper._groupings]
result_index_names = groupby_keys + grouped_index_name
+ drop_levels = []
+ obj = self._groupby._selected_obj
+ # We have to handle a unnamed Series different, because we can not say, if
+ # grouping column was index or Series column.
+ if (
+ isinstance(obj, ABCDataFrame)
+ or obj.name is not None
+ or obj.index.names != [None]
+ ):
+ drop_levels = [
+ i for i, name in enumerate(groupby_keys) if name in grouped_index_name
+ ]
+ elif self._groupby.level is not None:
+ drop_levels = com.maybe_make_list(self._groupby.level)
result_index_data = []
for key, values in self._groupby.grouper.indices.items():
@@ -2222,6 +2236,7 @@ def _apply(
result_index = MultiIndex.from_tuples(
result_index_data, names=result_index_names
)
+ result_index = result_index.droplevel(drop_levels)
result.index = result_index
return result
diff --git a/pandas/tests/window/test_grouper.py b/pandas/tests/window/test_grouper.py
index 0eebd657e97b7..e7d9d81da75bf 100644
--- a/pandas/tests/window/test_grouper.py
+++ b/pandas/tests/window/test_grouper.py
@@ -416,3 +416,41 @@ def test_groupby_rolling_empty_frame(self):
result = expected.groupby(["s1", "s2"]).rolling(window=1).sum()
expected.index = pd.MultiIndex.from_tuples([], names=["s1", "s2", None])
tm.assert_frame_equal(result, expected)
+
+ @pytest.mark.parametrize("groupings", [{"level": 0}, {"by": "groupby_col"}])
+ def test_groupby_rolling_index_in_by_columns(self, groupings):
+ # GH: 36794
+ df = pd.DataFrame(
+ {"groupby_col": [1, 1, 1, 2, 2, 2], "agg_col": [1, 1, 0, 0, 1, 0]}
+ ).set_index("groupby_col")
+ result = df.groupby(**groupings).rolling(2).sum()
+ expected = pd.DataFrame(
+ {
+ "groupby_col": [1, 1, 1, 2, 2, 2],
+ "agg_col": [np.nan, 2.0, 1.0, np.nan, 1.0, 1.0],
+ }
+ ).set_index("groupby_col")
+ tm.assert_frame_equal(result, expected)
+
+ @pytest.mark.parametrize("index_name", ["index", None])
+ def test_groupby_rolling_unnamed_series(self, index_name):
+ # GH: 36794
+ ds = pd.Series([1, 2, 3, 4], index=pd.Index([0, 1, 2, 3], name=index_name))
+ result = ds.groupby(ds).rolling(2).sum()
+ expected = pd.Series(
+ [np.nan] * 4,
+ index=pd.MultiIndex.from_tuples(
+ [(1, 0), (2, 1), (3, 2), (4, 3)], names=[None, index_name]
+ ),
+ )
+ tm.assert_series_equal(result, expected)
+
+ @pytest.mark.parametrize("groupings", [{"level": 0}, {"by": "index"}])
+ def test_groupby_rolling_index_of_unnamed_series(self, groupings):
+ # GH: 36794
+ ds = pd.Series([1, 2, 3, 4], index=pd.Index([0, 0, 1, 1], name="index"))
+ result = ds.groupby(**groupings).rolling(2).sum()
+ expected = pd.Series(
+ [np.nan, 3.0, np.nan, 7.0], index=pd.Index([0, 0, 1, 1], name="index")
+ )
+ tm.assert_series_equal(result, expected)
diff --git a/pandas/tests/window/test_rolling.py b/pandas/tests/window/test_rolling.py
index 9ac4871ad24a1..a9ada97227205 100644
--- a/pandas/tests/window/test_rolling.py
+++ b/pandas/tests/window/test_rolling.py
@@ -675,9 +675,7 @@ def test_iter_rolling_datetime(expected, expected_index, window):
[
(
{"level": 0},
- pd.MultiIndex.from_tuples(
- [(0, 0), (0, 0), (1, 1), (1, 1), (1, 1)], names=[None, None]
- ),
+ pd.Index([0, 0, 1, 1, 1]),
),
(
{"by": "X"},
| …grouping cols is from index
- [x] closes #36794
- [x] tests added / passed
- [x] passes `black pandas`
- [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [x] whatsnew entry
Could not find a elegant way, to avoid adding the components in the for loop, so I dropped it from the resulting MultiIndex again.
Had to change a test, which depended on the wrong behavior.
cc @mroeschke | https://api.github.com/repos/pandas-dev/pandas/pulls/36816 | 2020-10-02T19:29:15Z | 2020-10-02T23:35:23Z | null | 2020-10-03T02:26:21Z |
DOC: uses black to fix formatting #36777 | diff --git a/doc/source/user_guide/merging.rst b/doc/source/user_guide/merging.rst
index aee56a2565310..8dbfc261e6fa8 100644
--- a/doc/source/user_guide/merging.rst
+++ b/doc/source/user_guide/merging.rst
@@ -7,6 +7,7 @@
from matplotlib import pyplot as plt
import pandas.util._doctools as doctools
+
p = doctools.TablePlotter()
@@ -38,23 +39,35 @@ a simple example:
.. ipython:: python
- df1 = pd.DataFrame({'A': ['A0', 'A1', 'A2', 'A3'],
- 'B': ['B0', 'B1', 'B2', 'B3'],
- 'C': ['C0', 'C1', 'C2', 'C3'],
- 'D': ['D0', 'D1', 'D2', 'D3']},
- index=[0, 1, 2, 3])
+ df1 = pd.DataFrame(
+ {
+ "A": ["A0", "A1", "A2", "A3"],
+ "B": ["B0", "B1", "B2", "B3"],
+ "C": ["C0", "C1", "C2", "C3"],
+ "D": ["D0", "D1", "D2", "D3"],
+ },
+ index=[0, 1, 2, 3],
+ )
- df2 = pd.DataFrame({'A': ['A4', 'A5', 'A6', 'A7'],
- 'B': ['B4', 'B5', 'B6', 'B7'],
- 'C': ['C4', 'C5', 'C6', 'C7'],
- 'D': ['D4', 'D5', 'D6', 'D7']},
- index=[4, 5, 6, 7])
+ df2 = pd.DataFrame(
+ {
+ "A": ["A4", "A5", "A6", "A7"],
+ "B": ["B4", "B5", "B6", "B7"],
+ "C": ["C4", "C5", "C6", "C7"],
+ "D": ["D4", "D5", "D6", "D7"],
+ },
+ index=[4, 5, 6, 7],
+ )
- df3 = pd.DataFrame({'A': ['A8', 'A9', 'A10', 'A11'],
- 'B': ['B8', 'B9', 'B10', 'B11'],
- 'C': ['C8', 'C9', 'C10', 'C11'],
- 'D': ['D8', 'D9', 'D10', 'D11']},
- index=[8, 9, 10, 11])
+ df3 = pd.DataFrame(
+ {
+ "A": ["A8", "A9", "A10", "A11"],
+ "B": ["B8", "B9", "B10", "B11"],
+ "C": ["C8", "C9", "C10", "C11"],
+ "D": ["D8", "D9", "D10", "D11"],
+ },
+ index=[8, 9, 10, 11],
+ )
frames = [df1, df2, df3]
result = pd.concat(frames)
@@ -109,7 +122,7 @@ with each of the pieces of the chopped up DataFrame. We can do this using the
.. ipython:: python
- result = pd.concat(frames, keys=['x', 'y', 'z'])
+ result = pd.concat(frames, keys=["x", "y", "z"])
.. ipython:: python
:suppress:
@@ -125,7 +138,7 @@ means that we can now select out each chunk by key:
.. ipython:: python
- result.loc['y']
+ result.loc["y"]
It's not a stretch to see how this can be very useful. More detail on this
functionality below.
@@ -158,10 +171,14 @@ behavior:
.. ipython:: python
- df4 = pd.DataFrame({'B': ['B2', 'B3', 'B6', 'B7'],
- 'D': ['D2', 'D3', 'D6', 'D7'],
- 'F': ['F2', 'F3', 'F6', 'F7']},
- index=[2, 3, 6, 7])
+ df4 = pd.DataFrame(
+ {
+ "B": ["B2", "B3", "B6", "B7"],
+ "D": ["D2", "D3", "D6", "D7"],
+ "F": ["F2", "F3", "F6", "F7"],
+ },
+ index=[2, 3, 6, 7],
+ )
result = pd.concat([df1, df4], axis=1, sort=False)
@@ -184,7 +201,7 @@ Here is the same thing with ``join='inner'``:
.. ipython:: python
- result = pd.concat([df1, df4], axis=1, join='inner')
+ result = pd.concat([df1, df4], axis=1, join="inner")
.. ipython:: python
:suppress:
@@ -316,7 +333,7 @@ the name of the ``Series``.
.. ipython:: python
- s1 = pd.Series(['X0', 'X1', 'X2', 'X3'], name='X')
+ s1 = pd.Series(["X0", "X1", "X2", "X3"], name="X")
result = pd.concat([df1, s1], axis=1)
.. ipython:: python
@@ -338,7 +355,7 @@ If unnamed ``Series`` are passed they will be numbered consecutively.
.. ipython:: python
- s2 = pd.Series(['_0', '_1', '_2', '_3'])
+ s2 = pd.Series(["_0", "_1", "_2", "_3"])
result = pd.concat([df1, s2, s2, s2], axis=1)
.. ipython:: python
@@ -373,7 +390,7 @@ inherit the parent ``Series``' name, when these existed.
.. ipython:: python
- s3 = pd.Series([0, 1, 2, 3], name='foo')
+ s3 = pd.Series([0, 1, 2, 3], name="foo")
s4 = pd.Series([0, 1, 2, 3])
s5 = pd.Series([0, 1, 4, 5])
@@ -383,13 +400,13 @@ Through the ``keys`` argument we can override the existing column names.
.. ipython:: python
- pd.concat([s3, s4, s5], axis=1, keys=['red', 'blue', 'yellow'])
+ pd.concat([s3, s4, s5], axis=1, keys=["red", "blue", "yellow"])
Let's consider a variation of the very first example presented:
.. ipython:: python
- result = pd.concat(frames, keys=['x', 'y', 'z'])
+ result = pd.concat(frames, keys=["x", "y", "z"])
.. ipython:: python
:suppress:
@@ -404,7 +421,7 @@ for the ``keys`` argument (unless other keys are specified):
.. ipython:: python
- pieces = {'x': df1, 'y': df2, 'z': df3}
+ pieces = {"x": df1, "y": df2, "z": df3}
result = pd.concat(pieces)
.. ipython:: python
@@ -417,7 +434,7 @@ for the ``keys`` argument (unless other keys are specified):
.. ipython:: python
- result = pd.concat(pieces, keys=['z', 'y'])
+ result = pd.concat(pieces, keys=["z", "y"])
.. ipython:: python
:suppress:
@@ -439,9 +456,9 @@ do so using the ``levels`` argument:
.. ipython:: python
- result = pd.concat(pieces, keys=['x', 'y', 'z'],
- levels=[['z', 'y', 'x', 'w']],
- names=['group_key'])
+ result = pd.concat(
+ pieces, keys=["x", "y", "z"], levels=[["z", "y", "x", "w"]], names=["group_key"]
+ )
.. ipython:: python
:suppress:
@@ -469,7 +486,7 @@ append a single row to a ``DataFrame`` by passing a ``Series`` or dict to
.. ipython:: python
- s2 = pd.Series(['X0', 'X1', 'X2', 'X3'], index=['A', 'B', 'C', 'D'])
+ s2 = pd.Series(["X0", "X1", "X2", "X3"], index=["A", "B", "C", "D"])
result = df1.append(s2, ignore_index=True)
.. ipython:: python
@@ -488,8 +505,7 @@ You can also pass a list of dicts or Series:
.. ipython:: python
- dicts = [{'A': 1, 'B': 2, 'C': 3, 'X': 4},
- {'A': 5, 'B': 6, 'C': 7, 'Y': 8}]
+ dicts = [{"A": 1, "B": 2, "C": 3, "X": 4}, {"A": 5, "B": 6, "C": 7, "Y": 8}]
result = df1.append(dicts, ignore_index=True, sort=False)
.. ipython:: python
@@ -619,14 +635,22 @@ key combination:
.. ipython:: python
- left = pd.DataFrame({'key': ['K0', 'K1', 'K2', 'K3'],
- 'A': ['A0', 'A1', 'A2', 'A3'],
- 'B': ['B0', 'B1', 'B2', 'B3']})
+ left = pd.DataFrame(
+ {
+ "key": ["K0", "K1", "K2", "K3"],
+ "A": ["A0", "A1", "A2", "A3"],
+ "B": ["B0", "B1", "B2", "B3"],
+ }
+ )
- right = pd.DataFrame({'key': ['K0', 'K1', 'K2', 'K3'],
- 'C': ['C0', 'C1', 'C2', 'C3'],
- 'D': ['D0', 'D1', 'D2', 'D3']})
- result = pd.merge(left, right, on='key')
+ right = pd.DataFrame(
+ {
+ "key": ["K0", "K1", "K2", "K3"],
+ "C": ["C0", "C1", "C2", "C3"],
+ "D": ["D0", "D1", "D2", "D3"],
+ }
+ )
+ result = pd.merge(left, right, on="key")
.. ipython:: python
:suppress:
@@ -642,17 +666,25 @@ appearing in ``left`` and ``right`` are present (the intersection), since
.. ipython:: python
- left = pd.DataFrame({'key1': ['K0', 'K0', 'K1', 'K2'],
- 'key2': ['K0', 'K1', 'K0', 'K1'],
- 'A': ['A0', 'A1', 'A2', 'A3'],
- 'B': ['B0', 'B1', 'B2', 'B3']})
+ left = pd.DataFrame(
+ {
+ "key1": ["K0", "K0", "K1", "K2"],
+ "key2": ["K0", "K1", "K0", "K1"],
+ "A": ["A0", "A1", "A2", "A3"],
+ "B": ["B0", "B1", "B2", "B3"],
+ }
+ )
- right = pd.DataFrame({'key1': ['K0', 'K1', 'K1', 'K2'],
- 'key2': ['K0', 'K0', 'K0', 'K0'],
- 'C': ['C0', 'C1', 'C2', 'C3'],
- 'D': ['D0', 'D1', 'D2', 'D3']})
+ right = pd.DataFrame(
+ {
+ "key1": ["K0", "K1", "K1", "K2"],
+ "key2": ["K0", "K0", "K0", "K0"],
+ "C": ["C0", "C1", "C2", "C3"],
+ "D": ["D0", "D1", "D2", "D3"],
+ }
+ )
- result = pd.merge(left, right, on=['key1', 'key2'])
+ result = pd.merge(left, right, on=["key1", "key2"])
.. ipython:: python
:suppress:
@@ -678,7 +710,7 @@ either the left or right tables, the values in the joined table will be
.. ipython:: python
- result = pd.merge(left, right, how='left', on=['key1', 'key2'])
+ result = pd.merge(left, right, how="left", on=["key1", "key2"])
.. ipython:: python
:suppress:
@@ -690,7 +722,7 @@ either the left or right tables, the values in the joined table will be
.. ipython:: python
- result = pd.merge(left, right, how='right', on=['key1', 'key2'])
+ result = pd.merge(left, right, how="right", on=["key1", "key2"])
.. ipython:: python
:suppress:
@@ -701,7 +733,7 @@ either the left or right tables, the values in the joined table will be
.. ipython:: python
- result = pd.merge(left, right, how='outer', on=['key1', 'key2'])
+ result = pd.merge(left, right, how="outer", on=["key1", "key2"])
.. ipython:: python
:suppress:
@@ -713,7 +745,7 @@ either the left or right tables, the values in the joined table will be
.. ipython:: python
- result = pd.merge(left, right, how='inner', on=['key1', 'key2'])
+ result = pd.merge(left, right, how="inner", on=["key1", "key2"])
.. ipython:: python
:suppress:
@@ -741,18 +773,18 @@ as shown in the following example.
)
ser
- pd.merge(df, ser.reset_index(), on=['Let', 'Num'])
+ pd.merge(df, ser.reset_index(), on=["Let", "Num"])
Here is another example with duplicate join keys in DataFrames:
.. ipython:: python
- left = pd.DataFrame({'A': [1, 2], 'B': [2, 2]})
+ left = pd.DataFrame({"A": [1, 2], "B": [2, 2]})
- right = pd.DataFrame({'A': [4, 5, 6], 'B': [2, 2, 2]})
+ right = pd.DataFrame({"A": [4, 5, 6], "B": [2, 2, 2]})
- result = pd.merge(left, right, on='B', how='outer')
+ result = pd.merge(left, right, on="B", how="outer")
.. ipython:: python
:suppress:
@@ -784,8 +816,8 @@ In the following example, there are duplicate values of ``B`` in the right
.. ipython:: python
- left = pd.DataFrame({'A' : [1,2], 'B' : [1, 2]})
- right = pd.DataFrame({'A' : [4,5,6], 'B': [2, 2, 2]})
+ left = pd.DataFrame({"A": [1, 2], "B": [1, 2]})
+ right = pd.DataFrame({"A": [4, 5, 6], "B": [2, 2, 2]})
.. code-block:: ipython
@@ -799,7 +831,7 @@ ensure there are no duplicates in the left DataFrame, one can use the
.. ipython:: python
- pd.merge(left, right, on='B', how='outer', validate="one_to_many")
+ pd.merge(left, right, on="B", how="outer", validate="one_to_many")
.. _merging.indicator:
@@ -821,15 +853,15 @@ that takes on values:
.. ipython:: python
- df1 = pd.DataFrame({'col1': [0, 1], 'col_left': ['a', 'b']})
- df2 = pd.DataFrame({'col1': [1, 2, 2], 'col_right': [2, 2, 2]})
- pd.merge(df1, df2, on='col1', how='outer', indicator=True)
+ df1 = pd.DataFrame({"col1": [0, 1], "col_left": ["a", "b"]})
+ df2 = pd.DataFrame({"col1": [1, 2, 2], "col_right": [2, 2, 2]})
+ pd.merge(df1, df2, on="col1", how="outer", indicator=True)
The ``indicator`` argument will also accept string arguments, in which case the indicator function will use the value of the passed string as the name for the indicator column.
.. ipython:: python
- pd.merge(df1, df2, on='col1', how='outer', indicator='indicator_column')
+ pd.merge(df1, df2, on="col1", how="outer", indicator="indicator_column")
.. _merging.dtypes:
@@ -841,25 +873,25 @@ Merging will preserve the dtype of the join keys.
.. ipython:: python
- left = pd.DataFrame({'key': [1], 'v1': [10]})
+ left = pd.DataFrame({"key": [1], "v1": [10]})
left
- right = pd.DataFrame({'key': [1, 2], 'v1': [20, 30]})
+ right = pd.DataFrame({"key": [1, 2], "v1": [20, 30]})
right
We are able to preserve the join keys:
.. ipython:: python
- pd.merge(left, right, how='outer')
- pd.merge(left, right, how='outer').dtypes
+ pd.merge(left, right, how="outer")
+ pd.merge(left, right, how="outer").dtypes
Of course if you have missing values that are introduced, then the
resulting dtype will be upcast.
.. ipython:: python
- pd.merge(left, right, how='outer', on='key')
- pd.merge(left, right, how='outer', on='key').dtypes
+ pd.merge(left, right, how="outer", on="key")
+ pd.merge(left, right, how="outer", on="key").dtypes
Merging will preserve ``category`` dtypes of the mergands. See also the section on :ref:`categoricals <categorical.merge>`.
@@ -869,12 +901,12 @@ The left frame.
from pandas.api.types import CategoricalDtype
- X = pd.Series(np.random.choice(['foo', 'bar'], size=(10,)))
- X = X.astype(CategoricalDtype(categories=['foo', 'bar']))
+ X = pd.Series(np.random.choice(["foo", "bar"], size=(10,)))
+ X = X.astype(CategoricalDtype(categories=["foo", "bar"]))
- left = pd.DataFrame({'X': X,
- 'Y': np.random.choice(['one', 'two', 'three'],
- size=(10,))})
+ left = pd.DataFrame(
+ {"X": X, "Y": np.random.choice(["one", "two", "three"], size=(10,))}
+ )
left
left.dtypes
@@ -882,9 +914,12 @@ The right frame.
.. ipython:: python
- right = pd.DataFrame({'X': pd.Series(['foo', 'bar'],
- dtype=CategoricalDtype(['foo', 'bar'])),
- 'Z': [1, 2]})
+ right = pd.DataFrame(
+ {
+ "X": pd.Series(["foo", "bar"], dtype=CategoricalDtype(["foo", "bar"])),
+ "Z": [1, 2],
+ }
+ )
right
right.dtypes
@@ -892,7 +927,7 @@ The merged result:
.. ipython:: python
- result = pd.merge(left, right, how='outer')
+ result = pd.merge(left, right, how="outer")
result
result.dtypes
@@ -916,13 +951,13 @@ potentially differently-indexed ``DataFrames`` into a single result
.. ipython:: python
- left = pd.DataFrame({'A': ['A0', 'A1', 'A2'],
- 'B': ['B0', 'B1', 'B2']},
- index=['K0', 'K1', 'K2'])
+ left = pd.DataFrame(
+ {"A": ["A0", "A1", "A2"], "B": ["B0", "B1", "B2"]}, index=["K0", "K1", "K2"]
+ )
- right = pd.DataFrame({'C': ['C0', 'C2', 'C3'],
- 'D': ['D0', 'D2', 'D3']},
- index=['K0', 'K2', 'K3'])
+ right = pd.DataFrame(
+ {"C": ["C0", "C2", "C3"], "D": ["D0", "D2", "D3"]}, index=["K0", "K2", "K3"]
+ )
result = left.join(right)
@@ -936,7 +971,7 @@ potentially differently-indexed ``DataFrames`` into a single result
.. ipython:: python
- result = left.join(right, how='outer')
+ result = left.join(right, how="outer")
.. ipython:: python
:suppress:
@@ -950,7 +985,7 @@ The same as above, but with ``how='inner'``.
.. ipython:: python
- result = left.join(right, how='inner')
+ result = left.join(right, how="inner")
.. ipython:: python
:suppress:
@@ -966,7 +1001,7 @@ indexes:
.. ipython:: python
- result = pd.merge(left, right, left_index=True, right_index=True, how='outer')
+ result = pd.merge(left, right, left_index=True, right_index=True, how="outer")
.. ipython:: python
:suppress:
@@ -978,7 +1013,7 @@ indexes:
.. ipython:: python
- result = pd.merge(left, right, left_index=True, right_index=True, how='inner');
+ result = pd.merge(left, right, left_index=True, right_index=True, how="inner")
.. ipython:: python
:suppress:
@@ -1008,15 +1043,17 @@ join key), using ``join`` may be more convenient. Here is a simple example:
.. ipython:: python
- left = pd.DataFrame({'A': ['A0', 'A1', 'A2', 'A3'],
- 'B': ['B0', 'B1', 'B2', 'B3'],
- 'key': ['K0', 'K1', 'K0', 'K1']})
+ left = pd.DataFrame(
+ {
+ "A": ["A0", "A1", "A2", "A3"],
+ "B": ["B0", "B1", "B2", "B3"],
+ "key": ["K0", "K1", "K0", "K1"],
+ }
+ )
- right = pd.DataFrame({'C': ['C0', 'C1'],
- 'D': ['D0', 'D1']},
- index=['K0', 'K1'])
+ right = pd.DataFrame({"C": ["C0", "C1"], "D": ["D0", "D1"]}, index=["K0", "K1"])
- result = left.join(right, on='key')
+ result = left.join(right, on="key")
.. ipython:: python
:suppress:
@@ -1028,8 +1065,7 @@ join key), using ``join`` may be more convenient. Here is a simple example:
.. ipython:: python
- result = pd.merge(left, right, left_on='key', right_index=True,
- how='left', sort=False);
+ result = pd.merge(left, right, left_on="key", right_index=True, how="left", sort=False)
.. ipython:: python
:suppress:
@@ -1045,22 +1081,27 @@ To join on multiple keys, the passed DataFrame must have a ``MultiIndex``:
.. ipython:: python
- left = pd.DataFrame({'A': ['A0', 'A1', 'A2', 'A3'],
- 'B': ['B0', 'B1', 'B2', 'B3'],
- 'key1': ['K0', 'K0', 'K1', 'K2'],
- 'key2': ['K0', 'K1', 'K0', 'K1']})
+ left = pd.DataFrame(
+ {
+ "A": ["A0", "A1", "A2", "A3"],
+ "B": ["B0", "B1", "B2", "B3"],
+ "key1": ["K0", "K0", "K1", "K2"],
+ "key2": ["K0", "K1", "K0", "K1"],
+ }
+ )
- index = pd.MultiIndex.from_tuples([('K0', 'K0'), ('K1', 'K0'),
- ('K2', 'K0'), ('K2', 'K1')])
- right = pd.DataFrame({'C': ['C0', 'C1', 'C2', 'C3'],
- 'D': ['D0', 'D1', 'D2', 'D3']},
- index=index)
+ index = pd.MultiIndex.from_tuples(
+ [("K0", "K0"), ("K1", "K0"), ("K2", "K0"), ("K2", "K1")]
+ )
+ right = pd.DataFrame(
+ {"C": ["C0", "C1", "C2", "C3"], "D": ["D0", "D1", "D2", "D3"]}, index=index
+ )
Now this can be joined by passing the two key column names:
.. ipython:: python
- result = left.join(right, on=['key1', 'key2'])
+ result = left.join(right, on=["key1", "key2"])
.. ipython:: python
:suppress:
@@ -1079,7 +1120,7 @@ easily performed:
.. ipython:: python
- result = left.join(right, on=['key1', 'key2'], how='inner')
+ result = left.join(right, on=["key1", "key2"], how="inner")
.. ipython:: python
:suppress:
@@ -1149,39 +1190,38 @@ the left argument, as in this example:
.. ipython:: python
- leftindex = pd.MultiIndex.from_product([list('abc'), list('xy'), [1, 2]],
- names=['abc', 'xy', 'num'])
- left = pd.DataFrame({'v1': range(12)}, index=leftindex)
+ leftindex = pd.MultiIndex.from_product(
+ [list("abc"), list("xy"), [1, 2]], names=["abc", "xy", "num"]
+ )
+ left = pd.DataFrame({"v1": range(12)}, index=leftindex)
left
- rightindex = pd.MultiIndex.from_product([list('abc'), list('xy')],
- names=['abc', 'xy'])
- right = pd.DataFrame({'v2': [100 * i for i in range(1, 7)]}, index=rightindex)
+ rightindex = pd.MultiIndex.from_product([list("abc"), list("xy")], names=["abc", "xy"])
+ right = pd.DataFrame({"v2": [100 * i for i in range(1, 7)]}, index=rightindex)
right
- left.join(right, on=['abc', 'xy'], how='inner')
+ left.join(right, on=["abc", "xy"], how="inner")
If that condition is not satisfied, a join with two multi-indexes can be
done using the following code.
.. ipython:: python
- leftindex = pd.MultiIndex.from_tuples([('K0', 'X0'), ('K0', 'X1'),
- ('K1', 'X2')],
- names=['key', 'X'])
- left = pd.DataFrame({'A': ['A0', 'A1', 'A2'],
- 'B': ['B0', 'B1', 'B2']},
- index=leftindex)
+ leftindex = pd.MultiIndex.from_tuples(
+ [("K0", "X0"), ("K0", "X1"), ("K1", "X2")], names=["key", "X"]
+ )
+ left = pd.DataFrame({"A": ["A0", "A1", "A2"], "B": ["B0", "B1", "B2"]}, index=leftindex)
- rightindex = pd.MultiIndex.from_tuples([('K0', 'Y0'), ('K1', 'Y1'),
- ('K2', 'Y2'), ('K2', 'Y3')],
- names=['key', 'Y'])
- right = pd.DataFrame({'C': ['C0', 'C1', 'C2', 'C3'],
- 'D': ['D0', 'D1', 'D2', 'D3']},
- index=rightindex)
+ rightindex = pd.MultiIndex.from_tuples(
+ [("K0", "Y0"), ("K1", "Y1"), ("K2", "Y2"), ("K2", "Y3")], names=["key", "Y"]
+ )
+ right = pd.DataFrame(
+ {"C": ["C0", "C1", "C2", "C3"], "D": ["D0", "D1", "D2", "D3"]}, index=rightindex
+ )
- result = pd.merge(left.reset_index(), right.reset_index(),
- on=['key'], how='inner').set_index(['key', 'X', 'Y'])
+ result = pd.merge(
+ left.reset_index(), right.reset_index(), on=["key"], how="inner"
+ ).set_index(["key", "X", "Y"])
.. ipython:: python
:suppress:
@@ -1203,21 +1243,29 @@ resetting indexes.
.. ipython:: python
- left_index = pd.Index(['K0', 'K0', 'K1', 'K2'], name='key1')
+ left_index = pd.Index(["K0", "K0", "K1", "K2"], name="key1")
- left = pd.DataFrame({'A': ['A0', 'A1', 'A2', 'A3'],
- 'B': ['B0', 'B1', 'B2', 'B3'],
- 'key2': ['K0', 'K1', 'K0', 'K1']},
- index=left_index)
+ left = pd.DataFrame(
+ {
+ "A": ["A0", "A1", "A2", "A3"],
+ "B": ["B0", "B1", "B2", "B3"],
+ "key2": ["K0", "K1", "K0", "K1"],
+ },
+ index=left_index,
+ )
- right_index = pd.Index(['K0', 'K1', 'K2', 'K2'], name='key1')
+ right_index = pd.Index(["K0", "K1", "K2", "K2"], name="key1")
- right = pd.DataFrame({'C': ['C0', 'C1', 'C2', 'C3'],
- 'D': ['D0', 'D1', 'D2', 'D3'],
- 'key2': ['K0', 'K0', 'K0', 'K1']},
- index=right_index)
+ right = pd.DataFrame(
+ {
+ "C": ["C0", "C1", "C2", "C3"],
+ "D": ["D0", "D1", "D2", "D3"],
+ "key2": ["K0", "K0", "K0", "K1"],
+ },
+ index=right_index,
+ )
- result = left.merge(right, on=['key1', 'key2'])
+ result = left.merge(right, on=["key1", "key2"])
.. ipython:: python
:suppress:
@@ -1254,10 +1302,10 @@ columns:
.. ipython:: python
- left = pd.DataFrame({'k': ['K0', 'K1', 'K2'], 'v': [1, 2, 3]})
- right = pd.DataFrame({'k': ['K0', 'K0', 'K3'], 'v': [4, 5, 6]})
+ left = pd.DataFrame({"k": ["K0", "K1", "K2"], "v": [1, 2, 3]})
+ right = pd.DataFrame({"k": ["K0", "K0", "K3"], "v": [4, 5, 6]})
- result = pd.merge(left, right, on='k')
+ result = pd.merge(left, right, on="k")
.. ipython:: python
:suppress:
@@ -1269,7 +1317,7 @@ columns:
.. ipython:: python
- result = pd.merge(left, right, on='k', suffixes=('_l', '_r'))
+ result = pd.merge(left, right, on="k", suffixes=("_l", "_r"))
.. ipython:: python
:suppress:
@@ -1284,9 +1332,9 @@ similarly.
.. ipython:: python
- left = left.set_index('k')
- right = right.set_index('k')
- result = left.join(right, lsuffix='_l', rsuffix='_r')
+ left = left.set_index("k")
+ right = right.set_index("k")
+ result = left.join(right, lsuffix="_l", rsuffix="_r")
.. ipython:: python
:suppress:
@@ -1306,7 +1354,7 @@ to join them together on their indexes.
.. ipython:: python
- right2 = pd.DataFrame({'v': [7, 8, 9]}, index=['K1', 'K1', 'K2'])
+ right2 = pd.DataFrame({"v": [7, 8, 9]}, index=["K1", "K1", "K2"])
result = left.join([right, right2])
.. ipython:: python
@@ -1328,10 +1376,8 @@ one object from values for matching indices in the other. Here is an example:
.. ipython:: python
- df1 = pd.DataFrame([[np.nan, 3., 5.], [-4.6, np.nan, np.nan],
- [np.nan, 7., np.nan]])
- df2 = pd.DataFrame([[-42.6, np.nan, -8.2], [-5., 1.6, 4]],
- index=[1, 2])
+ df1 = pd.DataFrame([[np.nan, 3.0, 5.0], [-4.6, np.nan, np.nan], [np.nan, 7.0, np.nan]])
+ df2 = pd.DataFrame([[-42.6, np.nan, -8.2], [-5.0, 1.6, 4]], index=[1, 2])
For this, use the :meth:`~DataFrame.combine_first` method:
@@ -1384,14 +1430,13 @@ fill/interpolate missing data:
.. ipython:: python
- left = pd.DataFrame({'k': ['K0', 'K1', 'K1', 'K2'],
- 'lv': [1, 2, 3, 4],
- 's': ['a', 'b', 'c', 'd']})
+ left = pd.DataFrame(
+ {"k": ["K0", "K1", "K1", "K2"], "lv": [1, 2, 3, 4], "s": ["a", "b", "c", "d"]}
+ )
- right = pd.DataFrame({'k': ['K1', 'K2', 'K4'],
- 'rv': [1, 2, 3]})
+ right = pd.DataFrame({"k": ["K1", "K2", "K4"], "rv": [1, 2, 3]})
- pd.merge_ordered(left, right, fill_method='ffill', left_by='s')
+ pd.merge_ordered(left, right, fill_method="ffill", left_by="s")
.. _merging.merge_asof:
@@ -1411,37 +1456,44 @@ merge them.
.. ipython:: python
- trades = pd.DataFrame({
- 'time': pd.to_datetime(['20160525 13:30:00.023',
- '20160525 13:30:00.038',
- '20160525 13:30:00.048',
- '20160525 13:30:00.048',
- '20160525 13:30:00.048']),
- 'ticker': ['MSFT', 'MSFT',
- 'GOOG', 'GOOG', 'AAPL'],
- 'price': [51.95, 51.95,
- 720.77, 720.92, 98.00],
- 'quantity': [75, 155,
- 100, 100, 100]},
- columns=['time', 'ticker', 'price', 'quantity'])
-
- quotes = pd.DataFrame({
- 'time': pd.to_datetime(['20160525 13:30:00.023',
- '20160525 13:30:00.023',
- '20160525 13:30:00.030',
- '20160525 13:30:00.041',
- '20160525 13:30:00.048',
- '20160525 13:30:00.049',
- '20160525 13:30:00.072',
- '20160525 13:30:00.075']),
- 'ticker': ['GOOG', 'MSFT', 'MSFT',
- 'MSFT', 'GOOG', 'AAPL', 'GOOG',
- 'MSFT'],
- 'bid': [720.50, 51.95, 51.97, 51.99,
- 720.50, 97.99, 720.50, 52.01],
- 'ask': [720.93, 51.96, 51.98, 52.00,
- 720.93, 98.01, 720.88, 52.03]},
- columns=['time', 'ticker', 'bid', 'ask'])
+ trades = pd.DataFrame(
+ {
+ "time": pd.to_datetime(
+ [
+ "20160525 13:30:00.023",
+ "20160525 13:30:00.038",
+ "20160525 13:30:00.048",
+ "20160525 13:30:00.048",
+ "20160525 13:30:00.048",
+ ]
+ ),
+ "ticker": ["MSFT", "MSFT", "GOOG", "GOOG", "AAPL"],
+ "price": [51.95, 51.95, 720.77, 720.92, 98.00],
+ "quantity": [75, 155, 100, 100, 100],
+ },
+ columns=["time", "ticker", "price", "quantity"],
+ )
+
+ quotes = pd.DataFrame(
+ {
+ "time": pd.to_datetime(
+ [
+ "20160525 13:30:00.023",
+ "20160525 13:30:00.023",
+ "20160525 13:30:00.030",
+ "20160525 13:30:00.041",
+ "20160525 13:30:00.048",
+ "20160525 13:30:00.049",
+ "20160525 13:30:00.072",
+ "20160525 13:30:00.075",
+ ]
+ ),
+ "ticker": ["GOOG", "MSFT", "MSFT", "MSFT", "GOOG", "AAPL", "GOOG", "MSFT"],
+ "bid": [720.50, 51.95, 51.97, 51.99, 720.50, 97.99, 720.50, 52.01],
+ "ask": [720.93, 51.96, 51.98, 52.00, 720.93, 98.01, 720.88, 52.03],
+ },
+ columns=["time", "ticker", "bid", "ask"],
+ )
.. ipython:: python
@@ -1452,18 +1504,13 @@ By default we are taking the asof of the quotes.
.. ipython:: python
- pd.merge_asof(trades, quotes,
- on='time',
- by='ticker')
+ pd.merge_asof(trades, quotes, on="time", by="ticker")
We only asof within ``2ms`` between the quote time and the trade time.
.. ipython:: python
- pd.merge_asof(trades, quotes,
- on='time',
- by='ticker',
- tolerance=pd.Timedelta('2ms'))
+ pd.merge_asof(trades, quotes, on="time", by="ticker", tolerance=pd.Timedelta("2ms"))
We only asof within ``10ms`` between the quote time and the trade time and we
exclude exact matches on time. Note that though we exclude the exact matches
@@ -1471,11 +1518,14 @@ exclude exact matches on time. Note that though we exclude the exact matches
.. ipython:: python
- pd.merge_asof(trades, quotes,
- on='time',
- by='ticker',
- tolerance=pd.Timedelta('10ms'),
- allow_exact_matches=False)
+ pd.merge_asof(
+ trades,
+ quotes,
+ on="time",
+ by="ticker",
+ tolerance=pd.Timedelta("10ms"),
+ allow_exact_matches=False,
+ )
.. _merging.compare:
@@ -1496,7 +1546,7 @@ side by side.
{
"col1": ["a", "a", "b", "b", "a"],
"col2": [1.0, 2.0, 3.0, np.nan, 5.0],
- "col3": [1.0, 2.0, 3.0, 4.0, 5.0]
+ "col3": [1.0, 2.0, 3.0, 4.0, 5.0],
},
columns=["col1", "col2", "col3"],
)
@@ -1505,8 +1555,8 @@ side by side.
.. ipython:: python
df2 = df.copy()
- df2.loc[0, 'col1'] = 'c'
- df2.loc[2, 'col3'] = 4.0
+ df2.loc[0, "col1"] = "c"
+ df2.loc[2, "col3"] = 4.0
df2
.. ipython:: python
| Fixes /docs/source/user_guide/merging.rst formatting. Passes flake8-rst test.
- [x] closes #xxxx
- [x] tests added / passed
- [x] passes `black pandas`
- [ ] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [x] whatsnew entry
ref https://github.com/pandas-dev/pandas/issues/36777 | https://api.github.com/repos/pandas-dev/pandas/pulls/36815 | 2020-10-02T19:29:14Z | 2020-10-02T22:05:24Z | 2020-10-02T22:05:24Z | 2020-10-02T22:05:30Z |
DOC: add example & prose of slicing with labels when index has duplicate labels | diff --git a/doc/source/user_guide/indexing.rst b/doc/source/user_guide/indexing.rst
index 530fdfba7d12c..44e2d8daa4de0 100644
--- a/doc/source/user_guide/indexing.rst
+++ b/doc/source/user_guide/indexing.rst
@@ -422,6 +422,17 @@ above example, ``s.loc[1:6]`` would raise ``KeyError``.
For the rationale behind this behavior, see
:ref:`Endpoints are inclusive <advanced.endpoints_are_inclusive>`.
+.. ipython:: python
+
+ s = pd.Series(list('abcdef'), index=[0, 3, 2, 5, 4, 2])
+ s.loc[3:5]
+
+Also, if the index has duplicate labels *and* either the start or the stop label is dupulicated,
+an error will be raised. For instance, in the above example, ``s.loc[2:5]`` would raise a ``KeyError``.
+
+For more information about duplicate labels, see
+:ref:`Duplicate Labels <duplicates>`.
+
.. _indexing.integer:
Selection by position
| - [x] closes #36251 | https://api.github.com/repos/pandas-dev/pandas/pulls/36814 | 2020-10-02T19:25:58Z | 2020-11-05T01:22:25Z | 2020-11-05T01:22:24Z | 2020-11-06T06:13:08Z |
DOC: use black to fix code style in doc pandas-dev#36777 | diff --git a/doc/source/getting_started/comparison/comparison_with_r.rst b/doc/source/getting_started/comparison/comparison_with_r.rst
index e1a4cfe49b7d1..358bb6ad951f0 100644
--- a/doc/source/getting_started/comparison/comparison_with_r.rst
+++ b/doc/source/getting_started/comparison/comparison_with_r.rst
@@ -122,16 +122,16 @@ Selecting multiple columns by name in ``pandas`` is straightforward
.. ipython:: python
- df = pd.DataFrame(np.random.randn(10, 3), columns=list('abc'))
- df[['a', 'c']]
- df.loc[:, ['a', 'c']]
+ df = pd.DataFrame(np.random.randn(10, 3), columns=list("abc"))
+ df[["a", "c"]]
+ df.loc[:, ["a", "c"]]
Selecting multiple noncontiguous columns by integer location can be achieved
with a combination of the ``iloc`` indexer attribute and ``numpy.r_``.
.. ipython:: python
- named = list('abcdefg')
+ named = list("abcdefg")
n = 30
columns = named + np.arange(len(named), n).tolist()
df = pd.DataFrame(np.random.randn(n, n), columns=columns)
@@ -160,14 +160,29 @@ function.
.. ipython:: python
df = pd.DataFrame(
- {'v1': [1, 3, 5, 7, 8, 3, 5, np.nan, 4, 5, 7, 9],
- 'v2': [11, 33, 55, 77, 88, 33, 55, np.nan, 44, 55, 77, 99],
- 'by1': ["red", "blue", 1, 2, np.nan, "big", 1, 2, "red", 1, np.nan, 12],
- 'by2': ["wet", "dry", 99, 95, np.nan, "damp", 95, 99, "red", 99, np.nan,
- np.nan]})
+ {
+ "v1": [1, 3, 5, 7, 8, 3, 5, np.nan, 4, 5, 7, 9],
+ "v2": [11, 33, 55, 77, 88, 33, 55, np.nan, 44, 55, 77, 99],
+ "by1": ["red", "blue", 1, 2, np.nan, "big", 1, 2, "red", 1, np.nan, 12],
+ "by2": [
+ "wet",
+ "dry",
+ 99,
+ 95,
+ np.nan,
+ "damp",
+ 95,
+ 99,
+ "red",
+ 99,
+ np.nan,
+ np.nan,
+ ],
+ }
+ )
- g = df.groupby(['by1', 'by2'])
- g[['v1', 'v2']].mean()
+ g = df.groupby(["by1", "by2"])
+ g[["v1", "v2"]].mean()
For more details and examples see :ref:`the groupby documentation
<groupby.split>`.
@@ -228,11 +243,14 @@ In ``pandas`` we may use :meth:`~pandas.pivot_table` method to handle this:
import string
baseball = pd.DataFrame(
- {'team': ["team %d" % (x + 1) for x in range(5)] * 5,
- 'player': random.sample(list(string.ascii_lowercase), 25),
- 'batting avg': np.random.uniform(.200, .400, 25)})
+ {
+ "team": ["team %d" % (x + 1) for x in range(5)] * 5,
+ "player": random.sample(list(string.ascii_lowercase), 25),
+ "batting avg": np.random.uniform(0.200, 0.400, 25),
+ }
+ )
- baseball.pivot_table(values='batting avg', columns='team', aggfunc=np.max)
+ baseball.pivot_table(values="batting avg", columns="team", aggfunc=np.max)
For more details and examples see :ref:`the reshaping documentation
<reshaping.pivot>`.
@@ -256,10 +274,10 @@ index/slice as well as standard boolean indexing:
.. ipython:: python
- df = pd.DataFrame({'a': np.random.randn(10), 'b': np.random.randn(10)})
- df.query('a <= b')
- df[df['a'] <= df['b']]
- df.loc[df['a'] <= df['b']]
+ df = pd.DataFrame({"a": np.random.randn(10), "b": np.random.randn(10)})
+ df.query("a <= b")
+ df[df["a"] <= df["b"]]
+ df.loc[df["a"] <= df["b"]]
For more details and examples see :ref:`the query documentation
<indexing.query>`.
@@ -282,9 +300,9 @@ In ``pandas`` the equivalent expression, using the
.. ipython:: python
- df = pd.DataFrame({'a': np.random.randn(10), 'b': np.random.randn(10)})
- df.eval('a + b')
- df['a'] + df['b'] # same as the previous expression
+ df = pd.DataFrame({"a": np.random.randn(10), "b": np.random.randn(10)})
+ df.eval("a + b")
+ df["a"] + df["b"] # same as the previous expression
In certain cases :meth:`~pandas.DataFrame.eval` will be much faster than
evaluation in pure Python. For more details and examples see :ref:`the eval
@@ -334,14 +352,18 @@ In ``pandas`` the equivalent expression, using the
.. ipython:: python
- df = pd.DataFrame({'x': np.random.uniform(1., 168., 120),
- 'y': np.random.uniform(7., 334., 120),
- 'z': np.random.uniform(1.7, 20.7, 120),
- 'month': [5, 6, 7, 8] * 30,
- 'week': np.random.randint(1, 4, 120)})
+ df = pd.DataFrame(
+ {
+ "x": np.random.uniform(1.0, 168.0, 120),
+ "y": np.random.uniform(7.0, 334.0, 120),
+ "z": np.random.uniform(1.7, 20.7, 120),
+ "month": [5, 6, 7, 8] * 30,
+ "week": np.random.randint(1, 4, 120),
+ }
+ )
- grouped = df.groupby(['month', 'week'])
- grouped['x'].agg([np.mean, np.std])
+ grouped = df.groupby(["month", "week"])
+ grouped["x"].agg([np.mean, np.std])
For more details and examples see :ref:`the groupby documentation
@@ -410,13 +432,17 @@ In Python, the :meth:`~pandas.melt` method is the R equivalent:
.. ipython:: python
- cheese = pd.DataFrame({'first': ['John', 'Mary'],
- 'last': ['Doe', 'Bo'],
- 'height': [5.5, 6.0],
- 'weight': [130, 150]})
+ cheese = pd.DataFrame(
+ {
+ "first": ["John", "Mary"],
+ "last": ["Doe", "Bo"],
+ "height": [5.5, 6.0],
+ "weight": [130, 150],
+ }
+ )
- pd.melt(cheese, id_vars=['first', 'last'])
- cheese.set_index(['first', 'last']).stack() # alternative way
+ pd.melt(cheese, id_vars=["first", "last"])
+ cheese.set_index(["first", "last"]).stack() # alternative way
For more details and examples see :ref:`the reshaping documentation
<reshaping.melt>`.
@@ -444,15 +470,24 @@ In Python the best way is to make use of :meth:`~pandas.pivot_table`:
.. ipython:: python
- df = pd.DataFrame({'x': np.random.uniform(1., 168., 12),
- 'y': np.random.uniform(7., 334., 12),
- 'z': np.random.uniform(1.7, 20.7, 12),
- 'month': [5, 6, 7] * 4,
- 'week': [1, 2] * 6})
+ df = pd.DataFrame(
+ {
+ "x": np.random.uniform(1.0, 168.0, 12),
+ "y": np.random.uniform(7.0, 334.0, 12),
+ "z": np.random.uniform(1.7, 20.7, 12),
+ "month": [5, 6, 7] * 4,
+ "week": [1, 2] * 6,
+ }
+ )
- mdf = pd.melt(df, id_vars=['month', 'week'])
- pd.pivot_table(mdf, values='value', index=['variable', 'week'],
- columns=['month'], aggfunc=np.mean)
+ mdf = pd.melt(df, id_vars=["month", "week"])
+ pd.pivot_table(
+ mdf,
+ values="value",
+ index=["variable", "week"],
+ columns=["month"],
+ aggfunc=np.mean,
+ )
Similarly for ``dcast`` which uses a data.frame called ``df`` in R to
aggregate information based on ``Animal`` and ``FeedType``:
@@ -475,21 +510,29 @@ using :meth:`~pandas.pivot_table`:
.. ipython:: python
- df = pd.DataFrame({
- 'Animal': ['Animal1', 'Animal2', 'Animal3', 'Animal2', 'Animal1',
- 'Animal2', 'Animal3'],
- 'FeedType': ['A', 'B', 'A', 'A', 'B', 'B', 'A'],
- 'Amount': [10, 7, 4, 2, 5, 6, 2],
- })
+ df = pd.DataFrame(
+ {
+ "Animal": [
+ "Animal1",
+ "Animal2",
+ "Animal3",
+ "Animal2",
+ "Animal1",
+ "Animal2",
+ "Animal3",
+ ],
+ "FeedType": ["A", "B", "A", "A", "B", "B", "A"],
+ "Amount": [10, 7, 4, 2, 5, 6, 2],
+ }
+ )
- df.pivot_table(values='Amount', index='Animal', columns='FeedType',
- aggfunc='sum')
+ df.pivot_table(values="Amount", index="Animal", columns="FeedType", aggfunc="sum")
The second approach is to use the :meth:`~pandas.DataFrame.groupby` method:
.. ipython:: python
- df.groupby(['Animal', 'FeedType'])['Amount'].sum()
+ df.groupby(["Animal", "FeedType"])["Amount"].sum()
For more details and examples see :ref:`the reshaping documentation
<reshaping.pivot>` or :ref:`the groupby documentation<groupby.split>`.
diff --git a/doc/source/getting_started/comparison/comparison_with_sas.rst b/doc/source/getting_started/comparison/comparison_with_sas.rst
index 85c6ea2c31969..ae9f1caebd556 100644
--- a/doc/source/getting_started/comparison/comparison_with_sas.rst
+++ b/doc/source/getting_started/comparison/comparison_with_sas.rst
@@ -106,7 +106,7 @@ and the values are the data.
.. ipython:: python
- df = pd.DataFrame({'x': [1, 3, 5], 'y': [2, 4, 6]})
+ df = pd.DataFrame({"x": [1, 3, 5], "y": [2, 4, 6]})
df
@@ -130,8 +130,10 @@ The pandas method is :func:`read_csv`, which works similarly.
.. ipython:: python
- url = ('https://raw.github.com/pandas-dev/'
- 'pandas/master/pandas/tests/io/data/csv/tips.csv')
+ url = (
+ "https://raw.github.com/pandas-dev/"
+ "pandas/master/pandas/tests/io/data/csv/tips.csv"
+ )
tips = pd.read_csv(url)
tips.head()
@@ -142,10 +144,10 @@ and did not have column names, the pandas command would be:
.. code-block:: python
- tips = pd.read_csv('tips.csv', sep='\t', header=None)
+ tips = pd.read_csv("tips.csv", sep="\t", header=None)
# alternatively, read_table is an alias to read_csv with tab delimiter
- tips = pd.read_table('tips.csv', header=None)
+ tips = pd.read_table("tips.csv", header=None)
In addition to text/csv, pandas supports a variety of other data formats
such as Excel, HDF5, and SQL databases. These are all read via a ``pd.read_*``
@@ -166,7 +168,7 @@ and other data formats follow a similar api.
.. code-block:: python
- tips.to_csv('tips2.csv')
+ tips.to_csv("tips2.csv")
Data operations
@@ -192,14 +194,14 @@ New columns can be assigned in the same way.
.. ipython:: python
- tips['total_bill'] = tips['total_bill'] - 2
- tips['new_bill'] = tips['total_bill'] / 2.0
+ tips["total_bill"] = tips["total_bill"] - 2
+ tips["new_bill"] = tips["total_bill"] / 2.0
tips.head()
.. ipython:: python
:suppress:
- tips = tips.drop('new_bill', axis=1)
+ tips = tips.drop("new_bill", axis=1)
Filtering
~~~~~~~~~
@@ -226,7 +228,7 @@ DataFrames can be filtered in multiple ways; the most intuitive of which is usin
.. ipython:: python
- tips[tips['total_bill'] > 10].head()
+ tips[tips["total_bill"] > 10].head()
If/then logic
~~~~~~~~~~~~~
@@ -248,13 +250,13 @@ the ``where`` method from ``numpy``.
.. ipython:: python
- tips['bucket'] = np.where(tips['total_bill'] < 10, 'low', 'high')
+ tips["bucket"] = np.where(tips["total_bill"] < 10, "low", "high")
tips.head()
.. ipython:: python
:suppress:
- tips = tips.drop('bucket', axis=1)
+ tips = tips.drop("bucket", axis=1)
Date functionality
~~~~~~~~~~~~~~~~~~
@@ -284,22 +286,26 @@ see the :ref:`timeseries documentation<timeseries>` for more details.
.. ipython:: python
- tips['date1'] = pd.Timestamp('2013-01-15')
- tips['date2'] = pd.Timestamp('2015-02-15')
- tips['date1_year'] = tips['date1'].dt.year
- tips['date2_month'] = tips['date2'].dt.month
- tips['date1_next'] = tips['date1'] + pd.offsets.MonthBegin()
- tips['months_between'] = (
- tips['date2'].dt.to_period('M') - tips['date1'].dt.to_period('M'))
+ tips["date1"] = pd.Timestamp("2013-01-15")
+ tips["date2"] = pd.Timestamp("2015-02-15")
+ tips["date1_year"] = tips["date1"].dt.year
+ tips["date2_month"] = tips["date2"].dt.month
+ tips["date1_next"] = tips["date1"] + pd.offsets.MonthBegin()
+ tips["months_between"] = tips["date2"].dt.to_period("M") - tips[
+ "date1"
+ ].dt.to_period("M")
- tips[['date1', 'date2', 'date1_year', 'date2_month',
- 'date1_next', 'months_between']].head()
+ tips[
+ ["date1", "date2", "date1_year", "date2_month", "date1_next", "months_between"]
+ ].head()
.. ipython:: python
:suppress:
- tips = tips.drop(['date1', 'date2', 'date1_year',
- 'date2_month', 'date1_next', 'months_between'], axis=1)
+ tips = tips.drop(
+ ["date1", "date2", "date1_year", "date2_month", "date1_next", "months_between"],
+ axis=1,
+ )
Selection of columns
~~~~~~~~~~~~~~~~~~~~
@@ -329,13 +335,13 @@ The same operations are expressed in pandas below.
.. ipython:: python
# keep
- tips[['sex', 'total_bill', 'tip']].head()
+ tips[["sex", "total_bill", "tip"]].head()
# drop
- tips.drop('sex', axis=1).head()
+ tips.drop("sex", axis=1).head()
# rename
- tips.rename(columns={'total_bill': 'total_bill_2'}).head()
+ tips.rename(columns={"total_bill": "total_bill_2"}).head()
Sorting by values
@@ -354,7 +360,7 @@ takes a list of columns to sort by.
.. ipython:: python
- tips = tips.sort_values(['sex', 'total_bill'])
+ tips = tips.sort_values(["sex", "total_bill"])
tips.head()
@@ -383,8 +389,8 @@ trailing blanks.
.. ipython:: python
- tips['time'].str.len().head()
- tips['time'].str.rstrip().str.len().head()
+ tips["time"].str.len().head()
+ tips["time"].str.rstrip().str.len().head()
Find
@@ -410,7 +416,7 @@ the function will return -1 if it fails to find the substring.
.. ipython:: python
- tips['sex'].str.find("ale").head()
+ tips["sex"].str.find("ale").head()
Substring
@@ -432,7 +438,7 @@ indexes are zero-based.
.. ipython:: python
- tips['sex'].str[0:1].head()
+ tips["sex"].str[0:1].head()
Scan
@@ -460,9 +466,9 @@ approaches, but this just shows a simple approach.
.. ipython:: python
- firstlast = pd.DataFrame({'String': ['John Smith', 'Jane Cook']})
- firstlast['First_Name'] = firstlast['String'].str.split(" ", expand=True)[0]
- firstlast['Last_Name'] = firstlast['String'].str.rsplit(" ", expand=True)[0]
+ firstlast = pd.DataFrame({"String": ["John Smith", "Jane Cook"]})
+ firstlast["First_Name"] = firstlast["String"].str.split(" ", expand=True)[0]
+ firstlast["Last_Name"] = firstlast["String"].str.rsplit(" ", expand=True)[0]
firstlast
@@ -491,10 +497,10 @@ The equivalent Python functions are ``upper``, ``lower``, and ``title``.
.. ipython:: python
- firstlast = pd.DataFrame({'String': ['John Smith', 'Jane Cook']})
- firstlast['string_up'] = firstlast['String'].str.upper()
- firstlast['string_low'] = firstlast['String'].str.lower()
- firstlast['string_prop'] = firstlast['String'].str.title()
+ firstlast = pd.DataFrame({"String": ["John Smith", "Jane Cook"]})
+ firstlast["string_up"] = firstlast["String"].str.upper()
+ firstlast["string_low"] = firstlast["String"].str.lower()
+ firstlast["string_prop"] = firstlast["String"].str.title()
firstlast
Merging
@@ -504,11 +510,9 @@ The following tables will be used in the merge examples
.. ipython:: python
- df1 = pd.DataFrame({'key': ['A', 'B', 'C', 'D'],
- 'value': np.random.randn(4)})
+ df1 = pd.DataFrame({"key": ["A", "B", "C", "D"], "value": np.random.randn(4)})
df1
- df2 = pd.DataFrame({'key': ['B', 'D', 'D', 'E'],
- 'value': np.random.randn(4)})
+ df2 = pd.DataFrame({"key": ["B", "D", "D", "E"], "value": np.random.randn(4)})
df2
In SAS, data must be explicitly sorted before merging. Different
@@ -542,16 +546,16 @@ types are accomplished via the ``how`` keyword.
.. ipython:: python
- inner_join = df1.merge(df2, on=['key'], how='inner')
+ inner_join = df1.merge(df2, on=["key"], how="inner")
inner_join
- left_join = df1.merge(df2, on=['key'], how='left')
+ left_join = df1.merge(df2, on=["key"], how="left")
left_join
- right_join = df1.merge(df2, on=['key'], how='right')
+ right_join = df1.merge(df2, on=["key"], how="right")
right_join
- outer_join = df1.merge(df2, on=['key'], how='outer')
+ outer_join = df1.merge(df2, on=["key"], how="outer")
outer_join
@@ -566,8 +570,8 @@ operations, and is ignored by default for aggregations.
.. ipython:: python
outer_join
- outer_join['value_x'] + outer_join['value_y']
- outer_join['value_x'].sum()
+ outer_join["value_x"] + outer_join["value_y"]
+ outer_join["value_x"].sum()
One difference is that missing data cannot be compared to its sentinel value.
For example, in SAS you could do this to filter missing values.
@@ -589,8 +593,8 @@ should be used for comparisons.
.. ipython:: python
- outer_join[pd.isna(outer_join['value_x'])]
- outer_join[pd.notna(outer_join['value_x'])]
+ outer_join[pd.isna(outer_join["value_x"])]
+ outer_join[pd.notna(outer_join["value_x"])]
pandas also provides a variety of methods to work with missing data - some of
which would be challenging to express in SAS. For example, there are methods to
@@ -601,8 +605,8 @@ value, like the mean, or forward filling from previous rows. See the
.. ipython:: python
outer_join.dropna()
- outer_join.fillna(method='ffill')
- outer_join['value_x'].fillna(outer_join['value_x'].mean())
+ outer_join.fillna(method="ffill")
+ outer_join["value_x"].fillna(outer_join["value_x"].mean())
GroupBy
@@ -629,7 +633,7 @@ for more details and examples.
.. ipython:: python
- tips_summed = tips.groupby(['sex', 'smoker'])[['total_bill', 'tip']].sum()
+ tips_summed = tips.groupby(["sex", "smoker"])[["total_bill", "tip"]].sum()
tips_summed.head()
@@ -666,8 +670,8 @@ operation.
.. ipython:: python
- gb = tips.groupby('smoker')['total_bill']
- tips['adj_total_bill'] = tips['total_bill'] - gb.transform('mean')
+ gb = tips.groupby("smoker")["total_bill"]
+ tips["adj_total_bill"] = tips["total_bill"] - gb.transform("mean")
tips.head()
@@ -695,7 +699,7 @@ In pandas this would be written as:
.. ipython:: python
- tips.groupby(['sex', 'smoker']).first()
+ tips.groupby(["sex", "smoker"]).first()
Other considerations
@@ -729,16 +733,16 @@ the XPORT or SAS7BDAT binary format.
.. code-block:: python
- df = pd.read_sas('transport-file.xpt')
- df = pd.read_sas('binary-file.sas7bdat')
+ df = pd.read_sas("transport-file.xpt")
+ df = pd.read_sas("binary-file.sas7bdat")
You can also specify the file format directly. By default, pandas will try
to infer the file format based on its extension.
.. code-block:: python
- df = pd.read_sas('transport-file.xpt', format='xport')
- df = pd.read_sas('binary-file.sas7bdat', format='sas7bdat')
+ df = pd.read_sas("transport-file.xpt", format="xport")
+ df = pd.read_sas("binary-file.sas7bdat", format="sas7bdat")
XPORT is a relatively limited format and the parsing of it is not as
optimized as some of the other pandas readers. An alternative way
@@ -752,4 +756,4 @@ to interop data between SAS and pandas is to serialize to csv.
Wall time: 14.6 s
In [9]: %time df = pd.read_csv('big.csv')
- Wall time: 4.86 s
\ No newline at end of file
+ Wall time: 4.86 s
diff --git a/doc/source/getting_started/comparison/comparison_with_sql.rst b/doc/source/getting_started/comparison/comparison_with_sql.rst
index 04f97a27cde39..6848d8df2e46b 100644
--- a/doc/source/getting_started/comparison/comparison_with_sql.rst
+++ b/doc/source/getting_started/comparison/comparison_with_sql.rst
@@ -24,8 +24,10 @@ structure.
.. ipython:: python
- url = ('https://raw.github.com/pandas-dev'
- '/pandas/master/pandas/tests/io/data/csv/tips.csv')
+ url = (
+ "https://raw.github.com/pandas-dev"
+ "/pandas/master/pandas/tests/io/data/csv/tips.csv"
+ )
tips = pd.read_csv(url)
tips.head()
@@ -44,7 +46,7 @@ With pandas, column selection is done by passing a list of column names to your
.. ipython:: python
- tips[['total_bill', 'tip', 'smoker', 'time']].head(5)
+ tips[["total_bill", "tip", "smoker", "time"]].head(5)
Calling the DataFrame without the list of column names would display all columns (akin to SQL's
``*``).
@@ -61,7 +63,7 @@ With pandas, you can use the :meth:`DataFrame.assign` method of a DataFrame to a
.. ipython:: python
- tips.assign(tip_rate=tips['tip'] / tips['total_bill']).head(5)
+ tips.assign(tip_rate=tips["tip"] / tips["total_bill"]).head(5)
WHERE
-----
@@ -79,14 +81,14 @@ DataFrames can be filtered in multiple ways; the most intuitive of which is usin
.. ipython:: python
- tips[tips['time'] == 'Dinner'].head(5)
+ tips[tips["time"] == "Dinner"].head(5)
The above statement is simply passing a ``Series`` of True/False objects to the DataFrame,
returning all rows with True.
.. ipython:: python
- is_dinner = tips['time'] == 'Dinner'
+ is_dinner = tips["time"] == "Dinner"
is_dinner.value_counts()
tips[is_dinner].head(5)
@@ -103,7 +105,7 @@ Just like SQL's OR and AND, multiple conditions can be passed to a DataFrame usi
.. ipython:: python
# tips of more than $5.00 at Dinner meals
- tips[(tips['time'] == 'Dinner') & (tips['tip'] > 5.00)]
+ tips[(tips["time"] == "Dinner") & (tips["tip"] > 5.00)]
.. code-block:: sql
@@ -115,15 +117,16 @@ Just like SQL's OR and AND, multiple conditions can be passed to a DataFrame usi
.. ipython:: python
# tips by parties of at least 5 diners OR bill total was more than $45
- tips[(tips['size'] >= 5) | (tips['total_bill'] > 45)]
+ tips[(tips["size"] >= 5) | (tips["total_bill"] > 45)]
NULL checking is done using the :meth:`~pandas.Series.notna` and :meth:`~pandas.Series.isna`
methods.
.. ipython:: python
- frame = pd.DataFrame({'col1': ['A', 'B', np.NaN, 'C', 'D'],
- 'col2': ['F', np.NaN, 'G', 'H', 'I']})
+ frame = pd.DataFrame(
+ {"col1": ["A", "B", np.NaN, "C", "D"], "col2": ["F", np.NaN, "G", "H", "I"]}
+ )
frame
Assume we have a table of the same structure as our DataFrame above. We can see only the records
@@ -137,7 +140,7 @@ where ``col2`` IS NULL with the following query:
.. ipython:: python
- frame[frame['col2'].isna()]
+ frame[frame["col2"].isna()]
Getting items where ``col1`` IS NOT NULL can be done with :meth:`~pandas.Series.notna`.
@@ -149,7 +152,7 @@ Getting items where ``col1`` IS NOT NULL can be done with :meth:`~pandas.Series.
.. ipython:: python
- frame[frame['col1'].notna()]
+ frame[frame["col1"].notna()]
GROUP BY
@@ -177,7 +180,7 @@ The pandas equivalent would be:
.. ipython:: python
- tips.groupby('sex').size()
+ tips.groupby("sex").size()
Notice that in the pandas code we used :meth:`~pandas.core.groupby.DataFrameGroupBy.size` and not
:meth:`~pandas.core.groupby.DataFrameGroupBy.count`. This is because
@@ -186,14 +189,14 @@ the number of ``not null`` records within each.
.. ipython:: python
- tips.groupby('sex').count()
+ tips.groupby("sex").count()
Alternatively, we could have applied the :meth:`~pandas.core.groupby.DataFrameGroupBy.count` method
to an individual column:
.. ipython:: python
- tips.groupby('sex')['total_bill'].count()
+ tips.groupby("sex")["total_bill"].count()
Multiple functions can also be applied at once. For instance, say we'd like to see how tip amount
differs by day of the week - :meth:`~pandas.core.groupby.DataFrameGroupBy.agg` allows you to pass a dictionary
@@ -213,7 +216,7 @@ to your grouped DataFrame, indicating which functions to apply to specific colum
.. ipython:: python
- tips.groupby('day').agg({'tip': np.mean, 'day': np.size})
+ tips.groupby("day").agg({"tip": np.mean, "day": np.size})
Grouping by more than one column is done by passing a list of columns to the
:meth:`~pandas.DataFrame.groupby` method.
@@ -237,7 +240,7 @@ Grouping by more than one column is done by passing a list of columns to the
.. ipython:: python
- tips.groupby(['smoker', 'day']).agg({'tip': [np.size, np.mean]})
+ tips.groupby(["smoker", "day"]).agg({"tip": [np.size, np.mean]})
.. _compare_with_sql.join:
@@ -250,10 +253,8 @@ columns to join on (column names or indices).
.. ipython:: python
- df1 = pd.DataFrame({'key': ['A', 'B', 'C', 'D'],
- 'value': np.random.randn(4)})
- df2 = pd.DataFrame({'key': ['B', 'D', 'D', 'E'],
- 'value': np.random.randn(4)})
+ df1 = pd.DataFrame({"key": ["A", "B", "C", "D"], "value": np.random.randn(4)})
+ df2 = pd.DataFrame({"key": ["B", "D", "D", "E"], "value": np.random.randn(4)})
Assume we have two database tables of the same name and structure as our DataFrames.
@@ -271,15 +272,15 @@ INNER JOIN
.. ipython:: python
# merge performs an INNER JOIN by default
- pd.merge(df1, df2, on='key')
+ pd.merge(df1, df2, on="key")
:meth:`~pandas.merge` also offers parameters for cases when you'd like to join one DataFrame's
column with another DataFrame's index.
.. ipython:: python
- indexed_df2 = df2.set_index('key')
- pd.merge(df1, indexed_df2, left_on='key', right_index=True)
+ indexed_df2 = df2.set_index("key")
+ pd.merge(df1, indexed_df2, left_on="key", right_index=True)
LEFT OUTER JOIN
~~~~~~~~~~~~~~~
@@ -294,7 +295,7 @@ LEFT OUTER JOIN
.. ipython:: python
# show all records from df1
- pd.merge(df1, df2, on='key', how='left')
+ pd.merge(df1, df2, on="key", how="left")
RIGHT JOIN
~~~~~~~~~~
@@ -309,7 +310,7 @@ RIGHT JOIN
.. ipython:: python
# show all records from df2
- pd.merge(df1, df2, on='key', how='right')
+ pd.merge(df1, df2, on="key", how="right")
FULL JOIN
~~~~~~~~~
@@ -327,7 +328,7 @@ joined columns find a match. As of writing, FULL JOINs are not supported in all
.. ipython:: python
# show all records from both frames
- pd.merge(df1, df2, on='key', how='outer')
+ pd.merge(df1, df2, on="key", how="outer")
UNION
@@ -336,10 +337,12 @@ UNION ALL can be performed using :meth:`~pandas.concat`.
.. ipython:: python
- df1 = pd.DataFrame({'city': ['Chicago', 'San Francisco', 'New York City'],
- 'rank': range(1, 4)})
- df2 = pd.DataFrame({'city': ['Chicago', 'Boston', 'Los Angeles'],
- 'rank': [1, 4, 5]})
+ df1 = pd.DataFrame(
+ {"city": ["Chicago", "San Francisco", "New York City"], "rank": range(1, 4)}
+ )
+ df2 = pd.DataFrame(
+ {"city": ["Chicago", "Boston", "Los Angeles"], "rank": [1, 4, 5]}
+ )
.. code-block:: sql
@@ -403,7 +406,7 @@ Top n rows with offset
.. ipython:: python
- tips.nlargest(10 + 5, columns='tip').tail(10)
+ tips.nlargest(10 + 5, columns="tip").tail(10)
Top n rows per group
~~~~~~~~~~~~~~~~~~~~
@@ -423,20 +426,30 @@ Top n rows per group
.. ipython:: python
- (tips.assign(rn=tips.sort_values(['total_bill'], ascending=False)
- .groupby(['day'])
- .cumcount() + 1)
- .query('rn < 3')
- .sort_values(['day', 'rn']))
+ (
+ tips.assign(
+ rn=tips.sort_values(["total_bill"], ascending=False)
+ .groupby(["day"])
+ .cumcount()
+ + 1
+ )
+ .query("rn < 3")
+ .sort_values(["day", "rn"])
+ )
the same using ``rank(method='first')`` function
.. ipython:: python
- (tips.assign(rnk=tips.groupby(['day'])['total_bill']
- .rank(method='first', ascending=False))
- .query('rnk < 3')
- .sort_values(['day', 'rnk']))
+ (
+ tips.assign(
+ rnk=tips.groupby(["day"])["total_bill"].rank(
+ method="first", ascending=False
+ )
+ )
+ .query("rnk < 3")
+ .sort_values(["day", "rnk"])
+ )
.. code-block:: sql
@@ -458,11 +471,12 @@ Notice that when using ``rank(method='min')`` function
.. ipython:: python
- (tips[tips['tip'] < 2]
- .assign(rnk_min=tips.groupby(['sex'])['tip']
- .rank(method='min'))
- .query('rnk_min < 3')
- .sort_values(['sex', 'rnk_min']))
+ (
+ tips[tips["tip"] < 2]
+ .assign(rnk_min=tips.groupby(["sex"])["tip"].rank(method="min"))
+ .query("rnk_min < 3")
+ .sort_values(["sex", "rnk_min"])
+ )
UPDATE
@@ -476,7 +490,7 @@ UPDATE
.. ipython:: python
- tips.loc[tips['tip'] < 2, 'tip'] *= 2
+ tips.loc[tips["tip"] < 2, "tip"] *= 2
DELETE
------
@@ -490,4 +504,4 @@ In pandas we select the rows that should remain, instead of deleting them
.. ipython:: python
- tips = tips.loc[tips['tip'] <= 9]
+ tips = tips.loc[tips["tip"] <= 9]
diff --git a/doc/source/getting_started/comparison/comparison_with_stata.rst b/doc/source/getting_started/comparison/comparison_with_stata.rst
index 06f9e45466243..7b8d9c6be61db 100644
--- a/doc/source/getting_started/comparison/comparison_with_stata.rst
+++ b/doc/source/getting_started/comparison/comparison_with_stata.rst
@@ -103,7 +103,7 @@ and the values are the data.
.. ipython:: python
- df = pd.DataFrame({'x': [1, 3, 5], 'y': [2, 4, 6]})
+ df = pd.DataFrame({"x": [1, 3, 5], "y": [2, 4, 6]})
df
@@ -127,8 +127,10 @@ the data set if presented with a url.
.. ipython:: python
- url = ('https://raw.github.com/pandas-dev'
- '/pandas/master/pandas/tests/io/data/csv/tips.csv')
+ url = (
+ "https://raw.github.com/pandas-dev"
+ "/pandas/master/pandas/tests/io/data/csv/tips.csv"
+ )
tips = pd.read_csv(url)
tips.head()
@@ -139,16 +141,16 @@ the pandas command would be:
.. code-block:: python
- tips = pd.read_csv('tips.csv', sep='\t', header=None)
+ tips = pd.read_csv("tips.csv", sep="\t", header=None)
# alternatively, read_table is an alias to read_csv with tab delimiter
- tips = pd.read_table('tips.csv', header=None)
+ tips = pd.read_table("tips.csv", header=None)
Pandas can also read Stata data sets in ``.dta`` format with the :func:`read_stata` function.
.. code-block:: python
- df = pd.read_stata('data.dta')
+ df = pd.read_stata("data.dta")
In addition to text/csv and Stata files, pandas supports a variety of other data formats
such as Excel, SAS, HDF5, Parquet, and SQL databases. These are all read via a ``pd.read_*``
@@ -168,13 +170,13 @@ Similarly in pandas, the opposite of ``read_csv`` is :meth:`DataFrame.to_csv`.
.. code-block:: python
- tips.to_csv('tips2.csv')
+ tips.to_csv("tips2.csv")
Pandas can also export to Stata file format with the :meth:`DataFrame.to_stata` method.
.. code-block:: python
- tips.to_stata('tips2.dta')
+ tips.to_stata("tips2.dta")
Data operations
@@ -200,11 +202,11 @@ drops a column from the ``DataFrame``.
.. ipython:: python
- tips['total_bill'] = tips['total_bill'] - 2
- tips['new_bill'] = tips['total_bill'] / 2
+ tips["total_bill"] = tips["total_bill"] - 2
+ tips["new_bill"] = tips["total_bill"] / 2
tips.head()
- tips = tips.drop('new_bill', axis=1)
+ tips = tips.drop("new_bill", axis=1)
Filtering
~~~~~~~~~
@@ -220,7 +222,7 @@ DataFrames can be filtered in multiple ways; the most intuitive of which is usin
.. ipython:: python
- tips[tips['total_bill'] > 10].head()
+ tips[tips["total_bill"] > 10].head()
If/then logic
~~~~~~~~~~~~~
@@ -237,13 +239,13 @@ the ``where`` method from ``numpy``.
.. ipython:: python
- tips['bucket'] = np.where(tips['total_bill'] < 10, 'low', 'high')
+ tips["bucket"] = np.where(tips["total_bill"] < 10, "low", "high")
tips.head()
.. ipython:: python
:suppress:
- tips = tips.drop('bucket', axis=1)
+ tips = tips.drop("bucket", axis=1)
Date functionality
~~~~~~~~~~~~~~~~~~
@@ -273,22 +275,26 @@ see the :ref:`timeseries documentation<timeseries>` for more details.
.. ipython:: python
- tips['date1'] = pd.Timestamp('2013-01-15')
- tips['date2'] = pd.Timestamp('2015-02-15')
- tips['date1_year'] = tips['date1'].dt.year
- tips['date2_month'] = tips['date2'].dt.month
- tips['date1_next'] = tips['date1'] + pd.offsets.MonthBegin()
- tips['months_between'] = (tips['date2'].dt.to_period('M')
- - tips['date1'].dt.to_period('M'))
+ tips["date1"] = pd.Timestamp("2013-01-15")
+ tips["date2"] = pd.Timestamp("2015-02-15")
+ tips["date1_year"] = tips["date1"].dt.year
+ tips["date2_month"] = tips["date2"].dt.month
+ tips["date1_next"] = tips["date1"] + pd.offsets.MonthBegin()
+ tips["months_between"] = tips["date2"].dt.to_period("M") - tips[
+ "date1"
+ ].dt.to_period("M")
- tips[['date1', 'date2', 'date1_year', 'date2_month', 'date1_next',
- 'months_between']].head()
+ tips[
+ ["date1", "date2", "date1_year", "date2_month", "date1_next", "months_between"]
+ ].head()
.. ipython:: python
:suppress:
- tips = tips.drop(['date1', 'date2', 'date1_year', 'date2_month',
- 'date1_next', 'months_between'], axis=1)
+ tips = tips.drop(
+ ["date1", "date2", "date1_year", "date2_month", "date1_next", "months_between"],
+ axis=1,
+ )
Selection of columns
~~~~~~~~~~~~~~~~~~~~
@@ -310,13 +316,13 @@ to a variable.
.. ipython:: python
# keep
- tips[['sex', 'total_bill', 'tip']].head()
+ tips[["sex", "total_bill", "tip"]].head()
# drop
- tips.drop('sex', axis=1).head()
+ tips.drop("sex", axis=1).head()
# rename
- tips.rename(columns={'total_bill': 'total_bill_2'}).head()
+ tips.rename(columns={"total_bill": "total_bill_2"}).head()
Sorting by values
@@ -333,7 +339,7 @@ takes a list of columns to sort by.
.. ipython:: python
- tips = tips.sort_values(['sex', 'total_bill'])
+ tips = tips.sort_values(["sex", "total_bill"])
tips.head()
@@ -357,8 +363,8 @@ Use ``len`` and ``rstrip`` to exclude trailing blanks.
.. ipython:: python
- tips['time'].str.len().head()
- tips['time'].str.rstrip().str.len().head()
+ tips["time"].str.len().head()
+ tips["time"].str.rstrip().str.len().head()
Finding position of substring
@@ -380,7 +386,7 @@ the function will return -1 if it fails to find the substring.
.. ipython:: python
- tips['sex'].str.find("ale").head()
+ tips["sex"].str.find("ale").head()
Extracting substring by position
@@ -398,7 +404,7 @@ indexes are zero-based.
.. ipython:: python
- tips['sex'].str[0:1].head()
+ tips["sex"].str[0:1].head()
Extracting nth word
@@ -425,9 +431,9 @@ approaches, but this just shows a simple approach.
.. ipython:: python
- firstlast = pd.DataFrame({'string': ['John Smith', 'Jane Cook']})
- firstlast['First_Name'] = firstlast['string'].str.split(" ", expand=True)[0]
- firstlast['Last_Name'] = firstlast['string'].str.rsplit(" ", expand=True)[0]
+ firstlast = pd.DataFrame({"string": ["John Smith", "Jane Cook"]})
+ firstlast["First_Name"] = firstlast["string"].str.split(" ", expand=True)[0]
+ firstlast["Last_Name"] = firstlast["string"].str.rsplit(" ", expand=True)[0]
firstlast
@@ -455,10 +461,10 @@ The equivalent Python functions are ``upper``, ``lower``, and ``title``.
.. ipython:: python
- firstlast = pd.DataFrame({'string': ['John Smith', 'Jane Cook']})
- firstlast['upper'] = firstlast['string'].str.upper()
- firstlast['lower'] = firstlast['string'].str.lower()
- firstlast['title'] = firstlast['string'].str.title()
+ firstlast = pd.DataFrame({"string": ["John Smith", "Jane Cook"]})
+ firstlast["upper"] = firstlast["string"].str.upper()
+ firstlast["lower"] = firstlast["string"].str.lower()
+ firstlast["title"] = firstlast["string"].str.title()
firstlast
Merging
@@ -468,11 +474,9 @@ The following tables will be used in the merge examples
.. ipython:: python
- df1 = pd.DataFrame({'key': ['A', 'B', 'C', 'D'],
- 'value': np.random.randn(4)})
+ df1 = pd.DataFrame({"key": ["A", "B", "C", "D"], "value": np.random.randn(4)})
df1
- df2 = pd.DataFrame({'key': ['B', 'D', 'D', 'E'],
- 'value': np.random.randn(4)})
+ df2 = pd.DataFrame({"key": ["B", "D", "D", "E"], "value": np.random.randn(4)})
df2
In Stata, to perform a merge, one data set must be in memory
@@ -534,16 +538,16 @@ types are accomplished via the ``how`` keyword.
.. ipython:: python
- inner_join = df1.merge(df2, on=['key'], how='inner')
+ inner_join = df1.merge(df2, on=["key"], how="inner")
inner_join
- left_join = df1.merge(df2, on=['key'], how='left')
+ left_join = df1.merge(df2, on=["key"], how="left")
left_join
- right_join = df1.merge(df2, on=['key'], how='right')
+ right_join = df1.merge(df2, on=["key"], how="right")
right_join
- outer_join = df1.merge(df2, on=['key'], how='outer')
+ outer_join = df1.merge(df2, on=["key"], how="outer")
outer_join
@@ -558,8 +562,8 @@ operations, and is ignored by default for aggregations.
.. ipython:: python
outer_join
- outer_join['value_x'] + outer_join['value_y']
- outer_join['value_x'].sum()
+ outer_join["value_x"] + outer_join["value_y"]
+ outer_join["value_x"].sum()
One difference is that missing data cannot be compared to its sentinel value.
For example, in Stata you could do this to filter missing values.
@@ -576,8 +580,8 @@ should be used for comparisons.
.. ipython:: python
- outer_join[pd.isna(outer_join['value_x'])]
- outer_join[pd.notna(outer_join['value_x'])]
+ outer_join[pd.isna(outer_join["value_x"])]
+ outer_join[pd.notna(outer_join["value_x"])]
Pandas also provides a variety of methods to work with missing data -- some of
which would be challenging to express in Stata. For example, there are methods to
@@ -591,10 +595,10 @@ value, like the mean, or forward filling from previous rows. See the
outer_join.dropna()
# Fill forwards
- outer_join.fillna(method='ffill')
+ outer_join.fillna(method="ffill")
# Impute missing values with the mean
- outer_join['value_x'].fillna(outer_join['value_x'].mean())
+ outer_join["value_x"].fillna(outer_join["value_x"].mean())
GroupBy
@@ -617,7 +621,7 @@ for more details and examples.
.. ipython:: python
- tips_summed = tips.groupby(['sex', 'smoker'])[['total_bill', 'tip']].sum()
+ tips_summed = tips.groupby(["sex", "smoker"])[["total_bill", "tip"]].sum()
tips_summed.head()
@@ -640,8 +644,8 @@ operation.
.. ipython:: python
- gb = tips.groupby('smoker')['total_bill']
- tips['adj_total_bill'] = tips['total_bill'] - gb.transform('mean')
+ gb = tips.groupby("smoker")["total_bill"]
+ tips["adj_total_bill"] = tips["total_bill"] - gb.transform("mean")
tips.head()
@@ -661,7 +665,7 @@ In pandas this would be written as:
.. ipython:: python
- tips.groupby(['sex', 'smoker']).first()
+ tips.groupby(["sex", "smoker"]).first()
Other considerations
| Partially addresses #36777. Ran black on all the files under `doc/source/getting_started/comparison`. | https://api.github.com/repos/pandas-dev/pandas/pulls/36813 | 2020-10-02T19:19:27Z | 2020-10-02T21:03:51Z | 2020-10-02T21:03:51Z | 2020-10-05T19:18:15Z |
timeseries.rst | diff --git a/doc/source/user_guide/timeseries.rst b/doc/source/user_guide/timeseries.rst
index 61902b4a41b7c..84ea6feb8ef27 100644
--- a/doc/source/user_guide/timeseries.rst
+++ b/doc/source/user_guide/timeseries.rst
@@ -2448,7 +2448,7 @@ This will fail as there are ambiguous times (``'11/06/2011 01:00'``)
Handle these ambiguous times by specifying the following.
-.. ipython:: python
+.. ipython :: python
rng_hourly.tz_localize('US/Eastern', ambiguous='infer')
rng_hourly.tz_localize('US/Eastern', ambiguous='NaT')
@@ -2476,7 +2476,7 @@ can be controlled by the ``nonexistent`` argument. The following options are ava
Localization of nonexistent times will raise an error by default.
-.. code-block:: ipython
+.. code-block :: ipython
In [2]: dti.tz_localize('Europe/Warsaw')
NonExistentTimeError: 2015-03-29 02:30:00
| - [ ] closes #xxxx
- [ ] tests added / passed
- [ ] passes `black pandas`
- [ ] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [ ] whatsnew entry
| https://api.github.com/repos/pandas-dev/pandas/pulls/36812 | 2020-10-02T18:56:30Z | 2020-10-02T19:39:34Z | null | 2020-10-02T19:39:41Z |
DOC: use blacken to fix code style in documentation #36777 | diff --git a/doc/source/user_guide/cookbook.rst b/doc/source/user_guide/cookbook.rst
index e33e85d3d2224..0a30d865f3c23 100644
--- a/doc/source/user_guide/cookbook.rst
+++ b/doc/source/user_guide/cookbook.rst
@@ -33,9 +33,9 @@ These are some neat pandas ``idioms``
.. ipython:: python
- df = pd.DataFrame({'AAA': [4, 5, 6, 7],
- 'BBB': [10, 20, 30, 40],
- 'CCC': [100, 50, -30, -50]})
+ df = pd.DataFrame(
+ {"AAA": [4, 5, 6, 7], "BBB": [10, 20, 30, 40], "CCC": [100, 50, -30, -50]}
+ )
df
if-then...
@@ -45,30 +45,30 @@ An if-then on one column
.. ipython:: python
- df.loc[df.AAA >= 5, 'BBB'] = -1
+ df.loc[df.AAA >= 5, "BBB"] = -1
df
An if-then with assignment to 2 columns:
.. ipython:: python
- df.loc[df.AAA >= 5, ['BBB', 'CCC']] = 555
+ df.loc[df.AAA >= 5, ["BBB", "CCC"]] = 555
df
Add another line with different logic, to do the -else
.. ipython:: python
- df.loc[df.AAA < 5, ['BBB', 'CCC']] = 2000
+ df.loc[df.AAA < 5, ["BBB", "CCC"]] = 2000
df
Or use pandas where after you've set up a mask
.. ipython:: python
- df_mask = pd.DataFrame({'AAA': [True] * 4,
- 'BBB': [False] * 4,
- 'CCC': [True, False] * 2})
+ df_mask = pd.DataFrame(
+ {"AAA": [True] * 4, "BBB": [False] * 4, "CCC": [True, False] * 2}
+ )
df.where(df_mask, -1000)
`if-then-else using numpy's where()
@@ -76,11 +76,11 @@ Or use pandas where after you've set up a mask
.. ipython:: python
- df = pd.DataFrame({'AAA': [4, 5, 6, 7],
- 'BBB': [10, 20, 30, 40],
- 'CCC': [100, 50, -30, -50]})
+ df = pd.DataFrame(
+ {"AAA": [4, 5, 6, 7], "BBB": [10, 20, 30, 40], "CCC": [100, 50, -30, -50]}
+ )
df
- df['logic'] = np.where(df['AAA'] > 5, 'high', 'low')
+ df["logic"] = np.where(df["AAA"] > 5, "high", "low")
df
Splitting
@@ -91,9 +91,9 @@ Splitting
.. ipython:: python
- df = pd.DataFrame({'AAA': [4, 5, 6, 7],
- 'BBB': [10, 20, 30, 40],
- 'CCC': [100, 50, -30, -50]})
+ df = pd.DataFrame(
+ {"AAA": [4, 5, 6, 7], "BBB": [10, 20, 30, 40], "CCC": [100, 50, -30, -50]}
+ )
df
df[df.AAA <= 5]
@@ -107,28 +107,28 @@ Building criteria
.. ipython:: python
- df = pd.DataFrame({'AAA': [4, 5, 6, 7],
- 'BBB': [10, 20, 30, 40],
- 'CCC': [100, 50, -30, -50]})
+ df = pd.DataFrame(
+ {"AAA": [4, 5, 6, 7], "BBB": [10, 20, 30, 40], "CCC": [100, 50, -30, -50]}
+ )
df
...and (without assignment returns a Series)
.. ipython:: python
- df.loc[(df['BBB'] < 25) & (df['CCC'] >= -40), 'AAA']
+ df.loc[(df["BBB"] < 25) & (df["CCC"] >= -40), "AAA"]
...or (without assignment returns a Series)
.. ipython:: python
- df.loc[(df['BBB'] > 25) | (df['CCC'] >= -40), 'AAA']
+ df.loc[(df["BBB"] > 25) | (df["CCC"] >= -40), "AAA"]
...or (with assignment modifies the DataFrame.)
.. ipython:: python
- df.loc[(df['BBB'] > 25) | (df['CCC'] >= 75), 'AAA'] = 0.1
+ df.loc[(df["BBB"] > 25) | (df["CCC"] >= 75), "AAA"] = 0.1
df
`Select rows with data closest to certain value using argsort
@@ -136,9 +136,9 @@ Building criteria
.. ipython:: python
- df = pd.DataFrame({'AAA': [4, 5, 6, 7],
- 'BBB': [10, 20, 30, 40],
- 'CCC': [100, 50, -30, -50]})
+ df = pd.DataFrame(
+ {"AAA": [4, 5, 6, 7], "BBB": [10, 20, 30, 40], "CCC": [100, 50, -30, -50]}
+ )
df
aValue = 43.0
df.loc[(df.CCC - aValue).abs().argsort()]
@@ -148,9 +148,9 @@ Building criteria
.. ipython:: python
- df = pd.DataFrame({'AAA': [4, 5, 6, 7],
- 'BBB': [10, 20, 30, 40],
- 'CCC': [100, 50, -30, -50]})
+ df = pd.DataFrame(
+ {"AAA": [4, 5, 6, 7], "BBB": [10, 20, 30, 40], "CCC": [100, 50, -30, -50]}
+ )
df
Crit1 = df.AAA <= 5.5
@@ -189,9 +189,9 @@ The :ref:`indexing <indexing>` docs.
.. ipython:: python
- df = pd.DataFrame({'AAA': [4, 5, 6, 7],
- 'BBB': [10, 20, 30, 40],
- 'CCC': [100, 50, -30, -50]})
+ df = pd.DataFrame(
+ {"AAA": [4, 5, 6, 7], "BBB": [10, 20, 30, 40], "CCC": [100, 50, -30, -50]}
+ )
df
df[(df.AAA <= 6) & (df.index.isin([0, 2, 4]))]
@@ -201,10 +201,10 @@ The :ref:`indexing <indexing>` docs.
.. ipython:: python
- df = pd.DataFrame({'AAA': [4, 5, 6, 7],
- 'BBB': [10, 20, 30, 40],
- 'CCC': [100, 50, -30, -50]},
- index=['foo', 'bar', 'boo', 'kar'])
+ df = pd.DataFrame(
+ {"AAA": [4, 5, 6, 7], "BBB": [10, 20, 30, 40], "CCC": [100, 50, -30, -50]},
+ index=["foo", "bar", "boo", "kar"],
+ )
There are 2 explicit slicing methods, with a third general case
@@ -216,19 +216,17 @@ There are 2 explicit slicing methods, with a third general case
.. ipython:: python
df.iloc[0:3] # Positional
- df.loc['bar':'kar'] # Label
+ df.loc["bar":"kar"] # Label
# Generic
df[0:3]
- df['bar':'kar']
+ df["bar":"kar"]
Ambiguity arises when an index consists of integers with a non-zero start or non-unit increment.
.. ipython:: python
- data = {'AAA': [4, 5, 6, 7],
- 'BBB': [10, 20, 30, 40],
- 'CCC': [100, 50, -30, -50]}
+ data = {"AAA": [4, 5, 6, 7], "BBB": [10, 20, 30, 40], "CCC": [100, 50, -30, -50]}
df2 = pd.DataFrame(data=data, index=[1, 2, 3, 4]) # Note index starts at 1.
df2.iloc[1:3] # Position-oriented
df2.loc[1:3] # Label-oriented
@@ -238,9 +236,9 @@ Ambiguity arises when an index consists of integers with a non-zero start or non
.. ipython:: python
- df = pd.DataFrame({'AAA': [4, 5, 6, 7],
- 'BBB': [10, 20, 30, 40],
- 'CCC': [100, 50, -30, -50]})
+ df = pd.DataFrame(
+ {"AAA": [4, 5, 6, 7], "BBB": [10, 20, 30, 40], "CCC": [100, 50, -30, -50]}
+ )
df
df[~((df.AAA <= 6) & (df.index.isin([0, 2, 4])))]
@@ -253,14 +251,12 @@ New columns
.. ipython:: python
- df = pd.DataFrame({'AAA': [1, 2, 1, 3],
- 'BBB': [1, 1, 2, 2],
- 'CCC': [2, 1, 3, 1]})
+ df = pd.DataFrame({"AAA": [1, 2, 1, 3], "BBB": [1, 1, 2, 2], "CCC": [2, 1, 3, 1]})
df
- source_cols = df.columns # Or some subset would work too
+ source_cols = df.columns # Or some subset would work too
new_cols = [str(x) + "_cat" for x in source_cols]
- categories = {1: 'Alpha', 2: 'Beta', 3: 'Charlie'}
+ categories = {1: "Alpha", 2: "Beta", 3: "Charlie"}
df[new_cols] = df[source_cols].applymap(categories.get)
df
@@ -270,8 +266,7 @@ New columns
.. ipython:: python
- df = pd.DataFrame({'AAA': [1, 1, 1, 2, 2, 2, 3, 3],
- 'BBB': [2, 1, 3, 4, 5, 1, 2, 3]})
+ df = pd.DataFrame({"AAA": [1, 1, 1, 2, 2, 2, 3, 3], "BBB": [2, 1, 3, 4, 5, 1, 2, 3]})
df
Method 1 : idxmin() to get the index of the minimums
@@ -300,25 +295,28 @@ The :ref:`multindexing <advanced.hierarchical>` docs.
.. ipython:: python
- df = pd.DataFrame({'row': [0, 1, 2],
- 'One_X': [1.1, 1.1, 1.1],
- 'One_Y': [1.2, 1.2, 1.2],
- 'Two_X': [1.11, 1.11, 1.11],
- 'Two_Y': [1.22, 1.22, 1.22]})
+ df = pd.DataFrame(
+ {
+ "row": [0, 1, 2],
+ "One_X": [1.1, 1.1, 1.1],
+ "One_Y": [1.2, 1.2, 1.2],
+ "Two_X": [1.11, 1.11, 1.11],
+ "Two_Y": [1.22, 1.22, 1.22],
+ }
+ )
df
# As Labelled Index
- df = df.set_index('row')
+ df = df.set_index("row")
df
# With Hierarchical Columns
- df.columns = pd.MultiIndex.from_tuples([tuple(c.split('_'))
- for c in df.columns])
+ df.columns = pd.MultiIndex.from_tuples([tuple(c.split("_")) for c in df.columns])
df
# Now stack & Reset
df = df.stack(0).reset_index(1)
df
# And fix the labels (Notice the label 'level_1' got added automatically)
- df.columns = ['Sample', 'All_X', 'All_Y']
+ df.columns = ["Sample", "All_X", "All_Y"]
df
Arithmetic
@@ -329,11 +327,10 @@ Arithmetic
.. ipython:: python
- cols = pd.MultiIndex.from_tuples([(x, y) for x in ['A', 'B', 'C']
- for y in ['O', 'I']])
- df = pd.DataFrame(np.random.randn(2, 6), index=['n', 'm'], columns=cols)
+ cols = pd.MultiIndex.from_tuples([(x, y) for x in ["A", "B", "C"] for y in ["O", "I"]])
+ df = pd.DataFrame(np.random.randn(2, 6), index=["n", "m"], columns=cols)
df
- df = df.div(df['C'], level=1)
+ df = df.div(df["C"], level=1)
df
Slicing
@@ -344,10 +341,9 @@ Slicing
.. ipython:: python
- coords = [('AA', 'one'), ('AA', 'six'), ('BB', 'one'), ('BB', 'two'),
- ('BB', 'six')]
+ coords = [("AA", "one"), ("AA", "six"), ("BB", "one"), ("BB", "two"), ("BB", "six")]
index = pd.MultiIndex.from_tuples(coords)
- df = pd.DataFrame([11, 22, 33, 44, 55], index, ['MyData'])
+ df = pd.DataFrame([11, 22, 33, 44, 55], index, ["MyData"])
df
To take the cross section of the 1st level and 1st axis the index:
@@ -355,13 +351,13 @@ To take the cross section of the 1st level and 1st axis the index:
.. ipython:: python
# Note : level and axis are optional, and default to zero
- df.xs('BB', level=0, axis=0)
+ df.xs("BB", level=0, axis=0)
...and now the 2nd level of the 1st axis.
.. ipython:: python
- df.xs('six', level=1, axis=0)
+ df.xs("six", level=1, axis=0)
`Slicing a MultiIndex with xs, method #2
<https://stackoverflow.com/questions/14964493/multiindex-based-indexing-in-pandas>`__
@@ -370,21 +366,20 @@ To take the cross section of the 1st level and 1st axis the index:
import itertools
- index = list(itertools.product(['Ada', 'Quinn', 'Violet'],
- ['Comp', 'Math', 'Sci']))
- headr = list(itertools.product(['Exams', 'Labs'], ['I', 'II']))
- indx = pd.MultiIndex.from_tuples(index, names=['Student', 'Course'])
- cols = pd.MultiIndex.from_tuples(headr) # Notice these are un-named
+ index = list(itertools.product(["Ada", "Quinn", "Violet"], ["Comp", "Math", "Sci"]))
+ headr = list(itertools.product(["Exams", "Labs"], ["I", "II"]))
+ indx = pd.MultiIndex.from_tuples(index, names=["Student", "Course"])
+ cols = pd.MultiIndex.from_tuples(headr) # Notice these are un-named
data = [[70 + x + y + (x * y) % 3 for x in range(4)] for y in range(9)]
df = pd.DataFrame(data, indx, cols)
df
All = slice(None)
- df.loc['Violet']
- df.loc[(All, 'Math'), All]
- df.loc[(slice('Ada', 'Quinn'), 'Math'), All]
- df.loc[(All, 'Math'), ('Exams')]
- df.loc[(All, 'Math'), (All, 'II')]
+ df.loc["Violet"]
+ df.loc[(All, "Math"), All]
+ df.loc[(slice("Ada", "Quinn"), "Math"), All]
+ df.loc[(All, "Math"), ("Exams")]
+ df.loc[(All, "Math"), (All, "II")]
`Setting portions of a MultiIndex with xs
<https://stackoverflow.com/questions/19319432/pandas-selecting-a-lower-level-in-a-dataframe-to-do-a-ffill>`__
@@ -397,7 +392,7 @@ Sorting
.. ipython:: python
- df.sort_values(by=('Labs', 'II'), ascending=False)
+ df.sort_values(by=("Labs", "II"), ascending=False)
`Partial selection, the need for sortedness;
<https://github.com/pandas-dev/pandas/issues/2995>`__
@@ -422,10 +417,12 @@ Fill forward a reversed timeseries
.. ipython:: python
- df = pd.DataFrame(np.random.randn(6, 1),
- index=pd.date_range('2013-08-01', periods=6, freq='B'),
- columns=list('A'))
- df.loc[df.index[3], 'A'] = np.nan
+ df = pd.DataFrame(
+ np.random.randn(6, 1),
+ index=pd.date_range("2013-08-01", periods=6, freq="B"),
+ columns=list("A"),
+ )
+ df.loc[df.index[3], "A"] = np.nan
df
df.reindex(df.index[::-1]).ffill()
@@ -452,22 +449,26 @@ Unlike agg, apply's callable is passed a sub-DataFrame which gives you access to
.. ipython:: python
- df = pd.DataFrame({'animal': 'cat dog cat fish dog cat cat'.split(),
- 'size': list('SSMMMLL'),
- 'weight': [8, 10, 11, 1, 20, 12, 12],
- 'adult': [False] * 5 + [True] * 2})
+ df = pd.DataFrame(
+ {
+ "animal": "cat dog cat fish dog cat cat".split(),
+ "size": list("SSMMMLL"),
+ "weight": [8, 10, 11, 1, 20, 12, 12],
+ "adult": [False] * 5 + [True] * 2,
+ }
+ )
df
# List the size of the animals with the highest weight.
- df.groupby('animal').apply(lambda subf: subf['size'][subf['weight'].idxmax()])
+ df.groupby("animal").apply(lambda subf: subf["size"][subf["weight"].idxmax()])
`Using get_group
<https://stackoverflow.com/questions/14734533/how-to-access-pandas-groupby-dataframe-by-key>`__
.. ipython:: python
- gb = df.groupby(['animal'])
- gb.get_group('cat')
+ gb = df.groupby(["animal"])
+ gb.get_group("cat")
`Apply to different items in a group
<https://stackoverflow.com/questions/15262134/apply-different-functions-to-different-items-in-group-object-python-pandas>`__
@@ -475,12 +476,12 @@ Unlike agg, apply's callable is passed a sub-DataFrame which gives you access to
.. ipython:: python
def GrowUp(x):
- avg_weight = sum(x[x['size'] == 'S'].weight * 1.5)
- avg_weight += sum(x[x['size'] == 'M'].weight * 1.25)
- avg_weight += sum(x[x['size'] == 'L'].weight)
+ avg_weight = sum(x[x["size"] == "S"].weight * 1.5)
+ avg_weight += sum(x[x["size"] == "M"].weight * 1.25)
+ avg_weight += sum(x[x["size"] == "L"].weight)
avg_weight /= len(x)
- return pd.Series(['L', avg_weight, True],
- index=['size', 'weight', 'adult'])
+ return pd.Series(["L", avg_weight, True], index=["size", "weight", "adult"])
+
expected_df = gb.apply(GrowUp)
expected_df
@@ -492,12 +493,15 @@ Unlike agg, apply's callable is passed a sub-DataFrame which gives you access to
S = pd.Series([i / 100.0 for i in range(1, 11)])
+
def cum_ret(x, y):
return x * (1 + y)
+
def red(x):
return functools.reduce(cum_ret, x, 1.0)
+
S.expanding().apply(red, raw=True)
@@ -506,13 +510,15 @@ Unlike agg, apply's callable is passed a sub-DataFrame which gives you access to
.. ipython:: python
- df = pd.DataFrame({'A': [1, 1, 2, 2], 'B': [1, -1, 1, 2]})
- gb = df.groupby('A')
+ df = pd.DataFrame({"A": [1, 1, 2, 2], "B": [1, -1, 1, 2]})
+ gb = df.groupby("A")
+
def replace(g):
mask = g < 0
return g.where(mask, g[~mask].mean())
+
gb.transform(replace)
`Sort groups by aggregated data
@@ -520,13 +526,17 @@ Unlike agg, apply's callable is passed a sub-DataFrame which gives you access to
.. ipython:: python
- df = pd.DataFrame({'code': ['foo', 'bar', 'baz'] * 2,
- 'data': [0.16, -0.21, 0.33, 0.45, -0.59, 0.62],
- 'flag': [False, True] * 3})
+ df = pd.DataFrame(
+ {
+ "code": ["foo", "bar", "baz"] * 2,
+ "data": [0.16, -0.21, 0.33, 0.45, -0.59, 0.62],
+ "flag": [False, True] * 3,
+ }
+ )
- code_groups = df.groupby('code')
+ code_groups = df.groupby("code")
- agg_n_sort_order = code_groups[['data']].transform(sum).sort_values(by='data')
+ agg_n_sort_order = code_groups[["data"]].transform(sum).sort_values(by="data")
sorted_df = df.loc[agg_n_sort_order.index]
@@ -537,15 +547,17 @@ Unlike agg, apply's callable is passed a sub-DataFrame which gives you access to
.. ipython:: python
- rng = pd.date_range(start="2014-10-07", periods=10, freq='2min')
+ rng = pd.date_range(start="2014-10-07", periods=10, freq="2min")
ts = pd.Series(data=list(range(10)), index=rng)
+
def MyCust(x):
if len(x) > 2:
return x[1] * 1.234
return pd.NaT
- mhc = {'Mean': np.mean, 'Max': np.max, 'Custom': MyCust}
+
+ mhc = {"Mean": np.mean, "Max": np.max, "Custom": MyCust}
ts.resample("5min").apply(mhc)
ts
@@ -554,10 +566,9 @@ Unlike agg, apply's callable is passed a sub-DataFrame which gives you access to
.. ipython:: python
- df = pd.DataFrame({'Color': 'Red Red Red Blue'.split(),
- 'Value': [100, 150, 50, 50]})
+ df = pd.DataFrame({"Color": "Red Red Red Blue".split(), "Value": [100, 150, 50, 50]})
df
- df['Counts'] = df.groupby(['Color']).transform(len)
+ df["Counts"] = df.groupby(["Color"]).transform(len)
df
`Shift groups of the values in a column based on the index
@@ -565,13 +576,19 @@ Unlike agg, apply's callable is passed a sub-DataFrame which gives you access to
.. ipython:: python
- df = pd.DataFrame({'line_race': [10, 10, 8, 10, 10, 8],
- 'beyer': [99, 102, 103, 103, 88, 100]},
- index=['Last Gunfighter', 'Last Gunfighter',
- 'Last Gunfighter', 'Paynter', 'Paynter',
- 'Paynter'])
+ df = pd.DataFrame(
+ {"line_race": [10, 10, 8, 10, 10, 8], "beyer": [99, 102, 103, 103, 88, 100]},
+ index=[
+ "Last Gunfighter",
+ "Last Gunfighter",
+ "Last Gunfighter",
+ "Paynter",
+ "Paynter",
+ "Paynter",
+ ],
+ )
df
- df['beyer_shifted'] = df.groupby(level=0)['beyer'].shift(1)
+ df["beyer_shifted"] = df.groupby(level=0)["beyer"].shift(1)
df
`Select row with maximum value from each group
@@ -579,11 +596,15 @@ Unlike agg, apply's callable is passed a sub-DataFrame which gives you access to
.. ipython:: python
- df = pd.DataFrame({'host': ['other', 'other', 'that', 'this', 'this'],
- 'service': ['mail', 'web', 'mail', 'mail', 'web'],
- 'no': [1, 2, 1, 2, 1]}).set_index(['host', 'service'])
- mask = df.groupby(level=0).agg('idxmax')
- df_count = df.loc[mask['no']].reset_index()
+ df = pd.DataFrame(
+ {
+ "host": ["other", "other", "that", "this", "this"],
+ "service": ["mail", "web", "mail", "mail", "web"],
+ "no": [1, 2, 1, 2, 1],
+ }
+ ).set_index(["host", "service"])
+ mask = df.groupby(level=0).agg("idxmax")
+ df_count = df.loc[mask["no"]].reset_index()
df_count
`Grouping like Python's itertools.groupby
@@ -591,9 +612,9 @@ Unlike agg, apply's callable is passed a sub-DataFrame which gives you access to
.. ipython:: python
- df = pd.DataFrame([0, 1, 0, 1, 1, 1, 0, 1, 1], columns=['A'])
- df['A'].groupby((df['A'] != df['A'].shift()).cumsum()).groups
- df['A'].groupby((df['A'] != df['A'].shift()).cumsum()).cumsum()
+ df = pd.DataFrame([0, 1, 0, 1, 1, 1, 0, 1, 1], columns=["A"])
+ df["A"].groupby((df["A"] != df["A"].shift()).cumsum()).groups
+ df["A"].groupby((df["A"] != df["A"].shift()).cumsum()).cumsum()
Expanding data
**************
@@ -617,12 +638,20 @@ Create a list of dataframes, split using a delineation based on logic included i
.. ipython:: python
- df = pd.DataFrame(data={'Case': ['A', 'A', 'A', 'B', 'A', 'A', 'B', 'A',
- 'A'],
- 'Data': np.random.randn(9)})
+ df = pd.DataFrame(
+ data={
+ "Case": ["A", "A", "A", "B", "A", "A", "B", "A", "A"],
+ "Data": np.random.randn(9),
+ }
+ )
- dfs = list(zip(*df.groupby((1 * (df['Case'] == 'B')).cumsum()
- .rolling(window=3, min_periods=1).median())))[-1]
+ dfs = list(
+ zip(
+ *df.groupby(
+ (1 * (df["Case"] == "B")).cumsum().rolling(window=3, min_periods=1).median()
+ )
+ )
+ )[-1]
dfs[0]
dfs[1]
@@ -639,14 +668,30 @@ The :ref:`Pivot <reshaping.pivot>` docs.
.. ipython:: python
- df = pd.DataFrame(data={'Province': ['ON', 'QC', 'BC', 'AL', 'AL', 'MN', 'ON'],
- 'City': ['Toronto', 'Montreal', 'Vancouver',
- 'Calgary', 'Edmonton', 'Winnipeg',
- 'Windsor'],
- 'Sales': [13, 6, 16, 8, 4, 3, 1]})
- table = pd.pivot_table(df, values=['Sales'], index=['Province'],
- columns=['City'], aggfunc=np.sum, margins=True)
- table.stack('City')
+ df = pd.DataFrame(
+ data={
+ "Province": ["ON", "QC", "BC", "AL", "AL", "MN", "ON"],
+ "City": [
+ "Toronto",
+ "Montreal",
+ "Vancouver",
+ "Calgary",
+ "Edmonton",
+ "Winnipeg",
+ "Windsor",
+ ],
+ "Sales": [13, 6, 16, 8, 4, 3, 1],
+ }
+ )
+ table = pd.pivot_table(
+ df,
+ values=["Sales"],
+ index=["Province"],
+ columns=["City"],
+ aggfunc=np.sum,
+ margins=True,
+ )
+ table.stack("City")
`Frequency table like plyr in R
<https://stackoverflow.com/questions/15589354/frequency-tables-in-pandas-like-plyr-in-r>`__
@@ -654,25 +699,60 @@ The :ref:`Pivot <reshaping.pivot>` docs.
.. ipython:: python
grades = [48, 99, 75, 80, 42, 80, 72, 68, 36, 78]
- df = pd.DataFrame({'ID': ["x%d" % r for r in range(10)],
- 'Gender': ['F', 'M', 'F', 'M', 'F',
- 'M', 'F', 'M', 'M', 'M'],
- 'ExamYear': ['2007', '2007', '2007', '2008', '2008',
- '2008', '2008', '2009', '2009', '2009'],
- 'Class': ['algebra', 'stats', 'bio', 'algebra',
- 'algebra', 'stats', 'stats', 'algebra',
- 'bio', 'bio'],
- 'Participated': ['yes', 'yes', 'yes', 'yes', 'no',
- 'yes', 'yes', 'yes', 'yes', 'yes'],
- 'Passed': ['yes' if x > 50 else 'no' for x in grades],
- 'Employed': [True, True, True, False,
- False, False, False, True, True, False],
- 'Grade': grades})
-
- df.groupby('ExamYear').agg({'Participated': lambda x: x.value_counts()['yes'],
- 'Passed': lambda x: sum(x == 'yes'),
- 'Employed': lambda x: sum(x),
- 'Grade': lambda x: sum(x) / len(x)})
+ df = pd.DataFrame(
+ {
+ "ID": ["x%d" % r for r in range(10)],
+ "Gender": ["F", "M", "F", "M", "F", "M", "F", "M", "M", "M"],
+ "ExamYear": [
+ "2007",
+ "2007",
+ "2007",
+ "2008",
+ "2008",
+ "2008",
+ "2008",
+ "2009",
+ "2009",
+ "2009",
+ ],
+ "Class": [
+ "algebra",
+ "stats",
+ "bio",
+ "algebra",
+ "algebra",
+ "stats",
+ "stats",
+ "algebra",
+ "bio",
+ "bio",
+ ],
+ "Participated": [
+ "yes",
+ "yes",
+ "yes",
+ "yes",
+ "no",
+ "yes",
+ "yes",
+ "yes",
+ "yes",
+ "yes",
+ ],
+ "Passed": ["yes" if x > 50 else "no" for x in grades],
+ "Employed": [True, True, True, False, False, False, False, True, True, False],
+ "Grade": grades,
+ }
+ )
+
+ df.groupby("ExamYear").agg(
+ {
+ "Participated": lambda x: x.value_counts()["yes"],
+ "Passed": lambda x: sum(x == "yes"),
+ "Employed": lambda x: sum(x),
+ "Grade": lambda x: sum(x) / len(x),
+ }
+ )
`Plot pandas DataFrame with year over year data
<https://stackoverflow.com/questions/30379789/plot-pandas-data-frame-with-year-over-year-data>`__
@@ -681,11 +761,14 @@ To create year and month cross tabulation:
.. ipython:: python
- df = pd.DataFrame({'value': np.random.randn(36)},
- index=pd.date_range('2011-01-01', freq='M', periods=36))
+ df = pd.DataFrame(
+ {"value": np.random.randn(36)},
+ index=pd.date_range("2011-01-01", freq="M", periods=36),
+ )
- pd.pivot_table(df, index=df.index.month, columns=df.index.year,
- values='value', aggfunc='sum')
+ pd.pivot_table(
+ df, index=df.index.month, columns=df.index.year, values="value", aggfunc="sum"
+ )
Apply
*****
@@ -695,15 +778,20 @@ Apply
.. ipython:: python
- df = pd.DataFrame(data={'A': [[2, 4, 8, 16], [100, 200], [10, 20, 30]],
- 'B': [['a', 'b', 'c'], ['jj', 'kk'], ['ccc']]},
- index=['I', 'II', 'III'])
+ df = pd.DataFrame(
+ data={
+ "A": [[2, 4, 8, 16], [100, 200], [10, 20, 30]],
+ "B": [["a", "b", "c"], ["jj", "kk"], ["ccc"]],
+ },
+ index=["I", "II", "III"],
+ )
+
def SeriesFromSubList(aList):
return pd.Series(aList)
- df_orgz = pd.concat({ind: row.apply(SeriesFromSubList)
- for ind, row in df.iterrows()})
+
+ df_orgz = pd.concat({ind: row.apply(SeriesFromSubList) for ind, row in df.iterrows()})
df_orgz
`Rolling apply with a DataFrame returning a Series
@@ -713,17 +801,25 @@ Rolling Apply to multiple columns where function calculates a Series before a Sc
.. ipython:: python
- df = pd.DataFrame(data=np.random.randn(2000, 2) / 10000,
- index=pd.date_range('2001-01-01', periods=2000),
- columns=['A', 'B'])
+ df = pd.DataFrame(
+ data=np.random.randn(2000, 2) / 10000,
+ index=pd.date_range("2001-01-01", periods=2000),
+ columns=["A", "B"],
+ )
df
+
def gm(df, const):
- v = ((((df['A'] + df['B']) + 1).cumprod()) - 1) * const
+ v = ((((df["A"] + df["B"]) + 1).cumprod()) - 1) * const
return v.iloc[-1]
- s = pd.Series({df.index[i]: gm(df.iloc[i:min(i + 51, len(df) - 1)], 5)
- for i in range(len(df) - 50)})
+
+ s = pd.Series(
+ {
+ df.index[i]: gm(df.iloc[i: min(i + 51, len(df) - 1)], 5)
+ for i in range(len(df) - 50)
+ }
+ )
s
`Rolling apply with a DataFrame returning a Scalar
@@ -733,20 +829,29 @@ Rolling Apply to multiple columns where function returns a Scalar (Volume Weight
.. ipython:: python
- rng = pd.date_range(start='2014-01-01', periods=100)
- df = pd.DataFrame({'Open': np.random.randn(len(rng)),
- 'Close': np.random.randn(len(rng)),
- 'Volume': np.random.randint(100, 2000, len(rng))},
- index=rng)
+ rng = pd.date_range(start="2014-01-01", periods=100)
+ df = pd.DataFrame(
+ {
+ "Open": np.random.randn(len(rng)),
+ "Close": np.random.randn(len(rng)),
+ "Volume": np.random.randint(100, 2000, len(rng)),
+ },
+ index=rng,
+ )
df
+
def vwap(bars):
- return ((bars.Close * bars.Volume).sum() / bars.Volume.sum())
+ return (bars.Close * bars.Volume).sum() / bars.Volume.sum()
+
window = 5
- s = pd.concat([(pd.Series(vwap(df.iloc[i:i + window]),
- index=[df.index[i + window]]))
- for i in range(len(df) - window)])
+ s = pd.concat(
+ [
+ (pd.Series(vwap(df.iloc[i: i + window]), index=[df.index[i + window]]))
+ for i in range(len(df) - window)
+ ]
+ )
s.round(2)
Timeseries
@@ -778,8 +883,8 @@ Calculate the first day of the month for each entry in a DatetimeIndex
.. ipython:: python
- dates = pd.date_range('2000-01-01', periods=5)
- dates.to_period(freq='M').to_timestamp()
+ dates = pd.date_range("2000-01-01", periods=5)
+ dates.to_period(freq="M").to_timestamp()
.. _cookbook.resample:
@@ -825,8 +930,8 @@ The :ref:`Concat <merging.concatenation>` docs. The :ref:`Join <merging.join>` d
.. ipython:: python
- rng = pd.date_range('2000-01-01', periods=6)
- df1 = pd.DataFrame(np.random.randn(6, 3), index=rng, columns=['A', 'B', 'C'])
+ rng = pd.date_range("2000-01-01", periods=6)
+ df1 = pd.DataFrame(np.random.randn(6, 3), index=rng, columns=["A", "B", "C"])
df2 = df1.copy()
Depending on df construction, ``ignore_index`` may be needed
@@ -841,17 +946,25 @@ Depending on df construction, ``ignore_index`` may be needed
.. ipython:: python
- df = pd.DataFrame(data={'Area': ['A'] * 5 + ['C'] * 2,
- 'Bins': [110] * 2 + [160] * 3 + [40] * 2,
- 'Test_0': [0, 1, 0, 1, 2, 0, 1],
- 'Data': np.random.randn(7)})
+ df = pd.DataFrame(
+ data={
+ "Area": ["A"] * 5 + ["C"] * 2,
+ "Bins": [110] * 2 + [160] * 3 + [40] * 2,
+ "Test_0": [0, 1, 0, 1, 2, 0, 1],
+ "Data": np.random.randn(7),
+ }
+ )
df
- df['Test_1'] = df['Test_0'] - 1
+ df["Test_1"] = df["Test_0"] - 1
- pd.merge(df, df, left_on=['Bins', 'Area', 'Test_0'],
- right_on=['Bins', 'Area', 'Test_1'],
- suffixes=('_L', '_R'))
+ pd.merge(
+ df,
+ df,
+ left_on=["Bins", "Area", "Test_0"],
+ right_on=["Bins", "Area", "Test_1"],
+ suffixes=("_L", "_R"),
+ )
`How to set the index and join
<https://stackoverflow.com/questions/14341805/pandas-merge-pd-merge-how-to-set-the-index-and-join>`__
@@ -902,16 +1015,18 @@ The :ref:`Plotting <visualization>` docs.
.. ipython:: python
df = pd.DataFrame(
- {'stratifying_var': np.random.uniform(0, 100, 20),
- 'price': np.random.normal(100, 5, 20)})
+ {
+ "stratifying_var": np.random.uniform(0, 100, 20),
+ "price": np.random.normal(100, 5, 20),
+ }
+ )
- df['quartiles'] = pd.qcut(
- df['stratifying_var'],
- 4,
- labels=['0-25%', '25-50%', '50-75%', '75-100%'])
+ df["quartiles"] = pd.qcut(
+ df["stratifying_var"], 4, labels=["0-25%", "25-50%", "50-75%", "75-100%"]
+ )
@savefig quartile_boxplot.png
- df.boxplot(column='price', by='quartiles')
+ df.boxplot(column="price", by="quartiles")
Data in/out
-----------
@@ -973,9 +1088,9 @@ of the individual frames into a list, and then combine the frames in the list us
for i in range(3):
data = pd.DataFrame(np.random.randn(10, 4))
- data.to_csv('file_{}.csv'.format(i))
+ data.to_csv("file_{}.csv".format(i))
- files = ['file_0.csv', 'file_1.csv', 'file_2.csv']
+ files = ["file_0.csv", "file_1.csv", "file_2.csv"]
result = pd.concat([pd.read_csv(f) for f in files], ignore_index=True)
You can use the same approach to read all files matching a pattern. Here is an example using ``glob``:
@@ -985,7 +1100,7 @@ You can use the same approach to read all files matching a pattern. Here is an
import glob
import os
- files = glob.glob('file_*.csv')
+ files = glob.glob("file_*.csv")
result = pd.concat([pd.read_csv(f) for f in files], ignore_index=True)
Finally, this strategy will work with the other ``pd.read_*(...)`` functions described in the :ref:`io docs<io>`.
@@ -994,7 +1109,7 @@ Finally, this strategy will work with the other ``pd.read_*(...)`` functions des
:suppress:
for i in range(3):
- os.remove('file_{}.csv'.format(i))
+ os.remove("file_{}.csv".format(i))
Parsing date components in multi-columns
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
@@ -1003,12 +1118,12 @@ Parsing date components in multi-columns is faster with a format
.. ipython:: python
- i = pd.date_range('20000101', periods=10000)
- df = pd.DataFrame({'year': i.year, 'month': i.month, 'day': i.day})
+ i = pd.date_range("20000101", periods=10000)
+ df = pd.DataFrame({"year": i.year, "month": i.month, "day": i.day})
df.head()
+
%timeit pd.to_datetime(df.year * 10000 + df.month * 100 + df.day, format='%Y%m%d')
- ds = df.apply(lambda x: "%04d%02d%02d" % (x['year'],
- x['month'], x['day']), axis=1)
+ ds = df.apply(lambda x: "%04d%02d%02d" % (x["year"], x["month"], x["day"]), axis=1)
ds.head()
%timeit pd.to_datetime(ds)
@@ -1046,18 +1161,20 @@ Option 1: pass rows explicitly to skip rows
from io import StringIO
- pd.read_csv(StringIO(data), sep=';', skiprows=[11, 12],
- index_col=0, parse_dates=True, header=10)
+ pd.read_csv(
+ StringIO(data), sep=";", skiprows=[11, 12], index_col=0, parse_dates=True, header=10
+ )
Option 2: read column names and then data
"""""""""""""""""""""""""""""""""""""""""
.. ipython:: python
- pd.read_csv(StringIO(data), sep=';', header=10, nrows=10).columns
- columns = pd.read_csv(StringIO(data), sep=';', header=10, nrows=10).columns
- pd.read_csv(StringIO(data), sep=';', index_col=0,
- header=12, parse_dates=True, names=columns)
+ pd.read_csv(StringIO(data), sep=";", header=10, nrows=10).columns
+ columns = pd.read_csv(StringIO(data), sep=";", header=10, nrows=10).columns
+ pd.read_csv(
+ StringIO(data), sep=";", index_col=0, header=12, parse_dates=True, names=columns
+ )
.. _cookbook.sql:
@@ -1153,18 +1270,18 @@ Storing Attributes to a group node
.. ipython:: python
df = pd.DataFrame(np.random.randn(8, 3))
- store = pd.HDFStore('test.h5')
- store.put('df', df)
+ store = pd.HDFStore("test.h5")
+ store.put("df", df)
# you can store an arbitrary Python object via pickle
- store.get_storer('df').attrs.my_attribute = {'A': 10}
- store.get_storer('df').attrs.my_attribute
+ store.get_storer("df").attrs.my_attribute = {"A": 10}
+ store.get_storer("df").attrs.my_attribute
.. ipython:: python
:suppress:
store.close()
- os.remove('test.h5')
+ os.remove("test.h5")
You can create or load a HDFStore in-memory by passing the ``driver``
parameter to PyTables. Changes are only written to disk when the HDFStore
@@ -1172,10 +1289,10 @@ is closed.
.. ipython:: python
- store = pd.HDFStore('test.h5', 'w', diver='H5FD_CORE')
+ store = pd.HDFStore("test.h5", "w", diver="H5FD_CORE")
df = pd.DataFrame(np.random.randn(8, 3))
- store['test'] = df
+ store["test"] = df
# only after closing the store, data is written to disk:
store.close()
@@ -1183,7 +1300,7 @@ is closed.
.. ipython:: python
:suppress:
- os.remove('test.h5')
+ os.remove("test.h5")
.. _cookbook.binary:
@@ -1232,15 +1349,14 @@ in the frame:
.. code-block:: python
- names = 'count', 'avg', 'scale'
+ names = "count", "avg", "scale"
# note that the offsets are larger than the size of the type because of
# struct padding
offsets = 0, 8, 16
- formats = 'i4', 'f8', 'f4'
- dt = np.dtype({'names': names, 'offsets': offsets, 'formats': formats},
- align=True)
- df = pd.DataFrame(np.fromfile('binary.dat', dt))
+ formats = "i4", "f8", "f4"
+ dt = np.dtype({"names": names, "offsets": offsets, "formats": formats}, align=True)
+ df = pd.DataFrame(np.fromfile("binary.dat", dt))
.. note::
@@ -1289,10 +1405,11 @@ The ``method`` argument within ``DataFrame.corr`` can accept a callable in addit
A = a - a_bar - a_bar.T + np.full(shape=(n, n), fill_value=a_bar.mean())
B = b - b_bar - b_bar.T + np.full(shape=(n, n), fill_value=b_bar.mean())
cov_ab = np.sqrt(np.nansum(A * B)) / n
- std_a = np.sqrt(np.sqrt(np.nansum(A**2)) / n)
- std_b = np.sqrt(np.sqrt(np.nansum(B**2)) / n)
+ std_a = np.sqrt(np.sqrt(np.nansum(A ** 2)) / n)
+ std_b = np.sqrt(np.sqrt(np.nansum(B ** 2)) / n)
return cov_ab / std_a / std_b
+
df = pd.DataFrame(np.random.normal(size=(100, 3)))
df.corr(method=distcorr)
@@ -1308,7 +1425,7 @@ The :ref:`Timedeltas <timedeltas.timedeltas>` docs.
import datetime
- s = pd.Series(pd.date_range('2012-1-1', periods=3, freq='D'))
+ s = pd.Series(pd.date_range("2012-1-1", periods=3, freq="D"))
s - s.max()
@@ -1329,12 +1446,12 @@ The :ref:`Timedeltas <timedeltas.timedeltas>` docs.
deltas = pd.Series([datetime.timedelta(days=i) for i in range(3)])
- df = pd.DataFrame({'A': s, 'B': deltas})
+ df = pd.DataFrame({"A": s, "B": deltas})
df
- df['New Dates'] = df['A'] + df['B']
+ df["New Dates"] = df["A"] + df["B"]
- df['Delta'] = df['A'] - df['New Dates']
+ df["Delta"] = df["A"] - df["New Dates"]
df
df.dtypes
@@ -1365,7 +1482,8 @@ of the data values:
rows = itertools.product(*data_dict.values())
return pd.DataFrame.from_records(rows, columns=data_dict.keys())
- df = expand_grid({'height': [60, 70],
- 'weight': [100, 140, 180],
- 'sex': ['Male', 'Female']})
+
+ df = expand_grid(
+ {"height": [60, 70], "weight": [100, 140, 180], "sex": ["Male", "Female"]}
+ )
df
| Addresses part of #36777
Ran blacken-tools on cookbook.rst and checked for warnings from flake8-rst | https://api.github.com/repos/pandas-dev/pandas/pulls/36811 | 2020-10-02T17:42:06Z | 2020-10-02T19:15:08Z | 2020-10-02T19:15:08Z | 2020-10-02T20:15:07Z |
CLN: test_moments_consistency_*.py | diff --git a/pandas/tests/window/common.py b/pandas/tests/window/common.py
index 7e0be331ec8d5..7c8c9de40f7c5 100644
--- a/pandas/tests/window/common.py
+++ b/pandas/tests/window/common.py
@@ -4,49 +4,6 @@
import pandas._testing as tm
-def check_pairwise_moment(frame, dispatch, name, **kwargs):
- def get_result(obj, obj2=None):
- return getattr(getattr(obj, dispatch)(**kwargs), name)(obj2)
-
- result = get_result(frame)
- result = result.loc[(slice(None), 1), 5]
- result.index = result.index.droplevel(1)
- expected = get_result(frame[1], frame[5])
- expected.index = expected.index._with_freq(None)
- tm.assert_series_equal(result, expected, check_names=False)
-
-
-def ew_func(A, B, com, name, **kwargs):
- return getattr(A.ewm(com, **kwargs), name)(B)
-
-
-def check_binary_ew(name, A, B):
-
- result = ew_func(A=A, B=B, com=20, name=name, min_periods=5)
- assert np.isnan(result.values[:14]).all()
- assert not np.isnan(result.values[14:]).any()
-
-
-def check_binary_ew_min_periods(name, min_periods, A, B):
- # GH 7898
- result = ew_func(A, B, 20, name=name, min_periods=min_periods)
- # binary functions (ewmcov, ewmcorr) with bias=False require at
- # least two values
- assert np.isnan(result.values[:11]).all()
- assert not np.isnan(result.values[11:]).any()
-
- # check series of length 0
- empty = Series([], dtype=np.float64)
- result = ew_func(empty, empty, 50, name=name, min_periods=min_periods)
- tm.assert_series_equal(result, empty)
-
- # check series of length 1
- result = ew_func(
- Series([1.0]), Series([1.0]), 50, name=name, min_periods=min_periods
- )
- tm.assert_series_equal(result, Series([np.NaN]))
-
-
def moments_consistency_mock_mean(x, mean, mock_mean):
mean_x = mean(x)
# check that correlation of a series with itself is either 1 or NaN
diff --git a/pandas/tests/window/moments/test_moments_consistency_ewm.py b/pandas/tests/window/moments/test_moments_consistency_ewm.py
index f143278e12ec5..089ec697b5b1c 100644
--- a/pandas/tests/window/moments/test_moments_consistency_ewm.py
+++ b/pandas/tests/window/moments/test_moments_consistency_ewm.py
@@ -3,11 +3,8 @@
import pytest
from pandas import DataFrame, Series, concat
+import pandas._testing as tm
from pandas.tests.window.common import (
- check_binary_ew,
- check_binary_ew_min_periods,
- check_pairwise_moment,
- ew_func,
moments_consistency_cov_data,
moments_consistency_is_constant,
moments_consistency_mock_mean,
@@ -20,15 +17,43 @@
@pytest.mark.parametrize("func", ["cov", "corr"])
def test_ewm_pairwise_cov_corr(func, frame):
- check_pairwise_moment(frame, "ewm", func, span=10, min_periods=5)
+ result = getattr(frame.ewm(span=10, min_periods=5), func)()
+ result = result.loc[(slice(None), 1), 5]
+ result.index = result.index.droplevel(1)
+ expected = getattr(frame[1].ewm(span=10, min_periods=5), func)(frame[5])
+ expected.index = expected.index._with_freq(None)
+ tm.assert_series_equal(result, expected, check_names=False)
@pytest.mark.parametrize("name", ["cov", "corr"])
-def test_ewm_corr_cov(name, min_periods, binary_ew_data):
+def test_ewm_corr_cov(name, binary_ew_data):
A, B = binary_ew_data
- check_binary_ew(name="corr", A=A, B=B)
- check_binary_ew_min_periods("corr", min_periods, A, B)
+ result = getattr(A.ewm(com=20, min_periods=5), name)(B)
+ assert np.isnan(result.values[:14]).all()
+ assert not np.isnan(result.values[14:]).any()
+
+
+@pytest.mark.parametrize("name", ["cov", "corr"])
+def test_ewm_corr_cov_min_periods(name, min_periods, binary_ew_data):
+ # GH 7898
+ A, B = binary_ew_data
+ result = getattr(A.ewm(com=20, min_periods=min_periods), name)(B)
+ # binary functions (ewmcov, ewmcorr) with bias=False require at
+ # least two values
+ assert np.isnan(result.values[:11]).all()
+ assert not np.isnan(result.values[11:]).any()
+
+ # check series of length 0
+ empty = Series([], dtype=np.float64)
+ result = getattr(empty.ewm(com=50, min_periods=min_periods), name)(empty)
+ tm.assert_series_equal(result, empty)
+
+ # check series of length 1
+ result = getattr(Series([1.0]).ewm(com=50, min_periods=min_periods), name)(
+ Series([1.0])
+ )
+ tm.assert_series_equal(result, Series([np.NaN]))
@pytest.mark.parametrize("name", ["cov", "corr"])
@@ -38,7 +63,7 @@ def test_different_input_array_raise_exception(name, binary_ew_data):
msg = "Input arrays must be of the same type!"
# exception raised is Exception
with pytest.raises(Exception, match=msg):
- ew_func(A, randn(50), 20, name=name, min_periods=5)
+ getattr(A.ewm(com=20, min_periods=5), name)(randn(50))
@pytest.mark.slow
diff --git a/pandas/tests/window/moments/test_moments_consistency_rolling.py b/pandas/tests/window/moments/test_moments_consistency_rolling.py
index dfcbdde466d44..f9670e0c30ade 100644
--- a/pandas/tests/window/moments/test_moments_consistency_rolling.py
+++ b/pandas/tests/window/moments/test_moments_consistency_rolling.py
@@ -12,7 +12,6 @@
import pandas._testing as tm
from pandas.core.window.common import flex_binary_moment
from pandas.tests.window.common import (
- check_pairwise_moment,
moments_consistency_cov_data,
moments_consistency_is_constant,
moments_consistency_mock_mean,
@@ -60,7 +59,12 @@ def test_rolling_corr(series):
@pytest.mark.parametrize("func", ["cov", "corr"])
def test_rolling_pairwise_cov_corr(func, frame):
- check_pairwise_moment(frame, "rolling", func, window=10, min_periods=5)
+ result = getattr(frame.rolling(window=10, min_periods=5), func)()
+ result = result.loc[(slice(None), 1), 5]
+ result.index = result.index.droplevel(1)
+ expected = getattr(frame[1].rolling(window=10, min_periods=5), func)(frame[5])
+ expected.index = expected.index._with_freq(None)
+ tm.assert_series_equal(result, expected, check_names=False)
@pytest.mark.parametrize("method", ["corr", "cov"])
| - [x] tests added / passed
- [x] passes `black pandas`
- [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
In-lining some common functions to make it easier to follow test failure tracebacks | https://api.github.com/repos/pandas-dev/pandas/pulls/36810 | 2020-10-02T17:34:42Z | 2020-10-02T21:39:27Z | 2020-10-02T21:39:27Z | 2020-10-02T21:42:16Z |
TST: insert 'match' to bare pytest raises in pandas/tests/indexing/te… | diff --git a/pandas/tests/indexing/test_indexing.py b/pandas/tests/indexing/test_indexing.py
index 7d5fea232817d..fd83f9ab29407 100644
--- a/pandas/tests/indexing/test_indexing.py
+++ b/pandas/tests/indexing/test_indexing.py
@@ -31,7 +31,11 @@ def test_setitem_ndarray_1d(self):
df["bar"] = np.zeros(10, dtype=complex)
# invalid
- with pytest.raises(ValueError):
+ msg = (
+ "cannot set using a multi-index selection "
+ "indexer with a different length than the value"
+ )
+ with pytest.raises(ValueError, match=msg):
df.loc[df.index[2:5], "bar"] = np.array([2.33j, 1.23 + 0.1j, 2.2, 1.0])
# valid
@@ -48,7 +52,8 @@ def test_setitem_ndarray_1d(self):
df["foo"] = np.zeros(10, dtype=np.float64)
df["bar"] = np.zeros(10, dtype=complex)
- with pytest.raises(ValueError):
+ msg = "Must have equal len keys and value when setting with an iterable"
+ with pytest.raises(ValueError, match=msg):
df[2:5] = np.arange(1, 4) * 1j
@pytest.mark.parametrize(
@@ -1055,13 +1060,13 @@ def test_1tuple_without_multiindex():
def test_duplicate_index_mistyped_key_raises_keyerror():
# GH#29189 float_index.get_loc(None) should raise KeyError, not TypeError
ser = pd.Series([2, 5, 6, 8], index=[2.0, 4.0, 4.0, 5.0])
- with pytest.raises(KeyError):
+ with pytest.raises(KeyError, match="None"):
ser[None]
- with pytest.raises(KeyError):
+ with pytest.raises(KeyError, match="None"):
ser.index.get_loc(None)
- with pytest.raises(KeyError):
+ with pytest.raises(KeyError, match="None"):
ser.index._engine.get_loc(None)
| …st_indexing.py
- [ ] ref https://github.com/pandas-dev/pandas/issues/30999
- [ ] tests added / passed
- [ ] passes `black pandas`
- [ ] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [ ] whatsnew entry
| https://api.github.com/repos/pandas-dev/pandas/pulls/36809 | 2020-10-02T17:11:04Z | 2020-10-02T18:08:58Z | 2020-10-02T18:08:58Z | 2020-10-02T18:09:06Z |
Fix select_dtypes(include='int') for Windows. | diff --git a/doc/source/whatsnew/v1.3.0.rst b/doc/source/whatsnew/v1.3.0.rst
index 17d8c79994dbe..2deb3295d3c68 100644
--- a/doc/source/whatsnew/v1.3.0.rst
+++ b/doc/source/whatsnew/v1.3.0.rst
@@ -304,6 +304,7 @@ Numeric
- Bug in :meth:`DataFrame.mode` and :meth:`Series.mode` not keeping consistent integer :class:`Index` for empty input (:issue:`33321`)
- Bug in :meth:`DataFrame.rank` with ``np.inf`` and mixture of ``np.nan`` and ``np.inf`` (:issue:`32593`)
- Bug in :meth:`DataFrame.rank` with ``axis=0`` and columns holding incomparable types raising ``IndexError`` (:issue:`38932`)
+- Bug in :func:`select_dtypes` different behavior between Windows and Linux with ``include="int"`` (:issue:`36569`)
-
Conversion
diff --git a/pandas/core/frame.py b/pandas/core/frame.py
index 6357b8feb348b..2e4a6f0928f94 100644
--- a/pandas/core/frame.py
+++ b/pandas/core/frame.py
@@ -3786,8 +3786,21 @@ def select_dtypes(self, include=None, exclude=None) -> DataFrame:
raise ValueError("at least one of include or exclude must be nonempty")
# convert the myriad valid dtypes object to a single representation
- include = frozenset(infer_dtype_from_object(x) for x in include)
- exclude = frozenset(infer_dtype_from_object(x) for x in exclude)
+ def check_int_infer_dtype(dtypes):
+ converted_dtypes = []
+ for dtype in dtypes:
+ # Numpy maps int to different types (int32, in64) on Windows and Linux
+ # see https://github.com/numpy/numpy/issues/9464
+ if (isinstance(dtype, str) and dtype == "int") or (dtype is int):
+ converted_dtypes.append(np.int32)
+ converted_dtypes.append(np.int64)
+ else:
+ converted_dtypes.append(infer_dtype_from_object(dtype))
+ return frozenset(converted_dtypes)
+
+ include = check_int_infer_dtype(include)
+ exclude = check_int_infer_dtype(exclude)
+
for dtypes in (include, exclude):
invalidate_string_dtypes(dtypes)
diff --git a/pandas/tests/frame/methods/test_select_dtypes.py b/pandas/tests/frame/methods/test_select_dtypes.py
index 434df5ccccaf7..2a94b18b806f8 100644
--- a/pandas/tests/frame/methods/test_select_dtypes.py
+++ b/pandas/tests/frame/methods/test_select_dtypes.py
@@ -110,7 +110,7 @@ def test_select_dtypes_exclude_include_using_list_like(self):
{
"a": list("abc"),
"b": list(range(1, 4)),
- "c": np.arange(3, 6).astype("u1"),
+ "c": np.arange(3, 6, dtype="u1"),
"d": np.arange(4.0, 7.0, dtype="float64"),
"e": [True, False, True],
"f": pd.date_range("now", periods=3).values,
@@ -128,6 +128,26 @@ def test_select_dtypes_exclude_include_using_list_like(self):
e = df[["b", "e"]]
tm.assert_frame_equal(r, e)
+ @pytest.mark.parametrize(
+ "include", [(np.bool_, "int"), (np.bool_, "integer"), ("bool", int)]
+ )
+ def test_select_dtypes_exclude_include_int(self, include):
+ # Fix select_dtypes(include='int') for Windows, FYI #36596
+ df = DataFrame(
+ {
+ "a": list("abc"),
+ "b": list(range(1, 4)),
+ "c": np.arange(3, 6, dtype="int32"),
+ "d": np.arange(4.0, 7.0, dtype="float64"),
+ "e": [True, False, True],
+ "f": pd.date_range("now", periods=3).values,
+ }
+ )
+ exclude = (np.datetime64,)
+ result = df.select_dtypes(include=include, exclude=exclude)
+ expected = df[["b", "c", "e"]]
+ tm.assert_frame_equal(result, expected)
+
def test_select_dtypes_include_using_scalars(self):
df = DataFrame(
{
| - [x] closes #36596
- [x] tests added / passed
- [x] passes `black pandas`
- [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [x] whatsnew entry
| https://api.github.com/repos/pandas-dev/pandas/pulls/36808 | 2020-10-02T15:51:32Z | 2021-02-07T17:06:43Z | 2021-02-07T17:06:43Z | 2021-02-07T17:06:47Z |
github --> GitHub | diff --git a/README.md b/README.md
index a2f2f1c04442a..04d8963b3392e 100644
--- a/README.md
+++ b/README.md
@@ -154,7 +154,7 @@ For usage questions, the best place to go to is [StackOverflow](https://stackove
Further, general questions and discussions can also take place on the [pydata mailing list](https://groups.google.com/forum/?fromgroups#!forum/pydata).
## Discussion and Development
-Most development discussions take place on github in this repo. Further, the [pandas-dev mailing list](https://mail.python.org/mailman/listinfo/pandas-dev) can also be used for specialized discussions or design issues, and a [Gitter channel](https://gitter.im/pydata/pandas) is available for quick development related questions.
+Most development discussions take place on GitHub in this repo. Further, the [pandas-dev mailing list](https://mail.python.org/mailman/listinfo/pandas-dev) can also be used for specialized discussions or design issues, and a [Gitter channel](https://gitter.im/pydata/pandas) is available for quick development related questions.
## Contributing to pandas [](https://www.codetriage.com/pandas-dev/pandas)
| Changed from "github" to "GitHub" in README.md to make it consistent throughout the document.
| https://api.github.com/repos/pandas-dev/pandas/pulls/36804 | 2020-10-02T13:27:01Z | 2020-10-02T13:30:18Z | null | 2020-10-02T13:30:29Z |
DOC: ran blacken docs tool and checked output to improve formatting #36777 | diff --git a/doc/source/user_guide/10min.rst b/doc/source/user_guide/10min.rst
index 673f8689736f1..08f83a4674ada 100644
--- a/doc/source/user_guide/10min.rst
+++ b/doc/source/user_guide/10min.rst
@@ -34,9 +34,9 @@ and labeled columns:
.. ipython:: python
- dates = pd.date_range('20130101', periods=6)
+ dates = pd.date_range("20130101", periods=6)
dates
- df = pd.DataFrame(np.random.randn(6, 4), index=dates, columns=list('ABCD'))
+ df = pd.DataFrame(np.random.randn(6, 4), index=dates, columns=list("ABCD"))
df
Creating a :class:`DataFrame` by passing a dict of objects that can be converted to series-like.
@@ -156,7 +156,7 @@ Sorting by values:
.. ipython:: python
- df.sort_values(by='B')
+ df.sort_values(by="B")
Selection
---------
@@ -178,14 +178,14 @@ equivalent to ``df.A``:
.. ipython:: python
- df['A']
+ df["A"]
Selecting via ``[]``, which slices the rows.
.. ipython:: python
df[0:3]
- df['20130102':'20130104']
+ df["20130102":"20130104"]
Selection by label
~~~~~~~~~~~~~~~~~~
@@ -202,31 +202,31 @@ Selecting on a multi-axis by label:
.. ipython:: python
- df.loc[:, ['A', 'B']]
+ df.loc[:, ["A", "B"]]
Showing label slicing, both endpoints are *included*:
.. ipython:: python
- df.loc['20130102':'20130104', ['A', 'B']]
+ df.loc["20130102":"20130104", ["A", "B"]]
Reduction in the dimensions of the returned object:
.. ipython:: python
- df.loc['20130102', ['A', 'B']]
+ df.loc["20130102", ["A", "B"]]
For getting a scalar value:
.. ipython:: python
- df.loc[dates[0], 'A']
+ df.loc[dates[0], "A"]
For getting fast access to a scalar (equivalent to the prior method):
.. ipython:: python
- df.at[dates[0], 'A']
+ df.at[dates[0], "A"]
Selection by position
~~~~~~~~~~~~~~~~~~~~~
@@ -282,7 +282,7 @@ Using a single column's values to select data.
.. ipython:: python
- df[df['A'] > 0]
+ df[df["A"] > 0]
Selecting values from a DataFrame where a boolean condition is met.
@@ -295,9 +295,9 @@ Using the :func:`~Series.isin` method for filtering:
.. ipython:: python
df2 = df.copy()
- df2['E'] = ['one', 'one', 'two', 'three', 'four', 'three']
+ df2["E"] = ["one", "one", "two", "three", "four", "three"]
df2
- df2[df2['E'].isin(['two', 'four'])]
+ df2[df2["E"].isin(["two", "four"])]
Setting
~~~~~~~
@@ -307,15 +307,15 @@ by the indexes.
.. ipython:: python
- s1 = pd.Series([1, 2, 3, 4, 5, 6], index=pd.date_range('20130102', periods=6))
+ s1 = pd.Series([1, 2, 3, 4, 5, 6], index=pd.date_range("20130102", periods=6))
s1
- df['F'] = s1
+ df["F"] = s1
Setting values by label:
.. ipython:: python
- df.at[dates[0], 'A'] = 0
+ df.at[dates[0], "A"] = 0
Setting values by position:
@@ -327,7 +327,7 @@ Setting by assigning with a NumPy array:
.. ipython:: python
- df.loc[:, 'D'] = np.array([5] * len(df))
+ df.loc[:, "D"] = np.array([5] * len(df))
The result of the prior setting operations.
@@ -356,15 +356,15 @@ returns a copy of the data.
.. ipython:: python
- df1 = df.reindex(index=dates[0:4], columns=list(df.columns) + ['E'])
- df1.loc[dates[0]:dates[1], 'E'] = 1
+ df1 = df.reindex(index=dates[0:4], columns=list(df.columns) + ["E"])
+ df1.loc[dates[0] : dates[1], "E"] = 1
df1
To drop any rows that have missing data.
.. ipython:: python
- df1.dropna(how='any')
+ df1.dropna(how="any")
Filling missing data.
@@ -408,7 +408,7 @@ In addition, pandas automatically broadcasts along the specified dimension.
s = pd.Series([1, 3, 5, np.nan, 6, 8], index=dates).shift(2)
s
- df.sub(s, axis='index')
+ df.sub(s, axis="index")
Apply
@@ -444,7 +444,7 @@ some cases always uses them). See more at :ref:`Vectorized String Methods
.. ipython:: python
- s = pd.Series(['A', 'B', 'C', 'Aaba', 'Baca', np.nan, 'CABA', 'dog', 'cat'])
+ s = pd.Series(["A", "B", "C", "Aaba", "Baca", np.nan, "CABA", "dog", "cat"])
s.str.lower()
Merge
@@ -486,21 +486,21 @@ SQL style merges. See the :ref:`Database style joining <merging.join>` section.
.. ipython:: python
- left = pd.DataFrame({'key': ['foo', 'foo'], 'lval': [1, 2]})
- right = pd.DataFrame({'key': ['foo', 'foo'], 'rval': [4, 5]})
+ left = pd.DataFrame({"key": ["foo", "foo"], "lval": [1, 2]})
+ right = pd.DataFrame({"key": ["foo", "foo"], "rval": [4, 5]})
left
right
- pd.merge(left, right, on='key')
+ pd.merge(left, right, on="key")
Another example that can be given is:
.. ipython:: python
- left = pd.DataFrame({'key': ['foo', 'bar'], 'lval': [1, 2]})
- right = pd.DataFrame({'key': ['foo', 'bar'], 'rval': [4, 5]})
+ left = pd.DataFrame({"key": ["foo", "bar"], "lval": [1, 2]})
+ right = pd.DataFrame({"key": ["foo", "bar"], "rval": [4, 5]})
left
right
- pd.merge(left, right, on='key')
+ pd.merge(left, right, on="key")
Grouping
--------
@@ -531,14 +531,14 @@ groups.
.. ipython:: python
- df.groupby('A').sum()
+ df.groupby("A").sum()
Grouping by multiple columns forms a hierarchical index, and again we can
apply the :meth:`~pandas.core.groupby.GroupBy.sum` function.
.. ipython:: python
- df.groupby(['A', 'B']).sum()
+ df.groupby(["A", "B"]).sum()
Reshaping
---------
@@ -559,8 +559,8 @@ Stack
]
)
)
- index = pd.MultiIndex.from_tuples(tuples, names=['first', 'second'])
- df = pd.DataFrame(np.random.randn(8, 2), index=index, columns=['A', 'B'])
+ index = pd.MultiIndex.from_tuples(tuples, names=["first", "second"])
+ df = pd.DataFrame(np.random.randn(8, 2), index=index, columns=["A", "B"])
df2 = df[:4]
df2
@@ -603,7 +603,7 @@ We can produce pivot tables from this data very easily:
.. ipython:: python
- pd.pivot_table(df, values='D', index=['A', 'B'], columns=['C'])
+ pd.pivot_table(df, values="D", index=["A", "B"], columns=["C"])
Time series
@@ -616,31 +616,31 @@ financial applications. See the :ref:`Time Series section <timeseries>`.
.. ipython:: python
- rng = pd.date_range('1/1/2012', periods=100, freq='S')
+ rng = pd.date_range("1/1/2012", periods=100, freq="S")
ts = pd.Series(np.random.randint(0, 500, len(rng)), index=rng)
- ts.resample('5Min').sum()
+ ts.resample("5Min").sum()
Time zone representation:
.. ipython:: python
- rng = pd.date_range('3/6/2012 00:00', periods=5, freq='D')
+ rng = pd.date_range("3/6/2012 00:00", periods=5, freq="D")
ts = pd.Series(np.random.randn(len(rng)), rng)
ts
- ts_utc = ts.tz_localize('UTC')
+ ts_utc = ts.tz_localize("UTC")
ts_utc
Converting to another time zone:
.. ipython:: python
- ts_utc.tz_convert('US/Eastern')
+ ts_utc.tz_convert("US/Eastern")
Converting between time span representations:
.. ipython:: python
- rng = pd.date_range('1/1/2012', periods=5, freq='M')
+ rng = pd.date_range("1/1/2012", periods=5, freq="M")
ts = pd.Series(np.random.randn(len(rng)), index=rng)
ts
ps = ts.to_period()
@@ -654,9 +654,9 @@ the quarter end:
.. ipython:: python
- prng = pd.period_range('1990Q1', '2000Q4', freq='Q-NOV')
+ prng = pd.period_range("1990Q1", "2000Q4", freq="Q-NOV")
ts = pd.Series(np.random.randn(len(prng)), prng)
- ts.index = (prng.asfreq('M', 'e') + 1).asfreq('H', 's') + 9
+ ts.index = (prng.asfreq("M", "e") + 1).asfreq("H", "s") + 9
ts.head()
Categoricals
@@ -667,9 +667,10 @@ pandas can include categorical data in a :class:`DataFrame`. For full docs, see
.. ipython:: python
- df = pd.DataFrame(
- {"id": [1, 2, 3, 4, 5, 6], "raw_grade": ["a", "b", "b", "a", "a", "e"]}
- )
+ df = pd.DataFrame(
+ {"id": [1, 2, 3, 4, 5, 6], "raw_grade": ["a", "b", "b", "a", "a", "e"]}
+ )
+
Convert the raw grades to a categorical data type.
@@ -718,7 +719,8 @@ We use the standard convention for referencing the matplotlib API:
.. ipython:: python
import matplotlib.pyplot as plt
- plt.close('all')
+
+ plt.close("all")
.. ipython:: python
@@ -754,19 +756,20 @@ CSV
.. ipython:: python
- df.to_csv('foo.csv')
+ df.to_csv("foo.csv")
:ref:`Reading from a csv file. <io.read_csv_table>`
.. ipython:: python
- pd.read_csv('foo.csv')
+ pd.read_csv("foo.csv")
.. ipython:: python
:suppress:
import os
- os.remove('foo.csv')
+
+ os.remove("foo.csv")
HDF5
~~~~
@@ -777,18 +780,18 @@ Writing to a HDF5 Store.
.. ipython:: python
- df.to_hdf('foo.h5', 'df')
+ df.to_hdf("foo.h5", "df")
Reading from a HDF5 Store.
.. ipython:: python
- pd.read_hdf('foo.h5', 'df')
+ pd.read_hdf("foo.h5", "df")
.. ipython:: python
:suppress:
- os.remove('foo.h5')
+ os.remove("foo.h5")
Excel
~~~~~
@@ -799,18 +802,18 @@ Writing to an excel file.
.. ipython:: python
- df.to_excel('foo.xlsx', sheet_name='Sheet1')
+ df.to_excel("foo.xlsx", sheet_name="Sheet1")
Reading from an excel file.
.. ipython:: python
- pd.read_excel('foo.xlsx', 'Sheet1', index_col=None, na_values=['NA'])
+ pd.read_excel("foo.xlsx", "Sheet1", index_col=None, na_values=["NA"])
.. ipython:: python
:suppress:
- os.remove('foo.xlsx')
+ os.remove("foo.xlsx")
Gotchas
-------
diff --git a/doc/source/user_guide/advanced.rst b/doc/source/user_guide/advanced.rst
index 8cd35e94ae743..cec777e0f021e 100644
--- a/doc/source/user_guide/advanced.rst
+++ b/doc/source/user_guide/advanced.rst
@@ -62,12 +62,14 @@ demonstrate different ways to initialize MultiIndexes.
.. ipython:: python
- arrays = [['bar', 'bar', 'baz', 'baz', 'foo', 'foo', 'qux', 'qux'],
- ['one', 'two', 'one', 'two', 'one', 'two', 'one', 'two']]
+ arrays = [
+ ["bar", "bar", "baz", "baz", "foo", "foo", "qux", "qux"],
+ ["one", "two", "one", "two", "one", "two", "one", "two"],
+ ]
tuples = list(zip(*arrays))
tuples
- index = pd.MultiIndex.from_tuples(tuples, names=['first', 'second'])
+ index = pd.MultiIndex.from_tuples(tuples, names=["first", "second"])
index
s = pd.Series(np.random.randn(8), index=index)
@@ -78,8 +80,8 @@ to use the :meth:`MultiIndex.from_product` method:
.. ipython:: python
- iterables = [['bar', 'baz', 'foo', 'qux'], ['one', 'two']]
- pd.MultiIndex.from_product(iterables, names=['first', 'second'])
+ iterables = [["bar", "baz", "foo", "qux"], ["one", "two"]]
+ pd.MultiIndex.from_product(iterables, names=["first", "second"])
You can also construct a ``MultiIndex`` from a ``DataFrame`` directly, using
the method :meth:`MultiIndex.from_frame`. This is a complementary method to
@@ -89,9 +91,10 @@ the method :meth:`MultiIndex.from_frame`. This is a complementary method to
.. ipython:: python
- df = pd.DataFrame([['bar', 'one'], ['bar', 'two'],
- ['foo', 'one'], ['foo', 'two']],
- columns=['first', 'second'])
+ df = pd.DataFrame(
+ [["bar", "one"], ["bar", "two"], ["foo", "one"], ["foo", "two"]],
+ columns=["first", "second"],
+ )
pd.MultiIndex.from_frame(df)
As a convenience, you can pass a list of arrays directly into ``Series`` or
@@ -99,8 +102,10 @@ As a convenience, you can pass a list of arrays directly into ``Series`` or
.. ipython:: python
- arrays = [np.array(['bar', 'bar', 'baz', 'baz', 'foo', 'foo', 'qux', 'qux']),
- np.array(['one', 'two', 'one', 'two', 'one', 'two', 'one', 'two'])]
+ arrays = [
+ np.array(["bar", "bar", "baz", "baz", "foo", "foo", "qux", "qux"]),
+ np.array(["one", "two", "one", "two", "one", "two", "one", "two"]),
+ ]
s = pd.Series(np.random.randn(8), index=arrays)
s
df = pd.DataFrame(np.random.randn(8, 4), index=arrays)
@@ -119,7 +124,7 @@ of the index is up to you:
.. ipython:: python
- df = pd.DataFrame(np.random.randn(3, 8), index=['A', 'B', 'C'], columns=index)
+ df = pd.DataFrame(np.random.randn(3, 8), index=["A", "B", "C"], columns=index)
df
pd.DataFrame(np.random.randn(6, 6), index=index[:6], columns=index[:6])
@@ -129,7 +134,7 @@ bit easier on the eyes. Note that how the index is displayed can be controlled u
.. ipython:: python
- with pd.option_context('display.multi_sparse', False):
+ with pd.option_context("display.multi_sparse", False):
df
It's worth keeping in mind that there's nothing preventing you from using
@@ -157,7 +162,7 @@ location at a particular level:
.. ipython:: python
index.get_level_values(0)
- index.get_level_values('second')
+ index.get_level_values("second")
Basic indexing on axis with MultiIndex
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
@@ -169,10 +174,10 @@ completely analogous way to selecting a column in a regular DataFrame:
.. ipython:: python
- df['bar']
- df['bar', 'one']
- df['bar']['one']
- s['qux']
+ df["bar"]
+ df["bar", "one"]
+ df["bar"]["one"]
+ s["qux"]
See :ref:`Cross-section with hierarchical index <advanced.xs>` for how to select
on a deeper level.
@@ -190,7 +195,7 @@ For example:
df.columns.levels # original MultiIndex
- df[['foo','qux']].columns.levels # sliced
+ df[["foo","qux"]].columns.levels # sliced
This is done to avoid a recomputation of the levels in order to make slicing
highly performant. If you want to see only the used levels, you can use the
@@ -198,17 +203,17 @@ highly performant. If you want to see only the used levels, you can use the
.. ipython:: python
- df[['foo', 'qux']].columns.to_numpy()
+ df[["foo", "qux"]].columns.to_numpy()
# for a specific level
- df[['foo', 'qux']].columns.get_level_values(0)
+ df[["foo", "qux"]].columns.get_level_values(0)
To reconstruct the ``MultiIndex`` with only the used levels, the
:meth:`~MultiIndex.remove_unused_levels` method may be used.
.. ipython:: python
- new_mi = df[['foo', 'qux']].columns.remove_unused_levels()
+ new_mi = df[["foo", "qux"]].columns.remove_unused_levels()
new_mi.levels
Data alignment and using ``reindex``
@@ -229,7 +234,7 @@ called with another ``MultiIndex``, or even a list or array of tuples:
.. ipython:: python
s.reindex(index[:3])
- s.reindex([('foo', 'two'), ('bar', 'one'), ('qux', 'one'), ('baz', 'one')])
+ s.reindex([("foo", "two"), ("bar", "one"), ("qux", "one"), ("baz", "one")])
.. _advanced.advanced_hierarchical:
@@ -244,7 +249,7 @@ keys take the form of tuples. For example, the following works as you would expe
df = df.T
df
- df.loc[('bar', 'two')]
+ df.loc[("bar", "two")]
Note that ``df.loc['bar', 'two']`` would also work in this example, but this shorthand
notation can lead to ambiguity in general.
@@ -254,7 +259,7 @@ like this:
.. ipython:: python
- df.loc[('bar', 'two'), 'A']
+ df.loc[("bar", "two"), "A"]
You don't have to specify all levels of the ``MultiIndex`` by passing only the
first elements of the tuple. For example, you can use "partial" indexing to
@@ -262,7 +267,7 @@ get all elements with ``bar`` in the first level as follows:
.. ipython:: python
- df.loc['bar']
+ df.loc["bar"]
This is a shortcut for the slightly more verbose notation ``df.loc[('bar',),]`` (equivalent
to ``df.loc['bar',]`` in this example).
@@ -271,20 +276,20 @@ to ``df.loc['bar',]`` in this example).
.. ipython:: python
- df.loc['baz':'foo']
+ df.loc["baz":"foo"]
You can slice with a 'range' of values, by providing a slice of tuples.
.. ipython:: python
- df.loc[('baz', 'two'):('qux', 'one')]
- df.loc[('baz', 'two'):'foo']
+ df.loc[("baz", "two"):("qux", "one")]
+ df.loc[("baz", "two"):"foo"]
Passing a list of labels or tuples works similar to reindexing:
.. ipython:: python
- df.loc[[('bar', 'two'), ('qux', 'one')]]
+ df.loc[[("bar", "two"), ("qux", "one")]]
.. note::
@@ -298,8 +303,9 @@ whereas a tuple of lists refer to several values within a level:
.. ipython:: python
- s = pd.Series([1, 2, 3, 4, 5, 6],
- index=pd.MultiIndex.from_product([["A", "B"], ["c", "d", "e"]]))
+ s = pd.Series(
+ [1, 2, 3, 4, 5, 6], index=pd.MultiIndex.from_product([["A", "B"], ["c", "d", "e"]])
+ )
s.loc[[("A", "c"), ("B", "d")]] # list of tuples
s.loc[(["A", "B"], ["c", "d"])] # tuple of lists
@@ -329,37 +335,44 @@ As usual, **both sides** of the slicers are included as this is label indexing.
.. code-block:: python
- df.loc[(slice('A1', 'A3'), ...), :] # noqa: E999
+ df.loc[(slice("A1", "A3"), ...), :] # noqa: E999
You should **not** do this:
.. code-block:: python
- df.loc[(slice('A1', 'A3'), ...)] # noqa: E999
+ df.loc[(slice("A1", "A3"), ...)] # noqa: E999
.. ipython:: python
def mklbl(prefix, n):
return ["%s%s" % (prefix, i) for i in range(n)]
- miindex = pd.MultiIndex.from_product([mklbl('A', 4),
- mklbl('B', 2),
- mklbl('C', 4),
- mklbl('D', 2)])
- micolumns = pd.MultiIndex.from_tuples([('a', 'foo'), ('a', 'bar'),
- ('b', 'foo'), ('b', 'bah')],
- names=['lvl0', 'lvl1'])
- dfmi = pd.DataFrame(np.arange(len(miindex) * len(micolumns))
- .reshape((len(miindex), len(micolumns))),
- index=miindex,
- columns=micolumns).sort_index().sort_index(axis=1)
+
+ miindex = pd.MultiIndex.from_product(
+ [mklbl("A", 4), mklbl("B", 2), mklbl("C", 4), mklbl("D", 2)]
+ )
+ micolumns = pd.MultiIndex.from_tuples(
+ [("a", "foo"), ("a", "bar"), ("b", "foo"), ("b", "bah")], names=["lvl0", "lvl1"]
+ )
+ dfmi = (
+ pd.DataFrame(
+ np.arange(len(miindex) * len(micolumns)).reshape(
+ (len(miindex), len(micolumns))
+ ),
+ index=miindex,
+ columns=micolumns,
+ )
+ .sort_index()
+ .sort_index(axis=1)
+ )
dfmi
Basic MultiIndex slicing using slices, lists, and labels.
.. ipython:: python
- dfmi.loc[(slice('A1', 'A3'), slice(None), ['C1', 'C3']), :]
+ dfmi.loc[(slice("A1", "A3"), slice(None), ["C1", "C3"]), :]
You can use :class:`pandas.IndexSlice` to facilitate a more natural syntax
@@ -368,36 +381,36 @@ using ``:``, rather than using ``slice(None)``.
.. ipython:: python
idx = pd.IndexSlice
- dfmi.loc[idx[:, :, ['C1', 'C3']], idx[:, 'foo']]
+ dfmi.loc[idx[:, :, ["C1", "C3"]], idx[:, "foo"]]
It is possible to perform quite complicated selections using this method on multiple
axes at the same time.
.. ipython:: python
- dfmi.loc['A1', (slice(None), 'foo')]
- dfmi.loc[idx[:, :, ['C1', 'C3']], idx[:, 'foo']]
+ dfmi.loc["A1", (slice(None), "foo")]
+ dfmi.loc[idx[:, :, ["C1", "C3"]], idx[:, "foo"]]
Using a boolean indexer you can provide selection related to the *values*.
.. ipython:: python
- mask = dfmi[('a', 'foo')] > 200
- dfmi.loc[idx[mask, :, ['C1', 'C3']], idx[:, 'foo']]
+ mask = dfmi[("a", "foo")] > 200
+ dfmi.loc[idx[mask, :, ["C1", "C3"]], idx[:, "foo"]]
You can also specify the ``axis`` argument to ``.loc`` to interpret the passed
slicers on a single axis.
.. ipython:: python
- dfmi.loc(axis=0)[:, :, ['C1', 'C3']]
+ dfmi.loc(axis=0)[:, :, ["C1", "C3"]]
Furthermore, you can *set* the values using the following methods.
.. ipython:: python
df2 = dfmi.copy()
- df2.loc(axis=0)[:, :, ['C1', 'C3']] = -10
+ df2.loc(axis=0)[:, :, ["C1", "C3"]] = -10
df2
You can use a right-hand-side of an alignable object as well.
@@ -405,7 +418,7 @@ You can use a right-hand-side of an alignable object as well.
.. ipython:: python
df2 = dfmi.copy()
- df2.loc[idx[:, :, ['C1', 'C3']], :] = df2 * 1000
+ df2.loc[idx[:, :, ["C1", "C3"]], :] = df2 * 1000
df2
.. _advanced.xs:
@@ -419,12 +432,12 @@ selecting data at a particular level of a ``MultiIndex`` easier.
.. ipython:: python
df
- df.xs('one', level='second')
+ df.xs("one", level="second")
.. ipython:: python
# using the slicers
- df.loc[(slice(None), 'one'), :]
+ df.loc[(slice(None), "one"), :]
You can also select on the columns with ``xs``, by
providing the axis argument.
@@ -432,36 +445,36 @@ providing the axis argument.
.. ipython:: python
df = df.T
- df.xs('one', level='second', axis=1)
+ df.xs("one", level="second", axis=1)
.. ipython:: python
# using the slicers
- df.loc[:, (slice(None), 'one')]
+ df.loc[:, (slice(None), "one")]
``xs`` also allows selection with multiple keys.
.. ipython:: python
- df.xs(('one', 'bar'), level=('second', 'first'), axis=1)
+ df.xs(("one", "bar"), level=("second", "first"), axis=1)
.. ipython:: python
# using the slicers
- df.loc[:, ('bar', 'one')]
+ df.loc[:, ("bar", "one")]
You can pass ``drop_level=False`` to ``xs`` to retain
the level that was selected.
.. ipython:: python
- df.xs('one', level='second', axis=1, drop_level=False)
+ df.xs("one", level="second", axis=1, drop_level=False)
Compare the above with the result using ``drop_level=True`` (the default value).
.. ipython:: python
- df.xs('one', level='second', axis=1, drop_level=True)
+ df.xs("one", level="second", axis=1, drop_level=True)
.. ipython:: python
:suppress:
@@ -479,8 +492,9 @@ values across a level. For instance:
.. ipython:: python
- midx = pd.MultiIndex(levels=[['zero', 'one'], ['x', 'y']],
- codes=[[1, 1, 0, 0], [1, 0, 1, 0]])
+ midx = pd.MultiIndex(
+ levels=[["zero", "one"], ["x", "y"]], codes=[[1, 1, 0, 0], [1, 0, 1, 0]]
+ )
df = pd.DataFrame(np.random.randn(4, 2), index=midx)
df
df2 = df.mean(level=0)
@@ -543,7 +557,7 @@ used to move the values from the ``MultiIndex`` to a column.
.. ipython:: python
- df.rename_axis(index=['abc', 'def'])
+ df.rename_axis(index=["abc", "def"])
Note that the columns of a ``DataFrame`` are an index, so that using
``rename_axis`` with the ``columns`` argument will change the name of that
@@ -561,7 +575,7 @@ When working with an ``Index`` object directly, rather than via a ``DataFrame``,
.. ipython:: python
- mi = pd.MultiIndex.from_product([[1, 2], ['a', 'b']], names=['x', 'y'])
+ mi = pd.MultiIndex.from_product([[1, 2], ["a", "b"]], names=["x", "y"])
mi.names
mi2 = mi.rename("new name", level=0)
@@ -586,6 +600,7 @@ they need to be sorted. As with any index, you can use :meth:`~DataFrame.sort_in
.. ipython:: python
import random
+
random.shuffle(tuples)
s = pd.Series(np.random.randn(8), index=pd.MultiIndex.from_tuples(tuples))
s
@@ -600,9 +615,9 @@ are named.
.. ipython:: python
- s.index.set_names(['L1', 'L2'], inplace=True)
- s.sort_index(level='L1')
- s.sort_index(level='L2')
+ s.index.set_names(["L1", "L2"], inplace=True)
+ s.sort_index(level="L1")
+ s.sort_index(level="L2")
On higher dimensional objects, you can sort any of the other axes by level if
they have a ``MultiIndex``:
@@ -617,10 +632,10 @@ return a copy of the data rather than a view:
.. ipython:: python
- dfm = pd.DataFrame({'jim': [0, 0, 1, 1],
- 'joe': ['x', 'x', 'z', 'y'],
- 'jolie': np.random.rand(4)})
- dfm = dfm.set_index(['jim', 'joe'])
+ dfm = pd.DataFrame(
+ {"jim": [0, 0, 1, 1], "joe": ["x", "x", "z", "y"], "jolie": np.random.rand(4)}
+ )
+ dfm = dfm.set_index(["jim", "joe"])
dfm
.. code-block:: ipython
@@ -661,7 +676,7 @@ And now selection works as expected.
.. ipython:: python
- dfm.loc[(0, 'y'):(1, 'z')]
+ dfm.loc[(0, "y"):(1, "z")]
Take methods
------------
@@ -754,18 +769,18 @@ and allows efficient indexing and storage of an index with a large number of dup
.. ipython:: python
from pandas.api.types import CategoricalDtype
- df = pd.DataFrame({'A': np.arange(6),
- 'B': list('aabbca')})
- df['B'] = df['B'].astype(CategoricalDtype(list('cab')))
+
+ df = pd.DataFrame({"A": np.arange(6), "B": list("aabbca")})
+ df["B"] = df["B"].astype(CategoricalDtype(list("cab")))
df
df.dtypes
- df['B'].cat.categories
+ df["B"].cat.categories
Setting the index will create a ``CategoricalIndex``.
.. ipython:: python
- df2 = df.set_index('B')
+ df2 = df.set_index("B")
df2.index
Indexing with ``__getitem__/.iloc/.loc`` works similarly to an ``Index`` with duplicates.
@@ -773,13 +788,13 @@ The indexers **must** be in the category or the operation will raise a ``KeyErro
.. ipython:: python
- df2.loc['a']
+ df2.loc["a"]
The ``CategoricalIndex`` is **preserved** after indexing:
.. ipython:: python
- df2.loc['a'].index
+ df2.loc["a"].index
Sorting the index will sort by the order of the categories (recall that we
created the index with ``CategoricalDtype(list('cab'))``, so the sorted
@@ -804,17 +819,16 @@ values **not** in the categories, similarly to how you can reindex **any** panda
.. ipython:: python
- df3 = pd.DataFrame({'A': np.arange(3),
- 'B': pd.Series(list('abc')).astype('category')})
- df3 = df3.set_index('B')
+ df3 = pd.DataFrame({"A": np.arange(3), "B": pd.Series(list("abc")).astype("category")})
+ df3 = df3.set_index("B")
df3
.. ipython:: python
- df3.reindex(['a', 'e'])
- df3.reindex(['a', 'e']).index
- df3.reindex(pd.Categorical(['a', 'e'], categories=list('abe')))
- df3.reindex(pd.Categorical(['a', 'e'], categories=list('abe'))).index
+ df3.reindex(["a", "e"])
+ df3.reindex(["a", "e"]).index
+ df3.reindex(pd.Categorical(["a", "e"], categories=list("abe")))
+ df3.reindex(pd.Categorical(["a", "e"], categories=list("abe"))).index
.. warning::
@@ -823,16 +837,14 @@ values **not** in the categories, similarly to how you can reindex **any** panda
.. ipython:: python
- df4 = pd.DataFrame({'A': np.arange(2),
- 'B': list('ba')})
- df4['B'] = df4['B'].astype(CategoricalDtype(list('ab')))
- df4 = df4.set_index('B')
+ df4 = pd.DataFrame({"A": np.arange(2), "B": list("ba")})
+ df4["B"] = df4["B"].astype(CategoricalDtype(list("ab")))
+ df4 = df4.set_index("B")
df4.index
- df5 = pd.DataFrame({'A': np.arange(2),
- 'B': list('bc')})
- df5['B'] = df5['B'].astype(CategoricalDtype(list('bc')))
- df5 = df5.set_index('B')
+ df5 = pd.DataFrame({"A": np.arange(2), "B": list("bc")})
+ df5["B"] = df5["B"].astype(CategoricalDtype(list("bc")))
+ df5 = df5.set_index("B")
df5.index
.. code-block:: ipython
@@ -916,12 +928,16 @@ example, be millisecond offsets.
.. ipython:: python
- dfir = pd.concat([pd.DataFrame(np.random.randn(5, 2),
- index=np.arange(5) * 250.0,
- columns=list('AB')),
- pd.DataFrame(np.random.randn(6, 2),
- index=np.arange(4, 10) * 250.1,
- columns=list('AB'))])
+ dfir = pd.concat(
+ [
+ pd.DataFrame(
+ np.random.randn(5, 2), index=np.arange(5) * 250.0, columns=list("AB")
+ ),
+ pd.DataFrame(
+ np.random.randn(6, 2), index=np.arange(4, 10) * 250.1, columns=list("AB")
+ ),
+ ]
+ )
dfir
Selection operations then will always work on a value basis, for all selection operators.
@@ -929,7 +945,7 @@ Selection operations then will always work on a value basis, for all selection o
.. ipython:: python
dfir[0:1000.4]
- dfir.loc[0:1001, 'A']
+ dfir.loc[0:1001, "A"]
dfir.loc[1000.4]
You could retrieve the first 1 second (1000 ms) of data as such:
@@ -963,8 +979,9 @@ An ``IntervalIndex`` can be used in ``Series`` and in ``DataFrame`` as the index
.. ipython:: python
- df = pd.DataFrame({'A': [1, 2, 3, 4]},
- index=pd.IntervalIndex.from_breaks([0, 1, 2, 3, 4]))
+ df = pd.DataFrame(
+ {"A": [1, 2, 3, 4]}, index=pd.IntervalIndex.from_breaks([0, 1, 2, 3, 4])
+ )
df
Label based indexing via ``.loc`` along the edges of an interval works as you would expect,
@@ -1041,9 +1058,9 @@ datetime-like intervals:
pd.interval_range(start=0, end=5)
- pd.interval_range(start=pd.Timestamp('2017-01-01'), periods=4)
+ pd.interval_range(start=pd.Timestamp("2017-01-01"), periods=4)
- pd.interval_range(end=pd.Timedelta('3 days'), periods=3)
+ pd.interval_range(end=pd.Timedelta("3 days"), periods=3)
The ``freq`` parameter can used to specify non-default frequencies, and can utilize a variety
of :ref:`frequency aliases <timeseries.offset_aliases>` with datetime-like intervals:
@@ -1052,18 +1069,18 @@ of :ref:`frequency aliases <timeseries.offset_aliases>` with datetime-like inter
pd.interval_range(start=0, periods=5, freq=1.5)
- pd.interval_range(start=pd.Timestamp('2017-01-01'), periods=4, freq='W')
+ pd.interval_range(start=pd.Timestamp("2017-01-01"), periods=4, freq="W")
- pd.interval_range(start=pd.Timedelta('0 days'), periods=3, freq='9H')
+ pd.interval_range(start=pd.Timedelta("0 days"), periods=3, freq="9H")
Additionally, the ``closed`` parameter can be used to specify which side(s) the intervals
are closed on. Intervals are closed on the right side by default.
.. ipython:: python
- pd.interval_range(start=0, end=4, closed='both')
+ pd.interval_range(start=0, end=4, closed="both")
- pd.interval_range(start=0, end=4, closed='neither')
+ pd.interval_range(start=0, end=4, closed="neither")
Specifying ``start``, ``end``, and ``periods`` will generate a range of evenly spaced
intervals from ``start`` to ``end`` inclusively, with ``periods`` number of elements
@@ -1073,8 +1090,7 @@ in the resulting ``IntervalIndex``:
pd.interval_range(start=0, end=6, periods=4)
- pd.interval_range(pd.Timestamp('2018-01-01'),
- pd.Timestamp('2018-02-28'), periods=3)
+ pd.interval_range(pd.Timestamp("2018-01-01"), pd.Timestamp("2018-02-28"), periods=3)
Miscellaneous indexing FAQ
--------------------------
@@ -1112,7 +1128,7 @@ normal Python ``list``. Monotonicity of an index can be tested with the :meth:`~
.. ipython:: python
- df = pd.DataFrame(index=[2, 3, 3, 4, 5], columns=['data'], data=list(range(5)))
+ df = pd.DataFrame(index=[2, 3, 3, 4, 5], columns=["data"], data=list(range(5)))
df.index.is_monotonic_increasing
# no rows 0 or 1, but still returns rows 2, 3 (both of them), and 4:
@@ -1126,8 +1142,7 @@ On the other hand, if the index is not monotonic, then both slice bounds must be
.. ipython:: python
- df = pd.DataFrame(index=[2, 3, 1, 4, 3, 5],
- columns=['data'], data=list(range(6)))
+ df = pd.DataFrame(index=[2, 3, 1, 4, 3, 5], columns=["data"], data=list(range(6)))
df.index.is_monotonic_increasing
# OK because 2 and 4 are in the index
@@ -1149,7 +1164,7 @@ the :meth:`~Index.is_unique` attribute.
.. ipython:: python
- weakly_monotonic = pd.Index(['a', 'b', 'c', 'c'])
+ weakly_monotonic = pd.Index(["a", "b", "c", "c"])
weakly_monotonic
weakly_monotonic.is_monotonic_increasing
weakly_monotonic.is_monotonic_increasing & weakly_monotonic.is_unique
@@ -1167,7 +1182,7 @@ consider the following ``Series``:
.. ipython:: python
- s = pd.Series(np.random.randn(6), index=list('abcdef'))
+ s = pd.Series(np.random.randn(6), index=list("abcdef"))
s
Suppose we wished to slice from ``c`` to ``e``, using integers this would be
@@ -1190,7 +1205,7 @@ slicing include both endpoints:
.. ipython:: python
- s.loc['c':'e']
+ s.loc["c":"e"]
This is most definitely a "practicality beats purity" sort of thing, but it is
something to watch out for if you expect label-based slicing to behave exactly
diff --git a/doc/source/user_guide/basics.rst b/doc/source/user_guide/basics.rst
index e348111fe7881..9ef91a9604d39 100644
--- a/doc/source/user_guide/basics.rst
+++ b/doc/source/user_guide/basics.rst
@@ -12,10 +12,9 @@ the :ref:`10 minutes to pandas <10min>` section:
.. ipython:: python
- index = pd.date_range('1/1/2000', periods=8)
- s = pd.Series(np.random.randn(5), index=['a', 'b', 'c', 'd', 'e'])
- df = pd.DataFrame(np.random.randn(8, 3), index=index,
- columns=['A', 'B', 'C'])
+ index = pd.date_range("1/1/2000", periods=8)
+ s = pd.Series(np.random.randn(5), index=["a", "b", "c", "d", "e"])
+ df = pd.DataFrame(np.random.randn(8, 3), index=index, columns=["A", "B", "C"])
.. _basics.head_tail:
@@ -97,7 +96,7 @@ Timezones may be preserved with ``dtype=object``
.. ipython:: python
- ser = pd.Series(pd.date_range('2000', periods=2, tz="CET"))
+ ser = pd.Series(pd.date_range("2000", periods=2, tz="CET"))
ser.to_numpy(dtype=object)
Or thrown away with ``dtype='datetime64[ns]'``
@@ -174,8 +173,8 @@ These are both enabled to be used by default, you can control this by setting th
.. code-block:: python
- pd.set_option('compute.use_bottleneck', False)
- pd.set_option('compute.use_numexpr', False)
+ pd.set_option("compute.use_bottleneck", False)
+ pd.set_option("compute.use_numexpr", False)
.. _basics.binop:
@@ -204,18 +203,21 @@ either match on the *index* or *columns* via the **axis** keyword:
.. ipython:: python
- df = pd.DataFrame({
- 'one': pd.Series(np.random.randn(3), index=['a', 'b', 'c']),
- 'two': pd.Series(np.random.randn(4), index=['a', 'b', 'c', 'd']),
- 'three': pd.Series(np.random.randn(3), index=['b', 'c', 'd'])})
+ df = pd.DataFrame(
+ {
+ "one": pd.Series(np.random.randn(3), index=["a", "b", "c"]),
+ "two": pd.Series(np.random.randn(4), index=["a", "b", "c", "d"]),
+ "three": pd.Series(np.random.randn(3), index=["b", "c", "d"]),
+ }
+ )
df
row = df.iloc[1]
- column = df['two']
+ column = df["two"]
- df.sub(row, axis='columns')
+ df.sub(row, axis="columns")
df.sub(row, axis=1)
- df.sub(column, axis='index')
+ df.sub(column, axis="index")
df.sub(column, axis=0)
.. ipython:: python
@@ -228,10 +230,10 @@ Furthermore you can align a level of a MultiIndexed DataFrame with a Series.
.. ipython:: python
dfmi = df.copy()
- dfmi.index = pd.MultiIndex.from_tuples([(1, 'a'), (1, 'b'),
- (1, 'c'), (2, 'a')],
- names=['first', 'second'])
- dfmi.sub(column, axis=0, level='second')
+ dfmi.index = pd.MultiIndex.from_tuples(
+ [(1, "a"), (1, "b"), (1, "c"), (2, "a")], names=["first", "second"]
+ )
+ dfmi.sub(column, axis=0, level="second")
Series and Index also support the :func:`divmod` builtin. This function takes
the floor division and modulo operation at the same time returning a two-tuple
@@ -273,7 +275,7 @@ using ``fillna`` if you wish).
:suppress:
df2 = df.copy()
- df2['three']['a'] = 1.
+ df2["three"]["a"] = 1.0
.. ipython:: python
@@ -325,7 +327,7 @@ You can test if a pandas object is empty, via the :attr:`~DataFrame.empty` prope
.. ipython:: python
df.empty
- pd.DataFrame(columns=list('ABC')).empty
+ pd.DataFrame(columns=list("ABC")).empty
To evaluate single-element pandas objects in a boolean context, use the method
:meth:`~DataFrame.bool`:
@@ -394,8 +396,8 @@ equality to be True:
.. ipython:: python
- df1 = pd.DataFrame({'col': ['foo', 0, np.nan]})
- df2 = pd.DataFrame({'col': [np.nan, 0, 'foo']}, index=[2, 1, 0])
+ df1 = pd.DataFrame({"col": ["foo", 0, np.nan]})
+ df2 = pd.DataFrame({"col": [np.nan, 0, "foo"]}, index=[2, 1, 0])
df1.equals(df2)
df1.equals(df2.sort_index())
@@ -407,16 +409,16 @@ data structure with a scalar value:
.. ipython:: python
- pd.Series(['foo', 'bar', 'baz']) == 'foo'
- pd.Index(['foo', 'bar', 'baz']) == 'foo'
+ pd.Series(["foo", "bar", "baz"]) == "foo"
+ pd.Index(["foo", "bar", "baz"]) == "foo"
Pandas also handles element-wise comparisons between different array-like
objects of the same length:
.. ipython:: python
- pd.Series(['foo', 'bar', 'baz']) == pd.Index(['foo', 'bar', 'qux'])
- pd.Series(['foo', 'bar', 'baz']) == np.array(['foo', 'bar', 'qux'])
+ pd.Series(["foo", "bar", "baz"]) == pd.Index(["foo", "bar", "qux"])
+ pd.Series(["foo", "bar", "baz"]) == np.array(["foo", "bar", "qux"])
Trying to compare ``Index`` or ``Series`` objects of different lengths will
raise a ValueError:
@@ -458,10 +460,12 @@ which we illustrate:
.. ipython:: python
- df1 = pd.DataFrame({'A': [1., np.nan, 3., 5., np.nan],
- 'B': [np.nan, 2., 3., np.nan, 6.]})
- df2 = pd.DataFrame({'A': [5., 2., 4., np.nan, 3., 7.],
- 'B': [np.nan, np.nan, 3., 4., 6., 8.]})
+ df1 = pd.DataFrame(
+ {"A": [1.0, np.nan, 3.0, 5.0, np.nan], "B": [np.nan, 2.0, 3.0, np.nan, 6.0]}
+ )
+ df2 = pd.DataFrame(
+ {"A": [5.0, 2.0, 4.0, np.nan, 3.0, 7.0], "B": [np.nan, np.nan, 3.0, 4.0, 6.0, 8.0]}
+ )
df1
df2
df1.combine_first(df2)
@@ -480,6 +484,8 @@ So, for instance, to reproduce :meth:`~DataFrame.combine_first` as above:
def combiner(x, y):
return np.where(pd.isna(x), y, x)
+
+
df1.combine(df2, combiner)
.. _basics.stats:
@@ -570,8 +576,8 @@ will exclude NAs on Series input by default:
.. ipython:: python
- np.mean(df['one'])
- np.mean(df['one'].to_numpy())
+ np.mean(df["one"])
+ np.mean(df["one"].to_numpy())
:meth:`Series.nunique` will return the number of unique non-NA values in a
Series:
@@ -597,8 +603,7 @@ course):
series = pd.Series(np.random.randn(1000))
series[::2] = np.nan
series.describe()
- frame = pd.DataFrame(np.random.randn(1000, 5),
- columns=['a', 'b', 'c', 'd', 'e'])
+ frame = pd.DataFrame(np.random.randn(1000, 5), columns=["a", "b", "c", "d", "e"])
frame.iloc[::2] = np.nan
frame.describe()
@@ -606,7 +611,7 @@ You can select specific percentiles to include in the output:
.. ipython:: python
- series.describe(percentiles=[.05, .25, .75, .95])
+ series.describe(percentiles=[0.05, 0.25, 0.75, 0.95])
By default, the median is always included.
@@ -615,7 +620,7 @@ summary of the number of unique values and most frequently occurring values:
.. ipython:: python
- s = pd.Series(['a', 'a', 'b', 'b', 'a', 'a', np.nan, 'c', 'd', 'a'])
+ s = pd.Series(["a", "a", "b", "b", "a", "a", np.nan, "c", "d", "a"])
s.describe()
Note that on a mixed-type DataFrame object, :meth:`~DataFrame.describe` will
@@ -624,7 +629,7 @@ categorical columns:
.. ipython:: python
- frame = pd.DataFrame({'a': ['Yes', 'Yes', 'No', 'No'], 'b': range(4)})
+ frame = pd.DataFrame({"a": ["Yes", "Yes", "No", "No"], "b": range(4)})
frame.describe()
This behavior can be controlled by providing a list of types as ``include``/``exclude``
@@ -632,9 +637,9 @@ arguments. The special value ``all`` can also be used:
.. ipython:: python
- frame.describe(include=['object'])
- frame.describe(include=['number'])
- frame.describe(include='all')
+ frame.describe(include=["object"])
+ frame.describe(include=["number"])
+ frame.describe(include="all")
That feature relies on :ref:`select_dtypes <basics.selectdtypes>`. Refer to
there for details about accepted inputs.
@@ -654,7 +659,7 @@ corresponding values:
s1
s1.idxmin(), s1.idxmax()
- df1 = pd.DataFrame(np.random.randn(5, 3), columns=['A', 'B', 'C'])
+ df1 = pd.DataFrame(np.random.randn(5, 3), columns=["A", "B", "C"])
df1
df1.idxmin(axis=0)
df1.idxmax(axis=1)
@@ -665,9 +670,9 @@ matching index:
.. ipython:: python
- df3 = pd.DataFrame([2, 1, 1, 3, np.nan], columns=['A'], index=list('edcba'))
+ df3 = pd.DataFrame([2, 1, 1, 3, np.nan], columns=["A"], index=list("edcba"))
df3
- df3['A'].idxmin()
+ df3["A"].idxmin()
.. note::
@@ -706,8 +711,9 @@ Similarly, you can get the most frequently occurring value(s), i.e. the mode, of
s5 = pd.Series([1, 1, 3, 3, 3, 5, 5, 7, 7, 7])
s5.mode()
- df5 = pd.DataFrame({"A": np.random.randint(0, 7, size=50),
- "B": np.random.randint(-10, 15, size=50)})
+ df5 = pd.DataFrame(
+ {"A": np.random.randint(0, 7, size=50), "B": np.random.randint(-10, 15, size=50)}
+ )
df5.mode()
@@ -732,7 +738,7 @@ normally distributed data into equal-size quartiles like so:
.. ipython:: python
arr = np.random.randn(30)
- factor = pd.qcut(arr, [0, .25, .5, .75, 1])
+ factor = pd.qcut(arr, [0, 0.25, 0.5, 0.75, 1])
factor
pd.value_counts(factor)
@@ -775,18 +781,20 @@ First some setup:
"""
Chicago, IL -> Chicago for city_name column
"""
- df['city_name'] = df['city_and_code'].str.split(",").str.get(0)
+ df["city_name"] = df["city_and_code"].str.split(",").str.get(0)
return df
+
def add_country_name(df, country_name=None):
"""
Chicago -> Chicago-US for city_name column
"""
- col = 'city_name'
- df['city_and_country'] = df[col] + country_name
+ col = "city_name"
+ df["city_and_country"] = df[col] + country_name
return df
- df_p = pd.DataFrame({'city_and_code': ['Chicago, IL']})
+
+ df_p = pd.DataFrame({"city_and_code": ["Chicago, IL"]})
``extract_city_name`` and ``add_country_name`` are functions taking and returning ``DataFrames``.
@@ -795,14 +803,13 @@ Now compare the following:
.. ipython:: python
- add_country_name(extract_city_name(df_p), country_name='US')
+ add_country_name(extract_city_name(df_p), country_name="US")
Is equivalent to:
.. ipython:: python
- (df_p.pipe(extract_city_name)
- .pipe(add_country_name, country_name="US"))
+ df_p.pipe(extract_city_name).pipe(add_country_name, country_name="US")
Pandas encourages the second style, which is known as method chaining.
``pipe`` makes it easy to use your own or another library's functions
@@ -820,14 +827,15 @@ For example, we can fit a regression using statsmodels. Their API expects a form
import statsmodels.formula.api as sm
- bb = pd.read_csv('data/baseball.csv', index_col='id')
+ bb = pd.read_csv("data/baseball.csv", index_col="id")
- (bb.query('h > 0')
- .assign(ln_h=lambda df: np.log(df.h))
- .pipe((sm.ols, 'data'), 'hr ~ ln_h + year + g + C(lg)')
- .fit()
- .summary()
- )
+ (
+ bb.query("h > 0")
+ .assign(ln_h=lambda df: np.log(df.h))
+ .pipe((sm.ols, "data"), "hr ~ ln_h + year + g + C(lg)")
+ .fit()
+ .summary()
+ )
The pipe method is inspired by unix pipes and more recently dplyr_ and magrittr_, which
have introduced the popular ``(%>%)`` (read pipe) operator for R_.
@@ -858,8 +866,8 @@ The :meth:`~DataFrame.apply` method will also dispatch on a string method name.
.. ipython:: python
- df.apply('mean')
- df.apply('mean', axis=1)
+ df.apply("mean")
+ df.apply("mean", axis=1)
The return type of the function passed to :meth:`~DataFrame.apply` affects the
type of the final output from ``DataFrame.apply`` for the default behaviour:
@@ -878,8 +886,11 @@ maximum value for each column occurred:
.. ipython:: python
- tsdf = pd.DataFrame(np.random.randn(1000, 3), columns=['A', 'B', 'C'],
- index=pd.date_range('1/1/2000', periods=1000))
+ tsdf = pd.DataFrame(
+ np.random.randn(1000, 3),
+ columns=["A", "B", "C"],
+ index=pd.date_range("1/1/2000", periods=1000),
+ )
tsdf.apply(lambda x: x.idxmax())
You may also pass additional arguments and keyword arguments to the :meth:`~DataFrame.apply`
@@ -902,8 +913,11 @@ Series operation on each column or row:
.. ipython:: python
:suppress:
- tsdf = pd.DataFrame(np.random.randn(10, 3), columns=['A', 'B', 'C'],
- index=pd.date_range('1/1/2000', periods=10))
+ tsdf = pd.DataFrame(
+ np.random.randn(10, 3),
+ columns=["A", "B", "C"],
+ index=pd.date_range("1/1/2000", periods=10),
+ )
tsdf.iloc[3:7] = np.nan
.. ipython:: python
@@ -933,8 +947,11 @@ We will use a similar starting frame from above:
.. ipython:: python
- tsdf = pd.DataFrame(np.random.randn(10, 3), columns=['A', 'B', 'C'],
- index=pd.date_range('1/1/2000', periods=10))
+ tsdf = pd.DataFrame(
+ np.random.randn(10, 3),
+ columns=["A", "B", "C"],
+ index=pd.date_range("1/1/2000", periods=10),
+ )
tsdf.iloc[3:7] = np.nan
tsdf
@@ -946,7 +963,7 @@ output:
tsdf.agg(np.sum)
- tsdf.agg('sum')
+ tsdf.agg("sum")
# these are equivalent to a ``.sum()`` because we are aggregating
# on a single function
@@ -956,7 +973,7 @@ Single aggregations on a ``Series`` this will return a scalar value:
.. ipython:: python
- tsdf['A'].agg('sum')
+ tsdf["A"].agg("sum")
Aggregating with multiple functions
@@ -968,25 +985,25 @@ These are naturally named from the aggregation function.
.. ipython:: python
- tsdf.agg(['sum'])
+ tsdf.agg(["sum"])
Multiple functions yield multiple rows:
.. ipython:: python
- tsdf.agg(['sum', 'mean'])
+ tsdf.agg(["sum", "mean"])
On a ``Series``, multiple functions return a ``Series``, indexed by the function names:
.. ipython:: python
- tsdf['A'].agg(['sum', 'mean'])
+ tsdf["A"].agg(["sum", "mean"])
Passing a ``lambda`` function will yield a ``<lambda>`` named row:
.. ipython:: python
- tsdf['A'].agg(['sum', lambda x: x.mean()])
+ tsdf["A"].agg(["sum", lambda x: x.mean()])
Passing a named function will yield that name for the row:
@@ -995,7 +1012,8 @@ Passing a named function will yield that name for the row:
def mymean(x):
return x.mean()
- tsdf['A'].agg(['sum', mymean])
+
+ tsdf["A"].agg(["sum", mymean])
Aggregating with a dict
+++++++++++++++++++++++
@@ -1006,7 +1024,7 @@ are not in any particular order, you can use an ``OrderedDict`` instead to guara
.. ipython:: python
- tsdf.agg({'A': 'mean', 'B': 'sum'})
+ tsdf.agg({"A": "mean", "B": "sum"})
Passing a list-like will generate a ``DataFrame`` output. You will get a matrix-like output
of all of the aggregators. The output will consist of all unique functions. Those that are
@@ -1014,7 +1032,7 @@ not noted for a particular column will be ``NaN``:
.. ipython:: python
- tsdf.agg({'A': ['mean', 'min'], 'B': 'sum'})
+ tsdf.agg({"A": ["mean", "min"], "B": "sum"})
.. _basics.aggregation.mixed_string:
@@ -1026,15 +1044,19 @@ aggregations. This is similar to how ``.groupby.agg`` works.
.. ipython:: python
- mdf = pd.DataFrame({'A': [1, 2, 3],
- 'B': [1., 2., 3.],
- 'C': ['foo', 'bar', 'baz'],
- 'D': pd.date_range('20130101', periods=3)})
+ mdf = pd.DataFrame(
+ {
+ "A": [1, 2, 3],
+ "B": [1.0, 2.0, 3.0],
+ "C": ["foo", "bar", "baz"],
+ "D": pd.date_range("20130101", periods=3),
+ }
+ )
mdf.dtypes
.. ipython:: python
- mdf.agg(['min', 'sum'])
+ mdf.agg(["min", "sum"])
.. _basics.aggregation.custom_describe:
@@ -1049,11 +1071,11 @@ to the built in :ref:`describe function <basics.describe>`.
from functools import partial
q_25 = partial(pd.Series.quantile, q=0.25)
- q_25.__name__ = '25%'
+ q_25.__name__ = "25%"
q_75 = partial(pd.Series.quantile, q=0.75)
- q_75.__name__ = '75%'
+ q_75.__name__ = "75%"
- tsdf.agg(['count', 'mean', 'std', 'min', q_25, 'median', q_75, 'max'])
+ tsdf.agg(["count", "mean", "std", "min", q_25, "median", q_75, "max"])
.. _basics.transform:
@@ -1068,8 +1090,11 @@ We create a frame similar to the one used in the above sections.
.. ipython:: python
- tsdf = pd.DataFrame(np.random.randn(10, 3), columns=['A', 'B', 'C'],
- index=pd.date_range('1/1/2000', periods=10))
+ tsdf = pd.DataFrame(
+ np.random.randn(10, 3),
+ columns=["A", "B", "C"],
+ index=pd.date_range("1/1/2000", periods=10),
+ )
tsdf.iloc[3:7] = np.nan
tsdf
@@ -1080,7 +1105,7 @@ function name or a user defined function.
:okwarning:
tsdf.transform(np.abs)
- tsdf.transform('abs')
+ tsdf.transform("abs")
tsdf.transform(lambda x: x.abs())
Here :meth:`~DataFrame.transform` received a single function; this is equivalent to a `ufunc
@@ -1094,7 +1119,7 @@ Passing a single function to ``.transform()`` with a ``Series`` will yield a sin
.. ipython:: python
- tsdf['A'].transform(np.abs)
+ tsdf["A"].transform(np.abs)
Transform with multiple functions
@@ -1113,7 +1138,7 @@ resulting column names will be the transforming functions.
.. ipython:: python
- tsdf['A'].transform([np.abs, lambda x: x + 1])
+ tsdf["A"].transform([np.abs, lambda x: x + 1])
Transforming with a dict
@@ -1124,7 +1149,7 @@ Passing a dict of functions will allow selective transforming per column.
.. ipython:: python
- tsdf.transform({'A': np.abs, 'B': lambda x: x + 1})
+ tsdf.transform({"A": np.abs, "B": lambda x: x + 1})
Passing a dict of lists will generate a MultiIndexed DataFrame with these
selective transforms.
@@ -1132,7 +1157,7 @@ selective transforms.
.. ipython:: python
:okwarning:
- tsdf.transform({'A': np.abs, 'B': [lambda x: x + 1, 'sqrt']})
+ tsdf.transform({"A": np.abs, "B": [lambda x: x + 1, "sqrt"]})
.. _basics.elementwise:
@@ -1153,10 +1178,12 @@ a single value and returning a single value. For example:
df4
+
def f(x):
return len(str(x))
- df4['one'].map(f)
+
+ df4["one"].map(f)
df4.applymap(f)
:meth:`Series.map` has an additional feature; it can be used to easily
@@ -1165,9 +1192,8 @@ to :ref:`merging/joining functionality <merging>`:
.. ipython:: python
- s = pd.Series(['six', 'seven', 'six', 'seven', 'six'],
- index=['a', 'b', 'c', 'd', 'e'])
- t = pd.Series({'six': 6., 'seven': 7.})
+ s = pd.Series(["six", "seven", "six", "seven", "six"], index=["a", "b", "c", "d", "e"])
+ t = pd.Series({"six": 6.0, "seven": 7.0})
s
s.map(t)
@@ -1192,9 +1218,9 @@ Here is a simple example:
.. ipython:: python
- s = pd.Series(np.random.randn(5), index=['a', 'b', 'c', 'd', 'e'])
+ s = pd.Series(np.random.randn(5), index=["a", "b", "c", "d", "e"])
s
- s.reindex(['e', 'b', 'f', 'd'])
+ s.reindex(["e", "b", "f", "d"])
Here, the ``f`` label was not contained in the Series and hence appears as
``NaN`` in the result.
@@ -1204,13 +1230,13 @@ With a DataFrame, you can simultaneously reindex the index and columns:
.. ipython:: python
df
- df.reindex(index=['c', 'f', 'b'], columns=['three', 'two', 'one'])
+ df.reindex(index=["c", "f", "b"], columns=["three", "two", "one"])
You may also use ``reindex`` with an ``axis`` keyword:
.. ipython:: python
- df.reindex(['c', 'f', 'b'], axis='index')
+ df.reindex(["c", "f", "b"], axis="index")
Note that the ``Index`` objects containing the actual axis labels can be
**shared** between objects. So if we have a Series and a DataFrame, the
@@ -1230,8 +1256,8 @@ where you specify a single ``labels`` argument and the ``axis`` it applies to.
.. ipython:: python
- df.reindex(['c', 'f', 'b'], axis='index')
- df.reindex(['three', 'two', 'one'], axis='columns')
+ df.reindex(["c", "f", "b"], axis="index")
+ df.reindex(["three", "two", "one"], axis="columns")
.. seealso::
@@ -1261,7 +1287,7 @@ available to make this simpler:
.. ipython:: python
:suppress:
- df2 = df.reindex(['a', 'b', 'c'], columns=['one', 'two'])
+ df2 = df.reindex(["a", "b", "c"], columns=["one", "two"])
df3 = df2 - df2.mean()
@@ -1288,12 +1314,12 @@ It returns a tuple with both of the reindexed Series:
.. ipython:: python
- s = pd.Series(np.random.randn(5), index=['a', 'b', 'c', 'd', 'e'])
+ s = pd.Series(np.random.randn(5), index=["a", "b", "c", "d", "e"])
s1 = s[:4]
s2 = s[1:]
s1.align(s2)
- s1.align(s2, join='inner')
- s1.align(s2, join='left')
+ s1.align(s2, join="inner")
+ s1.align(s2, join="left")
.. _basics.df_join:
@@ -1302,13 +1328,13 @@ columns by default:
.. ipython:: python
- df.align(df2, join='inner')
+ df.align(df2, join="inner")
You can also pass an ``axis`` option to only align on the specified axis:
.. ipython:: python
- df.align(df2, join='inner', axis=0)
+ df.align(df2, join="inner", axis=0)
.. _basics.align.frame.series:
@@ -1339,16 +1365,16 @@ We illustrate these fill methods on a simple Series:
.. ipython:: python
- rng = pd.date_range('1/3/2000', periods=8)
+ rng = pd.date_range("1/3/2000", periods=8)
ts = pd.Series(np.random.randn(8), index=rng)
ts2 = ts[[0, 3, 6]]
ts
ts2
ts2.reindex(ts.index)
- ts2.reindex(ts.index, method='ffill')
- ts2.reindex(ts.index, method='bfill')
- ts2.reindex(ts.index, method='nearest')
+ ts2.reindex(ts.index, method="ffill")
+ ts2.reindex(ts.index, method="bfill")
+ ts2.reindex(ts.index, method="nearest")
These methods require that the indexes are **ordered** increasing or
decreasing.
@@ -1359,7 +1385,7 @@ Note that the same result could have been achieved using
.. ipython:: python
- ts2.reindex(ts.index).fillna(method='ffill')
+ ts2.reindex(ts.index).fillna(method="ffill")
:meth:`~Series.reindex` will raise a ValueError if the index is not monotonically
increasing or decreasing. :meth:`~Series.fillna` and :meth:`~Series.interpolate`
@@ -1376,14 +1402,14 @@ matches:
.. ipython:: python
- ts2.reindex(ts.index, method='ffill', limit=1)
+ ts2.reindex(ts.index, method="ffill", limit=1)
In contrast, tolerance specifies the maximum distance between the index and
indexer values:
.. ipython:: python
- ts2.reindex(ts.index, method='ffill', tolerance='1 day')
+ ts2.reindex(ts.index, method="ffill", tolerance="1 day")
Notice that when used on a ``DatetimeIndex``, ``TimedeltaIndex`` or
``PeriodIndex``, ``tolerance`` will be coerced into a ``Timedelta`` if possible.
@@ -1400,14 +1426,14 @@ It removes a set of labels from an axis:
.. ipython:: python
df
- df.drop(['a', 'd'], axis=0)
- df.drop(['one'], axis=1)
+ df.drop(["a", "d"], axis=0)
+ df.drop(["one"], axis=1)
Note that the following also works, but is a bit less obvious / clean:
.. ipython:: python
- df.reindex(df.index.difference(['a', 'd']))
+ df.reindex(df.index.difference(["a", "d"]))
.. _basics.rename:
@@ -1428,8 +1454,10 @@ Series can also be used:
.. ipython:: python
- df.rename(columns={'one': 'foo', 'two': 'bar'},
- index={'a': 'apple', 'b': 'banana', 'd': 'durian'})
+ df.rename(
+ columns={"one": "foo", "two": "bar"},
+ index={"a": "apple", "b": "banana", "d": "durian"},
+ )
If the mapping doesn't include a column/index label, it isn't renamed. Note that
extra labels in the mapping don't throw an error.
@@ -1439,8 +1467,8 @@ you specify a single ``mapper`` and the ``axis`` to apply that mapping to.
.. ipython:: python
- df.rename({'one': 'foo', 'two': 'bar'}, axis='columns')
- df.rename({'a': 'apple', 'b': 'banana', 'd': 'durian'}, axis='index')
+ df.rename({"one": "foo", "two": "bar"}, axis="columns")
+ df.rename({"a": "apple", "b": "banana", "d": "durian"}, axis="index")
The :meth:`~DataFrame.rename` method also provides an ``inplace`` named
@@ -1464,12 +1492,12 @@ labels).
.. ipython:: python
- df = pd.DataFrame({'x': [1, 2, 3, 4, 5, 6],
- 'y': [10, 20, 30, 40, 50, 60]},
- index=pd.MultiIndex.from_product([['a', 'b', 'c'], [1, 2]],
- names=['let', 'num']))
+ df = pd.DataFrame(
+ {"x": [1, 2, 3, 4, 5, 6], "y": [10, 20, 30, 40, 50, 60]},
+ index=pd.MultiIndex.from_product([["a", "b", "c"], [1, 2]], names=["let", "num"]),
+ )
df
- df.rename_axis(index={'let': 'abc'})
+ df.rename_axis(index={"let": "abc"})
df.rename_axis(index=str.upper)
.. _basics.iteration:
@@ -1491,8 +1519,9 @@ Thus, for example, iterating over a DataFrame gives you the column names:
.. ipython:: python
- df = pd.DataFrame({'col1': np.random.randn(3),
- 'col2': np.random.randn(3)}, index=['a', 'b', 'c'])
+ df = pd.DataFrame(
+ {"col1": np.random.randn(3), "col2": np.random.randn(3)}, index=["a", "b", "c"]
+ )
for col in df:
print(col)
@@ -1540,10 +1569,10 @@ To iterate over the rows of a DataFrame, you can use the following methods:
.. ipython:: python
- df = pd.DataFrame({'a': [1, 2, 3], 'b': ['a', 'b', 'c']})
+ df = pd.DataFrame({"a": [1, 2, 3], "b": ["a", "b", "c"]})
for index, row in df.iterrows():
- row['a'] = 10
+ row["a"] = 10
df
@@ -1576,7 +1605,7 @@ index value along with a Series containing the data in each row:
.. ipython:: python
for row_index, row in df.iterrows():
- print(row_index, row, sep='\n')
+ print(row_index, row, sep="\n")
.. note::
@@ -1586,7 +1615,7 @@ index value along with a Series containing the data in each row:
.. ipython:: python
- df_orig = pd.DataFrame([[1, 1.5]], columns=['int', 'float'])
+ df_orig = pd.DataFrame([[1, 1.5]], columns=["int", "float"])
df_orig.dtypes
row = next(df_orig.iterrows())[1]
row
@@ -1596,8 +1625,8 @@ index value along with a Series containing the data in each row:
.. ipython:: python
- row['int'].dtype
- df_orig['int'].dtype
+ row["int"].dtype
+ df_orig["int"].dtype
To preserve dtypes while iterating over the rows, it is better
to use :meth:`~DataFrame.itertuples` which returns namedtuples of the values
@@ -1607,7 +1636,7 @@ For instance, a contrived way to transpose the DataFrame would be:
.. ipython:: python
- df2 = pd.DataFrame({'x': [1, 2, 3], 'y': [4, 5, 6]})
+ df2 = pd.DataFrame({"x": [1, 2, 3], "y": [4, 5, 6]})
print(df2)
print(df2.T)
@@ -1652,7 +1681,7 @@ This will return a Series, indexed like the existing Series.
.. ipython:: python
# datetime
- s = pd.Series(pd.date_range('20130101 09:10:12', periods=4))
+ s = pd.Series(pd.date_range("20130101 09:10:12", periods=4))
s
s.dt.hour
s.dt.second
@@ -1668,7 +1697,7 @@ You can easily produces tz aware transformations:
.. ipython:: python
- stz = s.dt.tz_localize('US/Eastern')
+ stz = s.dt.tz_localize("US/Eastern")
stz
stz.dt.tz
@@ -1676,7 +1705,7 @@ You can also chain these types of operations:
.. ipython:: python
- s.dt.tz_localize('UTC').dt.tz_convert('US/Eastern')
+ s.dt.tz_localize("UTC").dt.tz_convert("US/Eastern")
You can also format datetime values as strings with :meth:`Series.dt.strftime` which
supports the same format as the standard :meth:`~datetime.datetime.strftime`.
@@ -1684,23 +1713,23 @@ supports the same format as the standard :meth:`~datetime.datetime.strftime`.
.. ipython:: python
# DatetimeIndex
- s = pd.Series(pd.date_range('20130101', periods=4))
+ s = pd.Series(pd.date_range("20130101", periods=4))
s
- s.dt.strftime('%Y/%m/%d')
+ s.dt.strftime("%Y/%m/%d")
.. ipython:: python
# PeriodIndex
- s = pd.Series(pd.period_range('20130101', periods=4))
+ s = pd.Series(pd.period_range("20130101", periods=4))
s
- s.dt.strftime('%Y/%m/%d')
+ s.dt.strftime("%Y/%m/%d")
The ``.dt`` accessor works for period and timedelta dtypes.
.. ipython:: python
# period
- s = pd.Series(pd.period_range('20130101', periods=4, freq='D'))
+ s = pd.Series(pd.period_range("20130101", periods=4, freq="D"))
s
s.dt.year
s.dt.day
@@ -1708,7 +1737,7 @@ The ``.dt`` accessor works for period and timedelta dtypes.
.. ipython:: python
# timedelta
- s = pd.Series(pd.timedelta_range('1 day 00:00:05', periods=4, freq='s'))
+ s = pd.Series(pd.timedelta_range("1 day 00:00:05", periods=4, freq="s"))
s
s.dt.days
s.dt.seconds
@@ -1729,8 +1758,9 @@ built-in string methods. For example:
.. ipython:: python
- s = pd.Series(['A', 'B', 'C', 'Aaba', 'Baca', np.nan, 'CABA', 'dog', 'cat'],
- dtype="string")
+ s = pd.Series(
+ ["A", "B", "C", "Aaba", "Baca", np.nan, "CABA", "dog", "cat"], dtype="string"
+ )
s.str.lower()
Powerful pattern-matching methods are provided as well, but note that
@@ -1765,13 +1795,15 @@ used to sort a pandas object by its index levels.
.. ipython:: python
- df = pd.DataFrame({
- 'one': pd.Series(np.random.randn(3), index=['a', 'b', 'c']),
- 'two': pd.Series(np.random.randn(4), index=['a', 'b', 'c', 'd']),
- 'three': pd.Series(np.random.randn(3), index=['b', 'c', 'd'])})
+ df = pd.DataFrame(
+ {
+ "one": pd.Series(np.random.randn(3), index=["a", "b", "c"]),
+ "two": pd.Series(np.random.randn(4), index=["a", "b", "c", "d"]),
+ "three": pd.Series(np.random.randn(3), index=["b", "c", "d"]),
+ }
+ )
- unsorted_df = df.reindex(index=['a', 'd', 'c', 'b'],
- columns=['three', 'two', 'one'])
+ unsorted_df = df.reindex(index=["a", "d", "c", "b"], columns=["three", "two", "one"])
unsorted_df
# DataFrame
@@ -1780,7 +1812,7 @@ used to sort a pandas object by its index levels.
unsorted_df.sort_index(axis=1)
# Series
- unsorted_df['three'].sort_index()
+ unsorted_df["three"].sort_index()
.. _basics.sort_index_key:
@@ -1792,11 +1824,9 @@ the key is applied per-level to the levels specified by ``level``.
.. ipython:: python
- s1 = pd.DataFrame({
- "a": ['B', 'a', 'C'],
- "b": [1, 2, 3],
- "c": [2, 3, 4]
- }).set_index(list("ab"))
+ s1 = pd.DataFrame({"a": ["B", "a", "C"], "b": [1, 2, 3], "c": [2, 3, 4]}).set_index(
+ list("ab")
+ )
s1
.. ipython:: python
@@ -1819,16 +1849,14 @@ to use to determine the sorted order.
.. ipython:: python
- df1 = pd.DataFrame({'one': [2, 1, 1, 1],
- 'two': [1, 3, 2, 4],
- 'three': [5, 4, 3, 2]})
- df1.sort_values(by='two')
+ df1 = pd.DataFrame({"one": [2, 1, 1, 1], "two": [1, 3, 2, 4], "three": [5, 4, 3, 2]})
+ df1.sort_values(by="two")
The ``by`` parameter can take a list of column names, e.g.:
.. ipython:: python
- df1[['one', 'two', 'three']].sort_values(by=['one', 'two'])
+ df1[["one", "two", "three"]].sort_values(by=["one", "two"])
These methods have special treatment of NA values via the ``na_position``
argument:
@@ -1837,7 +1865,7 @@ argument:
s[2] = np.nan
s.sort_values()
- s.sort_values(na_position='first')
+ s.sort_values(na_position="first")
.. _basics.sort_value_key:
@@ -1848,7 +1876,7 @@ to apply to the values being sorted.
.. ipython:: python
- s1 = pd.Series(['B', 'a', 'C'])
+ s1 = pd.Series(["B", "a", "C"])
.. ipython:: python
@@ -1862,12 +1890,12 @@ a Series, e.g.
.. ipython:: python
- df = pd.DataFrame({"a": ['B', 'a', 'C'], "b": [1, 2, 3]})
+ df = pd.DataFrame({"a": ["B", "a", "C"], "b": [1, 2, 3]})
.. ipython:: python
- df.sort_values(by='a')
- df.sort_values(by='a', key=lambda col: col.str.lower())
+ df.sort_values(by="a")
+ df.sort_values(by="a", key=lambda col: col.str.lower())
The name or type of each column can be used to apply different functions to
different columns.
@@ -1883,20 +1911,20 @@ refer to either columns or index level names.
.. ipython:: python
# Build MultiIndex
- idx = pd.MultiIndex.from_tuples([('a', 1), ('a', 2), ('a', 2),
- ('b', 2), ('b', 1), ('b', 1)])
- idx.names = ['first', 'second']
+ idx = pd.MultiIndex.from_tuples(
+ [("a", 1), ("a", 2), ("a", 2), ("b", 2), ("b", 1), ("b", 1)]
+ )
+ idx.names = ["first", "second"]
# Build DataFrame
- df_multi = pd.DataFrame({'A': np.arange(6, 0, -1)},
- index=idx)
+ df_multi = pd.DataFrame({"A": np.arange(6, 0, -1)}, index=idx)
df_multi
Sort by 'second' (index) and 'A' (column)
.. ipython:: python
- df_multi.sort_values(by=['second', 'A'])
+ df_multi.sort_values(by=["second", "A"])
.. note::
@@ -1917,8 +1945,8 @@ Series has the :meth:`~Series.searchsorted` method, which works similarly to
ser = pd.Series([1, 2, 3])
ser.searchsorted([0, 3])
ser.searchsorted([0, 4])
- ser.searchsorted([1, 3], side='right')
- ser.searchsorted([1, 3], side='left')
+ ser.searchsorted([1, 3], side="right")
+ ser.searchsorted([1, 3], side="left")
ser = pd.Series([3, 1, 2])
ser.searchsorted([0, 3], sorter=np.argsort(ser))
@@ -1943,13 +1971,17 @@ faster than sorting the entire Series and calling ``head(n)`` on the result.
.. ipython:: python
- df = pd.DataFrame({'a': [-2, -1, 1, 10, 8, 11, -1],
- 'b': list('abdceff'),
- 'c': [1.0, 2.0, 4.0, 3.2, np.nan, 3.0, 4.0]})
- df.nlargest(3, 'a')
- df.nlargest(5, ['a', 'c'])
- df.nsmallest(3, 'a')
- df.nsmallest(5, ['a', 'c'])
+ df = pd.DataFrame(
+ {
+ "a": [-2, -1, 1, 10, 8, 11, -1],
+ "b": list("abdceff"),
+ "c": [1.0, 2.0, 4.0, 3.2, np.nan, 3.0, 4.0],
+ }
+ )
+ df.nlargest(3, "a")
+ df.nlargest(5, ["a", "c"])
+ df.nsmallest(3, "a")
+ df.nsmallest(5, ["a", "c"])
.. _basics.multiindex_sorting:
@@ -1962,10 +1994,8 @@ all levels to ``by``.
.. ipython:: python
- df1.columns = pd.MultiIndex.from_tuples([('a', 'one'),
- ('a', 'two'),
- ('b', 'three')])
- df1.sort_values(by=('a', 'two'))
+ df1.columns = pd.MultiIndex.from_tuples([("a", "one"), ("a", "two"), ("b", "three")])
+ df1.sort_values(by=("a", "two"))
Copying
@@ -2048,13 +2078,17 @@ with the data type of each column.
.. ipython:: python
- dft = pd.DataFrame({'A': np.random.rand(3),
- 'B': 1,
- 'C': 'foo',
- 'D': pd.Timestamp('20010102'),
- 'E': pd.Series([1.0] * 3).astype('float32'),
- 'F': False,
- 'G': pd.Series([1] * 3, dtype='int8')})
+ dft = pd.DataFrame(
+ {
+ "A": np.random.rand(3),
+ "B": 1,
+ "C": "foo",
+ "D": pd.Timestamp("20010102"),
+ "E": pd.Series([1.0] * 3).astype("float32"),
+ "F": False,
+ "G": pd.Series([1] * 3, dtype="int8"),
+ }
+ )
dft
dft.dtypes
@@ -2062,7 +2096,7 @@ On a ``Series`` object, use the :attr:`~Series.dtype` attribute.
.. ipython:: python
- dft['A'].dtype
+ dft["A"].dtype
If a pandas object contains data with multiple dtypes *in a single column*, the
dtype of the column will be chosen to accommodate all of the data types
@@ -2071,10 +2105,10 @@ dtype of the column will be chosen to accommodate all of the data types
.. ipython:: python
# these ints are coerced to floats
- pd.Series([1, 2, 3, 4, 5, 6.])
+ pd.Series([1, 2, 3, 4, 5, 6.0])
# string data forces an ``object`` dtype
- pd.Series([1, 2, 3, 6., 'foo'])
+ pd.Series([1, 2, 3, 6.0, "foo"])
The number of columns of each type in a ``DataFrame`` can be found by calling
``DataFrame.dtypes.value_counts()``.
@@ -2090,13 +2124,16 @@ different numeric dtypes will **NOT** be combined. The following example will gi
.. ipython:: python
- df1 = pd.DataFrame(np.random.randn(8, 1), columns=['A'], dtype='float32')
+ df1 = pd.DataFrame(np.random.randn(8, 1), columns=["A"], dtype="float32")
df1
df1.dtypes
- df2 = pd.DataFrame({'A': pd.Series(np.random.randn(8), dtype='float16'),
- 'B': pd.Series(np.random.randn(8)),
- 'C': pd.Series(np.array(np.random.randn(8),
- dtype='uint8'))})
+ df2 = pd.DataFrame(
+ {
+ "A": pd.Series(np.random.randn(8), dtype="float16"),
+ "B": pd.Series(np.random.randn(8)),
+ "C": pd.Series(np.array(np.random.randn(8), dtype="uint8")),
+ }
+ )
df2
df2.dtypes
@@ -2109,9 +2146,9 @@ The following will all result in ``int64`` dtypes.
.. ipython:: python
- pd.DataFrame([1, 2], columns=['a']).dtypes
- pd.DataFrame({'a': [1, 2]}).dtypes
- pd.DataFrame({'a': 1}, index=list(range(2))).dtypes
+ pd.DataFrame([1, 2], columns=["a"]).dtypes
+ pd.DataFrame({"a": [1, 2]}).dtypes
+ pd.DataFrame({"a": 1}, index=list(range(2))).dtypes
Note that Numpy will choose *platform-dependent* types when creating arrays.
The following **WILL** result in ``int32`` on 32-bit platform.
@@ -2159,15 +2196,15 @@ then the more *general* one will be used as the result of the operation.
df3.dtypes
# conversion of dtypes
- df3.astype('float32').dtypes
+ df3.astype("float32").dtypes
Convert a subset of columns to a specified type using :meth:`~DataFrame.astype`.
.. ipython:: python
- dft = pd.DataFrame({'a': [1, 2, 3], 'b': [4, 5, 6], 'c': [7, 8, 9]})
- dft[['a', 'b']] = dft[['a', 'b']].astype(np.uint8)
+ dft = pd.DataFrame({"a": [1, 2, 3], "b": [4, 5, 6], "c": [7, 8, 9]})
+ dft[["a", "b"]] = dft[["a", "b"]].astype(np.uint8)
dft
dft.dtypes
@@ -2175,8 +2212,8 @@ Convert certain columns to a specific dtype by passing a dict to :meth:`~DataFra
.. ipython:: python
- dft1 = pd.DataFrame({'a': [1, 0, 1], 'b': [4, 5, 6], 'c': [7, 8, 9]})
- dft1 = dft1.astype({'a': np.bool, 'c': np.float64})
+ dft1 = pd.DataFrame({"a": [1, 0, 1], "b": [4, 5, 6], "c": [7, 8, 9]})
+ dft1 = dft1.astype({"a": np.bool, "c": np.float64})
dft1
dft1.dtypes
@@ -2188,9 +2225,9 @@ Convert certain columns to a specific dtype by passing a dict to :meth:`~DataFra
.. ipython:: python
- dft = pd.DataFrame({'a': [1, 2, 3], 'b': [4, 5, 6], 'c': [7, 8, 9]})
- dft.loc[:, ['a', 'b']].astype(np.uint8).dtypes
- dft.loc[:, ['a', 'b']] = dft.loc[:, ['a', 'b']].astype(np.uint8)
+ dft = pd.DataFrame({"a": [1, 2, 3], "b": [4, 5, 6], "c": [7, 8, 9]})
+ dft.loc[:, ["a", "b"]].astype(np.uint8).dtypes
+ dft.loc[:, ["a", "b"]] = dft.loc[:, ["a", "b"]].astype(np.uint8)
dft.dtypes
.. _basics.object_conversion:
@@ -2206,10 +2243,10 @@ to the correct type.
.. ipython:: python
import datetime
- df = pd.DataFrame([[1, 2],
- ['a', 'b'],
- [datetime.datetime(2016, 3, 2),
- datetime.datetime(2016, 3, 2)]])
+
+ df = pd.DataFrame(
+ [[1, 2], ["a", "b"], [datetime.datetime(2016, 3, 2), datetime.datetime(2016, 3, 2)]]
+ )
df = df.T
df
df.dtypes
@@ -2228,7 +2265,7 @@ hard conversion of objects to a specified type:
.. ipython:: python
- m = ['1.1', 2, 3]
+ m = ["1.1", 2, 3]
pd.to_numeric(m)
* :meth:`~pandas.to_datetime` (conversion to datetime objects)
@@ -2236,14 +2273,15 @@ hard conversion of objects to a specified type:
.. ipython:: python
import datetime
- m = ['2016-07-09', datetime.datetime(2016, 3, 2)]
+
+ m = ["2016-07-09", datetime.datetime(2016, 3, 2)]
pd.to_datetime(m)
* :meth:`~pandas.to_timedelta` (conversion to timedelta objects)
.. ipython:: python
- m = ['5us', pd.Timedelta('1day')]
+ m = ["5us", pd.Timedelta("1day")]
pd.to_timedelta(m)
To force a conversion, we can pass in an ``errors`` argument, which specifies how pandas should deal with elements
@@ -2256,14 +2294,15 @@ non-conforming elements intermixed that you want to represent as missing:
.. ipython:: python
import datetime
- m = ['apple', datetime.datetime(2016, 3, 2)]
- pd.to_datetime(m, errors='coerce')
- m = ['apple', 2, 3]
- pd.to_numeric(m, errors='coerce')
+ m = ["apple", datetime.datetime(2016, 3, 2)]
+ pd.to_datetime(m, errors="coerce")
- m = ['apple', pd.Timedelta('1day')]
- pd.to_timedelta(m, errors='coerce')
+ m = ["apple", 2, 3]
+ pd.to_numeric(m, errors="coerce")
+
+ m = ["apple", pd.Timedelta("1day")]
+ pd.to_timedelta(m, errors="coerce")
The ``errors`` parameter has a third option of ``errors='ignore'``, which will simply return the passed in data if it
encounters any errors with the conversion to a desired data type:
@@ -2271,25 +2310,26 @@ encounters any errors with the conversion to a desired data type:
.. ipython:: python
import datetime
- m = ['apple', datetime.datetime(2016, 3, 2)]
- pd.to_datetime(m, errors='ignore')
- m = ['apple', 2, 3]
- pd.to_numeric(m, errors='ignore')
+ m = ["apple", datetime.datetime(2016, 3, 2)]
+ pd.to_datetime(m, errors="ignore")
+
+ m = ["apple", 2, 3]
+ pd.to_numeric(m, errors="ignore")
- m = ['apple', pd.Timedelta('1day')]
- pd.to_timedelta(m, errors='ignore')
+ m = ["apple", pd.Timedelta("1day")]
+ pd.to_timedelta(m, errors="ignore")
In addition to object conversion, :meth:`~pandas.to_numeric` provides another argument ``downcast``, which gives the
option of downcasting the newly (or already) numeric data to a smaller dtype, which can conserve memory:
.. ipython:: python
- m = ['1', 2, 3]
- pd.to_numeric(m, downcast='integer') # smallest signed int dtype
- pd.to_numeric(m, downcast='signed') # same as 'integer'
- pd.to_numeric(m, downcast='unsigned') # smallest unsigned int dtype
- pd.to_numeric(m, downcast='float') # smallest float dtype
+ m = ["1", 2, 3]
+ pd.to_numeric(m, downcast="integer") # smallest signed int dtype
+ pd.to_numeric(m, downcast="signed") # same as 'integer'
+ pd.to_numeric(m, downcast="unsigned") # smallest unsigned int dtype
+ pd.to_numeric(m, downcast="float") # smallest float dtype
As these methods apply only to one-dimensional arrays, lists or scalars; they cannot be used directly on multi-dimensional objects such
as DataFrames. However, with :meth:`~pandas.DataFrame.apply`, we can "apply" the function over each column efficiently:
@@ -2297,16 +2337,16 @@ as DataFrames. However, with :meth:`~pandas.DataFrame.apply`, we can "apply" the
.. ipython:: python
import datetime
- df = pd.DataFrame([
- ['2016-07-09', datetime.datetime(2016, 3, 2)]] * 2, dtype='O')
+
+ df = pd.DataFrame([["2016-07-09", datetime.datetime(2016, 3, 2)]] * 2, dtype="O")
df
df.apply(pd.to_datetime)
- df = pd.DataFrame([['1.1', 2, 3]] * 2, dtype='O')
+ df = pd.DataFrame([["1.1", 2, 3]] * 2, dtype="O")
df
df.apply(pd.to_numeric)
- df = pd.DataFrame([['5us', pd.Timedelta('1day')]] * 2, dtype='O')
+ df = pd.DataFrame([["5us", pd.Timedelta("1day")]] * 2, dtype="O")
df
df.apply(pd.to_timedelta)
@@ -2319,8 +2359,8 @@ See also :ref:`Support for integer NA <gotchas.intna>`.
.. ipython:: python
- dfi = df3.astype('int32')
- dfi['E'] = 1
+ dfi = df3.astype("int32")
+ dfi["E"] = 1
dfi
dfi.dtypes
@@ -2333,7 +2373,7 @@ While float dtypes are unchanged.
.. ipython:: python
dfa = df3.copy()
- dfa['A'] = dfa['A'].astype('float32')
+ dfa["A"] = dfa["A"].astype("float32")
dfa.dtypes
casted = dfa[df2 > 0]
@@ -2353,18 +2393,22 @@ dtypes:
.. ipython:: python
- df = pd.DataFrame({'string': list('abc'),
- 'int64': list(range(1, 4)),
- 'uint8': np.arange(3, 6).astype('u1'),
- 'float64': np.arange(4.0, 7.0),
- 'bool1': [True, False, True],
- 'bool2': [False, True, False],
- 'dates': pd.date_range('now', periods=3),
- 'category': pd.Series(list("ABC")).astype('category')})
- df['tdeltas'] = df.dates.diff()
- df['uint64'] = np.arange(3, 6).astype('u8')
- df['other_dates'] = pd.date_range('20130101', periods=3)
- df['tz_aware_dates'] = pd.date_range('20130101', periods=3, tz='US/Eastern')
+ df = pd.DataFrame(
+ {
+ "string": list("abc"),
+ "int64": list(range(1, 4)),
+ "uint8": np.arange(3, 6).astype("u1"),
+ "float64": np.arange(4.0, 7.0),
+ "bool1": [True, False, True],
+ "bool2": [False, True, False],
+ "dates": pd.date_range("now", periods=3),
+ "category": pd.Series(list("ABC")).astype("category"),
+ }
+ )
+ df["tdeltas"] = df.dates.diff()
+ df["uint64"] = np.arange(3, 6).astype("u8")
+ df["other_dates"] = pd.date_range("20130101", periods=3)
+ df["tz_aware_dates"] = pd.date_range("20130101", periods=3, tz="US/Eastern")
df
And the dtypes:
@@ -2388,7 +2432,7 @@ You can also pass the name of a dtype in the `NumPy dtype hierarchy
.. ipython:: python
- df.select_dtypes(include=['bool'])
+ df.select_dtypes(include=["bool"])
:meth:`~pandas.DataFrame.select_dtypes` also works with generic dtypes as well.
@@ -2397,13 +2441,13 @@ integers:
.. ipython:: python
- df.select_dtypes(include=['number', 'bool'], exclude=['unsignedinteger'])
+ df.select_dtypes(include=["number", "bool"], exclude=["unsignedinteger"])
To select string columns you must use the ``object`` dtype:
.. ipython:: python
- df.select_dtypes(include=['object'])
+ df.select_dtypes(include=["object"])
To see all the child dtypes of a generic ``dtype`` like ``numpy.number`` you
can define a function that returns a tree of child dtypes:
diff --git a/setup.cfg b/setup.cfg
index 73986f692b6cd..211e8ebede8b6 100644
--- a/setup.cfg
+++ b/setup.cfg
@@ -41,6 +41,7 @@ bootstrap =
pd # (in some cases we want to do it to show users)
ignore = E402, # module level import not at top of file
W503, # line break before binary operator
+ E203, # space before : (needed for how black formats slicing)
# Classes/functions in different blocks can generate those errors
E302, # expected 2 blank lines, found 0
E305, # expected 2 blank lines after class or function definition, found 0
| For task #36777
Ran blacken-tools on user_guide/10min.rst, user_guide/advanced.rst, basics.rst
| https://api.github.com/repos/pandas-dev/pandas/pulls/36802 | 2020-10-02T06:09:05Z | 2020-10-05T02:07:28Z | 2020-10-05T02:07:28Z | 2020-10-06T08:15:40Z |
CLN: Use more pytest idioms in test_momemts_ewm.py | diff --git a/pandas/tests/window/moments/test_moments_ewm.py b/pandas/tests/window/moments/test_moments_ewm.py
index a83bfabc4a048..287cd7ebba536 100644
--- a/pandas/tests/window/moments/test_moments_ewm.py
+++ b/pandas/tests/window/moments/test_moments_ewm.py
@@ -7,21 +7,19 @@
import pandas._testing as tm
-def check_ew(name=None, preserve_nan=False, series=None, frame=None, nan_locs=None):
+@pytest.mark.parametrize("name", ["var", "vol", "mean"])
+def test_ewma_series(series, name):
series_result = getattr(series.ewm(com=10), name)()
assert isinstance(series_result, Series)
- frame_result = getattr(frame.ewm(com=10), name)()
- assert type(frame_result) == DataFrame
-
- result = getattr(series.ewm(com=10), name)()
- if preserve_nan:
- assert result[nan_locs].isna().all()
+@pytest.mark.parametrize("name", ["var", "vol", "mean"])
+def test_ewma_frame(frame, name):
+ frame_result = getattr(frame.ewm(com=10), name)()
+ assert isinstance(frame_result, DataFrame)
-def test_ewma(series, frame, nan_locs):
- check_ew(name="mean", frame=frame, series=series, nan_locs=nan_locs)
+def test_ewma_adjust():
vals = pd.Series(np.zeros(1000))
vals[5] = 1
result = vals.ewm(span=100, adjust=False).mean().sum()
@@ -53,63 +51,153 @@ def test_ewma_nan_handling():
result = s.ewm(com=5).mean()
tm.assert_series_equal(result, Series([np.nan] * 2 + [1.0] * 4))
- # GH 7603
- s0 = Series([np.nan, 1.0, 101.0])
- s1 = Series([1.0, np.nan, 101.0])
- s2 = Series([np.nan, 1.0, np.nan, np.nan, 101.0, np.nan])
- s3 = Series([1.0, np.nan, 101.0, 50.0])
- com = 2.0
- alpha = 1.0 / (1.0 + com)
-
- def simple_wma(s, w):
- return (s.multiply(w).cumsum() / w.cumsum()).fillna(method="ffill")
-
- for (s, adjust, ignore_na, w) in [
- (s0, True, False, [np.nan, (1.0 - alpha), 1.0]),
- (s0, True, True, [np.nan, (1.0 - alpha), 1.0]),
- (s0, False, False, [np.nan, (1.0 - alpha), alpha]),
- (s0, False, True, [np.nan, (1.0 - alpha), alpha]),
- (s1, True, False, [(1.0 - alpha) ** 2, np.nan, 1.0]),
- (s1, True, True, [(1.0 - alpha), np.nan, 1.0]),
- (s1, False, False, [(1.0 - alpha) ** 2, np.nan, alpha]),
- (s1, False, True, [(1.0 - alpha), np.nan, alpha]),
- (s2, True, False, [np.nan, (1.0 - alpha) ** 3, np.nan, np.nan, 1.0, np.nan]),
- (s2, True, True, [np.nan, (1.0 - alpha), np.nan, np.nan, 1.0, np.nan]),
+
+@pytest.mark.parametrize(
+ "s, adjust, ignore_na, w",
+ [
+ (
+ Series([np.nan, 1.0, 101.0]),
+ True,
+ False,
+ [np.nan, (1.0 - (1.0 / (1.0 + 2.0))), 1.0],
+ ),
+ (
+ Series([np.nan, 1.0, 101.0]),
+ True,
+ True,
+ [np.nan, (1.0 - (1.0 / (1.0 + 2.0))), 1.0],
+ ),
+ (
+ Series([np.nan, 1.0, 101.0]),
+ False,
+ False,
+ [np.nan, (1.0 - (1.0 / (1.0 + 2.0))), (1.0 / (1.0 + 2.0))],
+ ),
+ (
+ Series([np.nan, 1.0, 101.0]),
+ False,
+ True,
+ [np.nan, (1.0 - (1.0 / (1.0 + 2.0))), (1.0 / (1.0 + 2.0))],
+ ),
+ (
+ Series([1.0, np.nan, 101.0]),
+ True,
+ False,
+ [(1.0 - (1.0 / (1.0 + 2.0))) ** 2, np.nan, 1.0],
+ ),
(
- s2,
+ Series([1.0, np.nan, 101.0]),
+ True,
+ True,
+ [(1.0 - (1.0 / (1.0 + 2.0))), np.nan, 1.0],
+ ),
+ (
+ Series([1.0, np.nan, 101.0]),
+ False,
False,
+ [(1.0 - (1.0 / (1.0 + 2.0))) ** 2, np.nan, (1.0 / (1.0 + 2.0))],
+ ),
+ (
+ Series([1.0, np.nan, 101.0]),
+ False,
+ True,
+ [(1.0 - (1.0 / (1.0 + 2.0))), np.nan, (1.0 / (1.0 + 2.0))],
+ ),
+ (
+ Series([np.nan, 1.0, np.nan, np.nan, 101.0, np.nan]),
+ True,
False,
- [np.nan, (1.0 - alpha) ** 3, np.nan, np.nan, alpha, np.nan],
+ [np.nan, (1.0 - (1.0 / (1.0 + 2.0))) ** 3, np.nan, np.nan, 1.0, np.nan],
+ ),
+ (
+ Series([np.nan, 1.0, np.nan, np.nan, 101.0, np.nan]),
+ True,
+ True,
+ [np.nan, (1.0 - (1.0 / (1.0 + 2.0))), np.nan, np.nan, 1.0, np.nan],
),
- (s2, False, True, [np.nan, (1.0 - alpha), np.nan, np.nan, alpha, np.nan]),
- (s3, True, False, [(1.0 - alpha) ** 3, np.nan, (1.0 - alpha), 1.0]),
- (s3, True, True, [(1.0 - alpha) ** 2, np.nan, (1.0 - alpha), 1.0]),
(
- s3,
+ Series([np.nan, 1.0, np.nan, np.nan, 101.0, np.nan]),
False,
False,
[
- (1.0 - alpha) ** 3,
np.nan,
- (1.0 - alpha) * alpha,
- alpha * ((1.0 - alpha) ** 2 + alpha),
+ (1.0 - (1.0 / (1.0 + 2.0))) ** 3,
+ np.nan,
+ np.nan,
+ (1.0 / (1.0 + 2.0)),
+ np.nan,
],
),
- (s3, False, True, [(1.0 - alpha) ** 2, np.nan, (1.0 - alpha) * alpha, alpha]),
- ]:
- expected = simple_wma(s, Series(w))
- result = s.ewm(com=com, adjust=adjust, ignore_na=ignore_na).mean()
+ (
+ Series([np.nan, 1.0, np.nan, np.nan, 101.0, np.nan]),
+ False,
+ True,
+ [
+ np.nan,
+ (1.0 - (1.0 / (1.0 + 2.0))),
+ np.nan,
+ np.nan,
+ (1.0 / (1.0 + 2.0)),
+ np.nan,
+ ],
+ ),
+ (
+ Series([1.0, np.nan, 101.0, 50.0]),
+ True,
+ False,
+ [
+ (1.0 - (1.0 / (1.0 + 2.0))) ** 3,
+ np.nan,
+ (1.0 - (1.0 / (1.0 + 2.0))),
+ 1.0,
+ ],
+ ),
+ (
+ Series([1.0, np.nan, 101.0, 50.0]),
+ True,
+ True,
+ [
+ (1.0 - (1.0 / (1.0 + 2.0))) ** 2,
+ np.nan,
+ (1.0 - (1.0 / (1.0 + 2.0))),
+ 1.0,
+ ],
+ ),
+ (
+ Series([1.0, np.nan, 101.0, 50.0]),
+ False,
+ False,
+ [
+ (1.0 - (1.0 / (1.0 + 2.0))) ** 3,
+ np.nan,
+ (1.0 - (1.0 / (1.0 + 2.0))) * (1.0 / (1.0 + 2.0)),
+ (1.0 / (1.0 + 2.0))
+ * ((1.0 - (1.0 / (1.0 + 2.0))) ** 2 + (1.0 / (1.0 + 2.0))),
+ ],
+ ),
+ (
+ Series([1.0, np.nan, 101.0, 50.0]),
+ False,
+ True,
+ [
+ (1.0 - (1.0 / (1.0 + 2.0))) ** 2,
+ np.nan,
+ (1.0 - (1.0 / (1.0 + 2.0))) * (1.0 / (1.0 + 2.0)),
+ (1.0 / (1.0 + 2.0)),
+ ],
+ ),
+ ],
+)
+def test_ewma_nan_handling_cases(s, adjust, ignore_na, w):
+ # GH 7603
+ expected = (s.multiply(w).cumsum() / Series(w).cumsum()).fillna(method="ffill")
+ result = s.ewm(com=2.0, adjust=adjust, ignore_na=ignore_na).mean()
+ tm.assert_series_equal(result, expected)
+ if ignore_na is False:
+ # check that ignore_na defaults to False
+ result = s.ewm(com=2.0, adjust=adjust).mean()
tm.assert_series_equal(result, expected)
- if ignore_na is False:
- # check that ignore_na defaults to False
- result = s.ewm(com=com, adjust=adjust).mean()
- tm.assert_series_equal(result, expected)
-
-
-@pytest.mark.parametrize("name", ["var", "vol"])
-def test_ewmvar_ewmvol(series, frame, nan_locs, name):
- check_ew(name=name, frame=frame, series=series, nan_locs=nan_locs)
def test_ewma_span_com_args(series):
| - [x] tests added / passed
- [x] passes `black pandas`
- [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
Use `pytest.mark.parameterize` and unnest helper test functions | https://api.github.com/repos/pandas-dev/pandas/pulls/36801 | 2020-10-02T02:34:31Z | 2020-10-02T20:24:07Z | 2020-10-02T20:24:07Z | 2020-10-02T20:27:26Z |
BUG: Fix FloatingArray output formatting | diff --git a/pandas/core/frame.py b/pandas/core/frame.py
index 539275c7ff617..c1ddf2c29c5ae 100644
--- a/pandas/core/frame.py
+++ b/pandas/core/frame.py
@@ -2656,7 +2656,7 @@ def memory_usage(self, index=True, deep=False) -> Series:
Examples
--------
>>> dtypes = ['int64', 'float64', 'complex128', 'object', 'bool']
- >>> data = dict([(t, np.ones(shape=5000).astype(t))
+ >>> data = dict([(t, np.ones(shape=5000, dtype=int).astype(t))
... for t in dtypes])
>>> df = pd.DataFrame(data)
>>> df.head()
@@ -2691,7 +2691,7 @@ def memory_usage(self, index=True, deep=False) -> Series:
int64 40000
float64 40000
complex128 80000
- object 160000
+ object 180000
bool 5000
dtype: int64
@@ -2790,7 +2790,7 @@ def transpose(self, *args, copy: bool = False) -> DataFrame:
>>> df2_transposed
0 1
name Alice Bob
- score 9.5 8
+ score 9.5 8.0
employed False True
kids 0 0
diff --git a/pandas/io/formats/format.py b/pandas/io/formats/format.py
index d234997ee670c..dcd91b3a12294 100644
--- a/pandas/io/formats/format.py
+++ b/pandas/io/formats/format.py
@@ -1311,7 +1311,7 @@ def _format_strings(self) -> List[str]:
float_format = get_option("display.float_format")
if float_format is None:
precision = get_option("display.precision")
- float_format = lambda x: f"{x: .{precision:d}g}"
+ float_format = lambda x: f"{x: .{precision:d}f}"
else:
float_format = self.float_format
@@ -1372,6 +1372,8 @@ def _format(x):
tpl = " {v}"
fmt_values.append(tpl.format(v=_format(v)))
+ fmt_values = _trim_zeros_float(str_floats=fmt_values, decimal=".")
+
return fmt_values
@@ -1891,27 +1893,31 @@ def _trim_zeros_float(
Trims zeros, leaving just one before the decimal points if need be.
"""
trimmed = str_floats
- number_regex = re.compile(fr"\s*[\+-]?[0-9]+(\{decimal}[0-9]*)?")
+ number_regex = re.compile(fr"^\s*[\+-]?[0-9]+\{decimal}[0-9]*$")
- def _is_number(x):
+ def is_number_with_decimal(x):
return re.match(number_regex, x) is not None
- def _cond(values):
- finite = [x for x in values if _is_number(x)]
- has_decimal = [decimal in x for x in finite]
+ def should_trim(values: Union[np.ndarray, List[str]]) -> bool:
+ """
+ Determine if an array of strings should be trimmed.
- return (
- len(finite) > 0
- and all(has_decimal)
- and all(x.endswith("0") for x in finite)
- and not (any(("e" in x) or ("E" in x) for x in finite))
- )
+ Returns True if all numbers containing decimals (defined by the
+ above regular expression) within the array end in a zero, otherwise
+ returns False.
+ """
+ numbers = [x for x in values if is_number_with_decimal(x)]
+ return len(numbers) > 0 and all(x.endswith("0") for x in numbers)
- while _cond(trimmed):
- trimmed = [x[:-1] if _is_number(x) else x for x in trimmed]
+ while should_trim(trimmed):
+ trimmed = [x[:-1] if is_number_with_decimal(x) else x for x in trimmed]
# leave one 0 after the decimal points if need be.
- return [x + "0" if x.endswith(decimal) and _is_number(x) else x for x in trimmed]
+ result = [
+ x + "0" if is_number_with_decimal(x) and x.endswith(decimal) else x
+ for x in trimmed
+ ]
+ return result
def _has_names(index: Index) -> bool:
diff --git a/pandas/tests/io/formats/test_format.py b/pandas/tests/io/formats/test_format.py
index b30ceed23b02e..78cb8ccc05077 100644
--- a/pandas/tests/io/formats/test_format.py
+++ b/pandas/tests/io/formats/test_format.py
@@ -3439,3 +3439,25 @@ def test_to_string_complex_number_trims_zeros():
result = s.to_string()
expected = "0 1.00+1.00j\n1 1.00+1.00j\n2 1.05+1.00j"
assert result == expected
+
+
+def test_nullable_float_to_string(float_ea_dtype):
+ # https://github.com/pandas-dev/pandas/issues/36775
+ dtype = float_ea_dtype
+ s = pd.Series([0.0, 1.0, None], dtype=dtype)
+ result = s.to_string()
+ expected = """0 0.0
+1 1.0
+2 <NA>"""
+ assert result == expected
+
+
+def test_nullable_int_to_string(any_nullable_int_dtype):
+ # https://github.com/pandas-dev/pandas/issues/36775
+ dtype = any_nullable_int_dtype
+ s = pd.Series([0, 1, None], dtype=dtype)
+ result = s.to_string()
+ expected = """0 0
+1 1
+2 <NA>"""
+ assert result == expected
| - [x] closes #36775
- [x] tests added / passed
- [x] passes `black pandas`
- [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
| https://api.github.com/repos/pandas-dev/pandas/pulls/36800 | 2020-10-02T01:38:28Z | 2020-10-16T01:46:54Z | 2020-10-16T01:46:54Z | 2020-10-16T06:17:06Z |
BUG/CLN: Clean float / complex string formatting | diff --git a/doc/source/whatsnew/v1.2.0.rst b/doc/source/whatsnew/v1.2.0.rst
index 2b4b10c39602a..2805bd9ac2db3 100644
--- a/doc/source/whatsnew/v1.2.0.rst
+++ b/doc/source/whatsnew/v1.2.0.rst
@@ -424,6 +424,7 @@ I/O
- Bug in :func:`read_table` and :func:`read_csv` when ``delim_whitespace=True`` and ``sep=default`` (:issue:`36583`)
- Bug in :meth:`to_json` with ``lines=True`` and ``orient='records'`` the last line of the record is not appended with 'new line character' (:issue:`36888`)
- Bug in :meth:`read_parquet` with fixed offset timezones. String representation of timezones was not recognized (:issue:`35997`, :issue:`36004`)
+- Bug in output rendering of complex numbers showing too many trailing zeros (:issue:`36799`)
Plotting
^^^^^^^^
diff --git a/pandas/core/frame.py b/pandas/core/frame.py
index 0314bdc4ee8ed..f8b8a2c6b6d10 100644
--- a/pandas/core/frame.py
+++ b/pandas/core/frame.py
@@ -2659,11 +2659,11 @@ def memory_usage(self, index=True, deep=False) -> Series:
>>> df = pd.DataFrame(data)
>>> df.head()
int64 float64 complex128 object bool
- 0 1 1.0 1.000000+0.000000j 1 True
- 1 1 1.0 1.000000+0.000000j 1 True
- 2 1 1.0 1.000000+0.000000j 1 True
- 3 1 1.0 1.000000+0.000000j 1 True
- 4 1 1.0 1.000000+0.000000j 1 True
+ 0 1 1.0 1.0+0.0j 1 True
+ 1 1 1.0 1.0+0.0j 1 True
+ 2 1 1.0 1.0+0.0j 1 True
+ 3 1 1.0 1.0+0.0j 1 True
+ 4 1 1.0 1.0+0.0j 1 True
>>> df.memory_usage()
Index 128
diff --git a/pandas/io/formats/format.py b/pandas/io/formats/format.py
index 3e4780ec21378..f5908cf7ca7bb 100644
--- a/pandas/io/formats/format.py
+++ b/pandas/io/formats/format.py
@@ -1474,9 +1474,9 @@ def format_values_with(float_format):
if self.fixed_width:
if is_complex:
- result = _trim_zeros_complex(values, self.decimal, na_rep)
+ result = _trim_zeros_complex(values, self.decimal)
else:
- result = _trim_zeros_float(values, self.decimal, na_rep)
+ result = _trim_zeros_float(values, self.decimal)
return np.asarray(result, dtype="object")
return values
@@ -1857,29 +1857,42 @@ def just(x):
return result
-def _trim_zeros_complex(
- str_complexes: np.ndarray, decimal: str = ".", na_rep: str = "NaN"
-) -> List[str]:
+def _trim_zeros_complex(str_complexes: np.ndarray, decimal: str = ".") -> List[str]:
"""
Separates the real and imaginary parts from the complex number, and
executes the _trim_zeros_float method on each of those.
"""
- return [
- "".join(_trim_zeros_float(re.split(r"([j+-])", x), decimal, na_rep))
+ trimmed = [
+ "".join(_trim_zeros_float(re.split(r"([j+-])", x), decimal))
for x in str_complexes
]
+ # pad strings to the length of the longest trimmed string for alignment
+ lengths = [len(s) for s in trimmed]
+ max_length = max(lengths)
+ padded = [
+ s[: -((k - 1) // 2 + 1)] # real part
+ + (max_length - k) // 2 * "0"
+ + s[-((k - 1) // 2 + 1) : -((k - 1) // 2)] # + / -
+ + s[-((k - 1) // 2) : -1] # imaginary part
+ + (max_length - k) // 2 * "0"
+ + s[-1]
+ for s, k in zip(trimmed, lengths)
+ ]
+ return padded
+
def _trim_zeros_float(
- str_floats: Union[np.ndarray, List[str]], decimal: str = ".", na_rep: str = "NaN"
+ str_floats: Union[np.ndarray, List[str]], decimal: str = "."
) -> List[str]:
"""
Trims zeros, leaving just one before the decimal points if need be.
"""
trimmed = str_floats
+ number_regex = re.compile(fr"\s*[\+-]?[0-9]+(\{decimal}[0-9]*)?")
def _is_number(x):
- return x != na_rep and not x.endswith("inf")
+ return re.match(number_regex, x) is not None
def _cond(values):
finite = [x for x in values if _is_number(x)]
diff --git a/pandas/tests/io/formats/test_format.py b/pandas/tests/io/formats/test_format.py
index 68f5386fff7be..b30ceed23b02e 100644
--- a/pandas/tests/io/formats/test_format.py
+++ b/pandas/tests/io/formats/test_format.py
@@ -3432,3 +3432,10 @@ def test_format_remove_leading_space_dataframe(input_array, expected):
# GH: 24980
df = pd.DataFrame(input_array).to_string(index=False)
assert df == expected
+
+
+def test_to_string_complex_number_trims_zeros():
+ s = pd.Series([1.000000 + 1.000000j, 1.0 + 1.0j, 1.05 + 1.0j])
+ result = s.to_string()
+ expected = "0 1.00+1.00j\n1 1.00+1.00j\n2 1.05+1.00j"
+ assert result == expected
| Noticed while working on another bug. The _is_number helper here is wrong and can cause incorrect results given that this code path is hit by arbitrary strings (e.g., it thinks "foo" is a number). Also the _trim_zeros_complex helper apparently does nothing:
```python
[ins] In [3]: _trim_zeros_float(["0.00000"])
Out[3]: ['0.0']
[ins] In [4]: _trim_zeros_complex(["1.000+1.000000j"])
Out[4]: ['1.000+1.000000j']
``` | https://api.github.com/repos/pandas-dev/pandas/pulls/36799 | 2020-10-02T01:24:54Z | 2020-10-14T12:51:53Z | 2020-10-14T12:51:53Z | 2020-10-14T15:06:37Z |
DOC: remove outdated doc closes #31487 | diff --git a/doc/source/user_guide/dsintro.rst b/doc/source/user_guide/dsintro.rst
index 0e6767e88edc2..c27c73d439a0c 100644
--- a/doc/source/user_guide/dsintro.rst
+++ b/doc/source/user_guide/dsintro.rst
@@ -663,31 +663,6 @@ row-wise. For example:
df - df.iloc[0]
-In the special case of working with time series data, if the DataFrame index
-contains dates, the broadcasting will be column-wise:
-
-.. ipython:: python
- :okwarning:
-
- index = pd.date_range('1/1/2000', periods=8)
- df = pd.DataFrame(np.random.randn(8, 3), index=index, columns=list('ABC'))
- df
- type(df['A'])
- df - df['A']
-
-.. warning::
-
- .. code-block:: python
-
- df - df['A']
-
- is now deprecated and will be removed in a future release. The preferred way
- to replicate this behavior is
-
- .. code-block:: python
-
- df.sub(df['A'], axis=0)
-
For explicit control over the matching and broadcasting behavior, see the
section on :ref:`flexible binary operations <basics.binop>`.
| - [x] closes #31487
- [ ] tests added / passed
- [x] passes `black pandas`
- [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [ ] whatsnew entry
| https://api.github.com/repos/pandas-dev/pandas/pulls/36797 | 2020-10-01T23:45:15Z | 2020-10-02T22:39:17Z | 2020-10-02T22:39:17Z | 2020-10-02T23:00:15Z |
DEPR: automatic alignment on frame.__cmp__(series) | diff --git a/doc/source/whatsnew/v1.2.0.rst b/doc/source/whatsnew/v1.2.0.rst
index 8688b2ae81302..9d6b7fadd2e80 100644
--- a/doc/source/whatsnew/v1.2.0.rst
+++ b/doc/source/whatsnew/v1.2.0.rst
@@ -265,6 +265,7 @@ Deprecations
- Deprecated indexing :class:`DataFrame` rows with datetime-like strings ``df[string]``, use ``df.loc[string]`` instead (:issue:`36179`)
- Deprecated casting an object-dtype index of ``datetime`` objects to :class:`DatetimeIndex` in the :class:`Series` constructor (:issue:`23598`)
- Deprecated :meth:`Index.is_all_dates` (:issue:`27744`)
+- Deprecated automatic alignment on comparison operations between :class:`DataFrame` and :class:`Series`, do ``frame, ser = frame.align(ser, axis=1, copy=False)`` before e.g. ``frame == ser`` (:issue:`28759`)
- :meth:`Rolling.count` with ``min_periods=None`` will default to the size of the window in a future version (:issue:`31302`)
.. ---------------------------------------------------------------------------
diff --git a/pandas/core/ops/__init__.py b/pandas/core/ops/__init__.py
index 2dc97a3583dfb..49b7aff5af40a 100644
--- a/pandas/core/ops/__init__.py
+++ b/pandas/core/ops/__init__.py
@@ -5,6 +5,7 @@
"""
import operator
from typing import TYPE_CHECKING, Optional, Set, Type
+import warnings
import numpy as np
@@ -513,6 +514,18 @@ def to_series(right):
elif isinstance(right, ABCSeries):
# axis=1 is default for DataFrame-with-Series op
axis = left._get_axis_number(axis) if axis is not None else 1
+
+ if not flex:
+ if not left.axes[axis].equals(right.index):
+ warnings.warn(
+ "Automatic reindexing on DataFrame vs Series comparisons "
+ "is deprecated and will raise ValueError in a future version. "
+ "Do `left, right = left.align(right, axis=1, copy=False)` "
+ "before e.g. `left == right`",
+ FutureWarning,
+ stacklevel=3,
+ )
+
left, right = left.align(
right, join="outer", axis=axis, level=level, copy=False
)
diff --git a/pandas/tests/arithmetic/test_datetime64.py b/pandas/tests/arithmetic/test_datetime64.py
index 0dd389ed516c7..c79fe4b8b22f3 100644
--- a/pandas/tests/arithmetic/test_datetime64.py
+++ b/pandas/tests/arithmetic/test_datetime64.py
@@ -308,11 +308,16 @@ def test_dt64arr_timestamp_equality(self, box_with_array):
expected = tm.box_expected([False, True], xbox)
tm.assert_equal(result, expected)
- result = ser != ser[0]
+ warn = FutureWarning if box_with_array is pd.DataFrame else None
+ with tm.assert_produces_warning(warn):
+ # alignment for frame vs series comparisons deprecated
+ result = ser != ser[0]
expected = tm.box_expected([False, True], xbox)
tm.assert_equal(result, expected)
- result = ser != ser[1]
+ with tm.assert_produces_warning(warn):
+ # alignment for frame vs series comparisons deprecated
+ result = ser != ser[1]
expected = tm.box_expected([True, True], xbox)
tm.assert_equal(result, expected)
@@ -320,11 +325,15 @@ def test_dt64arr_timestamp_equality(self, box_with_array):
expected = tm.box_expected([True, False], xbox)
tm.assert_equal(result, expected)
- result = ser == ser[0]
+ with tm.assert_produces_warning(warn):
+ # alignment for frame vs series comparisons deprecated
+ result = ser == ser[0]
expected = tm.box_expected([True, False], xbox)
tm.assert_equal(result, expected)
- result = ser == ser[1]
+ with tm.assert_produces_warning(warn):
+ # alignment for frame vs series comparisons deprecated
+ result = ser == ser[1]
expected = tm.box_expected([False, False], xbox)
tm.assert_equal(result, expected)
diff --git a/pandas/tests/frame/test_arithmetic.py b/pandas/tests/frame/test_arithmetic.py
index b3aa5e403e795..d9ef19e174700 100644
--- a/pandas/tests/frame/test_arithmetic.py
+++ b/pandas/tests/frame/test_arithmetic.py
@@ -795,13 +795,17 @@ def test_frame_with_zero_len_series_corner_cases():
expected = pd.DataFrame(df.values * np.nan, columns=df.columns)
tm.assert_frame_equal(result, expected)
- result = df == ser
+ with tm.assert_produces_warning(FutureWarning):
+ # Automatic alignment for comparisons deprecated
+ result = df == ser
expected = pd.DataFrame(False, index=df.index, columns=df.columns)
tm.assert_frame_equal(result, expected)
# non-float case should not raise on comparison
df2 = pd.DataFrame(df.values.view("M8[ns]"), columns=df.columns)
- result = df2 == ser
+ with tm.assert_produces_warning(FutureWarning):
+ # Automatic alignment for comparisons deprecated
+ result = df2 == ser
expected = pd.DataFrame(False, index=df.index, columns=df.columns)
tm.assert_frame_equal(result, expected)
| - [ ] closes #xxxx
- [x] tests added / passed
- [x] passes `black pandas`
- [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [x] whatsnew entry
xref #28759, may close it depending on if we want to do anything else on that front | https://api.github.com/repos/pandas-dev/pandas/pulls/36795 | 2020-10-01T23:14:45Z | 2020-10-03T00:55:37Z | 2020-10-03T00:55:37Z | 2020-10-03T01:16:39Z |
API: make DataFrame.__boolop__ default_axis match DataFrame.__arithop__ default_axis | diff --git a/pandas/core/ops/__init__.py b/pandas/core/ops/__init__.py
index 2dc97a3583dfb..7827efbd34207 100644
--- a/pandas/core/ops/__init__.py
+++ b/pandas/core/ops/__init__.py
@@ -146,31 +146,6 @@ def _maybe_match_name(a, b):
# -----------------------------------------------------------------------------
-def _get_frame_op_default_axis(name: str) -> Optional[str]:
- """
- Only DataFrame cares about default_axis, specifically:
- special methods have default_axis=None and flex methods
- have default_axis='columns'.
-
- Parameters
- ----------
- name : str
-
- Returns
- -------
- default_axis: str or None
- """
- if name.replace("__r", "__") in ["__and__", "__or__", "__xor__"]:
- # bool methods
- return "columns"
- elif name.startswith("__"):
- # __add__, __mul__, ...
- return None
- else:
- # add, mul, ...
- return "columns"
-
-
def _get_op_name(op, special: bool) -> str:
"""
Find the name to attach to this method according to conventions
@@ -617,7 +592,7 @@ def _maybe_align_series_as_frame(frame: "DataFrame", series: "Series", axis: int
def arith_method_FRAME(cls: Type["DataFrame"], op, special: bool):
# This is the only function where `special` can be either True or False
op_name = _get_op_name(op, special)
- default_axis = _get_frame_op_default_axis(op_name)
+ default_axis = None if special else "columns"
na_op = get_array_op(op)
@@ -669,8 +644,7 @@ def f(self, other, axis=default_axis, level=None, fill_value=None):
def flex_comp_method_FRAME(cls: Type["DataFrame"], op, special: bool):
assert not special # "special" also means "not flex"
op_name = _get_op_name(op, special)
- default_axis = _get_frame_op_default_axis(op_name)
- assert default_axis == "columns", default_axis # because we are not "special"
+ default_axis = "columns" # because we are "flex"
doc = _flex_comp_doc_FRAME.format(
op_name=op_name, desc=_op_descriptions[op_name]["desc"]
diff --git a/pandas/tests/series/test_api.py b/pandas/tests/series/test_api.py
index a69c0ee75eaba..d92edb6fe149a 100644
--- a/pandas/tests/series/test_api.py
+++ b/pandas/tests/series/test_api.py
@@ -493,7 +493,7 @@ async def test_tab_complete_warning(self, ip):
pytest.importorskip("IPython", minversion="6.0.0")
from IPython.core.completer import provisionalcompleter
- code = "import pandas as pd; s = pd.Series()"
+ code = "import pandas as pd; s = pd.Series(dtype=object)"
await ip.run_code(code)
# TODO: remove it when Ipython updates
| - [x] closes #36788
- [ ] tests added / passed
- [x] passes `black pandas`
- [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [ ] whatsnew entry
Fix a test warning generated by an ipython test, unrelated | https://api.github.com/repos/pandas-dev/pandas/pulls/36793 | 2020-10-01T22:39:18Z | 2020-10-02T21:41:08Z | 2020-10-02T21:41:07Z | 2020-10-02T21:48:05Z |
ERR: error handling in DataFrame.__rmatmul__ | diff --git a/doc/source/whatsnew/v1.2.0.rst b/doc/source/whatsnew/v1.2.0.rst
index 016e8d90e7d21..2804b410cfc0e 100644
--- a/doc/source/whatsnew/v1.2.0.rst
+++ b/doc/source/whatsnew/v1.2.0.rst
@@ -326,7 +326,7 @@ Numeric
- Bug in :meth:`Series.equals` where a ``ValueError`` was raised when numpy arrays were compared to scalars (:issue:`35267`)
- Bug in :class:`Series` where two :class:`Series` each have a :class:`DatetimeIndex` with different timezones having those indexes incorrectly changed when performing arithmetic operations (:issue:`33671`)
- Bug in :meth:`pd._testing.assert_almost_equal` was incorrect for complex numeric types (:issue:`28235`)
--
+- Bug in :meth:`DataFrame.__rmatmul__` error handling reporting transposed shapes (:issue:`21581`)
Conversion
^^^^^^^^^^
diff --git a/pandas/core/frame.py b/pandas/core/frame.py
index 9b2540a1ce043..d1cb5bc816f33 100644
--- a/pandas/core/frame.py
+++ b/pandas/core/frame.py
@@ -1216,7 +1216,14 @@ def __rmatmul__(self, other):
"""
Matrix multiplication using binary `@` operator in Python>=3.5.
"""
- return self.T.dot(np.transpose(other)).T
+ try:
+ return self.T.dot(np.transpose(other)).T
+ except ValueError as err:
+ if "shape mismatch" not in str(err):
+ raise
+ # GH#21581 give exception message for original shapes
+ msg = f"shapes {np.shape(other)} and {self.shape} not aligned"
+ raise ValueError(msg) from err
# ----------------------------------------------------------------------
# IO methods (to / from other formats)
diff --git a/pandas/tests/frame/test_analytics.py b/pandas/tests/frame/test_analytics.py
index 4324b03ed13d6..ee136533b0775 100644
--- a/pandas/tests/frame/test_analytics.py
+++ b/pandas/tests/frame/test_analytics.py
@@ -1177,6 +1177,20 @@ def test_matmul(self):
with pytest.raises(ValueError, match="aligned"):
operator.matmul(df, df2)
+ def test_matmul_message_shapes(self):
+ # GH#21581 exception message should reflect original shapes,
+ # not transposed shapes
+ a = np.random.rand(10, 4)
+ b = np.random.rand(5, 3)
+
+ df = DataFrame(b)
+
+ msg = r"shapes \(10, 4\) and \(5, 3\) not aligned"
+ with pytest.raises(ValueError, match=msg):
+ a @ df
+ with pytest.raises(ValueError, match=msg):
+ a.tolist() @ df
+
# ---------------------------------------------------------------------
# Unsorted
| - [x] closes #21581
- [x] tests added / passed
- [x] passes `black pandas`
- [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [x] whatsnew entry
| https://api.github.com/repos/pandas-dev/pandas/pulls/36792 | 2020-10-01T22:19:02Z | 2020-10-02T21:12:36Z | 2020-10-02T21:12:36Z | 2020-10-02T21:54:06Z |
BUG: GroupBy.ffill()/bfill() do not return NaN values for NaN groups | diff --git a/doc/source/whatsnew/v1.2.0.rst b/doc/source/whatsnew/v1.2.0.rst
index 1336fd7d83f7e..b422344f1cd6d 100644
--- a/doc/source/whatsnew/v1.2.0.rst
+++ b/doc/source/whatsnew/v1.2.0.rst
@@ -443,6 +443,7 @@ Groupby/resample/rolling
- Bug in :meth:`Rolling.sum()` returned wrong values when dtypes where mixed between float and integer and axis was equal to one (:issue:`20649`, :issue:`35596`)
- Bug in :meth:`Rolling.count` returned ``np.nan`` with :class:`pandas.api.indexers.FixedForwardWindowIndexer` as window, ``min_periods=0`` and only missing values in window (:issue:`35579`)
- Bug where :class:`pandas.core.window.Rolling` produces incorrect window sizes when using a ``PeriodIndex`` (:issue:`34225`)
+- Bug in :meth:`DataFrameGroupBy.ffill` and :meth:`DataFrameGroupBy.bfill` where a ``NaN`` group would return filled values instead of ``NaN`` when ``dropna=True`` (:issue:`34725`)
- Bug in :meth:`RollingGroupby.count` where a ``ValueError`` was raised when specifying the ``closed`` parameter (:issue:`35869`)
Reshaping
diff --git a/pandas/_libs/groupby.pyx b/pandas/_libs/groupby.pyx
index a83634aad3ce2..5a958d5e0bd3c 100644
--- a/pandas/_libs/groupby.pyx
+++ b/pandas/_libs/groupby.pyx
@@ -344,7 +344,7 @@ def group_shift_indexer(int64_t[:] out, const int64_t[:] labels,
@cython.boundscheck(False)
def group_fillna_indexer(ndarray[int64_t] out, ndarray[int64_t] labels,
ndarray[uint8_t] mask, object direction,
- int64_t limit):
+ int64_t limit, bint dropna):
"""
Indexes how to fill values forwards or backwards within a group.
@@ -358,6 +358,7 @@ def group_fillna_indexer(ndarray[int64_t] out, ndarray[int64_t] labels,
direction : {'ffill', 'bfill'}
Direction for fill to be applied (forwards or backwards, respectively)
limit : Consecutive values to fill before stopping, or -1 for no limit
+ dropna : Flag to indicate if NaN groups should return all NaN values
Notes
-----
@@ -381,7 +382,9 @@ def group_fillna_indexer(ndarray[int64_t] out, ndarray[int64_t] labels,
with nogil:
for i in range(N):
idx = sorted_labels[i]
- if mask[idx] == 1: # is missing
+ if dropna and labels[idx] == -1: # nan-group gets nan-values
+ curr_fill_idx = -1
+ elif mask[idx] == 1: # is missing
# Stop filling once we've hit the limit
if filled_vals >= limit and limit != -1:
curr_fill_idx = -1
diff --git a/pandas/core/groupby/groupby.py b/pandas/core/groupby/groupby.py
index 54d52b1e79da3..c758844da3a2b 100644
--- a/pandas/core/groupby/groupby.py
+++ b/pandas/core/groupby/groupby.py
@@ -1866,6 +1866,7 @@ def _fill(self, direction, limit=None):
result_is_index=True,
direction=direction,
limit=limit,
+ dropna=self.dropna,
)
@Substitution(name="groupby")
diff --git a/pandas/tests/groupby/test_missing.py b/pandas/tests/groupby/test_missing.py
index 116aed9935694..70d8dfc20822a 100644
--- a/pandas/tests/groupby/test_missing.py
+++ b/pandas/tests/groupby/test_missing.py
@@ -82,3 +82,37 @@ def test_fill_consistency():
expected = df.groupby(level=0, axis=0).fillna(method="ffill")
result = df.T.groupby(level=0, axis=1).fillna(method="ffill").T
tm.assert_frame_equal(result, expected)
+
+
+@pytest.mark.parametrize("method", ["ffill", "bfill"])
+@pytest.mark.parametrize("dropna", [True, False])
+@pytest.mark.parametrize("has_nan_group", [True, False])
+def test_ffill_handles_nan_groups(dropna, method, has_nan_group):
+ # GH 34725
+
+ df_without_nan_rows = pd.DataFrame([(1, 0.1), (2, 0.2)])
+
+ ridx = [-1, 0, -1, -1, 1, -1]
+ df = df_without_nan_rows.reindex(ridx).reset_index(drop=True)
+
+ group_b = np.nan if has_nan_group else "b"
+ df["group_col"] = pd.Series(["a"] * 3 + [group_b] * 3)
+
+ grouped = df.groupby(by="group_col", dropna=dropna)
+ result = getattr(grouped, method)(limit=None)
+
+ expected_rows = {
+ ("ffill", True, True): [-1, 0, 0, -1, -1, -1],
+ ("ffill", True, False): [-1, 0, 0, -1, 1, 1],
+ ("ffill", False, True): [-1, 0, 0, -1, 1, 1],
+ ("ffill", False, False): [-1, 0, 0, -1, 1, 1],
+ ("bfill", True, True): [0, 0, -1, -1, -1, -1],
+ ("bfill", True, False): [0, 0, -1, 1, 1, -1],
+ ("bfill", False, True): [0, 0, -1, 1, 1, -1],
+ ("bfill", False, False): [0, 0, -1, 1, 1, -1],
+ }
+
+ ridx = expected_rows.get((method, dropna, has_nan_group))
+ expected = df_without_nan_rows.reindex(ridx).reset_index(drop=True)
+
+ tm.assert_frame_equal(result, expected)
| - [x] closes #34725
- [x] tests added / passed
- [x] passes `black pandas`
- [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [x] whatsnew entry
See #34725 for a good description of the problem.
This fixes the issue in the tidiest way I could find.
One test is added that covers all relevant cases I could think of. The comments in the test can be removed, they are just there to make it easier for the reviewer to follow as the test is a bit complicated.
This also fixes the copy-pastable example from the #34725. | https://api.github.com/repos/pandas-dev/pandas/pulls/36790 | 2020-10-01T21:45:55Z | 2020-10-10T22:32:29Z | 2020-10-10T22:32:29Z | 2020-10-10T22:32:35Z |
TST: Add test for 32724 | diff --git a/pandas/tests/window/test_rolling.py b/pandas/tests/window/test_rolling.py
index 9ac4871ad24a1..10527649b728f 100644
--- a/pandas/tests/window/test_rolling.py
+++ b/pandas/tests/window/test_rolling.py
@@ -824,3 +824,14 @@ def test_rolling_axis_1_non_numeric_dtypes(value):
result = df.rolling(window=2, min_periods=1, axis=1).sum()
expected = pd.DataFrame({"a": [1.0, 2.0]})
tm.assert_frame_equal(result, expected)
+
+
+def test_rolling_on_df_transposed():
+ # GH: 32724
+ df = pd.DataFrame({"A": [1, None], "B": [4, 5], "C": [7, 8]})
+ expected = pd.DataFrame({"A": [1.0, np.nan], "B": [5.0, 5.0], "C": [11.0, 13.0]})
+ result = df.rolling(min_periods=1, window=2, axis=1).sum()
+ tm.assert_frame_equal(result, expected)
+
+ result = df.T.rolling(min_periods=1, window=2).sum().T
+ tm.assert_frame_equal(result, expected)
| - [x] closes #32724
- [x] tests added / passed
- [x] passes `black pandas`
- [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
Was fixed in the past. | https://api.github.com/repos/pandas-dev/pandas/pulls/36789 | 2020-10-01T21:43:02Z | 2020-10-02T18:31:09Z | 2020-10-02T18:31:09Z | 2020-10-02T18:35:37Z |
BUG: Rolling returned nan with FixedForwardWindowIndexer for count when window contained only missing values | diff --git a/doc/source/whatsnew/v1.1.3.rst b/doc/source/whatsnew/v1.1.3.rst
index 15777abcb8084..8edfd5885fc28 100644
--- a/doc/source/whatsnew/v1.1.3.rst
+++ b/doc/source/whatsnew/v1.1.3.rst
@@ -58,6 +58,7 @@ Bug fixes
- Bug in :meth:`Series.astype` showing too much precision when casting from ``np.float32`` to string dtype (:issue:`36451`)
- Bug in :meth:`Series.isin` and :meth:`DataFrame.isin` when using ``NaN`` and a row length above 1,000,000 (:issue:`22205`)
- Bug in :func:`cut` raising a ``ValueError`` when passed a :class:`Series` of labels with ``ordered=False`` (:issue:`36603`)
+- Bug in :meth:`Rolling.count` returned ``np.nan`` with ``FixedForwardWindowIndexer`` as window, ``min_periods=0`` and only missing values in window (:issue:`35579`)
.. ---------------------------------------------------------------------------
diff --git a/pandas/core/window/rolling.py b/pandas/core/window/rolling.py
index 6ab42dda865e7..83a07a2abb484 100644
--- a/pandas/core/window/rolling.py
+++ b/pandas/core/window/rolling.py
@@ -2056,7 +2056,7 @@ def count(self):
# when using a BaseIndexer subclass as a window
if self.is_freq_type or isinstance(self.window, BaseIndexer):
window_func = self._get_roll_func("roll_count")
- return self._apply(window_func, center=self.center, name="count")
+ return self._apply(window_func, center=self.center, name="count", floor=0)
return super().count()
diff --git a/pandas/tests/window/test_base_indexer.py b/pandas/tests/window/test_base_indexer.py
index f681b19d57600..2a47f7397b33f 100644
--- a/pandas/tests/window/test_base_indexer.py
+++ b/pandas/tests/window/test_base_indexer.py
@@ -253,3 +253,12 @@ def test_non_fixed_variable_window_indexer(closed, expected_data):
result = df.rolling(indexer, closed=closed).sum()
expected = DataFrame(expected_data, index=index)
tm.assert_frame_equal(result, expected)
+
+
+def test_fixed_forward_indexer_count():
+ # GH: 35579
+ df = DataFrame({"b": [None, None, None, 7]})
+ indexer = FixedForwardWindowIndexer(window_size=2)
+ result = df.rolling(window=indexer, min_periods=0).count()
+ expected = DataFrame({"b": [0.0, 0.0, 1.0, 1.0]})
+ tm.assert_frame_equal(result, expected)
| - [x] closes #35579
- [x] tests added / passed
- [x] passes `black pandas`
- [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [x] whatsnew entry
Issue still has milestone 1.1.3. If it's to late for this release, I'll move the whats new note to 1.1.4 for when the associated PR is merged.
cc @mroeschke | https://api.github.com/repos/pandas-dev/pandas/pulls/36787 | 2020-10-01T21:04:25Z | 2020-10-01T21:56:53Z | null | 2020-10-01T21:56:59Z |
CLN: Remove param _set_identity from MultiIndex | diff --git a/pandas/core/indexes/multi.py b/pandas/core/indexes/multi.py
index 1628b44be4096..a157fdfdde447 100644
--- a/pandas/core/indexes/multi.py
+++ b/pandas/core/indexes/multi.py
@@ -278,7 +278,6 @@ def __new__(
copy=False,
name=None,
verify_integrity: bool = True,
- _set_identity: bool = True,
):
# compat with Index
@@ -312,10 +311,7 @@ def __new__(
new_codes = result._verify_integrity()
result._codes = new_codes
- if _set_identity:
- result._reset_identity()
-
- return result
+ return result._reset_identity()
def _validate_codes(self, level: List, code: List):
"""
@@ -1071,7 +1067,6 @@ def _shallow_copy(
codes=None,
sortorder=None,
names=lib.no_default,
- _set_identity: bool = True,
):
if names is not lib.no_default and name is not lib.no_default:
raise TypeError("Can only provide one of `names` and `name`")
@@ -1091,7 +1086,6 @@ def _shallow_copy(
sortorder=sortorder,
names=names,
verify_integrity=False,
- _set_identity=_set_identity,
)
result._cache = self._cache.copy()
result._cache.pop("levels", None) # GH32669
@@ -1119,7 +1113,6 @@ def copy(
codes=None,
deep=False,
name=None,
- _set_identity=False,
):
"""
Make a copy of this object. Names, dtype, levels and codes can be
@@ -1180,7 +1173,6 @@ def copy(
codes=codes,
names=names,
sortorder=self.sortorder,
- _set_identity=_set_identity,
)
if dtype:
| I don't see how `_set_identity`can be needed and other index classes don'r have this parameter, so best to just remove it.
Other index classes always end by calling `_reset_identity`, so I just do that here also. I'm not sure that's really needed, but that's for another day. | https://api.github.com/repos/pandas-dev/pandas/pulls/36786 | 2020-10-01T20:57:18Z | 2020-10-02T21:13:22Z | 2020-10-02T21:13:22Z | 2020-10-02T21:56:57Z |
CLN: test_moments_rolling.py for quantile/kurt/skew | diff --git a/pandas/tests/window/moments/test_moments_rolling.py b/pandas/tests/window/moments/test_moments_rolling.py
index 880316ec6111a..488306d0585c5 100644
--- a/pandas/tests/window/moments/test_moments_rolling.py
+++ b/pandas/tests/window/moments/test_moments_rolling.py
@@ -1,187 +1,12 @@
import numpy as np
-from numpy.random import randn
import pytest
import pandas.util._test_decorators as td
import pandas as pd
-from pandas import DataFrame, Series, isna, notna
+from pandas import DataFrame, Series
import pandas._testing as tm
-import pandas.tseries.offsets as offsets
-
-
-def _check_moment_func(
- static_comp,
- name,
- raw,
- has_min_periods=True,
- has_center=True,
- has_time_rule=True,
- fill_value=None,
- zero_min_periods_equal=True,
- series=None,
- frame=None,
- **kwargs,
-):
- def get_result(obj, window, min_periods=None, center=False):
- r = obj.rolling(window=window, min_periods=min_periods, center=center)
- return getattr(r, name)(**kwargs)
-
- series_result = get_result(series, window=50)
- assert isinstance(series_result, Series)
- tm.assert_almost_equal(series_result.iloc[-1], static_comp(series[-50:]))
-
- frame_result = get_result(frame, window=50)
- assert isinstance(frame_result, DataFrame)
- tm.assert_series_equal(
- frame_result.iloc[-1, :],
- frame.iloc[-50:, :].apply(static_comp, axis=0, raw=raw),
- check_names=False,
- )
-
- # check time_rule works
- if has_time_rule:
- win = 25
- minp = 10
- ser = series[::2].resample("B").mean()
- frm = frame[::2].resample("B").mean()
-
- if has_min_periods:
- series_result = get_result(ser, window=win, min_periods=minp)
- frame_result = get_result(frm, window=win, min_periods=minp)
- else:
- series_result = get_result(ser, window=win, min_periods=0)
- frame_result = get_result(frm, window=win, min_periods=0)
-
- last_date = series_result.index[-1]
- prev_date = last_date - 24 * offsets.BDay()
-
- trunc_series = series[::2].truncate(prev_date, last_date)
- trunc_frame = frame[::2].truncate(prev_date, last_date)
-
- tm.assert_almost_equal(series_result[-1], static_comp(trunc_series))
-
- tm.assert_series_equal(
- frame_result.xs(last_date),
- trunc_frame.apply(static_comp, raw=raw),
- check_names=False,
- )
-
- # excluding NaNs correctly
- obj = Series(randn(50))
- obj[:10] = np.NaN
- obj[-10:] = np.NaN
- if has_min_periods:
- result = get_result(obj, 50, min_periods=30)
- tm.assert_almost_equal(result.iloc[-1], static_comp(obj[10:-10]))
-
- # min_periods is working correctly
- result = get_result(obj, 20, min_periods=15)
- assert isna(result.iloc[23])
- assert not isna(result.iloc[24])
-
- assert not isna(result.iloc[-6])
- assert isna(result.iloc[-5])
-
- obj2 = Series(randn(20))
- result = get_result(obj2, 10, min_periods=5)
- assert isna(result.iloc[3])
- assert notna(result.iloc[4])
-
- if zero_min_periods_equal:
- # min_periods=0 may be equivalent to min_periods=1
- result0 = get_result(obj, 20, min_periods=0)
- result1 = get_result(obj, 20, min_periods=1)
- tm.assert_almost_equal(result0, result1)
- else:
- result = get_result(obj, 50)
- tm.assert_almost_equal(result.iloc[-1], static_comp(obj[10:-10]))
-
- # window larger than series length (#7297)
- if has_min_periods:
- for minp in (0, len(series) - 1, len(series)):
- result = get_result(series, len(series) + 1, min_periods=minp)
- expected = get_result(series, len(series), min_periods=minp)
- nan_mask = isna(result)
- tm.assert_series_equal(nan_mask, isna(expected))
-
- nan_mask = ~nan_mask
- tm.assert_almost_equal(result[nan_mask], expected[nan_mask])
- else:
- result = get_result(series, len(series) + 1, min_periods=0)
- expected = get_result(series, len(series), min_periods=0)
- nan_mask = isna(result)
- tm.assert_series_equal(nan_mask, isna(expected))
-
- nan_mask = ~nan_mask
- tm.assert_almost_equal(result[nan_mask], expected[nan_mask])
-
- # check center=True
- if has_center:
- if has_min_periods:
- result = get_result(obj, 20, min_periods=15, center=True)
- expected = get_result(
- pd.concat([obj, Series([np.NaN] * 9)]), 20, min_periods=15
- )[9:].reset_index(drop=True)
- else:
- result = get_result(obj, 20, min_periods=0, center=True)
- print(result)
- expected = get_result(
- pd.concat([obj, Series([np.NaN] * 9)]), 20, min_periods=0
- )[9:].reset_index(drop=True)
-
- tm.assert_series_equal(result, expected)
-
- # shifter index
- s = [f"x{x:d}" for x in range(12)]
-
- if has_min_periods:
- minp = 10
-
- series_xp = (
- get_result(
- series.reindex(list(series.index) + s), window=25, min_periods=minp
- )
- .shift(-12)
- .reindex(series.index)
- )
- frame_xp = (
- get_result(
- frame.reindex(list(frame.index) + s), window=25, min_periods=minp
- )
- .shift(-12)
- .reindex(frame.index)
- )
-
- series_rs = get_result(series, window=25, min_periods=minp, center=True)
- frame_rs = get_result(frame, window=25, min_periods=minp, center=True)
-
- else:
- series_xp = (
- get_result(
- series.reindex(list(series.index) + s), window=25, min_periods=0
- )
- .shift(-12)
- .reindex(series.index)
- )
- frame_xp = (
- get_result(
- frame.reindex(list(frame.index) + s), window=25, min_periods=0
- )
- .shift(-12)
- .reindex(frame.index)
- )
-
- series_rs = get_result(series, window=25, min_periods=0, center=True)
- frame_rs = get_result(frame, window=25, min_periods=0, center=True)
-
- if fill_value is not None:
- series_xp = series_xp.fillna(fill_value)
- frame_xp = frame_xp.fillna(fill_value)
- tm.assert_series_equal(series_xp, series_rs)
- tm.assert_frame_equal(frame_xp, frame_rs)
-
def test_centered_axis_validation():
@@ -716,33 +541,6 @@ def test_rolling_max_min_periods():
pd.Series([1, 2, 3]).rolling(window=3, min_periods=5).max()
-@pytest.mark.parametrize("q", [0.0, 0.1, 0.5, 0.9, 1.0])
-def test_rolling_quantile(q, raw, series, frame):
- def scoreatpercentile(a, per):
- values = np.sort(a, axis=0)
-
- idx = int(per / 1.0 * (values.shape[0] - 1))
-
- if idx == values.shape[0] - 1:
- retval = values[-1]
-
- else:
- qlow = float(idx) / float(values.shape[0] - 1)
- qhig = float(idx + 1) / float(values.shape[0] - 1)
- vlow = values[idx]
- vhig = values[idx + 1]
- retval = vlow + (vhig - vlow) * (per - qlow) / (qhig - qlow)
-
- return retval
-
- def quantile_func(x):
- return scoreatpercentile(x, q)
-
- _check_moment_func(
- quantile_func, name="quantile", quantile=q, raw=raw, series=series, frame=frame
- )
-
-
def test_rolling_quantile_np_percentile():
# #9413: Tests that rolling window's quantile default behavior
# is analogous to Numpy's percentile
@@ -845,25 +643,3 @@ def test_rolling_std_neg_sqrt():
b = a.ewm(span=3).std()
assert np.isfinite(b[2:]).all()
-
-
-@td.skip_if_no_scipy
-def test_rolling_skew(raw, series, frame):
- from scipy.stats import skew
-
- _check_moment_func(
- lambda x: skew(x, bias=False), name="skew", raw=raw, series=series, frame=frame
- )
-
-
-@td.skip_if_no_scipy
-def test_rolling_kurt(raw, series, frame):
- from scipy.stats import kurtosis
-
- _check_moment_func(
- lambda x: kurtosis(x, bias=False),
- name="kurt",
- raw=raw,
- series=series,
- frame=frame,
- )
diff --git a/pandas/tests/window/moments/test_moments_rolling_quantile.py b/pandas/tests/window/moments/test_moments_rolling_quantile.py
new file mode 100644
index 0000000000000..1b6d4a5c82164
--- /dev/null
+++ b/pandas/tests/window/moments/test_moments_rolling_quantile.py
@@ -0,0 +1,166 @@
+from functools import partial
+
+import numpy as np
+import pytest
+
+from pandas import DataFrame, Series, concat, isna, notna
+import pandas._testing as tm
+
+import pandas.tseries.offsets as offsets
+
+
+def scoreatpercentile(a, per):
+ values = np.sort(a, axis=0)
+
+ idx = int(per / 1.0 * (values.shape[0] - 1))
+
+ if idx == values.shape[0] - 1:
+ retval = values[-1]
+
+ else:
+ qlow = float(idx) / float(values.shape[0] - 1)
+ qhig = float(idx + 1) / float(values.shape[0] - 1)
+ vlow = values[idx]
+ vhig = values[idx + 1]
+ retval = vlow + (vhig - vlow) * (per - qlow) / (qhig - qlow)
+
+ return retval
+
+
+@pytest.mark.parametrize("q", [0.0, 0.1, 0.5, 0.9, 1.0])
+def test_series(series, q):
+ compare_func = partial(scoreatpercentile, per=q)
+ result = series.rolling(50).quantile(q)
+ assert isinstance(result, Series)
+ tm.assert_almost_equal(result.iloc[-1], compare_func(series[-50:]))
+
+
+@pytest.mark.parametrize("q", [0.0, 0.1, 0.5, 0.9, 1.0])
+def test_frame(raw, frame, q):
+ compare_func = partial(scoreatpercentile, per=q)
+ result = frame.rolling(50).quantile(q)
+ assert isinstance(result, DataFrame)
+ tm.assert_series_equal(
+ result.iloc[-1, :],
+ frame.iloc[-50:, :].apply(compare_func, axis=0, raw=raw),
+ check_names=False,
+ )
+
+
+@pytest.mark.parametrize("q", [0.0, 0.1, 0.5, 0.9, 1.0])
+def test_time_rule_series(series, q):
+ compare_func = partial(scoreatpercentile, per=q)
+ win = 25
+ ser = series[::2].resample("B").mean()
+ series_result = ser.rolling(window=win, min_periods=10).quantile(q)
+ last_date = series_result.index[-1]
+ prev_date = last_date - 24 * offsets.BDay()
+
+ trunc_series = series[::2].truncate(prev_date, last_date)
+ tm.assert_almost_equal(series_result[-1], compare_func(trunc_series))
+
+
+@pytest.mark.parametrize("q", [0.0, 0.1, 0.5, 0.9, 1.0])
+def test_time_rule_frame(raw, frame, q):
+ compare_func = partial(scoreatpercentile, per=q)
+ win = 25
+ frm = frame[::2].resample("B").mean()
+ frame_result = frm.rolling(window=win, min_periods=10).quantile(q)
+ last_date = frame_result.index[-1]
+ prev_date = last_date - 24 * offsets.BDay()
+
+ trunc_frame = frame[::2].truncate(prev_date, last_date)
+ tm.assert_series_equal(
+ frame_result.xs(last_date),
+ trunc_frame.apply(compare_func, raw=raw),
+ check_names=False,
+ )
+
+
+@pytest.mark.parametrize("q", [0.0, 0.1, 0.5, 0.9, 1.0])
+def test_nans(q):
+ compare_func = partial(scoreatpercentile, per=q)
+ obj = Series(np.random.randn(50))
+ obj[:10] = np.NaN
+ obj[-10:] = np.NaN
+
+ result = obj.rolling(50, min_periods=30).quantile(q)
+ tm.assert_almost_equal(result.iloc[-1], compare_func(obj[10:-10]))
+
+ # min_periods is working correctly
+ result = obj.rolling(20, min_periods=15).quantile(q)
+ assert isna(result.iloc[23])
+ assert not isna(result.iloc[24])
+
+ assert not isna(result.iloc[-6])
+ assert isna(result.iloc[-5])
+
+ obj2 = Series(np.random.randn(20))
+ result = obj2.rolling(10, min_periods=5).quantile(q)
+ assert isna(result.iloc[3])
+ assert notna(result.iloc[4])
+
+ result0 = obj.rolling(20, min_periods=0).quantile(q)
+ result1 = obj.rolling(20, min_periods=1).quantile(q)
+ tm.assert_almost_equal(result0, result1)
+
+
+@pytest.mark.parametrize("minp", [0, 99, 100])
+@pytest.mark.parametrize("q", [0.0, 0.1, 0.5, 0.9, 1.0])
+def test_min_periods(series, minp, q):
+ result = series.rolling(len(series) + 1, min_periods=minp).quantile(q)
+ expected = series.rolling(len(series), min_periods=minp).quantile(q)
+ nan_mask = isna(result)
+ tm.assert_series_equal(nan_mask, isna(expected))
+
+ nan_mask = ~nan_mask
+ tm.assert_almost_equal(result[nan_mask], expected[nan_mask])
+
+
+@pytest.mark.parametrize("q", [0.0, 0.1, 0.5, 0.9, 1.0])
+def test_center(q):
+ obj = Series(np.random.randn(50))
+ obj[:10] = np.NaN
+ obj[-10:] = np.NaN
+
+ result = obj.rolling(20, center=True).quantile(q)
+ expected = (
+ concat([obj, Series([np.NaN] * 9)])
+ .rolling(20)
+ .quantile(q)[9:]
+ .reset_index(drop=True)
+ )
+ tm.assert_series_equal(result, expected)
+
+
+@pytest.mark.parametrize("q", [0.0, 0.1, 0.5, 0.9, 1.0])
+def test_center_reindex_series(series, q):
+ # shifter index
+ s = [f"x{x:d}" for x in range(12)]
+
+ series_xp = (
+ series.reindex(list(series.index) + s)
+ .rolling(window=25)
+ .quantile(q)
+ .shift(-12)
+ .reindex(series.index)
+ )
+
+ series_rs = series.rolling(window=25, center=True).quantile(q)
+ tm.assert_series_equal(series_xp, series_rs)
+
+
+@pytest.mark.parametrize("q", [0.0, 0.1, 0.5, 0.9, 1.0])
+def test_center_reindex_frame(frame, q):
+ # shifter index
+ s = [f"x{x:d}" for x in range(12)]
+
+ frame_xp = (
+ frame.reindex(list(frame.index) + s)
+ .rolling(window=25)
+ .quantile(q)
+ .shift(-12)
+ .reindex(frame.index)
+ )
+ frame_rs = frame.rolling(window=25, center=True).quantile(q)
+ tm.assert_frame_equal(frame_xp, frame_rs)
diff --git a/pandas/tests/window/moments/test_moments_rolling_skew_kurt.py b/pandas/tests/window/moments/test_moments_rolling_skew_kurt.py
new file mode 100644
index 0000000000000..cc67e602be12e
--- /dev/null
+++ b/pandas/tests/window/moments/test_moments_rolling_skew_kurt.py
@@ -0,0 +1,163 @@
+from functools import partial
+
+import numpy as np
+import pytest
+
+import pandas.util._test_decorators as td
+
+from pandas import DataFrame, Series, concat, isna, notna
+import pandas._testing as tm
+
+import pandas.tseries.offsets as offsets
+
+
+@td.skip_if_no_scipy
+@pytest.mark.parametrize("sp_func, roll_func", [["kurtosis", "kurt"], ["skew", "skew"]])
+def test_series(series, sp_func, roll_func):
+ import scipy.stats
+
+ compare_func = partial(getattr(scipy.stats, sp_func), bias=False)
+ result = getattr(series.rolling(50), roll_func)()
+ assert isinstance(result, Series)
+ tm.assert_almost_equal(result.iloc[-1], compare_func(series[-50:]))
+
+
+@td.skip_if_no_scipy
+@pytest.mark.parametrize("sp_func, roll_func", [["kurtosis", "kurt"], ["skew", "skew"]])
+def test_frame(raw, frame, sp_func, roll_func):
+ import scipy.stats
+
+ compare_func = partial(getattr(scipy.stats, sp_func), bias=False)
+ result = getattr(frame.rolling(50), roll_func)()
+ assert isinstance(result, DataFrame)
+ tm.assert_series_equal(
+ result.iloc[-1, :],
+ frame.iloc[-50:, :].apply(compare_func, axis=0, raw=raw),
+ check_names=False,
+ )
+
+
+@td.skip_if_no_scipy
+@pytest.mark.parametrize("sp_func, roll_func", [["kurtosis", "kurt"], ["skew", "skew"]])
+def test_time_rule_series(series, sp_func, roll_func):
+ import scipy.stats
+
+ compare_func = partial(getattr(scipy.stats, sp_func), bias=False)
+ win = 25
+ ser = series[::2].resample("B").mean()
+ series_result = getattr(ser.rolling(window=win, min_periods=10), roll_func)()
+ last_date = series_result.index[-1]
+ prev_date = last_date - 24 * offsets.BDay()
+
+ trunc_series = series[::2].truncate(prev_date, last_date)
+ tm.assert_almost_equal(series_result[-1], compare_func(trunc_series))
+
+
+@td.skip_if_no_scipy
+@pytest.mark.parametrize("sp_func, roll_func", [["kurtosis", "kurt"], ["skew", "skew"]])
+def test_time_rule_frame(raw, frame, sp_func, roll_func):
+ import scipy.stats
+
+ compare_func = partial(getattr(scipy.stats, sp_func), bias=False)
+ win = 25
+ frm = frame[::2].resample("B").mean()
+ frame_result = getattr(frm.rolling(window=win, min_periods=10), roll_func)()
+ last_date = frame_result.index[-1]
+ prev_date = last_date - 24 * offsets.BDay()
+
+ trunc_frame = frame[::2].truncate(prev_date, last_date)
+ tm.assert_series_equal(
+ frame_result.xs(last_date),
+ trunc_frame.apply(compare_func, raw=raw),
+ check_names=False,
+ )
+
+
+@td.skip_if_no_scipy
+@pytest.mark.parametrize("sp_func, roll_func", [["kurtosis", "kurt"], ["skew", "skew"]])
+def test_nans(sp_func, roll_func):
+ import scipy.stats
+
+ compare_func = partial(getattr(scipy.stats, sp_func), bias=False)
+ obj = Series(np.random.randn(50))
+ obj[:10] = np.NaN
+ obj[-10:] = np.NaN
+
+ result = getattr(obj.rolling(50, min_periods=30), roll_func)()
+ tm.assert_almost_equal(result.iloc[-1], compare_func(obj[10:-10]))
+
+ # min_periods is working correctly
+ result = getattr(obj.rolling(20, min_periods=15), roll_func)()
+ assert isna(result.iloc[23])
+ assert not isna(result.iloc[24])
+
+ assert not isna(result.iloc[-6])
+ assert isna(result.iloc[-5])
+
+ obj2 = Series(np.random.randn(20))
+ result = getattr(obj2.rolling(10, min_periods=5), roll_func)()
+ assert isna(result.iloc[3])
+ assert notna(result.iloc[4])
+
+ result0 = getattr(obj.rolling(20, min_periods=0), roll_func)()
+ result1 = getattr(obj.rolling(20, min_periods=1), roll_func)()
+ tm.assert_almost_equal(result0, result1)
+
+
+@pytest.mark.parametrize("minp", [0, 99, 100])
+@pytest.mark.parametrize("roll_func", ["kurt", "skew"])
+def test_min_periods(series, minp, roll_func):
+ result = getattr(series.rolling(len(series) + 1, min_periods=minp), roll_func)()
+ expected = getattr(series.rolling(len(series), min_periods=minp), roll_func)()
+ nan_mask = isna(result)
+ tm.assert_series_equal(nan_mask, isna(expected))
+
+ nan_mask = ~nan_mask
+ tm.assert_almost_equal(result[nan_mask], expected[nan_mask])
+
+
+@pytest.mark.parametrize("roll_func", ["kurt", "skew"])
+def test_center(roll_func):
+ obj = Series(np.random.randn(50))
+ obj[:10] = np.NaN
+ obj[-10:] = np.NaN
+
+ result = getattr(obj.rolling(20, center=True), roll_func)()
+ expected = getattr(concat([obj, Series([np.NaN] * 9)]).rolling(20), roll_func)()[
+ 9:
+ ].reset_index(drop=True)
+ tm.assert_series_equal(result, expected)
+
+
+@pytest.mark.parametrize("roll_func", ["kurt", "skew"])
+def test_center_reindex_series(series, roll_func):
+ # shifter index
+ s = [f"x{x:d}" for x in range(12)]
+
+ series_xp = (
+ getattr(
+ series.reindex(list(series.index) + s).rolling(window=25),
+ roll_func,
+ )()
+ .shift(-12)
+ .reindex(series.index)
+ )
+ series_rs = getattr(series.rolling(window=25, center=True), roll_func)()
+ tm.assert_series_equal(series_xp, series_rs)
+
+
+@pytest.mark.parametrize("roll_func", ["kurt", "skew"])
+def test_center_reindex_frame(frame, roll_func):
+ # shifter index
+ s = [f"x{x:d}" for x in range(12)]
+
+ frame_xp = (
+ getattr(
+ frame.reindex(list(frame.index) + s).rolling(window=25),
+ roll_func,
+ )()
+ .shift(-12)
+ .reindex(frame.index)
+ )
+ frame_rs = getattr(frame.rolling(window=25, center=True), roll_func)()
+ tm.assert_frame_equal(frame_xp, frame_rs)
| - [x] tests added / passed
- [x] passes `black pandas`
- [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
| https://api.github.com/repos/pandas-dev/pandas/pulls/36784 | 2020-10-01T20:49:33Z | 2020-10-02T20:36:16Z | 2020-10-02T20:36:16Z | 2020-10-02T21:01:39Z |
Test that nan value counts are included when dropna=False. GH#31944 | diff --git a/pandas/tests/base/test_value_counts.py b/pandas/tests/base/test_value_counts.py
index de04c30432e6f..73a41e7010c5f 100644
--- a/pandas/tests/base/test_value_counts.py
+++ b/pandas/tests/base/test_value_counts.py
@@ -274,3 +274,17 @@ def test_value_counts_datetime64(index_or_series):
td2 = klass(td2, name="dt")
result2 = td2.value_counts()
tm.assert_series_equal(result2, expected_s)
+
+
+@pytest.mark.parametrize("dropna", [True, False])
+def test_value_counts_with_nan(dropna, index_or_series):
+ # GH31944
+ klass = index_or_series
+ values = [True, pd.NA, np.nan]
+ s = klass(values)
+ res = s.value_counts(dropna=dropna)
+ if dropna is True:
+ expected = Series([1], index=[True])
+ else:
+ expected = Series([2, 1], index=[pd.NA, True])
+ tm.assert_series_equal(res, expected)
| - [x] closes #31944
- [x] tests added / passed
- [x] passes `black pandas`
- [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [ ] whatsnew entry
Added test per @Dr-Irv request.
| https://api.github.com/repos/pandas-dev/pandas/pulls/36783 | 2020-10-01T19:38:27Z | 2020-10-06T22:43:17Z | 2020-10-06T22:43:17Z | 2020-10-06T22:43:21Z |
Revert "Update README.md" | diff --git a/README.md b/README.md
index da8487d76f4a1..a2f2f1c04442a 100644
--- a/README.md
+++ b/README.md
@@ -32,32 +32,32 @@ its way towards this goal.
Here are just a few of the things that pandas does well:
- Easy handling of [**missing data**][missing-data] (represented as
- `NaN`, `NA`, or `NaT`) in floating point as well as non-floating point data;
+ `NaN`, `NA`, or `NaT`) in floating point as well as non-floating point data
- Size mutability: columns can be [**inserted and
deleted**][insertion-deletion] from DataFrame and higher dimensional
- objects;
+ objects
- Automatic and explicit [**data alignment**][alignment]: objects can
be explicitly aligned to a set of labels, or the user can simply
ignore the labels and let `Series`, `DataFrame`, etc. automatically
- align the data for you in computations;
+ align the data for you in computations
- Powerful, flexible [**group by**][groupby] functionality to perform
split-apply-combine operations on data sets, for both aggregating
- and transforming data;
+ and transforming data
- Make it [**easy to convert**][conversion] ragged,
differently-indexed data in other Python and NumPy data structures
- into DataFrame objects;
+ into DataFrame objects
- Intelligent label-based [**slicing**][slicing], [**fancy
indexing**][fancy-indexing], and [**subsetting**][subsetting] of
- large data sets;
+ large data sets
- Intuitive [**merging**][merging] and [**joining**][joining] data
- sets;
+ sets
- Flexible [**reshaping**][reshape] and [**pivoting**][pivot-table] of
- data sets;
+ data sets
- [**Hierarchical**][mi] labeling of axes (possible to have multiple
- labels per tick);
+ labels per tick)
- Robust IO tools for loading data from [**flat files**][flat-files]
(CSV and delimited), [**Excel files**][excel], [**databases**][db],
- and saving/loading data from the ultrafast [**HDF5 format**][hdfstore];
+ and saving/loading data from the ultrafast [**HDF5 format**][hdfstore]
- [**Time series**][timeseries]-specific functionality: date range
generation and frequency conversion, moving window statistics,
date shifting and lagging.
| Reverts pandas-dev/pandas#36772 | https://api.github.com/repos/pandas-dev/pandas/pulls/36781 | 2020-10-01T17:22:24Z | 2020-10-01T17:22:58Z | 2020-10-01T17:22:58Z | 2020-10-01T17:23:08Z |
DOC: Fix code style in documentation | diff --git a/doc/source/getting_started/intro_tutorials/06_calculate_statistics.rst b/doc/source/getting_started/intro_tutorials/06_calculate_statistics.rst
index bd85160d2622a..7e919777fdf03 100644
--- a/doc/source/getting_started/intro_tutorials/06_calculate_statistics.rst
+++ b/doc/source/getting_started/intro_tutorials/06_calculate_statistics.rst
@@ -122,8 +122,9 @@ aggregating statistics for given columns can be defined using the
.. ipython:: python
- titanic.agg({'Age': ['min', 'max', 'median', 'skew'],
- 'Fare': ['min', 'max', 'median', 'mean']})
+ titanic.agg(
+ {"Age": ["min", "max", "median", "skew"], "Fare": ["min", "max", "median", "mean"]}
+ )
.. raw:: html
diff --git a/doc/source/getting_started/intro_tutorials/07_reshape_table_layout.rst b/doc/source/getting_started/intro_tutorials/07_reshape_table_layout.rst
index c16fec6aaba9f..20c36133330c4 100644
--- a/doc/source/getting_started/intro_tutorials/07_reshape_table_layout.rst
+++ b/doc/source/getting_started/intro_tutorials/07_reshape_table_layout.rst
@@ -101,8 +101,9 @@ measurement.
.. ipython:: python
- air_quality = pd.read_csv("data/air_quality_long.csv",
- index_col="date.utc", parse_dates=True)
+ air_quality = pd.read_csv(
+ "data/air_quality_long.csv", index_col="date.utc", parse_dates=True
+ )
air_quality.head()
.. raw:: html
@@ -247,8 +248,9 @@ I want the mean concentrations for :math:`NO_2` and :math:`PM_{2.5}` in each of
.. ipython:: python
- air_quality.pivot_table(values="value", index="location",
- columns="parameter", aggfunc="mean")
+ air_quality.pivot_table(
+ values="value", index="location", columns="parameter", aggfunc="mean"
+ )
In the case of :meth:`~DataFrame.pivot`, the data is only rearranged. When multiple
values need to be aggregated (in this specific case, the values on
@@ -266,9 +268,13 @@ the ``margin`` parameter to ``True``:
.. ipython:: python
- air_quality.pivot_table(values="value", index="location",
- columns="parameter", aggfunc="mean",
- margins=True)
+ air_quality.pivot_table(
+ values="value",
+ index="location",
+ columns="parameter",
+ aggfunc="mean",
+ margins=True,
+ )
.. raw:: html
@@ -345,12 +351,12 @@ The :func:`pandas.melt` method can be defined in more detail:
.. ipython:: python
- no_2 = no2_pivoted.melt(id_vars="date.utc",
- value_vars=["BETR801",
- "FR04014",
- "London Westminster"],
- value_name="NO_2",
- var_name="id_location")
+ no_2 = no2_pivoted.melt(
+ id_vars="date.utc",
+ value_vars=["BETR801", "FR04014", "London Westminster"],
+ value_name="NO_2",
+ var_name="id_location",
+ )
no_2.head()
The result in the same, but in more detail defined:
diff --git a/doc/source/getting_started/intro_tutorials/08_combine_dataframes.rst b/doc/source/getting_started/intro_tutorials/08_combine_dataframes.rst
index d6da9a0aa4f22..be4c284912db4 100644
--- a/doc/source/getting_started/intro_tutorials/08_combine_dataframes.rst
+++ b/doc/source/getting_started/intro_tutorials/08_combine_dataframes.rst
@@ -155,8 +155,7 @@ index. For example:
.. ipython:: python
- air_quality_ = pd.concat([air_quality_pm25, air_quality_no2],
- keys=["PM25", "NO2"])
+ air_quality_ = pd.concat([air_quality_pm25, air_quality_no2], keys=["PM25", "NO2"])
.. ipython:: python
@@ -233,8 +232,7 @@ Add the station coordinates, provided by the stations metadata table, to the cor
.. ipython:: python
- air_quality = pd.merge(air_quality, stations_coord,
- how='left', on='location')
+ air_quality = pd.merge(air_quality, stations_coord, how="left", on="location")
air_quality.head()
Using the :meth:`~pandas.merge` function, for each of the rows in the
diff --git a/doc/source/getting_started/intro_tutorials/09_timeseries.rst b/doc/source/getting_started/intro_tutorials/09_timeseries.rst
index 19351e0e3bc75..598d3514baa15 100644
--- a/doc/source/getting_started/intro_tutorials/09_timeseries.rst
+++ b/doc/source/getting_started/intro_tutorials/09_timeseries.rst
@@ -204,10 +204,9 @@ Plot the typical :math:`NO_2` pattern during the day of our time series of all s
.. ipython:: python
fig, axs = plt.subplots(figsize=(12, 4))
- air_quality.groupby(
- air_quality["datetime"].dt.hour)["value"].mean().plot(kind='bar',
- rot=0,
- ax=axs)
+ air_quality.groupby(air_quality["datetime"].dt.hour)["value"].mean().plot(
+ kind='bar', rot=0, ax=axs
+ )
plt.xlabel("Hour of the day"); # custom x label using matplotlib
@savefig 09_bar_chart.png
plt.ylabel("$NO_2 (µg/m^3)$");
diff --git a/doc/source/getting_started/intro_tutorials/10_text_data.rst b/doc/source/getting_started/intro_tutorials/10_text_data.rst
index 93ad35fb1960b..b7fb99a98d78f 100644
--- a/doc/source/getting_started/intro_tutorials/10_text_data.rst
+++ b/doc/source/getting_started/intro_tutorials/10_text_data.rst
@@ -224,8 +224,7 @@ In the "Sex" column, replace values of "male" by "M" and values of "female" by "
.. ipython:: python
- titanic["Sex_short"] = titanic["Sex"].replace({"male": "M",
- "female": "F"})
+ titanic["Sex_short"] = titanic["Sex"].replace({"male": "M", "female": "F"})
titanic["Sex_short"]
Whereas :meth:`~Series.replace` is not a string method, it provides a convenient way
diff --git a/doc/source/user_guide/10min.rst b/doc/source/user_guide/10min.rst
index c3746cbe777a3..673f8689736f1 100644
--- a/doc/source/user_guide/10min.rst
+++ b/doc/source/user_guide/10min.rst
@@ -43,12 +43,16 @@ Creating a :class:`DataFrame` by passing a dict of objects that can be converted
.. ipython:: python
- df2 = pd.DataFrame({'A': 1.,
- 'B': pd.Timestamp('20130102'),
- 'C': pd.Series(1, index=list(range(4)), dtype='float32'),
- 'D': np.array([3] * 4, dtype='int32'),
- 'E': pd.Categorical(["test", "train", "test", "train"]),
- 'F': 'foo'})
+ df2 = pd.DataFrame(
+ {
+ "A": 1.0,
+ "B": pd.Timestamp("20130102"),
+ "C": pd.Series(1, index=list(range(4)), dtype="float32"),
+ "D": np.array([3] * 4, dtype="int32"),
+ "E": pd.Categorical(["test", "train", "test", "train"]),
+ "F": "foo",
+ }
+ )
df2
The columns of the resulting :class:`DataFrame` have different
@@ -512,12 +516,14 @@ See the :ref:`Grouping section <groupby>`.
.. ipython:: python
- df = pd.DataFrame({'A': ['foo', 'bar', 'foo', 'bar',
- 'foo', 'bar', 'foo', 'foo'],
- 'B': ['one', 'one', 'two', 'three',
- 'two', 'two', 'one', 'three'],
- 'C': np.random.randn(8),
- 'D': np.random.randn(8)})
+ df = pd.DataFrame(
+ {
+ "A": ["foo", "bar", "foo", "bar", "foo", "bar", "foo", "foo"],
+ "B": ["one", "one", "two", "three", "two", "two", "one", "three"],
+ "C": np.random.randn(8),
+ "D": np.random.randn(8),
+ }
+ )
df
Grouping and then applying the :meth:`~pandas.core.groupby.GroupBy.sum` function to the resulting
@@ -545,10 +551,14 @@ Stack
.. ipython:: python
- tuples = list(zip(*[['bar', 'bar', 'baz', 'baz',
- 'foo', 'foo', 'qux', 'qux'],
- ['one', 'two', 'one', 'two',
- 'one', 'two', 'one', 'two']]))
+ tuples = list(
+ zip(
+ *[
+ ["bar", "bar", "baz", "baz", "foo", "foo", "qux", "qux"],
+ ["one", "two", "one", "two", "one", "two", "one", "two"],
+ ]
+ )
+ )
index = pd.MultiIndex.from_tuples(tuples, names=['first', 'second'])
df = pd.DataFrame(np.random.randn(8, 2), index=index, columns=['A', 'B'])
df2 = df[:4]
@@ -578,11 +588,15 @@ See the section on :ref:`Pivot Tables <reshaping.pivot>`.
.. ipython:: python
- df = pd.DataFrame({'A': ['one', 'one', 'two', 'three'] * 3,
- 'B': ['A', 'B', 'C'] * 4,
- 'C': ['foo', 'foo', 'foo', 'bar', 'bar', 'bar'] * 2,
- 'D': np.random.randn(12),
- 'E': np.random.randn(12)})
+ df = pd.DataFrame(
+ {
+ "A": ["one", "one", "two", "three"] * 3,
+ "B": ["A", "B", "C"] * 4,
+ "C": ["foo", "foo", "foo", "bar", "bar", "bar"] * 2,
+ "D": np.random.randn(12),
+ "E": np.random.randn(12),
+ }
+ )
df
We can produce pivot tables from this data very easily:
@@ -653,8 +667,10 @@ pandas can include categorical data in a :class:`DataFrame`. For full docs, see
.. ipython:: python
- df = pd.DataFrame({"id": [1, 2, 3, 4, 5, 6],
- "raw_grade": ['a', 'b', 'b', 'a', 'a', 'e']})
+ df = pd.DataFrame(
+ {"id": [1, 2, 3, 4, 5, 6], "raw_grade": ["a", "b", "b", "a", "a", "e"]}
+ )
+
Convert the raw grades to a categorical data type.
@@ -674,8 +690,9 @@ Reorder the categories and simultaneously add the missing categories (methods un
.. ipython:: python
- df["grade"] = df["grade"].cat.set_categories(["very bad", "bad", "medium",
- "good", "very good"])
+ df["grade"] = df["grade"].cat.set_categories(
+ ["very bad", "bad", "medium", "good", "very good"]
+ )
df["grade"]
Sorting is per order in the categories, not lexical order.
@@ -705,8 +722,7 @@ We use the standard convention for referencing the matplotlib API:
.. ipython:: python
- ts = pd.Series(np.random.randn(1000),
- index=pd.date_range('1/1/2000', periods=1000))
+ ts = pd.Series(np.random.randn(1000), index=pd.date_range("1/1/2000", periods=1000))
ts = ts.cumsum()
@savefig series_plot_basic.png
@@ -717,8 +733,10 @@ of the columns with labels:
.. ipython:: python
- df = pd.DataFrame(np.random.randn(1000, 4), index=ts.index,
- columns=['A', 'B', 'C', 'D'])
+ df = pd.DataFrame(
+ np.random.randn(1000, 4), index=ts.index, columns=["A", "B", "C", "D"]
+ )
+
df = df.cumsum()
plt.figure()
diff --git a/doc/source/user_guide/sparse.rst b/doc/source/user_guide/sparse.rst
index 35e0e0fb86472..62e35cb994faf 100644
--- a/doc/source/user_guide/sparse.rst
+++ b/doc/source/user_guide/sparse.rst
@@ -303,14 +303,17 @@ The method requires a ``MultiIndex`` with two or more levels.
.. ipython:: python
s = pd.Series([3.0, np.nan, 1.0, 3.0, np.nan, np.nan])
- s.index = pd.MultiIndex.from_tuples([(1, 2, 'a', 0),
- (1, 2, 'a', 1),
- (1, 1, 'b', 0),
- (1, 1, 'b', 1),
- (2, 1, 'b', 0),
- (2, 1, 'b', 1)],
- names=['A', 'B', 'C', 'D'])
- s
+ s.index = pd.MultiIndex.from_tuples(
+ [
+ (1, 2, "a", 0),
+ (1, 2, "a", 1),
+ (1, 1, "b", 0),
+ (1, 1, "b", 1),
+ (2, 1, "b", 0),
+ (2, 1, "b", 1),
+ ],
+ names=["A", "B", "C", "D"],
+ )
ss = s.astype('Sparse')
ss
@@ -318,9 +321,10 @@ In the example below, we transform the ``Series`` to a sparse representation of
.. ipython:: python
- A, rows, columns = ss.sparse.to_coo(row_levels=['A', 'B'],
- column_levels=['C', 'D'],
- sort_labels=True)
+ A, rows, columns = ss.sparse.to_coo(
+ row_levels=["A", "B"], column_levels=["C", "D"], sort_labels=True
+ )
+
A
A.todense()
@@ -331,9 +335,9 @@ Specifying different row and column labels (and not sorting them) yields a diffe
.. ipython:: python
- A, rows, columns = ss.sparse.to_coo(row_levels=['A', 'B', 'C'],
- column_levels=['D'],
- sort_labels=False)
+ A, rows, columns = ss.sparse.to_coo(
+ row_levels=["A", "B", "C"], column_levels=["D"], sort_labels=False
+ )
A
A.todense()
@@ -345,8 +349,7 @@ A convenience method :meth:`Series.sparse.from_coo` is implemented for creating
.. ipython:: python
from scipy import sparse
- A = sparse.coo_matrix(([3.0, 1.0, 2.0], ([1, 0, 0], [0, 2, 3])),
- shape=(3, 4))
+ A = sparse.coo_matrix(([3.0, 1.0, 2.0], ([1, 0, 0], [0, 2, 3])), shape=(3, 4))
A
A.todense()
| ref #36777
| https://api.github.com/repos/pandas-dev/pandas/pulls/36780 | 2020-10-01T17:05:57Z | 2020-10-02T14:33:45Z | 2020-10-02T14:33:45Z | 2020-10-02T14:34:07Z |
CI: troubleshoot UnsatisfiableError in travis-36-cov on 1.1.x | diff --git a/ci/deps/travis-36-cov.yaml b/ci/deps/travis-36-cov.yaml
index c380423c55f19..296d9c39b5250 100644
--- a/ci/deps/travis-36-cov.yaml
+++ b/ci/deps/travis-36-cov.yaml
@@ -6,49 +6,48 @@ dependencies:
- python=3.6.*
# tools
- - cython>=0.29.21
- - pytest>=5.0.1
- - pytest-xdist>=1.21
- - hypothesis>=3.58.0
- - pytest-cov>=2.10.1 # this is only needed in the coverage build, ref: GH 35737
+ # - cython>=0.29.21
+ # - pytest>=5.0.1
+ # - pytest-xdist>=1.21
+ # - hypothesis>=3.58.0
+ # - pytest-cov>=2.10.1 # this is only needed in the coverage build, ref: GH 35737
# pandas dependencies
- - beautifulsoup4
- - botocore>=1.11
- - cython>=0.29.16
- - dask
- - fastparquet>=0.3.2
- - fsspec>=0.7.4
- - gcsfs>=0.6.0
- - geopandas
- - html5lib
- - matplotlib
- - moto
+ # - beautifulsoup4
+ # - botocore>=1.11
+ # - dask
+ # - fastparquet>=0.3.2
+ # - fsspec>=0.7.4
+ # - gcsfs>=0.6.0
+ # - geopandas
+ # - html5lib
+ # - matplotlib
+ # - moto
- nomkl
- - numexpr
+ # - numexpr
- numpy=1.15.*
- - odfpy
- - openpyxl
- - pandas-gbq
- - psycopg2
- - pyarrow>=0.13.0
- - pymysql
- - pytables
- - python-snappy
- - pytz
- - s3fs>=0.4.0
+ # - odfpy
+ # - openpyxl
+ # - pandas-gbq
+ # - psycopg2
+ # - pyarrow>=0.13.0
+ # - pymysql
+ # - pytables
+ # - python-snappy
+ # - pytz
+ # - s3fs>=0.4.0
- scikit-learn
- - scipy
- - sqlalchemy
- - statsmodels
- - xarray
- - xlrd
- - xlsxwriter
- - xlwt
- - pip
- - pip:
- - brotlipy
- - coverage
- - pandas-datareader
- - python-dateutil
- - pyxlsb
+ # - scipy
+ # - sqlalchemy
+ # - statsmodels
+ # - xarray
+ # - xlrd
+ # - xlsxwriter
+ # - xlwt
+ # - pip
+ # - pip:
+ # - brotlipy
+ # - coverage
+ # - pandas-datareader
+ # - python-dateutil
+ # - pyxlsb
| - [ ] closes #xxxx
- [ ] tests added / passed
- [ ] passes `black pandas`
- [ ] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [ ] whatsnew entry
| https://api.github.com/repos/pandas-dev/pandas/pulls/36779 | 2020-10-01T16:44:58Z | 2020-10-01T17:29:43Z | null | 2020-11-26T10:31:31Z |
ENH: Implement FloatingArray reductions | diff --git a/pandas/core/arrays/floating.py b/pandas/core/arrays/floating.py
index c3710196a8611..a230760ca1abe 100644
--- a/pandas/core/arrays/floating.py
+++ b/pandas/core/arrays/floating.py
@@ -25,8 +25,7 @@
from pandas.core.dtypes.dtypes import register_extension_dtype
from pandas.core.dtypes.missing import isna
-from pandas.core import nanops, ops
-from pandas.core.array_algos import masked_reductions
+from pandas.core import ops
from pandas.core.ops import invalid_comparison
from pandas.core.ops.common import unpack_zerodim_and_defer
from pandas.core.tools.numeric import to_numeric
@@ -452,33 +451,21 @@ def cmp_method(self, other):
name = f"__{op.__name__}__"
return set_function_name(cmp_method, name, cls)
- def _reduce(self, name: str, skipna: bool = True, **kwargs):
- data = self._data
- mask = self._mask
-
- if name in {"sum", "prod", "min", "max"}:
- op = getattr(masked_reductions, name)
- return op(data, mask, skipna=skipna, **kwargs)
-
- # coerce to a nan-aware float if needed
- # (we explicitly use NaN within reductions)
- if self._hasna:
- data = self.to_numpy("float64", na_value=np.nan)
-
- op = getattr(nanops, "nan" + name)
- result = op(data, axis=0, skipna=skipna, mask=mask, **kwargs)
+ def sum(self, skipna=True, min_count=0, **kwargs):
+ nv.validate_sum((), kwargs)
+ return super()._reduce("sum", skipna=skipna, min_count=min_count)
- if np.isnan(result):
- return libmissing.NA
+ def prod(self, skipna=True, min_count=0, **kwargs):
+ nv.validate_prod((), kwargs)
+ return super()._reduce("prod", skipna=skipna, min_count=min_count)
- return result
+ def min(self, skipna=True, **kwargs):
+ nv.validate_min((), kwargs)
+ return super()._reduce("min", skipna=skipna)
- def sum(self, skipna=True, min_count=0, **kwargs):
- nv.validate_sum((), kwargs)
- result = masked_reductions.sum(
- values=self._data, mask=self._mask, skipna=skipna, min_count=min_count
- )
- return result
+ def max(self, skipna=True, **kwargs):
+ nv.validate_max((), kwargs)
+ return super()._reduce("max", skipna=skipna)
def _maybe_mask_result(self, result, mask, other, op_name: str):
"""
diff --git a/pandas/tests/arrays/floating/test_function.py b/pandas/tests/arrays/floating/test_function.py
index 84c650f880541..2767d93741d4c 100644
--- a/pandas/tests/arrays/floating/test_function.py
+++ b/pandas/tests/arrays/floating/test_function.py
@@ -112,8 +112,8 @@ def test_value_counts_empty():
@pytest.mark.parametrize("skipna", [True, False])
@pytest.mark.parametrize("min_count", [0, 4])
-def test_floating_array_sum(skipna, min_count):
- arr = pd.array([1, 2, 3, None], dtype="Float64")
+def test_floating_array_sum(skipna, min_count, dtype):
+ arr = pd.array([1, 2, 3, None], dtype=dtype)
result = arr.sum(skipna=skipna, min_count=min_count)
if skipna and min_count == 0:
assert result == 6.0
@@ -152,3 +152,26 @@ def test_preserve_dtypes(op):
index=pd.Index(["a", "b"], name="A"),
)
tm.assert_frame_equal(result, expected)
+
+
+@pytest.mark.parametrize("skipna", [True, False])
+@pytest.mark.parametrize("method", ["min", "max"])
+def test_floating_array_min_max(skipna, method, dtype):
+ arr = pd.array([0.0, 1.0, None], dtype=dtype)
+ func = getattr(arr, method)
+ result = func(skipna=skipna)
+ if skipna:
+ assert result == (0 if method == "min" else 1)
+ else:
+ assert result is pd.NA
+
+
+@pytest.mark.parametrize("skipna", [True, False])
+@pytest.mark.parametrize("min_count", [0, 9])
+def test_floating_array_prod(skipna, min_count, dtype):
+ arr = pd.array([1.0, 2.0, None], dtype=dtype)
+ result = arr.prod(skipna=skipna, min_count=min_count)
+ if skipna and min_count == 0:
+ assert result == 2
+ else:
+ assert result is pd.NA
| Mostly copy / paste from https://github.com/pandas-dev/pandas/pull/36761
@jorisvandenbossche Would this require a whatsnew note? Seems the current v1.2.0 note is fairly high level and doesn't talk about specific methods that are supported. Should we talk about this or leave as-is? | https://api.github.com/repos/pandas-dev/pandas/pulls/36778 | 2020-10-01T15:28:36Z | 2020-10-03T00:58:29Z | 2020-10-03T00:58:29Z | 2020-10-03T05:18:31Z |
Very useful toolkit for pythonUpdate | diff --git a/README.md b/README.md
index da8487d76f4a1..6c8dbebbb22b5 100644
--- a/README.md
+++ b/README.md
@@ -4,7 +4,7 @@
-----------------
-# pandas: powerful Python data analysis toolkit
+# pandas: very useful toolkit fot Python
[](https://pypi.org/project/pandas/)
[](https://anaconda.org/anaconda/pandas/)
[](https://doi.org/10.5281/zenodo.3509134)
| - [ ] closes #xxxx
- [ ] tests added / passed
- [ ] passes `black pandas`
- [ ] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [ ] whatsnew entry
| https://api.github.com/repos/pandas-dev/pandas/pulls/36776 | 2020-10-01T14:48:55Z | 2020-10-01T14:52:56Z | null | 2020-10-02T09:08:28Z |
scatterplot issue | diff --git a/.github/ISSUE_TEMPLATE/plotscatter.py b/.github/ISSUE_TEMPLATE/plotscatter.py
new file mode 100644
index 0000000000000..279353c0884c6
--- /dev/null
+++ b/.github/ISSUE_TEMPLATE/plotscatter.py
@@ -0,0 +1,15 @@
+import matplotlib.pyplot as plt
+import seaborn as sns
+import numpy as np
+import pandas as pd
+
+x = [10,50,40]
+y = "male","female","unknown"
+
+data = [['male', 10], ['female', 50], ['unknown', 40]]
+
+df = pd.DataFrame(data, columns = ['x', 'y'])
+
+
+plt = df.plot.scatter(x='x', y='y')
+plt.set(xlabel="gender", ylabel = "age")
\ No newline at end of file
| added basic idea to add labels to scatter plot | https://api.github.com/repos/pandas-dev/pandas/pulls/36774 | 2020-10-01T13:43:30Z | 2020-10-01T13:56:38Z | null | 2020-10-02T09:08:37Z |
DOC: Fix code block line length | diff --git a/doc/source/getting_started/intro_tutorials/06_calculate_statistics.rst b/doc/source/getting_started/intro_tutorials/06_calculate_statistics.rst
index 7e919777fdf03..6ce98ba5dbd1b 100644
--- a/doc/source/getting_started/intro_tutorials/06_calculate_statistics.rst
+++ b/doc/source/getting_started/intro_tutorials/06_calculate_statistics.rst
@@ -123,7 +123,10 @@ aggregating statistics for given columns can be defined using the
.. ipython:: python
titanic.agg(
- {"Age": ["min", "max", "median", "skew"], "Fare": ["min", "max", "median", "mean"]}
+ {
+ "Age": ["min", "max", "median", "skew"],
+ "Fare": ["min", "max", "median", "mean"],
+ }
)
.. raw:: html
diff --git a/doc/source/user_guide/advanced.rst b/doc/source/user_guide/advanced.rst
index cec777e0f021e..2cd48ac7adb0e 100644
--- a/doc/source/user_guide/advanced.rst
+++ b/doc/source/user_guide/advanced.rst
@@ -304,7 +304,8 @@ whereas a tuple of lists refer to several values within a level:
.. ipython:: python
s = pd.Series(
- [1, 2, 3, 4, 5, 6], index=pd.MultiIndex.from_product([["A", "B"], ["c", "d", "e"]])
+ [1, 2, 3, 4, 5, 6],
+ index=pd.MultiIndex.from_product([["A", "B"], ["c", "d", "e"]]),
)
s.loc[[("A", "c"), ("B", "d")]] # list of tuples
s.loc[(["A", "B"], ["c", "d"])] # tuple of lists
@@ -819,7 +820,9 @@ values **not** in the categories, similarly to how you can reindex **any** panda
.. ipython:: python
- df3 = pd.DataFrame({"A": np.arange(3), "B": pd.Series(list("abc")).astype("category")})
+ df3 = pd.DataFrame(
+ {"A": np.arange(3), "B": pd.Series(list("abc")).astype("category")}
+ )
df3 = df3.set_index("B")
df3
@@ -934,7 +937,9 @@ example, be millisecond offsets.
np.random.randn(5, 2), index=np.arange(5) * 250.0, columns=list("AB")
),
pd.DataFrame(
- np.random.randn(6, 2), index=np.arange(4, 10) * 250.1, columns=list("AB")
+ np.random.randn(6, 2),
+ index=np.arange(4, 10) * 250.1,
+ columns=list("AB"),
),
]
)
diff --git a/doc/source/user_guide/basics.rst b/doc/source/user_guide/basics.rst
index 8c01913e55318..53fabf94e24e0 100644
--- a/doc/source/user_guide/basics.rst
+++ b/doc/source/user_guide/basics.rst
@@ -464,7 +464,10 @@ which we illustrate:
{"A": [1.0, np.nan, 3.0, 5.0, np.nan], "B": [np.nan, 2.0, 3.0, np.nan, 6.0]}
)
df2 = pd.DataFrame(
- {"A": [5.0, 2.0, 4.0, np.nan, 3.0, 7.0], "B": [np.nan, np.nan, 3.0, 4.0, 6.0, 8.0]}
+ {
+ "A": [5.0, 2.0, 4.0, np.nan, 3.0, 7.0],
+ "B": [np.nan, np.nan, 3.0, 4.0, 6.0, 8.0],
+ }
)
df1
df2
@@ -712,7 +715,10 @@ Similarly, you can get the most frequently occurring value(s), i.e. the mode, of
s5 = pd.Series([1, 1, 3, 3, 3, 5, 5, 7, 7, 7])
s5.mode()
df5 = pd.DataFrame(
- {"A": np.random.randint(0, 7, size=50), "B": np.random.randint(-10, 15, size=50)}
+ {
+ "A": np.random.randint(0, 7, size=50),
+ "B": np.random.randint(-10, 15, size=50),
+ }
)
df5.mode()
@@ -1192,7 +1198,9 @@ to :ref:`merging/joining functionality <merging>`:
.. ipython:: python
- s = pd.Series(["six", "seven", "six", "seven", "six"], index=["a", "b", "c", "d", "e"])
+ s = pd.Series(
+ ["six", "seven", "six", "seven", "six"], index=["a", "b", "c", "d", "e"]
+ )
t = pd.Series({"six": 6.0, "seven": 7.0})
s
s.map(t)
@@ -1494,7 +1502,9 @@ labels).
df = pd.DataFrame(
{"x": [1, 2, 3, 4, 5, 6], "y": [10, 20, 30, 40, 50, 60]},
- index=pd.MultiIndex.from_product([["a", "b", "c"], [1, 2]], names=["let", "num"]),
+ index=pd.MultiIndex.from_product(
+ [["a", "b", "c"], [1, 2]], names=["let", "num"]
+ ),
)
df
df.rename_axis(index={"let": "abc"})
@@ -1803,7 +1813,9 @@ used to sort a pandas object by its index levels.
}
)
- unsorted_df = df.reindex(index=["a", "d", "c", "b"], columns=["three", "two", "one"])
+ unsorted_df = df.reindex(
+ index=["a", "d", "c", "b"], columns=["three", "two", "one"]
+ )
unsorted_df
# DataFrame
@@ -1849,7 +1861,9 @@ to use to determine the sorted order.
.. ipython:: python
- df1 = pd.DataFrame({"one": [2, 1, 1, 1], "two": [1, 3, 2, 4], "three": [5, 4, 3, 2]})
+ df1 = pd.DataFrame(
+ {"one": [2, 1, 1, 1], "two": [1, 3, 2, 4], "three": [5, 4, 3, 2]}
+ )
df1.sort_values(by="two")
The ``by`` parameter can take a list of column names, e.g.:
@@ -1994,7 +2008,9 @@ all levels to ``by``.
.. ipython:: python
- df1.columns = pd.MultiIndex.from_tuples([("a", "one"), ("a", "two"), ("b", "three")])
+ df1.columns = pd.MultiIndex.from_tuples(
+ [("a", "one"), ("a", "two"), ("b", "three")]
+ )
df1.sort_values(by=("a", "two"))
@@ -2245,7 +2261,11 @@ to the correct type.
import datetime
df = pd.DataFrame(
- [[1, 2], ["a", "b"], [datetime.datetime(2016, 3, 2), datetime.datetime(2016, 3, 2)]]
+ [
+ [1, 2],
+ ["a", "b"],
+ [datetime.datetime(2016, 3, 2), datetime.datetime(2016, 3, 2)],
+ ]
)
df = df.T
df
diff --git a/doc/source/user_guide/categorical.rst b/doc/source/user_guide/categorical.rst
index 67f11bbb45b02..5c43de05fb5b9 100644
--- a/doc/source/user_guide/categorical.rst
+++ b/doc/source/user_guide/categorical.rst
@@ -513,7 +513,11 @@ The ordering of the categorical is determined by the ``categories`` of that colu
dfs = pd.DataFrame(
{
- "A": pd.Categorical(list("bbeebbaa"), categories=["e", "a", "b"], ordered=True),
+ "A": pd.Categorical(
+ list("bbeebbaa"),
+ categories=["e", "a", "b"],
+ ordered=True,
+ ),
"B": [1, 2, 1, 2, 2, 1, 2, 1],
}
)
@@ -642,7 +646,13 @@ Groupby will also show "unused" categories:
df.groupby("cats").mean()
cats2 = pd.Categorical(["a", "a", "b", "b"], categories=["a", "b", "c"])
- df2 = pd.DataFrame({"cats": cats2, "B": ["c", "d", "c", "d"], "values": [1, 2, 3, 4]})
+ df2 = pd.DataFrame(
+ {
+ "cats": cats2,
+ "B": ["c", "d", "c", "d"],
+ "values": [1, 2, 3, 4],
+ }
+ )
df2.groupby(["cats", "B"]).mean()
@@ -1115,7 +1125,11 @@ You can use ``fillna`` to handle missing values before applying a function.
.. ipython:: python
df = pd.DataFrame(
- {"a": [1, 2, 3, 4], "b": ["a", "b", "c", "d"], "cats": pd.Categorical([1, 2, 3, 2])}
+ {
+ "a": [1, 2, 3, 4],
+ "b": ["a", "b", "c", "d"],
+ "cats": pd.Categorical([1, 2, 3, 2]),
+ }
)
df.apply(lambda row: type(row["cats"]), axis=1)
df.apply(lambda col: col.dtype, axis=0)
diff --git a/doc/source/user_guide/computation.rst b/doc/source/user_guide/computation.rst
index 2f6ac6b06d85e..75fb3380821d8 100644
--- a/doc/source/user_guide/computation.rst
+++ b/doc/source/user_guide/computation.rst
@@ -787,7 +787,11 @@ can even be omitted:
.. ipython:: python
- covs = df[["B", "C", "D"]].rolling(window=50).cov(df[["A", "B", "C"]], pairwise=True)
+ covs = (
+ df[["B", "C", "D"]]
+ .rolling(window=50)
+ .cov(df[["A", "B", "C"]], pairwise=True)
+ )
covs.loc["2002-09-22":]
.. ipython:: python
diff --git a/doc/source/user_guide/cookbook.rst b/doc/source/user_guide/cookbook.rst
index 214b8a680fa7e..939acf10d6c0b 100644
--- a/doc/source/user_guide/cookbook.rst
+++ b/doc/source/user_guide/cookbook.rst
@@ -266,7 +266,9 @@ New columns
.. ipython:: python
- df = pd.DataFrame({"AAA": [1, 1, 1, 2, 2, 2, 3, 3], "BBB": [2, 1, 3, 4, 5, 1, 2, 3]})
+ df = pd.DataFrame(
+ {"AAA": [1, 1, 1, 2, 2, 2, 3, 3], "BBB": [2, 1, 3, 4, 5, 1, 2, 3]}
+ )
df
Method 1 : idxmin() to get the index of the minimums
@@ -327,7 +329,9 @@ Arithmetic
.. ipython:: python
- cols = pd.MultiIndex.from_tuples([(x, y) for x in ["A", "B", "C"] for y in ["O", "I"]])
+ cols = pd.MultiIndex.from_tuples(
+ [(x, y) for x in ["A", "B", "C"] for y in ["O", "I"]]
+ )
df = pd.DataFrame(np.random.randn(2, 6), index=["n", "m"], columns=cols)
df
df = df.div(df["C"], level=1)
@@ -566,7 +570,9 @@ Unlike agg, apply's callable is passed a sub-DataFrame which gives you access to
.. ipython:: python
- df = pd.DataFrame({"Color": "Red Red Red Blue".split(), "Value": [100, 150, 50, 50]})
+ df = pd.DataFrame(
+ {"Color": "Red Red Red Blue".split(), "Value": [100, 150, 50, 50]}
+ )
df
df["Counts"] = df.groupby(["Color"]).transform(len)
df
@@ -648,7 +654,10 @@ Create a list of dataframes, split using a delineation based on logic included i
dfs = list(
zip(
*df.groupby(
- (1 * (df["Case"] == "B")).cumsum().rolling(window=3, min_periods=1).median()
+ (1 * (df["Case"] == "B"))
+ .cumsum()
+ .rolling(window=3, min_periods=1)
+ .median()
)
)
)[-1]
@@ -740,7 +749,18 @@ The :ref:`Pivot <reshaping.pivot>` docs.
"yes",
],
"Passed": ["yes" if x > 50 else "no" for x in grades],
- "Employed": [True, True, True, False, False, False, False, True, True, False],
+ "Employed": [
+ True,
+ True,
+ True,
+ False,
+ False,
+ False,
+ False,
+ True,
+ True,
+ False,
+ ],
"Grade": grades,
}
)
@@ -791,7 +811,9 @@ Apply
return pd.Series(aList)
- df_orgz = pd.concat({ind: row.apply(SeriesFromSubList) for ind, row in df.iterrows()})
+ df_orgz = pd.concat(
+ {ind: row.apply(SeriesFromSubList) for ind, row in df.iterrows()}
+ )
df_orgz
`Rolling apply with a DataFrame returning a Series
@@ -1162,7 +1184,12 @@ Option 1: pass rows explicitly to skip rows
from io import StringIO
pd.read_csv(
- StringIO(data), sep=";", skiprows=[11, 12], index_col=0, parse_dates=True, header=10
+ StringIO(data),
+ sep=";",
+ skiprows=[11, 12],
+ index_col=0,
+ parse_dates=True,
+ header=10,
)
Option 2: read column names and then data
diff --git a/doc/source/user_guide/groupby.rst b/doc/source/user_guide/groupby.rst
index 6427cea6fa510..e8866daa9d99f 100644
--- a/doc/source/user_guide/groupby.rst
+++ b/doc/source/user_guide/groupby.rst
@@ -267,7 +267,9 @@ the length of the ``groups`` dict, so it is largely just a convenience:
height = np.random.normal(60, 10, size=n)
time = pd.date_range("1/1/2000", periods=n)
gender = np.random.choice(["male", "female"], size=n)
- df = pd.DataFrame({"height": height, "weight": weight, "gender": gender}, index=time)
+ df = pd.DataFrame(
+ {"height": height, "weight": weight, "gender": gender}, index=time
+ )
.. ipython:: python
@@ -767,7 +769,10 @@ For example, suppose we wished to standardize the data within each group:
ts.head()
ts.tail()
- transformed = ts.groupby(lambda x: x.year).transform(lambda x: (x - x.mean()) / x.std())
+ transformed = ts.groupby(lambda x: x.year).transform(
+ lambda x: (x - x.mean()) / x.std()
+ )
+
We would expect the result to now have mean 0 and standard deviation 1 within
each group, which we can easily check:
diff --git a/doc/source/user_guide/io.rst b/doc/source/user_guide/io.rst
index ae22ee836cd8c..0b24ff61d87b8 100644
--- a/doc/source/user_guide/io.rst
+++ b/doc/source/user_guide/io.rst
@@ -986,7 +986,12 @@ Note that ``infer_datetime_format`` is sensitive to ``dayfirst``. With
.. ipython:: python
# Try to infer the format for the index column
- df = pd.read_csv("foo.csv", index_col=0, parse_dates=True, infer_datetime_format=True)
+ df = pd.read_csv(
+ "foo.csv",
+ index_col=0,
+ parse_dates=True,
+ infer_datetime_format=True,
+ )
df
.. ipython:: python
@@ -1046,9 +1051,19 @@ writing to a file). For example:
val = "0.3066101993807095471566981359501369297504425048828125"
data = "a,b,c\n1,2,{0}".format(val)
- abs(pd.read_csv(StringIO(data), engine="c", float_precision=None)["c"][0] - float(val))
abs(
- pd.read_csv(StringIO(data), engine="c", float_precision="high")["c"][0] - float(val)
+ pd.read_csv(
+ StringIO(data),
+ engine="c",
+ float_precision=None,
+ )["c"][0] - float(val)
+ )
+ abs(
+ pd.read_csv(
+ StringIO(data),
+ engine="c",
+ float_precision="high",
+ )["c"][0] - float(val)
)
abs(
pd.read_csv(StringIO(data), engine="c", float_precision="round_trip")["c"][0]
@@ -2517,7 +2532,12 @@ columns to strings.
.. code-block:: python
url_mcc = "https://en.wikipedia.org/wiki/Mobile_country_code"
- dfs = pd.read_html(url_mcc, match="Telekom Albania", header=0, converters={"MNC": str})
+ dfs = pd.read_html(
+ url_mcc,
+ match="Telekom Albania",
+ header=0,
+ converters={"MNC": str},
+ )
Use some combination of the above:
@@ -3570,7 +3590,12 @@ HDFStore will by default not drop rows that are all missing. This behavior can b
.. ipython:: python
- df_with_missing = pd.DataFrame({"col1": [0, np.nan, 2], "col2": [1, np.nan, np.nan]})
+ df_with_missing = pd.DataFrame(
+ {
+ "col1": [0, np.nan, 2],
+ "col2": [1, np.nan, np.nan],
+ }
+ )
df_with_missing
df_with_missing.to_hdf("file.h5", "df_with_missing", format="table", mode="w")
@@ -3944,7 +3969,8 @@ specified in the format: ``<float>(<unit>)``, where float may be signed (and fra
{
"A": pd.Timestamp("20130101"),
"B": [
- pd.Timestamp("20130101") + timedelta(days=i, seconds=10) for i in range(10)
+ pd.Timestamp("20130101") + timedelta(days=i, seconds=10)
+ for i in range(10)
],
}
)
@@ -4241,7 +4267,11 @@ results.
store.select("df2_mt")
# as a multiple
- store.select_as_multiple(["df1_mt", "df2_mt"], where=["A>0", "B>0"], selector="df1_mt")
+ store.select_as_multiple(
+ ["df1_mt", "df2_mt"],
+ where=["A>0", "B>0"],
+ selector="df1_mt",
+ )
Delete from a table
@@ -4797,8 +4827,16 @@ Read only certain columns of a parquet file.
.. ipython:: python
- result = pd.read_parquet("example_fp.parquet", engine="fastparquet", columns=["a", "b"])
- result = pd.read_parquet("example_pa.parquet", engine="pyarrow", columns=["a", "b"])
+ result = pd.read_parquet(
+ "example_fp.parquet",
+ engine="fastparquet",
+ columns=["a", "b"],
+ )
+ result = pd.read_parquet(
+ "example_pa.parquet",
+ engine="pyarrow",
+ columns=["a", "b"],
+ )
result.dtypes
@@ -5176,7 +5214,11 @@ to pass to :func:`pandas.to_datetime`:
.. code-block:: python
pd.read_sql_table("data", engine, parse_dates={"Date": "%Y-%m-%d"})
- pd.read_sql_table("data", engine, parse_dates={"Date": {"format": "%Y-%m-%d %H:%M:%S"}})
+ pd.read_sql_table(
+ "data",
+ engine,
+ parse_dates={"Date": {"format": "%Y-%m-%d %H:%M:%S"}},
+ )
You can check if a table exists using :func:`~pandas.io.sql.has_table`
@@ -5593,7 +5635,11 @@ avoid converting categorical columns into ``pd.Categorical``:
.. code-block:: python
- df = pd.read_spss("spss_data.sav", usecols=["foo", "bar"], convert_categoricals=False)
+ df = pd.read_spss(
+ "spss_data.sav",
+ usecols=["foo", "bar"],
+ convert_categoricals=False,
+ )
More information about the SAV and ZSAV file formats is available here_.
diff --git a/doc/source/user_guide/merging.rst b/doc/source/user_guide/merging.rst
index 8dbfc261e6fa8..da16aaf5b3a56 100644
--- a/doc/source/user_guide/merging.rst
+++ b/doc/source/user_guide/merging.rst
@@ -1065,7 +1065,9 @@ join key), using ``join`` may be more convenient. Here is a simple example:
.. ipython:: python
- result = pd.merge(left, right, left_on="key", right_index=True, how="left", sort=False)
+ result = pd.merge(
+ left, right, left_on="key", right_index=True, how="left", sort=False
+ )
.. ipython:: python
:suppress:
@@ -1196,7 +1198,9 @@ the left argument, as in this example:
left = pd.DataFrame({"v1": range(12)}, index=leftindex)
left
- rightindex = pd.MultiIndex.from_product([list("abc"), list("xy")], names=["abc", "xy"])
+ rightindex = pd.MultiIndex.from_product(
+ [list("abc"), list("xy")], names=["abc", "xy"]
+ )
right = pd.DataFrame({"v2": [100 * i for i in range(1, 7)]}, index=rightindex)
right
@@ -1210,7 +1214,9 @@ done using the following code.
leftindex = pd.MultiIndex.from_tuples(
[("K0", "X0"), ("K0", "X1"), ("K1", "X2")], names=["key", "X"]
)
- left = pd.DataFrame({"A": ["A0", "A1", "A2"], "B": ["B0", "B1", "B2"]}, index=leftindex)
+ left = pd.DataFrame(
+ {"A": ["A0", "A1", "A2"], "B": ["B0", "B1", "B2"]}, index=leftindex
+ )
rightindex = pd.MultiIndex.from_tuples(
[("K0", "Y0"), ("K1", "Y1"), ("K2", "Y2"), ("K2", "Y3")], names=["key", "Y"]
@@ -1376,7 +1382,9 @@ one object from values for matching indices in the other. Here is an example:
.. ipython:: python
- df1 = pd.DataFrame([[np.nan, 3.0, 5.0], [-4.6, np.nan, np.nan], [np.nan, 7.0, np.nan]])
+ df1 = pd.DataFrame(
+ [[np.nan, 3.0, 5.0], [-4.6, np.nan, np.nan], [np.nan, 7.0, np.nan]]
+ )
df2 = pd.DataFrame([[-42.6, np.nan, -8.2], [-5.0, 1.6, 4]], index=[1, 2])
For this, use the :meth:`~DataFrame.combine_first` method:
diff --git a/doc/source/user_guide/missing_data.rst b/doc/source/user_guide/missing_data.rst
index 7eb377694910b..e6d06aa6bd1a0 100644
--- a/doc/source/user_guide/missing_data.rst
+++ b/doc/source/user_guide/missing_data.rst
@@ -400,7 +400,10 @@ You can also interpolate with a DataFrame:
.. ipython:: python
df = pd.DataFrame(
- {"A": [1, 2.1, np.nan, 4.7, 5.6, 6.8], "B": [0.25, np.nan, np.nan, 4, 12.2, 14.4]}
+ {
+ "A": [1, 2.1, np.nan, 4.7, 5.6, 6.8],
+ "B": [0.25, np.nan, np.nan, 4, 12.2, 14.4],
+ }
)
df
df.interpolate()
diff --git a/doc/source/user_guide/reshaping.rst b/doc/source/user_guide/reshaping.rst
index 2061185b25416..77cf43b2e2b19 100644
--- a/doc/source/user_guide/reshaping.rst
+++ b/doc/source/user_guide/reshaping.rst
@@ -238,7 +238,13 @@ calling ``sort_index``, of course). Here is a more complex example:
.. ipython:: python
columns = pd.MultiIndex.from_tuples(
- [("A", "cat"), ("B", "dog"), ("B", "cat"), ("A", "dog")], names=["exp", "animal"]
+ [
+ ("A", "cat"),
+ ("B", "dog"),
+ ("B", "cat"),
+ ("A", "dog"),
+ ],
+ names=["exp", "animal"],
)
index = pd.MultiIndex.from_product(
[("bar", "baz", "foo", "qux"), ("one", "two")], names=["first", "second"]
@@ -426,7 +432,12 @@ We can produce pivot tables from this data very easily:
pd.pivot_table(df, values="D", index=["A", "B"], columns=["C"])
pd.pivot_table(df, values="D", index=["B"], columns=["A", "C"], aggfunc=np.sum)
- pd.pivot_table(df, values=["D", "E"], index=["B"], columns=["A", "C"], aggfunc=np.sum)
+ pd.pivot_table(
+ df, values=["D", "E"],
+ index=["B"],
+ columns=["A", "C"],
+ aggfunc=np.sum,
+ )
The result object is a ``DataFrame`` having potentially hierarchical indexes on the
rows and columns. If the ``values`` column name is not given, the pivot table
@@ -800,14 +811,26 @@ parameter.
.. ipython:: python
- df.pivot_table(values="val0", index="row", columns="col", aggfunc="mean", fill_value=0)
+ df.pivot_table(
+ values="val0",
+ index="row",
+ columns="col",
+ aggfunc="mean",
+ fill_value=0,
+ )
Also note that we can pass in other aggregation functions as well. For example,
we can also pass in ``sum``.
.. ipython:: python
- df.pivot_table(values="val0", index="row", columns="col", aggfunc="sum", fill_value=0)
+ df.pivot_table(
+ values="val0",
+ index="row",
+ columns="col",
+ aggfunc="sum",
+ fill_value=0,
+ )
Another aggregation we can do is calculate the frequency in which the columns
and rows occur together a.k.a. "cross tabulation". To do this, we can pass
@@ -825,21 +848,36 @@ We can also perform multiple aggregations. For example, to perform both a
.. ipython:: python
- df.pivot_table(values="val0", index="row", columns="col", aggfunc=["mean", "sum"])
+ df.pivot_table(
+ values="val0",
+ index="row",
+ columns="col",
+ aggfunc=["mean", "sum"],
+ )
Note to aggregate over multiple value columns, we can pass in a list to the
``values`` parameter.
.. ipython:: python
- df.pivot_table(values=["val0", "val1"], index="row", columns="col", aggfunc=["mean"])
+ df.pivot_table(
+ values=["val0", "val1"],
+ index="row",
+ columns="col",
+ aggfunc=["mean"],
+ )
Note to subdivide over multiple columns we can pass in a list to the
``columns`` parameter.
.. ipython:: python
- df.pivot_table(values=["val0"], index="row", columns=["item", "col"], aggfunc=["mean"])
+ df.pivot_table(
+ values=["val0"],
+ index="row",
+ columns=["item", "col"],
+ aggfunc=["mean"],
+ )
.. _reshaping.explode:
diff --git a/doc/source/user_guide/text.rst b/doc/source/user_guide/text.rst
index 2ada09117273d..2b27d37904599 100644
--- a/doc/source/user_guide/text.rst
+++ b/doc/source/user_guide/text.rst
@@ -261,7 +261,8 @@ i.e., from the end of the string to the beginning of the string:
.. ipython:: python
s3 = pd.Series(
- ["A", "B", "C", "Aaba", "Baca", "", np.nan, "CABA", "dog", "cat"], dtype="string"
+ ["A", "B", "C", "Aaba", "Baca", "", np.nan, "CABA", "dog", "cat"],
+ dtype="string",
)
s3
s3.str.replace("^.a|dog", "XX-XX ", case=False)
@@ -515,7 +516,10 @@ DataFrame with one column per group.
.. ipython:: python
- pd.Series(["a1", "b2", "c3"], dtype="string").str.extract(r"([ab])(\d)", expand=False)
+ pd.Series(
+ ["a1", "b2", "c3"],
+ dtype="string",
+ ).str.extract(r"([ab])(\d)", expand=False)
Elements that do not match return a row filled with ``NaN``. Thus, a
Series of messy strings can be "converted" into a like-indexed Series
@@ -536,7 +540,10 @@ and optional groups like
.. ipython:: python
- pd.Series(["a1", "b2", "3"], dtype="string").str.extract(r"([ab])?(\d)", expand=False)
+ pd.Series(
+ ["a1", "b2", "3"],
+ dtype="string",
+ ).str.extract(r"([ab])?(\d)", expand=False)
can also be used. Note that any capture group names in the regular
expression will be used for column names; otherwise capture group
@@ -655,19 +662,28 @@ You can check whether elements contain a pattern:
.. ipython:: python
pattern = r"[0-9][a-z]"
- pd.Series(["1", "2", "3a", "3b", "03c", "4dx"], dtype="string").str.contains(pattern)
+ pd.Series(
+ ["1", "2", "3a", "3b", "03c", "4dx"],
+ dtype="string",
+ ).str.contains(pattern)
Or whether elements match a pattern:
.. ipython:: python
- pd.Series(["1", "2", "3a", "3b", "03c", "4dx"], dtype="string").str.match(pattern)
+ pd.Series(
+ ["1", "2", "3a", "3b", "03c", "4dx"],
+ dtype="string",
+ ).str.match(pattern)
.. versionadded:: 1.1.0
.. ipython:: python
- pd.Series(["1", "2", "3a", "3b", "03c", "4dx"], dtype="string").str.fullmatch(pattern)
+ pd.Series(
+ ["1", "2", "3a", "3b", "03c", "4dx"],
+ dtype="string",
+ ).str.fullmatch(pattern)
.. note::
diff --git a/doc/source/user_guide/timedeltas.rst b/doc/source/user_guide/timedeltas.rst
index cb265d34229dd..0b4ddaaa8a42a 100644
--- a/doc/source/user_guide/timedeltas.rst
+++ b/doc/source/user_guide/timedeltas.rst
@@ -409,7 +409,10 @@ Similarly to other of the datetime-like indices, ``DatetimeIndex`` and ``PeriodI
.. ipython:: python
- s = pd.Series(np.arange(100), index=pd.timedelta_range("1 days", periods=100, freq="h"))
+ s = pd.Series(
+ np.arange(100),
+ index=pd.timedelta_range("1 days", periods=100, freq="h"),
+ )
s
Selections work similarly, with coercion on string-likes and slices:
diff --git a/doc/source/user_guide/timeseries.rst b/doc/source/user_guide/timeseries.rst
index be2c67521dc5d..9fbd02df50d10 100644
--- a/doc/source/user_guide/timeseries.rst
+++ b/doc/source/user_guide/timeseries.rst
@@ -317,7 +317,9 @@ which can be specified. These are computed from the starting point specified by
.. ipython:: python
- pd.to_datetime([1349720105, 1349806505, 1349892905, 1349979305, 1350065705], unit="s")
+ pd.to_datetime(
+ [1349720105, 1349806505, 1349892905, 1349979305, 1350065705], unit="s"
+ )
pd.to_datetime(
[1349720105100, 1349720105200, 1349720105300, 1349720105400, 1349720105500],
@@ -707,7 +709,9 @@ If the timestamp string is treated as a slice, it can be used to index ``DataFra
.. ipython:: python
:okwarning:
- dft_minute = pd.DataFrame({"a": [1, 2, 3], "b": [4, 5, 6]}, index=series_minute.index)
+ dft_minute = pd.DataFrame(
+ {"a": [1, 2, 3], "b": [4, 5, 6]}, index=series_minute.index
+ )
dft_minute["2011-12-31 23"]
@@ -748,10 +752,11 @@ With no defaults.
.. ipython:: python
dft[
- datetime.datetime(2013, 1, 1, 10, 12, 0): datetime.datetime(2013, 2, 28, 10, 12, 0)
+ datetime.datetime(2013, 1, 1, 10, 12, 0): datetime.datetime(
+ 2013, 2, 28, 10, 12, 0
+ )
]
-
Truncating & fancy indexing
~~~~~~~~~~~~~~~~~~~~~~~~~~~
@@ -1036,8 +1041,15 @@ As an interesting example, let's look at Egypt where a Friday-Saturday weekend i
# They also observe International Workers' Day so let's
# add that for a couple of years
- holidays = ["2012-05-01", datetime.datetime(2013, 5, 1), np.datetime64("2014-05-01")]
- bday_egypt = pd.offsets.CustomBusinessDay(holidays=holidays, weekmask=weekmask_egypt)
+ holidays = [
+ "2012-05-01",
+ datetime.datetime(2013, 5, 1),
+ np.datetime64("2014-05-01"),
+ ]
+ bday_egypt = pd.offsets.CustomBusinessDay(
+ holidays=holidays,
+ weekmask=weekmask_egypt,
+ )
dt = datetime.datetime(2013, 4, 30)
dt + 2 * bday_egypt
@@ -1417,7 +1429,12 @@ An example of how holidays and holiday calendars are defined:
rules = [
USMemorialDay,
Holiday("July 4th", month=7, day=4, observance=nearest_workday),
- Holiday("Columbus Day", month=10, day=1, offset=pd.DateOffset(weekday=MO(2))),
+ Holiday(
+ "Columbus Day",
+ month=10,
+ day=1,
+ offset=pd.DateOffset(weekday=MO(2)),
+ ),
]
@@ -2279,7 +2296,12 @@ To return ``dateutil`` time zone objects, append ``dateutil/`` before the string
rng_dateutil.tz
# dateutil - utc special case
- rng_utc = pd.date_range("3/6/2012 00:00", periods=3, freq="D", tz=dateutil.tz.tzutc())
+ rng_utc = pd.date_range(
+ "3/6/2012 00:00",
+ periods=3,
+ freq="D",
+ tz=dateutil.tz.tzutc(),
+ )
rng_utc.tz
.. versionadded:: 0.25.0
@@ -2287,7 +2309,12 @@ To return ``dateutil`` time zone objects, append ``dateutil/`` before the string
.. ipython:: python
# datetime.timezone
- rng_utc = pd.date_range("3/6/2012 00:00", periods=3, freq="D", tz=datetime.timezone.utc)
+ rng_utc = pd.date_range(
+ "3/6/2012 00:00",
+ periods=3,
+ freq="D",
+ tz=datetime.timezone.utc,
+ )
rng_utc.tz
Note that the ``UTC`` time zone is a special case in ``dateutil`` and should be constructed explicitly
@@ -2440,10 +2467,18 @@ control over how they are handled.
.. ipython:: python
pd.Timestamp(
- datetime.datetime(2019, 10, 27, 1, 30, 0, 0), tz="dateutil/Europe/London", fold=0
+ datetime.datetime(2019, 10, 27, 1, 30, 0, 0),
+ tz="dateutil/Europe/London",
+ fold=0,
)
pd.Timestamp(
- year=2019, month=10, day=27, hour=1, minute=30, tz="dateutil/Europe/London", fold=1
+ year=2019,
+ month=10,
+ day=27,
+ hour=1,
+ minute=30,
+ tz="dateutil/Europe/London",
+ fold=1,
)
.. _timeseries.timezone_ambiguous:
diff --git a/doc/source/user_guide/visualization.rst b/doc/source/user_guide/visualization.rst
index a6c3d9814b03d..6ad7ad9657e30 100644
--- a/doc/source/user_guide/visualization.rst
+++ b/doc/source/user_guide/visualization.rst
@@ -1453,7 +1453,11 @@ Here is an example of one way to easily plot group means with standard deviation
)
df3 = pd.DataFrame(
- {"data1": [3, 2, 4, 3, 2, 4, 3, 2], "data2": [6, 5, 7, 5, 4, 5, 6, 5]}, index=ix3
+ {
+ "data1": [3, 2, 4, 3, 2, 4, 3, 2],
+ "data2": [6, 5, 7, 5, 4, 5, 6, 5],
+ },
+ index=ix3,
)
# Group by index labels and take the means and standard deviations
diff --git a/doc/source/whatsnew/v0.10.1.rst b/doc/source/whatsnew/v0.10.1.rst
index d71a0d5ca68cd..611ac2021fcec 100644
--- a/doc/source/whatsnew/v0.10.1.rst
+++ b/doc/source/whatsnew/v0.10.1.rst
@@ -180,7 +180,9 @@ combined result, by using ``where`` on a selector table.
store.select("df2_mt")
# as a multiple
- store.select_as_multiple(["df1_mt", "df2_mt"], where=["A>0", "B>0"], selector="df1_mt")
+ store.select_as_multiple(
+ ["df1_mt", "df2_mt"], where=["A>0", "B>0"], selector="df1_mt"
+ )
.. ipython:: python
:suppress:
diff --git a/doc/source/whatsnew/v0.13.1.rst b/doc/source/whatsnew/v0.13.1.rst
index 1215786b4cccc..249b9555b7fd4 100644
--- a/doc/source/whatsnew/v0.13.1.rst
+++ b/doc/source/whatsnew/v0.13.1.rst
@@ -101,7 +101,9 @@ Output formatting enhancements
.. ipython:: python
- df = pd.DataFrame([pd.Timestamp("20010101"), pd.Timestamp("20040601")], columns=["age"])
+ df = pd.DataFrame(
+ [pd.Timestamp("20010101"), pd.Timestamp("20040601")], columns=["age"]
+ )
df["today"] = pd.Timestamp("20130419")
df["diff"] = df["today"] - df["age"]
df
@@ -206,7 +208,9 @@ Enhancements
.. code-block:: python
# Try to infer the format for the index column
- df = pd.read_csv("foo.csv", index_col=0, parse_dates=True, infer_datetime_format=True)
+ df = pd.read_csv(
+ "foo.csv", index_col=0, parse_dates=True, infer_datetime_format=True
+ )
- ``date_format`` and ``datetime_format`` keywords can now be specified when writing to ``excel``
files (:issue:`4133`)
diff --git a/doc/source/whatsnew/v0.14.1.rst b/doc/source/whatsnew/v0.14.1.rst
index 78fd182ea86c3..a8f8955c3c1b9 100644
--- a/doc/source/whatsnew/v0.14.1.rst
+++ b/doc/source/whatsnew/v0.14.1.rst
@@ -124,7 +124,9 @@ Enhancements
.. ipython:: python
- rng = pd.date_range("3/6/2012 00:00", periods=10, freq="D", tz="dateutil/Europe/London")
+ rng = pd.date_range(
+ "3/6/2012 00:00", periods=10, freq="D", tz="dateutil/Europe/London"
+ )
rng.tz
See :ref:`the docs <timeseries.timezone>`.
diff --git a/doc/source/whatsnew/v0.17.0.rst b/doc/source/whatsnew/v0.17.0.rst
index 1658f877f5523..d8f39a7d6e3c0 100644
--- a/doc/source/whatsnew/v0.17.0.rst
+++ b/doc/source/whatsnew/v0.17.0.rst
@@ -786,7 +786,9 @@ Previous behavior:
.. ipython:: python
- df_with_missing = pd.DataFrame({"col1": [0, np.nan, 2], "col2": [1, np.nan, np.nan]})
+ df_with_missing = pd.DataFrame(
+ {"col1": [0, np.nan, 2], "col2": [1, np.nan, np.nan]}
+ )
df_with_missing
diff --git a/doc/source/whatsnew/v0.19.0.rst b/doc/source/whatsnew/v0.19.0.rst
index 08ccc1565125f..2ac7b0f54361b 100644
--- a/doc/source/whatsnew/v0.19.0.rst
+++ b/doc/source/whatsnew/v0.19.0.rst
@@ -1091,7 +1091,9 @@ Previously, most ``Index`` classes returned ``np.ndarray``, and ``DatetimeIndex`
.. ipython:: python
pd.Index([1, 2, 3]).unique()
- pd.DatetimeIndex(["2011-01-01", "2011-01-02", "2011-01-03"], tz="Asia/Tokyo").unique()
+ pd.DatetimeIndex(
+ ["2011-01-01", "2011-01-02", "2011-01-03"], tz="Asia/Tokyo"
+ ).unique()
.. _whatsnew_0190.api.multiindex:
diff --git a/doc/source/whatsnew/v0.8.0.rst b/doc/source/whatsnew/v0.8.0.rst
index 8a84630a28b34..b34c2a5c6a07c 100644
--- a/doc/source/whatsnew/v0.8.0.rst
+++ b/doc/source/whatsnew/v0.8.0.rst
@@ -178,7 +178,9 @@ types. For example, ``'kde'`` is a new option:
.. ipython:: python
- s = pd.Series(np.concatenate((np.random.randn(1000), np.random.randn(1000) * 0.5 + 3)))
+ s = pd.Series(
+ np.concatenate((np.random.randn(1000), np.random.randn(1000) * 0.5 + 3))
+ )
plt.figure()
s.hist(density=True, alpha=0.2)
s.plot(kind="kde")
diff --git a/setup.cfg b/setup.cfg
index e125eea226b10..1c1d3b744c574 100644
--- a/setup.cfg
+++ b/setup.cfg
@@ -33,7 +33,7 @@ exclude =
env # exclude asv benchmark environments from linting
[flake8-rst]
-max-line-length = 88
+max-line-length = 84
bootstrap =
import numpy as np
import pandas as pd
| https://github.com/pandas-dev/pandas/pull/36734#discussion_r498058234 | https://api.github.com/repos/pandas-dev/pandas/pulls/36773 | 2020-10-01T13:26:00Z | 2020-10-07T02:40:48Z | 2020-10-07T02:40:48Z | 2020-10-07T02:46:43Z |
Update README.md | diff --git a/README.md b/README.md
index a2f2f1c04442a..da8487d76f4a1 100644
--- a/README.md
+++ b/README.md
@@ -32,32 +32,32 @@ its way towards this goal.
Here are just a few of the things that pandas does well:
- Easy handling of [**missing data**][missing-data] (represented as
- `NaN`, `NA`, or `NaT`) in floating point as well as non-floating point data
+ `NaN`, `NA`, or `NaT`) in floating point as well as non-floating point data;
- Size mutability: columns can be [**inserted and
deleted**][insertion-deletion] from DataFrame and higher dimensional
- objects
+ objects;
- Automatic and explicit [**data alignment**][alignment]: objects can
be explicitly aligned to a set of labels, or the user can simply
ignore the labels and let `Series`, `DataFrame`, etc. automatically
- align the data for you in computations
+ align the data for you in computations;
- Powerful, flexible [**group by**][groupby] functionality to perform
split-apply-combine operations on data sets, for both aggregating
- and transforming data
+ and transforming data;
- Make it [**easy to convert**][conversion] ragged,
differently-indexed data in other Python and NumPy data structures
- into DataFrame objects
+ into DataFrame objects;
- Intelligent label-based [**slicing**][slicing], [**fancy
indexing**][fancy-indexing], and [**subsetting**][subsetting] of
- large data sets
+ large data sets;
- Intuitive [**merging**][merging] and [**joining**][joining] data
- sets
+ sets;
- Flexible [**reshaping**][reshape] and [**pivoting**][pivot-table] of
- data sets
+ data sets;
- [**Hierarchical**][mi] labeling of axes (possible to have multiple
- labels per tick)
+ labels per tick);
- Robust IO tools for loading data from [**flat files**][flat-files]
(CSV and delimited), [**Excel files**][excel], [**databases**][db],
- and saving/loading data from the ultrafast [**HDF5 format**][hdfstore]
+ and saving/loading data from the ultrafast [**HDF5 format**][hdfstore];
- [**Time series**][timeseries]-specific functionality: date range
generation and frequency conversion, moving window statistics,
date shifting and lagging.
| - [ ] closes #xxxx
- [ ] tests added / passed
- [ ] passes `black pandas`
- [ ] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [ ] whatsnew entry
| https://api.github.com/repos/pandas-dev/pandas/pulls/36772 | 2020-10-01T12:49:56Z | 2020-10-01T14:29:47Z | 2020-10-01T14:29:47Z | 2020-10-01T14:40:05Z |
BUG: Timedelta drops decimals if precision is greater than nanoseconds | diff --git a/doc/source/whatsnew/v1.2.0.rst b/doc/source/whatsnew/v1.2.0.rst
index f751a91cecf19..fea647b919e30 100644
--- a/doc/source/whatsnew/v1.2.0.rst
+++ b/doc/source/whatsnew/v1.2.0.rst
@@ -410,6 +410,7 @@ Timedelta
- Bug in :class:`TimedeltaIndex`, :class:`Series`, and :class:`DataFrame` floor-division with ``timedelta64`` dtypes and ``NaT`` in the denominator (:issue:`35529`)
- Bug in parsing of ISO 8601 durations in :class:`Timedelta`, :meth:`pd.to_datetime` (:issue:`37159`, fixes :issue:`29773` and :issue:`36204`)
- Bug in :func:`to_timedelta` with a read-only array incorrectly raising (:issue:`34857`)
+- Bug in :class:`Timedelta` incorrectly truncating to sub-second portion of a string input when it has precision higher than nanoseconds (:issue:`36738`)
Timezones
^^^^^^^^^
diff --git a/pandas/_libs/tslibs/timedeltas.pyx b/pandas/_libs/tslibs/timedeltas.pyx
index 29e8c58055f9e..e4b19d844dcab 100644
--- a/pandas/_libs/tslibs/timedeltas.pyx
+++ b/pandas/_libs/tslibs/timedeltas.pyx
@@ -405,9 +405,11 @@ cdef inline int64_t parse_timedelta_string(str ts) except? -1:
m = 10**(3 -len(frac)) * 1000 * 1000
elif len(frac) > 3 and len(frac) <= 6:
m = 10**(6 -len(frac)) * 1000
- else:
+ elif len(frac) > 6 and len(frac) <= 9:
m = 10**(9 -len(frac))
-
+ else:
+ m = 1
+ frac = frac[:9]
r = <int64_t>int(''.join(frac)) * m
result += timedelta_as_neg(r, neg)
@@ -1143,6 +1145,9 @@ class Timedelta(_Timedelta):
Notes
-----
The ``.value`` attribute is always in ns.
+
+ If the precision is higher than nanoseconds, the precision of the duration is
+ truncated to nanoseconds.
"""
def __new__(cls, object value=_no_input, unit=None, **kwargs):
diff --git a/pandas/core/tools/timedeltas.py b/pandas/core/tools/timedeltas.py
index e8faebd6b2542..6a9fd7a542a44 100644
--- a/pandas/core/tools/timedeltas.py
+++ b/pandas/core/tools/timedeltas.py
@@ -66,6 +66,11 @@ def to_timedelta(arg, unit=None, errors="raise"):
to_datetime : Convert argument to datetime.
convert_dtypes : Convert dtypes.
+ Notes
+ -----
+ If the precision is higher than nanoseconds, the precision of the duration is
+ truncated to nanoseconds for string inputs.
+
Examples
--------
Parsing a single string to a Timedelta:
diff --git a/pandas/tests/tools/test_to_timedelta.py b/pandas/tests/tools/test_to_timedelta.py
index 5be7e81df53f2..585ad4a7fab51 100644
--- a/pandas/tests/tools/test_to_timedelta.py
+++ b/pandas/tests/tools/test_to_timedelta.py
@@ -210,3 +210,20 @@ def test_to_timedelta_nullable_int64_dtype(self):
result = to_timedelta(Series([1, None], dtype="Int64"), unit="days")
tm.assert_series_equal(result, expected)
+
+ @pytest.mark.parametrize(
+ ("input", "expected"),
+ [
+ ("8:53:08.71800000001", "8:53:08.718"),
+ ("8:53:08.718001", "8:53:08.718001"),
+ ("8:53:08.7180000001", "8:53:08.7180000001"),
+ ("-8:53:08.71800000001", "-8:53:08.718"),
+ ("8:53:08.7180000089", "8:53:08.718000008"),
+ ],
+ )
+ @pytest.mark.parametrize("func", [pd.Timedelta, pd.to_timedelta])
+ def test_to_timedelta_precision_over_nanos(self, input, expected, func):
+ # GH: 36738
+ expected = pd.Timedelta(expected)
+ result = func(input)
+ assert result == expected
| - [x] closes #36738
- [x] tests added / passed
- [x] passes `black pandas`
- [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [x] whatsnew entry
I am not quite sure, if we should round to nanoseconds or cut if off after the 9th decimal. I implemented a simple cut off, but would be happy to change if rounding is better.
I would also add a note to the user guide if the future behavior is clear | https://api.github.com/repos/pandas-dev/pandas/pulls/36771 | 2020-10-01T12:09:11Z | 2020-11-15T17:31:47Z | 2020-11-15T17:31:47Z | 2020-11-15T17:32:28Z |
CI: troubleshoot travis ci on 1.1.x | diff --git a/ci/deps/travis-36-cov.yaml b/ci/deps/travis-36-cov.yaml
index c380423c55f19..8c8db106af05c 100644
--- a/ci/deps/travis-36-cov.yaml
+++ b/ci/deps/travis-36-cov.yaml
@@ -15,7 +15,6 @@ dependencies:
# pandas dependencies
- beautifulsoup4
- botocore>=1.11
- - cython>=0.29.16
- dask
- fastparquet>=0.3.2
- fsspec>=0.7.4
diff --git a/ci/deps/travis-36-locale.yaml b/ci/deps/travis-36-locale.yaml
index 21176054ae0d7..31281ce0aa243 100644
--- a/ci/deps/travis-36-locale.yaml
+++ b/ci/deps/travis-36-locale.yaml
@@ -27,6 +27,7 @@ dependencies:
- numpy
- openpyxl
- pandas-gbq=0.12.0
+ - google-cloud-bigquery==1.21.0
- psycopg2=2.6.2
- pyarrow>=0.13.0 # GH #35813
- pymysql=0.7.11
| - [ ] closes #xxxx
- [ ] tests added / passed
- [ ] passes `black pandas`
- [ ] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [ ] whatsnew entry
| https://api.github.com/repos/pandas-dev/pandas/pulls/36770 | 2020-10-01T12:07:02Z | 2020-10-01T19:19:16Z | 2020-10-01T19:19:16Z | 2020-10-01T19:19:23Z |
BUG: Index sortlevel ascending add type checking #32334 | diff --git a/doc/source/whatsnew/v1.2.0.rst b/doc/source/whatsnew/v1.2.0.rst
index e810fc0239b40..bfe7a0489b10a 100644
--- a/doc/source/whatsnew/v1.2.0.rst
+++ b/doc/source/whatsnew/v1.2.0.rst
@@ -352,7 +352,7 @@ Indexing
- Bug in :meth:`PeriodIndex.get_loc` incorrectly raising ``ValueError`` on non-datelike strings instead of ``KeyError``, causing similar errors in :meth:`Series.__geitem__`, :meth:`Series.__contains__`, and :meth:`Series.loc.__getitem__` (:issue:`34240`)
- Bug in :meth:`Index.sort_values` where, when empty values were passed, the method would break by trying to compare missing values instead of pushing them to the end of the sort order. (:issue:`35584`)
- Bug in :meth:`Index.get_indexer` and :meth:`Index.get_indexer_non_unique` where int64 arrays are returned instead of intp. (:issue:`36359`)
--
+- Bug in :meth:`DataFrame.sort_index` where parameter ascending passed as a list on a single level index gives wrong result. (:issue:`32334`)
Missing
^^^^^^^
diff --git a/pandas/core/indexes/base.py b/pandas/core/indexes/base.py
index 8ee09d8ad9be3..ff3d8bf05f9a5 100644
--- a/pandas/core/indexes/base.py
+++ b/pandas/core/indexes/base.py
@@ -1515,6 +1515,20 @@ def sortlevel(self, level=None, ascending=True, sort_remaining=None):
-------
Index
"""
+ if not isinstance(ascending, (list, bool)):
+ raise TypeError(
+ "ascending must be a single bool value or"
+ "a list of bool values of length 1"
+ )
+
+ if isinstance(ascending, list):
+ if len(ascending) != 1:
+ raise TypeError("ascending must be a list of bool values of length 1")
+ ascending = ascending[0]
+
+ if not isinstance(ascending, bool):
+ raise TypeError("ascending must be a bool value")
+
return self.sort_values(return_indexer=True, ascending=ascending)
def _get_level_values(self, level):
diff --git a/pandas/tests/indexes/test_base.py b/pandas/tests/indexes/test_base.py
index 77585f4003fe9..8db1bcc84bfa6 100644
--- a/pandas/tests/indexes/test_base.py
+++ b/pandas/tests/indexes/test_base.py
@@ -2222,6 +2222,31 @@ def test_contains_method_removed(self, index):
with pytest.raises(AttributeError, match=msg):
index.contains(1)
+ def test_sortlevel(self):
+ index = pd.Index([5, 4, 3, 2, 1])
+ with pytest.raises(Exception, match="ascending must be a single bool value or"):
+ index.sortlevel(ascending="True")
+
+ with pytest.raises(
+ Exception, match="ascending must be a list of bool values of length 1"
+ ):
+ index.sortlevel(ascending=[True, True])
+
+ with pytest.raises(Exception, match="ascending must be a bool value"):
+ index.sortlevel(ascending=["True"])
+
+ expected = pd.Index([1, 2, 3, 4, 5])
+ result = index.sortlevel(ascending=[True])
+ tm.assert_index_equal(result[0], expected)
+
+ expected = pd.Index([1, 2, 3, 4, 5])
+ result = index.sortlevel(ascending=True)
+ tm.assert_index_equal(result[0], expected)
+
+ expected = pd.Index([5, 4, 3, 2, 1])
+ result = index.sortlevel(ascending=False)
+ tm.assert_index_equal(result[0], expected)
+
class TestMixedIntIndex(Base):
# Mostly the tests from common.py for which the results differ
| - [x] closes #32334
- [x] tests added / passed
- [x] passes `black pandas`
- [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [x] whatsnew entry
Add the check of ascending parameter in the sortlevel method, only allow bool value and single bool value list
cc @Dr-Irv
| https://api.github.com/repos/pandas-dev/pandas/pulls/36767 | 2020-10-01T09:22:49Z | 2020-10-01T18:32:25Z | 2020-10-01T18:32:25Z | 2020-10-05T05:57:39Z |
BUG: fix inconsistent col spacing in info | diff --git a/pandas/io/formats/info.py b/pandas/io/formats/info.py
index e8e41d4325103..9812ca39e2656 100644
--- a/pandas/io/formats/info.py
+++ b/pandas/io/formats/info.py
@@ -288,7 +288,11 @@ def _verbose_repr(
len_column = len(pprint_thing(column_head))
space = max(max_col, len_column) + col_space
- max_id = len(pprint_thing(col_count))
+ # GH #36765
+ # add one space in max_id because there is a one-space padding
+ # in front of the number
+ # this allows maintain two spaces gap between columns
+ max_id = len(pprint_thing(col_count)) + 1
len_id = len(pprint_thing(id_head))
space_num = max(max_id, len_id) + col_space
diff --git a/pandas/tests/io/formats/test_info.py b/pandas/tests/io/formats/test_info.py
index 7000daeb9b575..d98530b5435e7 100644
--- a/pandas/tests/io/formats/test_info.py
+++ b/pandas/tests/io/formats/test_info.py
@@ -87,7 +87,7 @@ def test_info_verbose():
frame.info(verbose=True, buf=buf)
res = buf.getvalue()
- header = " # Column Dtype \n--- ------ ----- "
+ header = " # Column Dtype \n--- ------ ----- "
assert header in res
frame.info(verbose=True, buf=buf)
@@ -101,6 +101,64 @@ def test_info_verbose():
assert line.startswith(line_nr)
+@pytest.mark.parametrize(
+ "size, header_exp, separator_exp, first_line_exp, last_line_exp",
+ [
+ (
+ 4,
+ " # Column Non-Null Count Dtype ",
+ "--- ------ -------------- ----- ",
+ " 0 0 3 non-null float64",
+ " 3 3 3 non-null float64",
+ ),
+ (
+ 11,
+ " # Column Non-Null Count Dtype ",
+ "--- ------ -------------- ----- ",
+ " 0 0 3 non-null float64",
+ " 10 10 3 non-null float64",
+ ),
+ (
+ 101,
+ " # Column Non-Null Count Dtype ",
+ "--- ------ -------------- ----- ",
+ " 0 0 3 non-null float64",
+ " 100 100 3 non-null float64",
+ ),
+ (
+ 1001,
+ " # Column Non-Null Count Dtype ",
+ "--- ------ -------------- ----- ",
+ " 0 0 3 non-null float64",
+ " 1000 1000 3 non-null float64",
+ ),
+ (
+ 10001,
+ " # Column Non-Null Count Dtype ",
+ "--- ------ -------------- ----- ",
+ " 0 0 3 non-null float64",
+ " 10000 10000 3 non-null float64",
+ ),
+ ],
+)
+def test_info_verbose_with_counts_spacing(
+ size, header_exp, separator_exp, first_line_exp, last_line_exp
+):
+ """Test header column, spacer, first line and last line in verbose mode."""
+ frame = DataFrame(np.random.randn(3, size))
+ buf = StringIO()
+ frame.info(verbose=True, null_counts=True, buf=buf)
+ all_lines = buf.getvalue().splitlines()
+ # Here table would contain only header, separator and table lines
+ # dframe repr, index summary, memory usage and dtypes are excluded
+ table = all_lines[3:-2]
+ header, separator, first_line, *rest, last_line = table
+ assert header == header_exp
+ assert separator == separator_exp
+ assert first_line == first_line_exp
+ assert last_line == last_line_exp
+
+
def test_info_memory():
# https://github.com/pandas-dev/pandas/issues/21056
df = DataFrame({"a": Series([1, 2], dtype="i8")})
| - [x] closes #36765
- [x] tests added / passed
- [x] passes `black pandas`
- [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
| https://api.github.com/repos/pandas-dev/pandas/pulls/36766 | 2020-10-01T07:52:29Z | 2020-10-07T15:08:18Z | 2020-10-07T15:08:18Z | 2020-10-07T15:08:18Z |
DOC: Fix PR09 errors in several files | diff --git a/pandas/core/computation/eval.py b/pandas/core/computation/eval.py
index 630606b4d8111..913f135b449f3 100644
--- a/pandas/core/computation/eval.py
+++ b/pandas/core/computation/eval.py
@@ -212,7 +212,8 @@ def eval(
truediv : bool, optional
Whether to use true division, like in Python >= 3.
- deprecated:: 1.0.0
+
+ .. deprecated:: 1.0.0
local_dict : dict or None, optional
A dictionary of local variables, taken from locals() by default.
diff --git a/pandas/core/flags.py b/pandas/core/flags.py
index 15966d8ddce2a..6a09bfa3bd082 100644
--- a/pandas/core/flags.py
+++ b/pandas/core/flags.py
@@ -10,7 +10,7 @@ class Flags:
Parameters
----------
obj : Series or DataFrame
- The object these flags are associated with
+ The object these flags are associated with.
allows_duplicate_labels : bool, default True
Whether to allow duplicate labels in this object. By default,
duplicate labels are permitted. Setting this to ``False`` will
diff --git a/pandas/core/frame.py b/pandas/core/frame.py
index 9b2540a1ce043..75fdeb122a074 100644
--- a/pandas/core/frame.py
+++ b/pandas/core/frame.py
@@ -2337,7 +2337,7 @@ def to_parquet(
be parsed by ``fsspec``, e.g., starting "s3://", "gcs://". An error
will be raised if providing this argument with a local path or
a file-like buffer. See the fsspec and backend storage implementation
- docs for the set of allowed keys and values
+ docs for the set of allowed keys and values.
.. versionadded:: 1.2.0
diff --git a/pandas/core/generic.py b/pandas/core/generic.py
index 6f0aa70625c1d..04e1fc91c5fd4 100644
--- a/pandas/core/generic.py
+++ b/pandas/core/generic.py
@@ -2253,7 +2253,7 @@ def to_json(
be parsed by ``fsspec``, e.g., starting "s3://", "gcs://". An error
will be raised if providing this argument with a local path or
a file-like buffer. See the fsspec and backend storage implementation
- docs for the set of allowed keys and values
+ docs for the set of allowed keys and values.
.. versionadded:: 1.2.0
@@ -2777,7 +2777,7 @@ def to_pickle(
be parsed by ``fsspec``, e.g., starting "s3://", "gcs://". An error
will be raised if providing this argument with a local path or
a file-like buffer. See the fsspec and backend storage implementation
- docs for the set of allowed keys and values
+ docs for the set of allowed keys and values.
.. versionadded:: 1.2.0
@@ -3286,7 +3286,7 @@ def to_csv(
be parsed by ``fsspec``, e.g., starting "s3://", "gcs://". An error
will be raised if providing this argument with a local path or
a file-like buffer. See the fsspec and backend storage implementation
- docs for the set of allowed keys and values
+ docs for the set of allowed keys and values.
.. versionadded:: 1.2.0
@@ -9195,7 +9195,7 @@ def shift(
extend the index when shifting and preserve the original data.
If `freq` is specified as "infer" then it will be inferred from
the freq or inferred_freq attributes of the index. If neither of
- those attributes exist, a ValueError is thrown
+ those attributes exist, a ValueError is thrown.
axis : {{0 or 'index', 1 or 'columns', None}}, default None
Shift direction.
fill_value : object, optional
diff --git a/pandas/core/groupby/groupby.py b/pandas/core/groupby/groupby.py
index f1a61f433fc51..887f50f8dbcd5 100644
--- a/pandas/core/groupby/groupby.py
+++ b/pandas/core/groupby/groupby.py
@@ -285,7 +285,7 @@ class providing the base-class of operations.
.. versionchanged:: 1.1.0
*args
- Positional arguments to pass to func
+ Positional arguments to pass to func.
engine : str, default None
* ``'cython'`` : Runs the function through C-extensions from cython.
* ``'numba'`` : Runs the function through JIT compiled code from numba.
@@ -394,7 +394,7 @@ class providing the base-class of operations.
.. versionchanged:: 1.1.0
*args
- Positional arguments to pass to func
+ Positional arguments to pass to func.
engine : str, default None
* ``'cython'`` : Runs the function through C-extensions from cython.
* ``'numba'`` : Runs the function through JIT compiled code from numba.
diff --git a/pandas/core/series.py b/pandas/core/series.py
index d2c702d924136..f2beea5bc7409 100644
--- a/pandas/core/series.py
+++ b/pandas/core/series.py
@@ -1458,7 +1458,7 @@ def to_markdown(
be parsed by ``fsspec``, e.g., starting "s3://", "gcs://". An error
will be raised if providing this argument with a local path or
a file-like buffer. See the fsspec and backend storage implementation
- docs for the set of allowed keys and values
+ docs for the set of allowed keys and values.
.. versionadded:: 1.2.0
diff --git a/pandas/io/json/_json.py b/pandas/io/json/_json.py
index 51bcb4acddd7e..ef684469dffbb 100644
--- a/pandas/io/json/_json.py
+++ b/pandas/io/json/_json.py
@@ -525,7 +525,7 @@ def read_json(
be parsed by ``fsspec``, e.g., starting "s3://", "gcs://". An error
will be raised if providing this argument with a local path or
a file-like buffer. See the fsspec and backend storage implementation
- docs for the set of allowed keys and values
+ docs for the set of allowed keys and values.
.. versionadded:: 1.2.0
diff --git a/pandas/io/pickle.py b/pandas/io/pickle.py
index 655deb5ca3779..80baa6f78ddd7 100644
--- a/pandas/io/pickle.py
+++ b/pandas/io/pickle.py
@@ -49,7 +49,7 @@ def to_pickle(
be parsed by ``fsspec``, e.g., starting "s3://", "gcs://". An error
will be raised if providing this argument with a local path or
a file-like buffer. See the fsspec and backend storage implementation
- docs for the set of allowed keys and values
+ docs for the set of allowed keys and values.
.. versionadded:: 1.2.0
@@ -146,7 +146,7 @@ def read_pickle(
be parsed by ``fsspec``, e.g., starting "s3://", "gcs://". An error
will be raised if providing this argument with a local path or
a file-like buffer. See the fsspec and backend storage implementation
- docs for the set of allowed keys and values
+ docs for the set of allowed keys and values.
.. versionadded:: 1.2.0
diff --git a/pandas/io/pytables.py b/pandas/io/pytables.py
index d62480baed71e..d0ea327a65a1d 100644
--- a/pandas/io/pytables.py
+++ b/pandas/io/pytables.py
@@ -615,8 +615,8 @@ def keys(self, include: str = "pandas") -> List[str]:
----------
include : str, default 'pandas'
- When kind equals 'pandas' return pandas objects
- When kind equals 'native' return native HDF5 Table objects
+ When kind equals 'pandas' return pandas objects.
+ When kind equals 'native' return native HDF5 Table objects.
.. versionadded:: 1.1.0
| - [x] closes #36764
- [x] tests added / passed
- [x] passes `black pandas`
- [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [ ] whatsnew entry
As far as I can see, there are some PR09 pandas docstring errors. So with this PR I will resolve them. There are more commits soon.
```
PR09
Parameter description should finish with "."
``` | https://api.github.com/repos/pandas-dev/pandas/pulls/36763 | 2020-10-01T06:13:25Z | 2020-10-02T17:34:28Z | 2020-10-02T17:34:27Z | 2020-10-03T01:43:39Z |
TST: insert 'match' to bare pytest raises in pandas/tests/indexing/te… | diff --git a/pandas/tests/indexing/test_chaining_and_caching.py b/pandas/tests/indexing/test_chaining_and_caching.py
index 1254f1f217a2e..1241d394d7936 100644
--- a/pandas/tests/indexing/test_chaining_and_caching.py
+++ b/pandas/tests/indexing/test_chaining_and_caching.py
@@ -155,10 +155,11 @@ def test_detect_chained_assignment(self):
)
assert df._is_copy is None
- with pytest.raises(com.SettingWithCopyError):
+ msg = "A value is trying to be set on a copy of a slice from a DataFrame"
+ with pytest.raises(com.SettingWithCopyError, match=msg):
df["A"][0] = -5
- with pytest.raises(com.SettingWithCopyError):
+ with pytest.raises(com.SettingWithCopyError, match=msg):
df["A"][1] = np.nan
assert df["A"]._is_copy is None
@@ -171,7 +172,7 @@ def test_detect_chained_assignment(self):
}
)
- with pytest.raises(com.SettingWithCopyError):
+ with pytest.raises(com.SettingWithCopyError, match=msg):
df.loc[0]["A"] = -5
# Doc example
@@ -183,17 +184,17 @@ def test_detect_chained_assignment(self):
)
assert df._is_copy is None
- with pytest.raises(com.SettingWithCopyError):
+ with pytest.raises(com.SettingWithCopyError, match=msg):
indexer = df.a.str.startswith("o")
df[indexer]["c"] = 42
expected = DataFrame({"A": [111, "bbb", "ccc"], "B": [1, 2, 3]})
df = DataFrame({"A": ["aaa", "bbb", "ccc"], "B": [1, 2, 3]})
- with pytest.raises(com.SettingWithCopyError):
+ with pytest.raises(com.SettingWithCopyError, match=msg):
df["A"][0] = 111
- with pytest.raises(com.SettingWithCopyError):
+ with pytest.raises(com.SettingWithCopyError, match=msg):
df.loc[0]["A"] = 111
df.loc[0, "A"] = 111
@@ -293,7 +294,7 @@ def random_text(nobs=100):
df = DataFrame(np.arange(0, 9), columns=["count"])
df["group"] = "b"
- with pytest.raises(com.SettingWithCopyError):
+ with pytest.raises(com.SettingWithCopyError, match=msg):
df.iloc[0:5]["group"] = "a"
# Mixed type setting but same dtype & changing dtype
@@ -306,13 +307,13 @@ def random_text(nobs=100):
)
)
- with pytest.raises(com.SettingWithCopyError):
+ with pytest.raises(com.SettingWithCopyError, match=msg):
df.loc[2]["D"] = "foo"
- with pytest.raises(com.SettingWithCopyError):
+ with pytest.raises(com.SettingWithCopyError, match=msg):
df.loc[2]["C"] = "foo"
- with pytest.raises(com.SettingWithCopyError):
+ with pytest.raises(com.SettingWithCopyError, match=msg):
df["C"][2] = "foo"
def test_setting_with_copy_bug(self):
@@ -340,8 +341,10 @@ def test_detect_chained_assignment_warnings_errors(self):
with option_context("chained_assignment", "warn"):
with tm.assert_produces_warning(com.SettingWithCopyWarning):
df.loc[0]["A"] = 111
+
+ msg = "A value is trying to be set on a copy of a slice from a DataFrame"
with option_context("chained_assignment", "raise"):
- with pytest.raises(com.SettingWithCopyError):
+ with pytest.raises(com.SettingWithCopyError, match=msg):
df.loc[0]["A"] = 111
def test_detect_chained_assignment_warnings_filter_and_dupe_cols(self):
| …st_chaining_and_caching.py
- [ ] ref https://github.com/pandas-dev/pandas/issues/30999
- [ ] tests added / passed
- [ ] passes `black pandas`
- [ ] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [ ] whatsnew entry
| https://api.github.com/repos/pandas-dev/pandas/pulls/36762 | 2020-10-01T05:47:52Z | 2020-10-01T10:54:27Z | 2020-10-01T10:54:26Z | 2020-10-01T10:54:48Z |
ENH: Implement IntegerArray reductions | diff --git a/doc/source/whatsnew/v1.2.0.rst b/doc/source/whatsnew/v1.2.0.rst
index e810fc0239b40..a686a1f2371d6 100644
--- a/doc/source/whatsnew/v1.2.0.rst
+++ b/doc/source/whatsnew/v1.2.0.rst
@@ -168,6 +168,7 @@ Other enhancements
- ``Styler`` now allows direct CSS class name addition to individual data cells (:issue:`36159`)
- :meth:`Rolling.mean()` and :meth:`Rolling.sum()` use Kahan summation to calculate the mean to avoid numerical problems (:issue:`10319`, :issue:`11645`, :issue:`13254`, :issue:`32761`, :issue:`36031`)
- :meth:`DatetimeIndex.searchsorted`, :meth:`TimedeltaIndex.searchsorted`, :meth:`PeriodIndex.searchsorted`, and :meth:`Series.searchsorted` with datetimelike dtypes will now try to cast string arguments (listlike and scalar) to the matching datetimelike type (:issue:`36346`)
+- Added methods :meth:`IntegerArray.prod`, :meth:`IntegerArray.min`, and :meth:`IntegerArray.max` (:issue:`33790`)
.. _whatsnew_120.api_breaking.python:
diff --git a/pandas/core/arrays/integer.py b/pandas/core/arrays/integer.py
index 04c4c73954671..af521a8efacc7 100644
--- a/pandas/core/arrays/integer.py
+++ b/pandas/core/arrays/integer.py
@@ -25,7 +25,6 @@
from pandas.core.dtypes.missing import isna
from pandas.core import ops
-from pandas.core.array_algos import masked_reductions
from pandas.core.ops import invalid_comparison
from pandas.core.ops.common import unpack_zerodim_and_defer
from pandas.core.tools.numeric import to_numeric
@@ -550,10 +549,19 @@ def cmp_method(self, other):
def sum(self, skipna=True, min_count=0, **kwargs):
nv.validate_sum((), kwargs)
- result = masked_reductions.sum(
- values=self._data, mask=self._mask, skipna=skipna, min_count=min_count
- )
- return result
+ return super()._reduce("sum", skipna=skipna, min_count=min_count)
+
+ def prod(self, skipna=True, min_count=0, **kwargs):
+ nv.validate_prod((), kwargs)
+ return super()._reduce("prod", skipna=skipna, min_count=min_count)
+
+ def min(self, skipna=True, **kwargs):
+ nv.validate_min((), kwargs)
+ return super()._reduce("min", skipna=skipna)
+
+ def max(self, skipna=True, **kwargs):
+ nv.validate_max((), kwargs)
+ return super()._reduce("max", skipna=skipna)
def _maybe_mask_result(self, result, mask, other, op_name: str):
"""
diff --git a/pandas/tests/arrays/integer/test_function.py b/pandas/tests/arrays/integer/test_function.py
index a81434339fdae..8f64c9c0900f1 100644
--- a/pandas/tests/arrays/integer/test_function.py
+++ b/pandas/tests/arrays/integer/test_function.py
@@ -115,8 +115,9 @@ def test_value_counts_empty():
@pytest.mark.parametrize("skipna", [True, False])
@pytest.mark.parametrize("min_count", [0, 4])
-def test_integer_array_sum(skipna, min_count):
- arr = pd.array([1, 2, 3, None], dtype="Int64")
+def test_integer_array_sum(skipna, min_count, any_nullable_int_dtype):
+ dtype = any_nullable_int_dtype
+ arr = pd.array([1, 2, 3, None], dtype=dtype)
result = arr.sum(skipna=skipna, min_count=min_count)
if skipna and min_count == 0:
assert result == 6
@@ -124,6 +125,31 @@ def test_integer_array_sum(skipna, min_count):
assert result is pd.NA
+@pytest.mark.parametrize("skipna", [True, False])
+@pytest.mark.parametrize("method", ["min", "max"])
+def test_integer_array_min_max(skipna, method, any_nullable_int_dtype):
+ dtype = any_nullable_int_dtype
+ arr = pd.array([0, 1, None], dtype=dtype)
+ func = getattr(arr, method)
+ result = func(skipna=skipna)
+ if skipna:
+ assert result == (0 if method == "min" else 1)
+ else:
+ assert result is pd.NA
+
+
+@pytest.mark.parametrize("skipna", [True, False])
+@pytest.mark.parametrize("min_count", [0, 9])
+def test_integer_array_prod(skipna, min_count, any_nullable_int_dtype):
+ dtype = any_nullable_int_dtype
+ arr = pd.array([1, 2, None], dtype=dtype)
+ result = arr.prod(skipna=skipna, min_count=min_count)
+ if skipna and min_count == 0:
+ assert result == 2
+ else:
+ assert result is pd.NA
+
+
@pytest.mark.parametrize(
"values, expected", [([1, 2, 3], 6), ([1, 2, 3, None], 6), ([None], 0)]
)
| - [x] ref #33790
- [x] tests added / passed
- [x] passes `black pandas`
- [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [x] whatsnew entry
I'm not sure if this is the "best" way to implement these reductions (see linked issue), but in any case I figure this is better than nothing since we can get these reductions pretty much for free. As far as I can tell FloatingArray can use the same machinery.
(I'm not able to pass on kwargs here because the masked reductions don't take them all, e.g. `axis`, and various tests break as a result.) | https://api.github.com/repos/pandas-dev/pandas/pulls/36761 | 2020-10-01T02:46:29Z | 2020-10-02T21:45:18Z | 2020-10-02T21:45:18Z | 2020-10-02T21:46:32Z |
BUG: inconsistent name-retention in Series ops | diff --git a/doc/source/whatsnew/v1.2.0.rst b/doc/source/whatsnew/v1.2.0.rst
index 2fe878897b2e7..aa59467947986 100644
--- a/doc/source/whatsnew/v1.2.0.rst
+++ b/doc/source/whatsnew/v1.2.0.rst
@@ -337,6 +337,7 @@ Numeric
- Bug in :class:`Series` where two :class:`Series` each have a :class:`DatetimeIndex` with different timezones having those indexes incorrectly changed when performing arithmetic operations (:issue:`33671`)
- Bug in :meth:`pd._testing.assert_almost_equal` was incorrect for complex numeric types (:issue:`28235`)
- Bug in :meth:`DataFrame.__rmatmul__` error handling reporting transposed shapes (:issue:`21581`)
+- Bug in :class:`Series` flex arithmetic methods where the result when operating with a ``list``, ``tuple`` or ``np.ndarray`` would have an incorrect name (:issue:`36760`)
- Bug in :class:`IntegerArray` multiplication with ``timedelta`` and ``np.timedelta64`` objects (:issue:`36870`)
Conversion
diff --git a/pandas/conftest.py b/pandas/conftest.py
index 998c45d82fb4d..ebb24c184d9a4 100644
--- a/pandas/conftest.py
+++ b/pandas/conftest.py
@@ -677,6 +677,43 @@ def all_arithmetic_operators(request):
return request.param
+@pytest.fixture(
+ params=[
+ operator.add,
+ ops.radd,
+ operator.sub,
+ ops.rsub,
+ operator.mul,
+ ops.rmul,
+ operator.truediv,
+ ops.rtruediv,
+ operator.floordiv,
+ ops.rfloordiv,
+ operator.mod,
+ ops.rmod,
+ operator.pow,
+ ops.rpow,
+ operator.eq,
+ operator.ne,
+ operator.lt,
+ operator.le,
+ operator.gt,
+ operator.ge,
+ operator.and_,
+ ops.rand_,
+ operator.xor,
+ ops.rxor,
+ operator.or_,
+ ops.ror_,
+ ]
+)
+def all_binary_operators(request):
+ """
+ Fixture for operator and roperator arithmetic, comparison, and logical ops.
+ """
+ return request.param
+
+
@pytest.fixture(
params=[
operator.add,
diff --git a/pandas/core/ops/__init__.py b/pandas/core/ops/__init__.py
index 36e3a0e37c1ae..cca0e40698ba2 100644
--- a/pandas/core/ops/__init__.py
+++ b/pandas/core/ops/__init__.py
@@ -310,9 +310,8 @@ def arith_method_SERIES(cls, op, special):
@unpack_zerodim_and_defer(op_name)
def wrapper(left, right):
-
- left, right = _align_method_SERIES(left, right)
res_name = get_op_result_name(left, right)
+ left, right = _align_method_SERIES(left, right)
lvalues = extract_array(left, extract_numpy=True)
rvalues = extract_array(right, extract_numpy=True)
@@ -361,8 +360,8 @@ def bool_method_SERIES(cls, op, special):
@unpack_zerodim_and_defer(op_name)
def wrapper(self, other):
- self, other = _align_method_SERIES(self, other, align_asobject=True)
res_name = get_op_result_name(self, other)
+ self, other = _align_method_SERIES(self, other, align_asobject=True)
lvalues = extract_array(self, extract_numpy=True)
rvalues = extract_array(other, extract_numpy=True)
@@ -385,13 +384,17 @@ def flex_wrapper(self, other, level=None, fill_value=None, axis=0):
if axis is not None:
self._get_axis_number(axis)
+ res_name = get_op_result_name(self, other)
+
if isinstance(other, ABCSeries):
return self._binop(other, op, level=level, fill_value=fill_value)
elif isinstance(other, (np.ndarray, list, tuple)):
if len(other) != len(self):
raise ValueError("Lengths must be equal")
other = self._constructor(other, self.index)
- return self._binop(other, op, level=level, fill_value=fill_value)
+ result = self._binop(other, op, level=level, fill_value=fill_value)
+ result.name = res_name
+ return result
else:
if fill_value is not None:
self = self.fillna(fill_value)
diff --git a/pandas/tests/series/test_arithmetic.py b/pandas/tests/series/test_arithmetic.py
index f30246ff12fac..09181201beee4 100644
--- a/pandas/tests/series/test_arithmetic.py
+++ b/pandas/tests/series/test_arithmetic.py
@@ -699,3 +699,45 @@ def test_datetime_understood(self):
result = series - offset
expected = pd.Series(pd.to_datetime(["2011-12-26", "2011-12-27", "2011-12-28"]))
tm.assert_series_equal(result, expected)
+
+
+@pytest.mark.parametrize(
+ "names",
+ [
+ ("foo", None, None),
+ ("Egon", "Venkman", None),
+ ("NCC1701D", "NCC1701D", "NCC1701D"),
+ ],
+)
+@pytest.mark.parametrize("box", [list, tuple, np.array, pd.Index, pd.Series, pd.array])
+@pytest.mark.parametrize("flex", [True, False])
+def test_series_ops_name_retention(flex, box, names, all_binary_operators):
+ # GH#33930 consistent name renteiton
+ op = all_binary_operators
+
+ if op is ops.rfloordiv and box in [list, tuple]:
+ pytest.xfail("op fails because of inconsistent ndarray-wrapping GH#28759")
+
+ left = pd.Series(range(10), name=names[0])
+ right = pd.Series(range(10), name=names[1])
+
+ right = box(right)
+ if flex:
+ name = op.__name__.strip("_")
+ if name in ["and", "rand", "xor", "rxor", "or", "ror"]:
+ # Series doesn't have these as flex methods
+ return
+ result = getattr(left, name)(right)
+ else:
+ result = op(left, right)
+
+ if box is pd.Index and op.__name__.strip("_") in ["rxor", "ror", "rand"]:
+ # Index treats these as set operators, so does not defer
+ assert isinstance(result, pd.Index)
+ return
+
+ assert isinstance(result, Series)
+ if box in [pd.Index, pd.Series]:
+ assert result.name == names[2]
+ else:
+ assert result.name == names[0]
| - [x] closes #33930
- [x] tests added / passed
- [x] passes `black pandas`
- [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [ ] whatsnew entry
As a follow-up I think we can remove a bunch of name-based tests from tests.arithmetic. | https://api.github.com/repos/pandas-dev/pandas/pulls/36760 | 2020-10-01T02:14:15Z | 2020-10-07T15:00:01Z | 2020-10-07T15:00:01Z | 2020-10-07T15:00:09Z |
ENH: allow for top and mid-level assignment to DataFrames with MultIndex columns #7475 | diff --git a/pandas/core/frame.py b/pandas/core/frame.py
index f2e833dfe7790..73b5554918da4 100644
--- a/pandas/core/frame.py
+++ b/pandas/core/frame.py
@@ -3189,10 +3189,20 @@ def _setitem_array(self, key, value):
self.iloc[indexer] = value
else:
if isinstance(value, DataFrame):
- if len(value.columns) != len(key):
- raise ValueError("Columns must be same length as key")
- for k1, k2 in zip(key, value.columns):
- self[k1] = value[k2]
+ columns = value.columns
+ if len(columns) == len(key):
+ for k1, k2 in zip(key, columns):
+ self[k1] = value[k2]
+ elif isinstance(columns, MultiIndex):
+ levels0 = columns.levels[0]
+ if len(levels0) == len(key):
+ for k1, k2 in zip(key, levels0):
+ self[k1] = value[k2]
+ else:
+ raise ValueError(
+ "Key must be same length as columns or top level of "
+ "MultiIndex"
+ )
else:
self.loc._ensure_listlike_indexer(key, axis=1, value=value)
indexer = self.loc._get_listlike_indexer(
@@ -3221,19 +3231,42 @@ def _setitem_frame(self, key, value):
def _set_item_frame_value(self, key, value: "DataFrame") -> None:
self._ensure_valid_index(value)
- # align right-hand-side columns if self.columns
- # is multi-index and self[key] is a sub-frame
- if isinstance(self.columns, MultiIndex) and key in self.columns:
- loc = self.columns.get_loc(key)
- if isinstance(loc, (slice, Series, np.ndarray, Index)):
- cols = maybe_droplevels(self.columns[loc], key)
- if len(cols) and not cols.equals(value.columns):
- value = value.reindex(cols, axis=1)
-
- # now align rows
- value = _reindex_for_setitem(value, self.index)
- value = value.T
- self._set_item_mgr(key, value)
+ # standardized key info
+ key_tup = key if isinstance(key, tuple) else (key,)
+ key_len = len(key_tup)
+
+ if key in self.columns or key_len == self.columns.nlevels:
+ # align right-hand-side columns if self.columns
+ # is multi-index and self[key] is a sub-frame
+ if isinstance(self.columns, MultiIndex) and key in self.columns:
+ loc = self.columns.get_loc(key)
+ if isinstance(loc, (slice, Series, np.ndarray, Index)):
+ cols = maybe_droplevels(self.columns[loc], key)
+ if len(cols) and not cols.equals(value.columns):
+ value = value.reindex(cols, axis=1)
+
+ # now align rows
+ value = _reindex_for_setitem(value, self.index)
+ value = value.T
+ self._set_item_mgr(key, value)
+ else:
+ if key_len + value.columns.nlevels != self.columns.nlevels:
+ raise ValueError(
+ "Must pass key/value pair that conforms with number of column "
+ "levels"
+ )
+
+ # fill out keys as necessary
+ if value.columns.nlevels > 1:
+ key_list = [key_tup + i for i in value.columns]
+ else:
+ key_list = [key_tup + (i,) for i in value.columns]
+ items = MultiIndex.from_tuples(key_list)
+
+ # align and append block
+ value = _reindex_for_setitem(value, self.index)
+ value = value.T
+ self._mgr.append_block(items, value)
def _iset_item_mgr(self, loc: int, value) -> None:
self._mgr.iset(loc, value)
diff --git a/pandas/core/internals/managers.py b/pandas/core/internals/managers.py
index e939c43015aed..fd2e5f2ee0b7b 100644
--- a/pandas/core/internals/managers.py
+++ b/pandas/core/internals/managers.py
@@ -1232,6 +1232,26 @@ def insert(self, loc: int, item: Label, value, allow_duplicates: bool = False):
stacklevel=5,
)
+ def append_block(self, items, values):
+ base, size = len(self.items), len(items)
+
+ new_axis = self.items.append(items)
+ block = make_block(
+ values=values, ndim=self.ndim, placement=slice(base, base + size)
+ )
+
+ blk_no = len(self.blocks)
+ self._blklocs = np.append(self.blklocs, range(size))
+ self._blknos = np.append(self.blknos, size * (blk_no,))
+
+ self.axes[0] = new_axis
+ self.blocks += (block,)
+
+ self._known_consolidated = False
+
+ if len(self.blocks) > 100:
+ self._consolidate_inplace()
+
def reindex_axis(
self,
new_index,
diff --git a/pandas/tests/frame/indexing/test_indexing.py b/pandas/tests/frame/indexing/test_indexing.py
index 49eb570c4ffe0..93f4d25e6bab5 100644
--- a/pandas/tests/frame/indexing/test_indexing.py
+++ b/pandas/tests/frame/indexing/test_indexing.py
@@ -119,7 +119,7 @@ def test_setitem_list(self, float_frame):
tm.assert_series_equal(float_frame["B"], data["A"], check_names=False)
tm.assert_series_equal(float_frame["A"], data["B"], check_names=False)
- msg = "Columns must be same length as key"
+ msg = "Key must be same length as columns or top level of MultiIndex"
with pytest.raises(ValueError, match=msg):
data[["A"]] = float_frame[["A", "B"]]
newcolumndata = range(len(data.index) - 1)
diff --git a/pandas/tests/indexing/multiindex/test_multiindex.py b/pandas/tests/indexing/multiindex/test_multiindex.py
index 9a3039c28416c..013ab7c36d724 100644
--- a/pandas/tests/indexing/multiindex/test_multiindex.py
+++ b/pandas/tests/indexing/multiindex/test_multiindex.py
@@ -1,4 +1,5 @@
import numpy as np
+import pytest
import pandas._libs.index as _index
from pandas.errors import PerformanceWarning
@@ -93,3 +94,102 @@ def test_multiindex_with_datatime_level_preserves_freq(self):
result = df.loc[0].index
tm.assert_index_equal(result, dti)
assert result.freq == dti.freq
+
+ def test_multiindex_get_loc_list_raises(self):
+ # https://github.com/pandas-dev/pandas/issues/35878
+ idx = pd.MultiIndex.from_tuples([("a", 1), ("b", 2)])
+ msg = "unhashable type"
+ with pytest.raises(TypeError, match=msg):
+ idx.get_loc([])
+
+ def test_multiindex_frame_assign(self):
+ df0 = DataFrame({"a": [0, 1, 2, 3], "b": [3, 4, 5, 6]})
+ df1 = pd.concat({"x": df0, "y": df0}, axis=1)
+ df2 = pd.concat({"q": df1, "r": df1}, axis=1)
+
+ # level one assign
+ result = df2.copy()
+ result["m"] = result["q"] + result["r"]
+ expected = pd.concat({"q": df1, "r": df1, "m": 2 * df1}, axis=1)
+ tm.assert_frame_equal(result, expected)
+
+ # level one assign - multiple
+ result = df2.copy()
+ result[["m", "n"]] = 2 * result[["q", "r"]]
+ expected = pd.concat({"q": df1, "r": df1, "m": 2 * df1, "n": 2 * df1}, axis=1)
+ tm.assert_frame_equal(result, expected)
+
+ # level two assign
+ result = df2.copy()
+ result["m", "x"] = df2["q", "x"] + df2["q", "y"]
+ expected = pd.concat(
+ {"q": df1, "r": df1, "m": pd.concat({"x": 2 * df0}, axis=1)}, axis=1
+ )
+ tm.assert_frame_equal(result, expected)
+
+ # level two assign - multiple (seems like getitem is not caught up here)
+ result = df2.copy()
+ result[[("m", "x"), ("n", "y")]] = 2 * df2["q"]
+ expected = pd.concat(
+ {
+ "q": df1,
+ "r": df1,
+ "m": pd.concat({"x": 2 * df0}, axis=1),
+ "n": pd.concat({"y": 2 * df0}, axis=1),
+ },
+ axis=1,
+ )
+ tm.assert_frame_equal(result, expected)
+
+ # level three assign
+ result = df2.copy()
+ result["m", "x", "a"] = df2["q", "x", "a"] + df2["q", "x", "b"]
+ expected = pd.concat(
+ {
+ "q": df1,
+ "r": df1,
+ "m": pd.concat(
+ {"x": pd.concat({"a": df0["a"] + df0["b"]}, axis=1)}, axis=1
+ ),
+ },
+ axis=1,
+ )
+ tm.assert_frame_equal(result, expected)
+
+ # level three assign - multiple
+ result = df2.copy()
+ result[[("m", "x", "a"), ("n", "y", "b")]] = 2 * df2["q", "x"]
+ expected = pd.concat(
+ {
+ "q": df1,
+ "r": df1,
+ "m": pd.concat({"x": pd.concat({"a": 2 * df0["a"]}, axis=1)}, axis=1),
+ "n": pd.concat({"y": pd.concat({"b": 2 * df0["b"]}, axis=1)}, axis=1),
+ },
+ axis=1,
+ )
+ tm.assert_frame_equal(result, expected)
+
+ # invalid usage
+ msg = "Must pass key/value pair that conforms with number of column levels"
+ msg2 = "Wrong number of items passed 2, placement implies 1"
+
+ # too few levels at level one
+ with pytest.raises(ValueError, match=msg):
+ df2["m"] = df0
+
+ # too few levels at level two - this appears to be desired
+ # with pytest.raises(ValueError, match=msg):
+ # df2["m", "x"] = df0["a"]
+
+ # too many levels at level one
+ with pytest.raises(ValueError, match=msg):
+ df2["m"] = df2
+
+ # too many levels at level two
+ with pytest.raises(ValueError, match=msg):
+ df2["m", "x"] = df1
+
+ # too many levels at level three
+ with pytest.raises(ValueError, match=msg2):
+ df2["m", "x", "a"] = df0
| - [x] closes #7475 (related to #35727)
- [x] tests added / passed
- [x] passes `black pandas`
- [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [ ] whatsnew entry
This allows for one to assign to the top and mid-levels of DataFrame columns. To do so, it adds another fallback execution path to `__setitem__`. The logic is:
- In `__setitem__`, if the other existing paths don't apply, there is a single key, and the columns are a MultiIndex, pass to `_setitem_multilevel`. Here, assign if the key exists in the columns. Otherwise, use the new function `append_block` to assign multiple columns at once.
- In the existing `_setitem_array`, as a fallback, if the length of the assigning key equals the length of the top level of the value columns, recurse these back onto `__setitem__`. This allows for higher level list assignments.
I've also added some tests of the functionality in various cases. Any comments much appreciated. | https://api.github.com/repos/pandas-dev/pandas/pulls/36755 | 2020-09-30T23:10:34Z | 2021-10-04T00:10:48Z | null | 2021-10-04T00:10:48Z |
ENH: Add headers paramater to read_json and read_csv | diff --git a/pandas/io/common.py b/pandas/io/common.py
index c147ae9fd0aa8..05542d596e961 100644
--- a/pandas/io/common.py
+++ b/pandas/io/common.py
@@ -148,9 +148,9 @@ def urlopen(*args, **kwargs):
Lazy-import wrapper for stdlib urlopen, as that imports a big chunk of
the stdlib.
"""
- import urllib.request
+ from urllib.request import Request, urlopen as _urlopen
- return urllib.request.urlopen(*args, **kwargs)
+ return _urlopen(Request(*args, **kwargs))
def is_fsspec_url(url: FilePathOrBuffer) -> bool:
@@ -176,6 +176,7 @@ def get_filepath_or_buffer(
compression: CompressionOptions = None,
mode: ModeVar = None, # type: ignore[assignment]
storage_options: StorageOptions = None,
+ headers: Dict[str, Any] = {},
) -> IOargs[ModeVar, EncodingVar]:
"""
If the filepath_or_buffer is a url, translate and return the buffer.
@@ -251,7 +252,7 @@ def get_filepath_or_buffer(
raise ValueError(
"storage_options passed with file object or non-fsspec file path"
)
- req = urlopen(filepath_or_buffer)
+ req = urlopen(filepath_or_buffer, headers=headers)
content_encoding = req.headers.get("Content-Encoding", None)
if content_encoding == "gzip":
# Override compression based on Content-Encoding header
diff --git a/pandas/io/json/_json.py b/pandas/io/json/_json.py
index ef684469dffbb..e75e8d166215e 100644
--- a/pandas/io/json/_json.py
+++ b/pandas/io/json/_json.py
@@ -3,7 +3,7 @@
from io import BytesIO, StringIO
from itertools import islice
import os
-from typing import IO, Any, Callable, List, Optional, Type
+from typing import IO, Any, Callable, Dict, List, Optional, Type
import numpy as np
@@ -377,6 +377,7 @@ def read_json(
compression: CompressionOptions = "infer",
nrows: Optional[int] = None,
storage_options: StorageOptions = None,
+ headers: Dict[str, Any] = {},
):
"""
Convert a JSON string to pandas object.
@@ -527,6 +528,10 @@ def read_json(
a file-like buffer. See the fsspec and backend storage implementation
docs for the set of allowed keys and values.
+ headers : dict, optional
+ HTTP headers that are passed to urlopen. Allows to specify the User-Agent
+ in case the urllib User-Agent is blocked for example
+
.. versionadded:: 1.2.0
Returns
@@ -614,6 +619,7 @@ def read_json(
encoding=encoding,
compression=compression,
storage_options=storage_options,
+ headers=headers,
)
json_reader = JsonReader(
diff --git a/pandas/io/parsers.py b/pandas/io/parsers.py
index dd3588faedf7a..7db96657f6b8f 100644
--- a/pandas/io/parsers.py
+++ b/pandas/io/parsers.py
@@ -352,6 +352,10 @@
a file-like buffer. See the fsspec and backend storage implementation
docs for the set of allowed keys and values.
+headers : dict, optional
+ HTTP headers that are passed to urlopen. Allows to specify the User-Agent
+ in case the urllib User-Agent is blocked for example
+
.. versionadded:: 1.2
Returns
@@ -432,9 +436,14 @@ def _read(filepath_or_buffer: FilePathOrBuffer, kwds):
encoding = re.sub("_", "-", encoding).lower()
kwds["encoding"] = encoding
compression = kwds.get("compression", "infer")
+ headers = kwds.get("headers", {})
ioargs = get_filepath_or_buffer(
- filepath_or_buffer, encoding, compression, storage_options=storage_options
+ filepath_or_buffer,
+ encoding,
+ compression,
+ storage_options=storage_options,
+ headers=headers,
)
kwds["compression"] = ioargs.compression
@@ -599,6 +608,7 @@ def read_csv(
memory_map=False,
float_precision=None,
storage_options: StorageOptions = None,
+ headers: Dict[str, Any] = {},
):
# gh-23761
#
@@ -686,6 +696,7 @@ def read_csv(
infer_datetime_format=infer_datetime_format,
skip_blank_lines=skip_blank_lines,
storage_options=storage_options,
+ headers=headers,
)
return _read(filepath_or_buffer, kwds)
diff --git a/pandas/tests/io/test_common.py b/pandas/tests/io/test_common.py
index ede8d61490778..d216024b2fa33 100644
--- a/pandas/tests/io/test_common.py
+++ b/pandas/tests/io/test_common.py
@@ -417,3 +417,11 @@ def test_is_fsspec_url():
assert not icom.is_fsspec_url("random:pandas/somethingelse.com")
assert not icom.is_fsspec_url("/local/path")
assert not icom.is_fsspec_url("relative/local/path")
+
+
+def test_urlopen_headers():
+ headers = {"User-Agent": "Pandas 1.1.0"}
+ # this returns the User-Agent
+ url = "http://ifconfig.me/ua"
+ r = icom.urlopen(url, headers=headers)
+ assert r.read().decode("utf-8") == headers["User-Agent"]
| - [x] closes #36688
- [x] tests added / passed
- [x] passes `black pandas`
- [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [ ] whatsnew entry
This adds the option to specify headers when reading a csv or a json file from an URL in Python3.
Let me know if new tests are needed. | https://api.github.com/repos/pandas-dev/pandas/pulls/36754 | 2020-09-30T22:03:49Z | 2020-11-27T18:43:36Z | null | 2020-11-27T18:43:36Z |
BUG: Segfault with string Index when using Rolling after Groupby | diff --git a/pandas/core/window/rolling.py b/pandas/core/window/rolling.py
index 6ab42dda865e7..6cad9cdd6528f 100644
--- a/pandas/core/window/rolling.py
+++ b/pandas/core/window/rolling.py
@@ -397,7 +397,7 @@ def _insert_on_column(self, result: "DataFrame", obj: "DataFrame"):
if self.on is not None and not self._on.equals(obj.index):
name = self._on.name
- extra_col = Series(self._on, index=obj.index, name=name)
+ extra_col = Series(self._on, index=self.obj.index, name=name)
if name in result.columns:
# TODO: sure we want to overwrite results?
result[name] = extra_col
@@ -2268,7 +2268,7 @@ def _get_window_indexer(self, window: int) -> GroupbyRollingIndexer:
"""
rolling_indexer: Type[BaseIndexer]
indexer_kwargs: Optional[Dict] = None
- index_array = self.obj.index.asi8
+ index_array = self._on.asi8
if isinstance(self.window, BaseIndexer):
rolling_indexer = type(self.window)
indexer_kwargs = self.window.__dict__
diff --git a/pandas/tests/window/test_grouper.py b/pandas/tests/window/test_grouper.py
index 0eebd657e97b7..b93f9d5076b61 100644
--- a/pandas/tests/window/test_grouper.py
+++ b/pandas/tests/window/test_grouper.py
@@ -416,3 +416,32 @@ def test_groupby_rolling_empty_frame(self):
result = expected.groupby(["s1", "s2"]).rolling(window=1).sum()
expected.index = pd.MultiIndex.from_tuples([], names=["s1", "s2", None])
tm.assert_frame_equal(result, expected)
+
+ def test_groupby_rolling_string_index(self):
+ # GH: 36727
+ df = pd.DataFrame(
+ [
+ ["A", "group_1", pd.Timestamp(2019, 1, 1, 9)],
+ ["B", "group_1", pd.Timestamp(2019, 1, 2, 9)],
+ ["Z", "group_2", pd.Timestamp(2019, 1, 3, 9)],
+ ["H", "group_1", pd.Timestamp(2019, 1, 6, 9)],
+ ["E", "group_2", pd.Timestamp(2019, 1, 20, 9)],
+ ],
+ columns=["index", "group", "eventTime"],
+ ).set_index("index")
+
+ groups = df.groupby("group")
+ df["count_to_date"] = groups.cumcount()
+ rolling_groups = groups.rolling("10d", on="eventTime")
+ result = rolling_groups.apply(lambda df: df.shape[0])
+ expected = pd.DataFrame(
+ [
+ ["A", "group_1", pd.Timestamp(2019, 1, 1, 9), 1.0],
+ ["B", "group_1", pd.Timestamp(2019, 1, 2, 9), 2.0],
+ ["H", "group_1", pd.Timestamp(2019, 1, 6, 9), 3.0],
+ ["Z", "group_2", pd.Timestamp(2019, 1, 3, 9), 1.0],
+ ["E", "group_2", pd.Timestamp(2019, 1, 20, 9), 1.0],
+ ],
+ columns=["index", "group", "eventTime", "count_to_date"],
+ ).set_index(["group", "index"])
+ tm.assert_frame_equal(result, expected)
| - [x] closes #36727
- [x] tests added / passed
- [x] passes `black pandas`
- [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [ ] whatsnew entry
The segfault was caused by ``self.obj.index.asi8=None`` when Index is a string Index. ``self._on.asi8`` solves that issue.
Additionally I noticed, that ``obj.index`` was already sorted, so the insert of ``extra_col`` mixed up the order. We should use ``self.obj.index``.
I will add a whats new after #36689 is merged.
cc @mroeschke | https://api.github.com/repos/pandas-dev/pandas/pulls/36753 | 2020-09-30T21:33:33Z | 2020-10-06T18:04:01Z | 2020-10-06T18:04:00Z | 2020-10-10T23:35:12Z |
REF: simplify info.py | diff --git a/pandas/core/frame.py b/pandas/core/frame.py
index 801307a8f9481..244423a1dd4e3 100644
--- a/pandas/core/frame.py
+++ b/pandas/core/frame.py
@@ -2604,7 +2604,7 @@ def to_html(
DataFrame.memory_usage: Memory usage of DataFrame columns."""
),
)
- @doc(DataFrameInfo.info)
+ @doc(DataFrameInfo.to_buffer)
def info(
self,
verbose: Optional[bool] = None,
@@ -2613,9 +2613,16 @@ def info(
memory_usage: Optional[Union[bool, str]] = None,
null_counts: Optional[bool] = None,
) -> None:
- return DataFrameInfo(
- self, verbose, buf, max_cols, memory_usage, null_counts
- ).info()
+ info = DataFrameInfo(
+ data=self,
+ memory_usage=memory_usage,
+ )
+ info.to_buffer(
+ buf=buf,
+ max_cols=max_cols,
+ verbose=verbose,
+ show_counts=null_counts,
+ )
def memory_usage(self, index=True, deep=False) -> Series:
"""
diff --git a/pandas/io/formats/info.py b/pandas/io/formats/info.py
index a57fda7472878..891b3ea7af0e2 100644
--- a/pandas/io/formats/info.py
+++ b/pandas/io/formats/info.py
@@ -1,6 +1,6 @@
-from abc import ABCMeta, abstractmethod
+from abc import ABC, abstractmethod
import sys
-from typing import IO, TYPE_CHECKING, List, Optional, Tuple, Union
+from typing import IO, TYPE_CHECKING, Iterator, List, Mapping, Optional, Sequence, Union
from pandas._config import get_option
@@ -12,6 +12,7 @@
from pandas.io.formats.printing import pprint_thing
if TYPE_CHECKING:
+ from pandas.core.frame import DataFrame
from pandas.core.series import Series
@@ -72,92 +73,148 @@ def _sizeof_fmt(num: Union[int, float], size_qualifier: str) -> str:
return f"{num:3.1f}{size_qualifier} PB"
-class BaseInfo(metaclass=ABCMeta):
+def _initialize_memory_usage(
+ memory_usage: Optional[Union[bool, str]] = None,
+) -> Union[bool, str]:
+ """Get memory usage based on inputs and display options."""
+ if memory_usage is None:
+ memory_usage = get_option("display.memory_usage")
+ return memory_usage
+
+
+class BaseInfo(ABC):
+ """Base class for DataFrameInfo and SeriesInfo.
+
+ Parameters
+ ----------
+ data : FrameOrSeries
+ Either dataframe or series.
+ memory_usage : bool or str, optional
+ If "deep", introspect the data deeply by interrogating object dtypes
+ for system-level memory consumption, and include it in the returned
+ values.
+ """
+
def __init__(
self,
data: FrameOrSeries,
- verbose: Optional[bool] = None,
- buf: Optional[IO[str]] = None,
- max_cols: Optional[int] = None,
memory_usage: Optional[Union[bool, str]] = None,
- null_counts: Optional[bool] = None,
):
- if buf is None: # pragma: no cover
- buf = sys.stdout
- if memory_usage is None:
- memory_usage = get_option("display.memory_usage")
-
self.data = data
- self.verbose = verbose
- self.buf = buf
- self.max_cols = max_cols
- self.memory_usage = memory_usage
- self.null_counts = null_counts
+ self.memory_usage = _initialize_memory_usage(memory_usage)
+
+ @property
+ @abstractmethod
+ def ids(self) -> Index:
+ """Column names or index names."""
+ @property
@abstractmethod
- def _get_mem_usage(self, deep: bool) -> int:
+ def dtype_counts(self) -> Mapping[str, int]:
+ """Mapping dtype - number of counts."""
+
+ @property
+ @abstractmethod
+ def non_null_counts(self) -> Sequence[int]:
+ """Sequence of non-null counts for all columns or column (if series)."""
+
+ @property
+ @abstractmethod
+ def dtypes(self) -> "Series":
+ """Dtypes.
+
+ Returns
+ -------
+ dtypes : Series
+ Dtype of each of the DataFrame's columns.
"""
- Get memory usage in bytes.
+ return self.data.dtypes
- Parameters
- ----------
- deep : bool
- If True, introspect the data deeply by interrogating object dtypes
- for system-level memory consumption, and include it in the returned
- values.
+ @property
+ def memory_usage_bytes(self) -> int:
+ """Memory usage in bytes.
Returns
-------
- mem_usage : int
+ memory_usage_bytes : int
Object's total memory usage in bytes.
"""
+ if self.memory_usage == "deep":
+ deep = True
+ else:
+ deep = False
+ return self.data.memory_usage(index=True, deep=deep).sum()
- @abstractmethod
- def _get_ids_and_dtypes(self) -> Tuple["Index", "Series"]:
- """
- Get column names and dtypes.
+ @property
+ def memory_usage_string(self) -> str:
+ """Memory usage in a form of human readable string."""
+ return f"{_sizeof_fmt(self.memory_usage_bytes, self.size_qualifier)}\n"
+
+ @property
+ def size_qualifier(self) -> str:
+ size_qualifier = ""
+ if self.memory_usage:
+ if self.memory_usage != "deep":
+ # size_qualifier is just a best effort; not guaranteed to catch
+ # all cases (e.g., it misses categorical data even with object
+ # categories)
+ if (
+ "object" in self.dtype_counts
+ or self.data.index._is_memory_usage_qualified()
+ ):
+ size_qualifier = "+"
+ return size_qualifier
+
+
+class DataFrameInfo(BaseInfo):
+ """Class storing dataframe-specific info."""
+
+ @property
+ def ids(self) -> Index:
+ """Column names.
Returns
-------
ids : Index
DataFrame's column names.
- dtypes : Series
- Dtype of each of the DataFrame's columns.
"""
+ return self.data.columns
- @abstractmethod
- def _verbose_repr(
- self, lines: List[str], ids: "Index", dtypes: "Series", show_counts: bool
- ) -> None:
- """
- Append name, non-null count (optional), and dtype for each column to `lines`.
+ @property
+ def dtypes(self) -> "Series":
+ """Dtypes.
- Parameters
- ----------
- lines : List[str]
- Lines that will contain `info` representation.
- ids : Index
- The DataFrame's column names.
+ Returns
+ -------
dtypes : Series
- The DataFrame's columns' dtypes.
- show_counts : bool
- If True, count of non-NA cells for each column will be appended to `lines`.
+ Dtype of each of the DataFrame's columns.
"""
+ return self.data.dtypes
- @abstractmethod
- def _non_verbose_repr(self, lines: List[str], ids: "Index") -> None:
- """
- Append short summary of columns' names to `lines`.
+ @property
+ def dtype_counts(self) -> Mapping[str, int]:
+ """Mapping dtype - number of counts."""
+ # groupby dtype.name to collect e.g. Categorical columns
+ return self.dtypes.value_counts().groupby(lambda x: x.name).sum()
- Parameters
- ----------
- lines : List[str]
- Lines that will contain `info` representation.
- ids : Index
- The DataFrame's column names.
- """
+ @property
+ def non_null_counts(self) -> Sequence[int]:
+ """Sequence of non-null counts for all columns."""
+ return self.data.count()
+
+ @property
+ def col_count(self) -> int:
+ """Number of columns to be summarized."""
+ return len(self.ids)
- def info(self) -> None:
+ def to_buffer(
+ self,
+ *,
+ buf: Optional[IO[str]],
+ max_cols: Optional[int],
+ verbose: Optional[bool],
+ show_counts: Optional[bool],
+ ) -> None:
"""
Print a concise summary of a %(klass)s.
@@ -209,151 +266,359 @@ def info(self) -> None:
--------
%(examples_sub)s
"""
- lines = []
+ printer = InfoPrinter(
+ info=self,
+ max_cols=max_cols,
+ verbose=verbose,
+ show_counts=show_counts,
+ )
+ printer.to_buffer(buf)
- lines.append(str(type(self.data)))
- lines.append(self.data.index._summary())
- ids, dtypes = self._get_ids_and_dtypes()
- col_count = len(ids)
+class InfoPrinter:
+ """Class for printing dataframe or series info.
- if col_count == 0:
- lines.append(f"Empty {type(self.data).__name__}")
- fmt.buffer_put_lines(self.buf, lines)
- return
+ Parameters
+ ----------
+ info : DataFrameInfo
+ Instance of DataFrameInfo.
+ max_cols : int, optional
+ When to switch from the verbose to the truncated output.
+ verbose : bool, optional
+ Whether to print the full summary.
+ show_counts : bool, optional
+ Whether to show the non-null counts.
+ """
- # hack
- max_cols = self.max_cols
+ def __init__(
+ self,
+ info: DataFrameInfo,
+ max_cols: Optional[int] = None,
+ verbose: Optional[bool] = None,
+ show_counts: Optional[bool] = None,
+ ):
+ self.info = info
+ self.data = info.data
+ self.verbose = verbose
+ self.max_cols = self._initialize_max_cols(max_cols)
+ self.show_counts = self._initialize_show_counts(show_counts)
+
+ @property
+ def max_rows(self) -> int:
+ """Maximum info rows to be displayed."""
+ return get_option("display.max_info_rows", len(self.data) + 1)
+
+ @property
+ def exceeds_info_cols(self) -> bool:
+ """Check if number of columns to be summarized does not exceed maximum."""
+ return bool(self.col_count > self.max_cols)
+
+ @property
+ def exceeds_info_rows(self) -> bool:
+ """Check if number of rows to be summarized does not exceed maximum."""
+ return bool(len(self.data) > self.max_rows)
+
+ @property
+ def col_count(self) -> int:
+ """Number of columns to be summarized."""
+ return self.info.col_count
+
+ def _initialize_max_cols(self, max_cols: Optional[int]) -> int:
if max_cols is None:
- max_cols = get_option("display.max_info_columns", col_count + 1)
-
- max_rows = get_option("display.max_info_rows", len(self.data) + 1)
+ return get_option("display.max_info_columns", self.col_count + 1)
+ return max_cols
- if self.null_counts is None:
- show_counts = (col_count <= max_cols) and (len(self.data) < max_rows)
+ def _initialize_show_counts(self, show_counts: Optional[bool]) -> bool:
+ if show_counts is None:
+ return bool(not self.exceeds_info_cols and not self.exceeds_info_rows)
else:
- show_counts = self.null_counts
- exceeds_info_cols = col_count > max_cols
+ return show_counts
+
+ def to_buffer(self, buf: Optional[IO[str]] = None) -> None:
+ """Save dataframe info into buffer."""
+ table_builder = self._create_table_builder()
+ lines = table_builder.get_lines()
+ if buf is None: # pragma: no cover
+ buf = sys.stdout
+ fmt.buffer_put_lines(buf, lines)
+ def _create_table_builder(self) -> "DataFrameTableBuilder":
+ """
+ Create instance of table builder based on verbosity and display settings.
+ """
if self.verbose:
- self._verbose_repr(lines, ids, dtypes, show_counts)
+ return DataFrameTableBuilderVerbose(
+ info=self.info,
+ with_counts=self.show_counts,
+ )
elif self.verbose is False: # specifically set to False, not necessarily None
- self._non_verbose_repr(lines, ids)
+ return DataFrameTableBuilderNonVerbose(info=self.info)
else:
- if exceeds_info_cols:
- self._non_verbose_repr(lines, ids)
+ if self.exceeds_info_cols:
+ return DataFrameTableBuilderNonVerbose(info=self.info)
else:
- self._verbose_repr(lines, ids, dtypes, show_counts)
+ return DataFrameTableBuilderVerbose(
+ info=self.info,
+ with_counts=self.show_counts,
+ )
- # groupby dtype.name to collect e.g. Categorical columns
- counts = dtypes.value_counts().groupby(lambda x: x.name).sum()
- collected_dtypes = [f"{k[0]}({k[1]:d})" for k in sorted(counts.items())]
- lines.append(f"dtypes: {', '.join(collected_dtypes)}")
- if self.memory_usage:
- # append memory usage of df to display
- size_qualifier = ""
- if self.memory_usage == "deep":
- deep = True
- else:
- # size_qualifier is just a best effort; not guaranteed to catch
- # all cases (e.g., it misses categorical data even with object
- # categories)
- deep = False
- if "object" in counts or self.data.index._is_memory_usage_qualified():
- size_qualifier = "+"
- mem_usage = self._get_mem_usage(deep=deep)
- lines.append(f"memory usage: {_sizeof_fmt(mem_usage, size_qualifier)}\n")
- fmt.buffer_put_lines(self.buf, lines)
+class TableBuilderAbstract(ABC):
+ """Abstract builder for info table.
+ Parameters
+ ----------
+ info : BaseInfo
+ Instance of DataFrameInfo or SeriesInfo.
+ """
-class DataFrameInfo(BaseInfo):
- def _get_mem_usage(self, deep: bool) -> int:
- return self.data.memory_usage(index=True, deep=deep).sum()
+ _lines: List[str]
- def _get_ids_and_dtypes(self) -> Tuple["Index", "Series"]:
- return self.data.columns, self.data.dtypes
+ def __init__(self, *, info):
+ self.info = info
- def _verbose_repr(
- self, lines: List[str], ids: "Index", dtypes: "Series", show_counts: bool
- ) -> None:
- col_count = len(ids)
- lines.append(f"Data columns (total {col_count} columns):")
-
- id_head = " # "
- column_head = "Column"
- col_space = 2
-
- max_col = max(len(pprint_thing(k)) for k in ids)
- len_column = len(pprint_thing(column_head))
- space = max(max_col, len_column) + col_space
-
- # GH #36765
- # add one space in max_id because there is a one-space padding
- # in front of the number
- # this allows maintain two spaces gap between columns
- max_id = len(pprint_thing(col_count)) + 1
- len_id = len(pprint_thing(id_head))
- space_num = max(max_id, len_id) + col_space
-
- if show_counts:
- counts = self.data.count()
- if col_count != len(counts): # pragma: no cover
- raise AssertionError(
- f"Columns must equal counts ({col_count} != {len(counts)})"
- )
- count_header = "Non-Null Count"
- len_count = len(count_header)
- non_null = " non-null"
- max_count = max(len(pprint_thing(k)) for k in counts) + len(non_null)
- space_count = max(len_count, max_count) + col_space
- count_temp = "{count}" + non_null
+ @abstractmethod
+ def get_lines(self) -> List[str]:
+ """Product in a form of list of lines (strings)."""
+
+
+class DataFrameTableBuilder(TableBuilderAbstract):
+ """Abstract builder for dataframe info table."""
+
+ def get_lines(self) -> List[str]:
+ self._lines = []
+ if self.col_count == 0:
+ self._fill_empty_info()
else:
- count_header = ""
- space_count = len(count_header)
- len_count = space_count
- count_temp = "{count}"
+ self._fill_non_empty_info()
+ return self._lines
+
+ def _fill_empty_info(self) -> None:
+ """Add lines to the info table, pertaining to empty dataframe."""
+ self.add_object_type_line()
+ self.add_index_range_line()
+ self._lines.append(f"Empty {type(self.data).__name__}")
+
+ def _fill_non_empty_info(self) -> None:
+ """Add lines to the info table, pertaining to non-empty dataframe."""
+ self.add_object_type_line()
+ self.add_index_range_line()
+ self.add_columns_summary_line()
+ self.add_header_line()
+ self.add_separator_line()
+ self.add_body_lines()
+ self.add_dtypes_line()
+ if self.display_memory_usage:
+ self.add_memory_usage_line()
+
+ @property
+ def data(self) -> "DataFrame":
+ """DataFrame."""
+ return self.info.data
+
+ @property
+ def dtype_counts(self) -> Mapping[str, int]:
+ """Mapping dtype - number of counts."""
+ return self.info.dtype_counts
+
+ @property
+ def non_null_counts(self) -> Sequence[int]:
+ return self.info.non_null_counts
+
+ @property
+ def display_memory_usage(self) -> bool:
+ """Whether to display memory usage."""
+ return self.info.memory_usage
+
+ @property
+ def memory_usage_string(self) -> str:
+ """Memory usage string with proper size qualifier."""
+ return self.info.memory_usage_string
+
+ @property
+ def ids(self) -> Index:
+ """Dataframe columns."""
+ return self.info.ids
+
+ @property
+ def dtypes(self) -> "Series":
+ """Dtypes of each of the DataFrame's columns."""
+ return self.info.dtypes
+
+ @property
+ def col_count(self) -> int:
+ """Number of dataframe columns to be summarized."""
+ return self.info.col_count
+
+ def add_object_type_line(self) -> None:
+ """Add line with string representation of dataframe to the table."""
+ self._lines.append(str(type(self.data)))
+
+ def add_index_range_line(self) -> None:
+ """Add line with range of indices to the table."""
+ self._lines.append(self.data.index._summary())
+
+ @abstractmethod
+ def add_columns_summary_line(self) -> None:
+ """Add line with columns summary to the table."""
+
+ @abstractmethod
+ def add_header_line(self) -> None:
+ """Add header line to the table."""
+
+ @abstractmethod
+ def add_separator_line(self) -> None:
+ """Add separator line between header and body of the table."""
+
+ @abstractmethod
+ def add_body_lines(self) -> None:
+ """Add content of the table body."""
+
+ def add_dtypes_line(self) -> None:
+ """Add summary line with dtypes present in dataframe."""
+ collected_dtypes = [
+ f"{key}({val:d})" for key, val in sorted(self.dtype_counts.items())
+ ]
+ self._lines.append(f"dtypes: {', '.join(collected_dtypes)}")
+
+ def add_memory_usage_line(self) -> None:
+ """Add line containing memory usage."""
+ self._lines.append(f"memory usage: {self.memory_usage_string}")
+
+
+class DataFrameTableBuilderNonVerbose(DataFrameTableBuilder):
+ """Info table builder for non-verbose output."""
- dtype_header = "Dtype"
- len_dtype = len(dtype_header)
- max_dtypes = max(len(pprint_thing(k)) for k in dtypes)
- space_dtype = max(len_dtype, max_dtypes)
+ def add_columns_summary_line(self) -> None:
+ self._lines.append(self.ids._summary(name="Columns"))
- header = "".join(
+ def add_header_line(self) -> None:
+ """No header in non-verbose output."""
+
+ def add_separator_line(self) -> None:
+ """No separator in non-verbose output."""
+
+ def add_body_lines(self) -> None:
+ """No body in non-verbose output."""
+
+
+class DataFrameTableBuilderVerbose(DataFrameTableBuilder):
+ """Info table builder for verbose output."""
+
+ SPACING = " " * 2
+
+ def __init__(
+ self,
+ *,
+ info: DataFrameInfo,
+ with_counts: bool,
+ ):
+ super().__init__(info=info)
+ self.with_counts = with_counts
+ self.strrows: Sequence[Sequence[str]] = list(self._gen_rows())
+ self.gross_column_widths: Sequence[int] = self._get_gross_column_widths()
+
+ @property
+ def headers(self) -> Sequence[str]:
+ """Headers names of the columns in verbose table."""
+ if self.with_counts:
+ return [" # ", "Column", "Non-Null Count", "Dtype"]
+ return [" # ", "Column", "Dtype"]
+
+ def _gen_rows(self) -> Iterator[Sequence[str]]:
+ """Generator function yielding rows content.
+
+ Each element represents a row comprising a sequence of strings.
+ """
+ if self.with_counts:
+ return self._gen_rows_with_counts()
+ else:
+ return self._gen_rows_without_counts()
+
+ def add_columns_summary_line(self) -> None:
+ self._lines.append(f"Data columns (total {self.col_count} columns):")
+
+ @property
+ def header_column_widths(self) -> Sequence[int]:
+ """Widths of header columns (only titles)."""
+ return [len(col) for col in self.headers]
+
+ def _get_gross_column_widths(self) -> Sequence[int]:
+ """Get widths of columns containing both headers and actual content."""
+ body_column_widths = self._get_body_column_widths()
+ return [
+ max(*widths)
+ for widths in zip(self.header_column_widths, body_column_widths)
+ ]
+
+ def _get_body_column_widths(self) -> Sequence[int]:
+ """Get widths of table content columns."""
+ strcols: Sequence[Sequence[str]] = list(zip(*self.strrows))
+ return [max(len(x) for x in col) for col in strcols]
+
+ def add_header_line(self) -> None:
+ header_line = self.SPACING.join(
[
- _put_str(id_head, space_num),
- _put_str(column_head, space),
- _put_str(count_header, space_count),
- _put_str(dtype_header, space_dtype),
+ _put_str(header, col_width)
+ for header, col_width in zip(self.headers, self.gross_column_widths)
]
)
- lines.append(header)
+ self._lines.append(header_line)
- top_separator = "".join(
+ def add_separator_line(self) -> None:
+ separator_line = self.SPACING.join(
[
- _put_str("-" * len_id, space_num),
- _put_str("-" * len_column, space),
- _put_str("-" * len_count, space_count),
- _put_str("-" * len_dtype, space_dtype),
+ _put_str("-" * header_colwidth, gross_colwidth)
+ for header_colwidth, gross_colwidth in zip(
+ self.header_column_widths, self.gross_column_widths
+ )
]
)
- lines.append(top_separator)
-
- for i, col in enumerate(ids):
- dtype = dtypes.iloc[i]
- col = pprint_thing(col)
-
- line_no = _put_str(f" {i}", space_num)
- count = ""
- if show_counts:
- count = counts.iloc[i]
-
- lines.append(
- line_no
- + _put_str(col, space)
- + _put_str(count_temp.format(count=count), space_count)
- + _put_str(dtype, space_dtype)
+ self._lines.append(separator_line)
+
+ def add_body_lines(self) -> None:
+ for row in self.strrows:
+ body_line = self.SPACING.join(
+ [
+ _put_str(col, gross_colwidth)
+ for col, gross_colwidth in zip(row, self.gross_column_widths)
+ ]
)
+ self._lines.append(body_line)
+
+ def _gen_rows_without_counts(self) -> Iterator[Sequence[str]]:
+ """Iterator with string representation of body data without counts."""
+ yield from zip(
+ self._gen_line_numbers(),
+ self._gen_columns(),
+ self._gen_dtypes(),
+ )
+
+ def _gen_rows_with_counts(self) -> Iterator[Sequence[str]]:
+ """Iterator with string representation of body data with counts."""
+ yield from zip(
+ self._gen_line_numbers(),
+ self._gen_columns(),
+ self._gen_non_null_counts(),
+ self._gen_dtypes(),
+ )
- def _non_verbose_repr(self, lines: List[str], ids: "Index") -> None:
- lines.append(ids._summary(name="Columns"))
+ def _gen_line_numbers(self) -> Iterator[str]:
+ """Iterator with string representation of column numbers."""
+ for i, _ in enumerate(self.ids):
+ yield f" {i}"
+
+ def _gen_columns(self) -> Iterator[str]:
+ """Iterator with string representation of column names."""
+ for col in self.ids:
+ yield pprint_thing(col)
+
+ def _gen_dtypes(self) -> Iterator[str]:
+ """Iterator with string representation of column dtypes."""
+ for dtype in self.dtypes:
+ yield pprint_thing(dtype)
+
+ def _gen_non_null_counts(self) -> Iterator[str]:
+ """Iterator with string representation of non-null counts."""
+ for count in self.non_null_counts:
+ yield f"{count} non-null"
diff --git a/pandas/tests/io/formats/test_info.py b/pandas/tests/io/formats/test_info.py
index fd44bd431d50f..418d05a6b8752 100644
--- a/pandas/tests/io/formats/test_info.py
+++ b/pandas/tests/io/formats/test_info.py
@@ -51,6 +51,20 @@ def datetime_frame():
return DataFrame(tm.getTimeSeriesData())
+def test_info_empty():
+ df = DataFrame()
+ buf = StringIO()
+ df.info(buf=buf)
+ result = buf.getvalue()
+ expected = textwrap.dedent(
+ """\
+ <class 'pandas.core.frame.DataFrame'>
+ Index: 0 entries
+ Empty DataFrame"""
+ )
+ assert result == expected
+
+
def test_info_categorical_column():
# make sure it works
| - [ ] closes #xxxx
- [ ] tests added / passed
- [ ] passes `black pandas`
- [ ] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [ ] whatsnew entry
Enable polymorphism and builder pattern in ``info.py``.
The main benefits:
- Separated data representation from data itself
- More clear construction of each element via builder pattern
- Different builders for various cases (verbose with counts, verbose without counts, non-verbose) enabled one eliminate numerous if statements
May be useful to implement the present PR first and then build ``Series.info`` (https://github.com/pandas-dev/pandas/pull/31796) on top of this.
I do realize that I deleted ``BaseInfo`` class.
Now I see that it is required by the referenced PR, so I can move it back.
**Note:** needed to change test behavior. Here is the reason why.
I noticed inconsistency:
```
>>> df = pd.DataFrame({'long long column': np.random.rand(1000000)})
>>> df.info()
<class 'pandas.core.frame.DataFrame'>
RangeIndex: 1000000 entries, 0 to 999999
Data columns (total 1 columns):
# Column Non-Null Count Dtype
--- ------ -------------- -----
0 long long column 1000000 non-null float64
dtypes: float64(1)
memory usage: 7.6 MB
```
Here we have two spaces between columns.
However, if we create a dataframe with 10000 columns, then the distance between # col and "Column" is only one space.
```
>>> import pandas as pd
>>> import numpy as np
>>> df = pd.DataFrame(np.random.rand(3, 10001))
>>> with open('out.txt', 'w') as buf: df.info(verbose=True, buf=buf)
```
```
$ tail out.txt
9993 9993 float64
9994 9994 float64
9995 9995 float64
9996 9996 float64
9997 9997 float64
9998 9998 float64
9999 9999 float64
10000 10000 float64
dtypes: float64(10001)
memory usage: 234.5 KB
```
See, only one space between the first and the second columns.
I find it inconsistent.
So, I changed it, so that there are two spaces everywhere.
What do you think?
I am going to finalize it by documenting and introducing typing where required.
| https://api.github.com/repos/pandas-dev/pandas/pulls/36752 | 2020-09-30T21:31:23Z | 2020-10-21T12:54:55Z | 2020-10-21T12:54:55Z | 2020-10-21T12:55:00Z |
Backport PR #36706 on branch 1.1.x (CI: npdev new exception message) | diff --git a/pandas/tests/arithmetic/common.py b/pandas/tests/arithmetic/common.py
index 755fbd0d9036c..cd8dd102dc27c 100644
--- a/pandas/tests/arithmetic/common.py
+++ b/pandas/tests/arithmetic/common.py
@@ -76,6 +76,13 @@ def assert_invalid_comparison(left, right, box):
"Cannot compare type",
"not supported between",
"invalid type promotion",
+ (
+ # GH#36706 npdev 1.20.0 2020-09-28
+ r"The DTypes <class 'numpy.dtype\[datetime64\]'> and "
+ r"<class 'numpy.dtype\[int64\]'> do not have a common DType. "
+ "For example they cannot be stored in a single array unless the "
+ "dtype is `object`."
+ ),
]
)
with pytest.raises(TypeError, match=msg):
diff --git a/pandas/tests/frame/test_arithmetic.py b/pandas/tests/frame/test_arithmetic.py
index e17357e9845b5..166f26f668502 100644
--- a/pandas/tests/frame/test_arithmetic.py
+++ b/pandas/tests/frame/test_arithmetic.py
@@ -53,6 +53,11 @@ def check(df, df2):
msgs = [
r"Invalid comparison between dtype=datetime64\[ns\] and ndarray",
"invalid type promotion",
+ (
+ # npdev 1.20.0
+ r"The DTypes <class 'numpy.dtype\[.*\]'> and "
+ r"<class 'numpy.dtype\[.*\]'> do not have a common DType."
+ ),
]
msg = "|".join(msgs)
with pytest.raises(TypeError, match=msg):
| Backport PR #36706: CI: npdev new exception message | https://api.github.com/repos/pandas-dev/pandas/pulls/36751 | 2020-09-30T21:14:04Z | 2020-10-01T16:48:38Z | 2020-10-01T16:48:38Z | 2020-10-01T16:48:39Z |
Backport PR #36552 on branch 1.1.x (REGR: Series.__mod__ behaves different with numexpr) | diff --git a/doc/source/whatsnew/v1.1.3.rst b/doc/source/whatsnew/v1.1.3.rst
index 91b9cf59687b3..15777abcb8084 100644
--- a/doc/source/whatsnew/v1.1.3.rst
+++ b/doc/source/whatsnew/v1.1.3.rst
@@ -34,6 +34,7 @@ Fixed regressions
- Fixed regression when adding a :meth:`timedelta_range` to a :class:`Timestamp` raised a ``ValueError`` (:issue:`35897`)
- Fixed regression in :meth:`Series.__getitem__` incorrectly raising when the input was a tuple (:issue:`35534`)
- Fixed regression in :meth:`Series.__getitem__` incorrectly raising when the input was a frozenset (:issue:`35747`)
+- Fixed regression in modulo of :class:`Index`, :class:`Series` and :class:`DataFrame` using ``numexpr`` using C not Python semantics (:issue:`36047`, :issue:`36526`)
- Fixed regression in :meth:`read_excel` with ``engine="odf"`` caused ``UnboundLocalError`` in some cases where cells had nested child nodes (:issue:`36122`, :issue:`35802`)
- Fixed regression in :meth:`DataFrame.replace` inconsistent replace when using a float in the replace method (:issue:`35376`)
- Fixed regression in :class:`DataFrame` and :class:`Series` comparisons between numeric arrays and strings (:issue:`35700`, :issue:`36377`)
diff --git a/pandas/core/computation/expressions.py b/pandas/core/computation/expressions.py
index 0e9077e6d557e..da290db362019 100644
--- a/pandas/core/computation/expressions.py
+++ b/pandas/core/computation/expressions.py
@@ -132,7 +132,10 @@ def _evaluate_numexpr(op, op_str, a, b):
roperator.rtruediv: "/",
operator.floordiv: "//",
roperator.rfloordiv: "//",
- operator.mod: "%",
+ # we require Python semantics for mod of negative for backwards compatibility
+ # see https://github.com/pydata/numexpr/issues/365
+ # so sticking with unaccelerated for now
+ operator.mod: None,
roperator.rmod: "%",
operator.pow: "**",
roperator.rpow: "**",
diff --git a/pandas/core/ops/methods.py b/pandas/core/ops/methods.py
index a4694a6e5134f..c60b67fa2f4f6 100644
--- a/pandas/core/ops/methods.py
+++ b/pandas/core/ops/methods.py
@@ -171,8 +171,6 @@ def _create_methods(cls, arith_method, comp_method, bool_method, special):
mul=arith_method(cls, operator.mul, special),
truediv=arith_method(cls, operator.truediv, special),
floordiv=arith_method(cls, operator.floordiv, special),
- # Causes a floating point exception in the tests when numexpr enabled,
- # so for now no speedup
mod=arith_method(cls, operator.mod, special),
pow=arith_method(cls, operator.pow, special),
# not entirely sure why this is necessary, but previously was included
diff --git a/pandas/tests/test_expressions.py b/pandas/tests/test_expressions.py
index 2368e93ddc256..cc8a134ebcc9f 100644
--- a/pandas/tests/test_expressions.py
+++ b/pandas/tests/test_expressions.py
@@ -6,7 +6,7 @@
import pytest
import pandas._testing as tm
-from pandas.core.api import DataFrame
+from pandas.core.api import DataFrame, Index, Series
from pandas.core.computation import expressions as expr
_frame = DataFrame(randn(10000, 4), columns=list("ABCD"), dtype="float64")
@@ -380,3 +380,41 @@ def test_frame_series_axis(self, axis, arith):
result = op_func(other, axis=axis)
tm.assert_frame_equal(expected, result)
+
+ @pytest.mark.parametrize(
+ "op",
+ [
+ "__mod__",
+ pytest.param("__rmod__", marks=pytest.mark.xfail(reason="GH-36552")),
+ "__floordiv__",
+ "__rfloordiv__",
+ ],
+ )
+ @pytest.mark.parametrize("box", [DataFrame, Series, Index])
+ @pytest.mark.parametrize("scalar", [-5, 5])
+ def test_python_semantics_with_numexpr_installed(self, op, box, scalar):
+ # https://github.com/pandas-dev/pandas/issues/36047
+ expr._MIN_ELEMENTS = 0
+ data = np.arange(-50, 50)
+ obj = box(data)
+ method = getattr(obj, op)
+ result = method(scalar)
+
+ # compare result with numpy
+ expr.set_use_numexpr(False)
+ expected = method(scalar)
+ expr.set_use_numexpr(True)
+ tm.assert_equal(result, expected)
+
+ # compare result element-wise with Python
+ for i, elem in enumerate(data):
+ if box == DataFrame:
+ scalar_result = result.iloc[i, 0]
+ else:
+ scalar_result = result[i]
+ try:
+ expected = getattr(int(elem), op)(scalar)
+ except ZeroDivisionError:
+ pass
+ else:
+ assert scalar_result == expected
| Backport PR #36552: REGR: Series.__mod__ behaves different with numexpr | https://api.github.com/repos/pandas-dev/pandas/pulls/36750 | 2020-09-30T20:29:02Z | 2020-10-01T16:49:48Z | 2020-10-01T16:49:48Z | 2020-10-01T16:49:48Z |
Added notes for Jupyter & Colab users | diff --git a/README.md b/README.md
index a2f2f1c04442a..ab5dce5a64048 100644
--- a/README.md
+++ b/README.md
@@ -153,6 +153,9 @@ has been under active development since then.
For usage questions, the best place to go to is [StackOverflow](https://stackoverflow.com/questions/tagged/pandas).
Further, general questions and discussions can also take place on the [pydata mailing list](https://groups.google.com/forum/?fromgroups#!forum/pydata).
+## Notes
+If you are using Jupyter notebook or Google's Colaboratory then pandas are pre-installed in it.
+
## Discussion and Development
Most development discussions take place on github in this repo. Further, the [pandas-dev mailing list](https://mail.python.org/mailman/listinfo/pandas-dev) can also be used for specialized discussions or design issues, and a [Gitter channel](https://gitter.im/pydata/pandas) is available for quick development related questions.
| - [ ] closes #xxxx
- [ ] tests added / passed
- [ ] passes `black pandas`
- [ ] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [ ] whatsnew entry
| https://api.github.com/repos/pandas-dev/pandas/pulls/36749 | 2020-09-30T19:44:30Z | 2020-10-01T10:54:10Z | null | 2020-10-02T09:08:48Z |
[DOC]: Add explanation about DataFrame methods use all Categories | diff --git a/doc/source/user_guide/categorical.rst b/doc/source/user_guide/categorical.rst
index 926c2d9be74c2..6a8e1767ef7e8 100644
--- a/doc/source/user_guide/categorical.rst
+++ b/doc/source/user_guide/categorical.rst
@@ -618,6 +618,19 @@ even if some categories are not present in the data:
s = pd.Series(pd.Categorical(["a", "b", "c", "c"], categories=["c", "a", "b", "d"]))
s.value_counts()
+``DataFrame`` methods like :meth:`DataFrame.sum` also show "unused" categories.
+
+.. ipython:: python
+
+ columns = pd.Categorical(
+ ["One", "One", "Two"], categories=["One", "Two", "Three"], ordered=True
+ )
+ df = pd.DataFrame(
+ data=[[1, 2, 3], [4, 5, 6]],
+ columns=pd.MultiIndex.from_arrays([["A", "B", "B"], columns]),
+ )
+ df.sum(axis=1, level=1)
+
Groupby will also show "unused" categories:
.. ipython:: python
| - [x] closes #36740
| https://api.github.com/repos/pandas-dev/pandas/pulls/36747 | 2020-09-30T18:41:43Z | 2020-10-01T01:18:42Z | 2020-10-01T01:18:41Z | 2020-10-01T07:26:09Z |
Standardize cast_str behavior in all datetimelike fill_value validators | diff --git a/pandas/core/arrays/datetimelike.py b/pandas/core/arrays/datetimelike.py
index 83a9c0ba61c2d..ef402fce642b9 100644
--- a/pandas/core/arrays/datetimelike.py
+++ b/pandas/core/arrays/datetimelike.py
@@ -756,9 +756,7 @@ def _validate_shift_value(self, fill_value):
return self._unbox(fill_value)
- def _validate_scalar(
- self, value, msg: Optional[str] = None, cast_str: bool = False
- ):
+ def _validate_scalar(self, value, msg: Optional[str] = None):
"""
Validate that the input value can be cast to our scalar_type.
@@ -769,14 +767,12 @@ def _validate_scalar(
Message to raise in TypeError on invalid input.
If not provided, `value` is cast to a str and used
as the message.
- cast_str : bool, default False
- Whether to try to parse string input to scalar_type.
Returns
-------
self._scalar_type or NaT
"""
- if cast_str and isinstance(value, str):
+ if isinstance(value, str):
# NB: Careful about tzawareness
try:
value = self._scalar_from_string(value)
@@ -798,9 +794,7 @@ def _validate_scalar(
return value
- def _validate_listlike(
- self, value, opname: str, cast_str: bool = False, allow_object: bool = False
- ):
+ def _validate_listlike(self, value, opname: str, allow_object: bool = False):
if isinstance(value, type(self)):
return value
@@ -809,7 +803,7 @@ def _validate_listlike(
value = array(value)
value = extract_array(value, extract_numpy=True)
- if cast_str and is_dtype_equal(value.dtype, "string"):
+ if is_dtype_equal(value.dtype, "string"):
# We got a StringArray
try:
# TODO: Could use from_sequence_of_strings if implemented
@@ -839,9 +833,9 @@ def _validate_listlike(
def _validate_searchsorted_value(self, value):
msg = "searchsorted requires compatible dtype or scalar"
if not is_list_like(value):
- value = self._validate_scalar(value, msg, cast_str=True)
+ value = self._validate_scalar(value, msg)
else:
- value = self._validate_listlike(value, "searchsorted", cast_str=True)
+ value = self._validate_listlike(value, "searchsorted")
rv = self._unbox(value)
return self._rebox_native(rv)
@@ -852,15 +846,15 @@ def _validate_setitem_value(self, value):
f"or array of those. Got '{type(value).__name__}' instead."
)
if is_list_like(value):
- value = self._validate_listlike(value, "setitem", cast_str=True)
+ value = self._validate_listlike(value, "setitem")
else:
- value = self._validate_scalar(value, msg, cast_str=True)
+ value = self._validate_scalar(value, msg)
return self._unbox(value, setitem=True)
def _validate_insert_value(self, value):
msg = f"cannot insert {type(self).__name__} with incompatible label"
- value = self._validate_scalar(value, msg, cast_str=False)
+ value = self._validate_scalar(value, msg)
self._check_compatible_with(value, setitem=True)
# TODO: if we dont have compat, should we raise or astype(object)?
diff --git a/pandas/core/indexes/datetimelike.py b/pandas/core/indexes/datetimelike.py
index d2162d987ccd6..5128c644e6bcb 100644
--- a/pandas/core/indexes/datetimelike.py
+++ b/pandas/core/indexes/datetimelike.py
@@ -646,7 +646,7 @@ def _wrap_joined_index(self, joined: np.ndarray, other):
def _convert_arr_indexer(self, keyarr):
try:
return self._data._validate_listlike(
- keyarr, "convert_arr_indexer", cast_str=True, allow_object=True
+ keyarr, "convert_arr_indexer", allow_object=True
)
except (ValueError, TypeError):
return com.asarray_tuplesafe(keyarr)
diff --git a/pandas/core/indexes/timedeltas.py b/pandas/core/indexes/timedeltas.py
index 854c4e33eca01..7e635e55288e5 100644
--- a/pandas/core/indexes/timedeltas.py
+++ b/pandas/core/indexes/timedeltas.py
@@ -217,7 +217,7 @@ def get_loc(self, key, method=None, tolerance=None):
raise InvalidIndexError(key)
try:
- key = self._data._validate_scalar(key, cast_str=True)
+ key = self._data._validate_scalar(key)
except TypeError as err:
raise KeyError(key) from err
diff --git a/pandas/tests/arrays/test_datetimelike.py b/pandas/tests/arrays/test_datetimelike.py
index 3f5ab5baa7d69..91bcdf32603f4 100644
--- a/pandas/tests/arrays/test_datetimelike.py
+++ b/pandas/tests/arrays/test_datetimelike.py
@@ -160,6 +160,16 @@ def test_take_fill(self):
result = arr.take([-1, 1], allow_fill=True, fill_value=pd.NaT)
assert result[0] is pd.NaT
+ def test_take_fill_str(self, arr1d):
+ # Cast str fill_value matching other fill_value-taking methods
+ result = arr1d.take([-1, 1], allow_fill=True, fill_value=str(arr1d[-1]))
+ expected = arr1d[[-1, 1]]
+ tm.assert_equal(result, expected)
+
+ msg = r"'fill_value' should be a <.*>\. Got 'foo'"
+ with pytest.raises(ValueError, match=msg):
+ arr1d.take([-1, 1], allow_fill=True, fill_value="foo")
+
def test_concat_same_type(self):
data = np.arange(10, dtype="i8") * 24 * 3600 * 10 ** 9
diff --git a/pandas/tests/indexes/datetimelike.py b/pandas/tests/indexes/datetimelike.py
index 71ae1d6bda9c7..df857cce05bbb 100644
--- a/pandas/tests/indexes/datetimelike.py
+++ b/pandas/tests/indexes/datetimelike.py
@@ -115,3 +115,24 @@ def test_not_equals_numeric(self):
assert not index.equals(pd.Index(index.asi8))
assert not index.equals(pd.Index(index.asi8.astype("u8")))
assert not index.equals(pd.Index(index.asi8).astype("f8"))
+
+ def test_where_cast_str(self):
+ index = self.create_index()
+
+ mask = np.ones(len(index), dtype=bool)
+ mask[-1] = False
+
+ result = index.where(mask, str(index[0]))
+ expected = index.where(mask, index[0])
+ tm.assert_index_equal(result, expected)
+
+ result = index.where(mask, [str(index[0])])
+ tm.assert_index_equal(result, expected)
+
+ msg = "Where requires matching dtype, not foo"
+ with pytest.raises(TypeError, match=msg):
+ index.where(mask, "foo")
+
+ msg = r"Where requires matching dtype, not \['foo'\]"
+ with pytest.raises(TypeError, match=msg):
+ index.where(mask, ["foo"])
| - [ ] closes #xxxx
- [ ] tests added / passed
- [ ] passes `black pandas`
- [ ] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [ ] whatsnew entry
This will in turn allow us to simplify/de-duplicate a bunch of code since we now pass cast_str in all cases. | https://api.github.com/repos/pandas-dev/pandas/pulls/36746 | 2020-09-30T18:33:31Z | 2020-10-06T02:29:09Z | 2020-10-06T02:29:09Z | 2020-10-06T02:48:21Z |
BUG: DTI/TDI.equals with i8 | diff --git a/doc/source/whatsnew/v1.2.0.rst b/doc/source/whatsnew/v1.2.0.rst
index 016e8d90e7d21..770b64975ead4 100644
--- a/doc/source/whatsnew/v1.2.0.rst
+++ b/doc/source/whatsnew/v1.2.0.rst
@@ -304,7 +304,7 @@ Datetimelike
- Bug in :meth:`DatetimeIndex.searchsorted`, :meth:`TimedeltaIndex.searchsorted`, :meth:`PeriodIndex.searchsorted`, and :meth:`Series.searchsorted` with ``datetime64``, ``timedelta64`` or ``Period`` dtype placement of ``NaT`` values being inconsistent with ``NumPy`` (:issue:`36176`, :issue:`36254`)
- Inconsistency in :class:`DatetimeArray`, :class:`TimedeltaArray`, and :class:`PeriodArray` setitem casting arrays of strings to datetimelike scalars but not scalar strings (:issue:`36261`)
- Bug in :class:`DatetimeIndex.shift` incorrectly raising when shifting empty indexes (:issue:`14811`)
-
+- Bug in :meth:`DatetimeIndex.equals` and :meth:`TimedeltaIndex.equals` incorrectly considering ``int64`` indexes as equal (:issue:`36744`)
Timedelta
^^^^^^^^^
diff --git a/pandas/core/indexes/datetimelike.py b/pandas/core/indexes/datetimelike.py
index 23cc93b9ecb33..d2162d987ccd6 100644
--- a/pandas/core/indexes/datetimelike.py
+++ b/pandas/core/indexes/datetimelike.py
@@ -134,6 +134,8 @@ def equals(self, other: object) -> bool:
if not isinstance(other, Index):
return False
+ elif other.dtype.kind in ["f", "i", "u", "c"]:
+ return False
elif not isinstance(other, type(self)):
try:
other = type(self)(other)
diff --git a/pandas/core/ops/__init__.py b/pandas/core/ops/__init__.py
index 2dc97a3583dfb..ab04cbfff3f9a 100644
--- a/pandas/core/ops/__init__.py
+++ b/pandas/core/ops/__init__.py
@@ -539,7 +539,9 @@ def _should_reindex_frame_op(
if fill_value is None and level is None and axis is default_axis:
# TODO: any other cases we should handle here?
cols = left.columns.intersection(right.columns)
- if not (cols.equals(left.columns) and cols.equals(right.columns)):
+
+ if len(cols) and not (cols.equals(left.columns) and cols.equals(right.columns)):
+ # TODO: is there a shortcut available when len(cols) == 0?
return True
return False
diff --git a/pandas/tests/computation/test_eval.py b/pandas/tests/computation/test_eval.py
index cca64a6bf487c..b78c7775e8a37 100644
--- a/pandas/tests/computation/test_eval.py
+++ b/pandas/tests/computation/test_eval.py
@@ -1054,15 +1054,15 @@ def test_complex_series_frame_alignment(self, engine, parser):
m2, n, data_gen_f=f, r_idx_type=r2, c_idx_type=c2
)
index = getattr(locals().get(obj_name), index_name)
- s = Series(np.random.randn(n), index[:n])
+ ser = Series(np.random.randn(n), index[:n])
if r2 == "dt" or c2 == "dt":
if engine == "numexpr":
- expected2 = df2.add(s)
+ expected2 = df2.add(ser)
else:
- expected2 = df2 + s
+ expected2 = df2 + ser
else:
- expected2 = df2 + s
+ expected2 = df2 + ser
if r1 == "dt" or c1 == "dt":
if engine == "numexpr":
@@ -1072,11 +1072,11 @@ def test_complex_series_frame_alignment(self, engine, parser):
else:
expected = expected2 + df
- if should_warn(df2.index, s.index, df.index):
+ if should_warn(df2.index, ser.index, df.index):
with tm.assert_produces_warning(RuntimeWarning):
- res = pd.eval("df2 + s + df", engine=engine, parser=parser)
+ res = pd.eval("df2 + ser + df", engine=engine, parser=parser)
else:
- res = pd.eval("df2 + s + df", engine=engine, parser=parser)
+ res = pd.eval("df2 + ser + df", engine=engine, parser=parser)
assert res.shape == expected.shape
tm.assert_frame_equal(res, expected)
diff --git a/pandas/tests/indexes/datetimelike.py b/pandas/tests/indexes/datetimelike.py
index f667e5a610419..71ae1d6bda9c7 100644
--- a/pandas/tests/indexes/datetimelike.py
+++ b/pandas/tests/indexes/datetimelike.py
@@ -108,3 +108,10 @@ def test_getitem_preserves_freq(self):
result = index[:]
assert result.freq == index.freq
+
+ def test_not_equals_numeric(self):
+ index = self.create_index()
+
+ assert not index.equals(pd.Index(index.asi8))
+ assert not index.equals(pd.Index(index.asi8.astype("u8")))
+ assert not index.equals(pd.Index(index.asi8).astype("f8"))
| - [ ] closes #xxxx
- [x] tests added / passed
- [x] passes `black pandas`
- [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [x] whatsnew entry
| https://api.github.com/repos/pandas-dev/pandas/pulls/36744 | 2020-09-30T16:34:37Z | 2020-10-02T21:29:24Z | 2020-10-02T21:29:24Z | 2020-10-02T21:47:36Z |
TYP: some more static definitions of methods for DatetimeIndex | diff --git a/pandas/core/indexes/datetimes.py b/pandas/core/indexes/datetimes.py
index 016544d823ae3..da78f8ff5d603 100644
--- a/pandas/core/indexes/datetimes.py
+++ b/pandas/core/indexes/datetimes.py
@@ -1,6 +1,6 @@
from datetime import date, datetime, time, timedelta, tzinfo
import operator
-from typing import Optional
+from typing import TYPE_CHECKING, Optional
import warnings
import numpy as np
@@ -30,6 +30,9 @@
from pandas.core.indexes.extension import inherit_names
from pandas.core.tools.times import to_time
+if TYPE_CHECKING:
+ from pandas import DataFrame, Float64Index, PeriodIndex, TimedeltaIndex
+
def _new_DatetimeIndex(cls, d):
"""
@@ -64,8 +67,7 @@ def _new_DatetimeIndex(cls, d):
@inherit_names(
- ["to_perioddelta", "to_julian_date", "strftime", "isocalendar"]
- + DatetimeArray._field_ops
+ DatetimeArray._field_ops
+ [
method
for method in DatetimeArray._datetimelike_methods
@@ -220,7 +222,12 @@ class DatetimeIndex(DatetimeTimedeltaMixin):
tz: Optional[tzinfo]
# --------------------------------------------------------------------
- # methods that dispatch to array and wrap result in DatetimeIndex
+ # methods that dispatch to DatetimeArray and wrap result
+
+ @doc(DatetimeArray.strftime)
+ def strftime(self, date_format) -> Index:
+ arr = self._data.strftime(date_format)
+ return Index(arr, name=self.name)
@doc(DatetimeArray.tz_convert)
def tz_convert(self, tz) -> "DatetimeIndex":
@@ -235,9 +242,30 @@ def tz_localize(
return type(self)._simple_new(arr, name=self.name)
@doc(DatetimeArray.to_period)
- def to_period(self, freq=None) -> "DatetimeIndex":
+ def to_period(self, freq=None) -> "PeriodIndex":
+ from pandas.core.indexes.api import PeriodIndex
+
arr = self._data.to_period(freq)
- return type(self)._simple_new(arr, name=self.name)
+ return PeriodIndex._simple_new(arr, name=self.name)
+
+ @doc(DatetimeArray.to_perioddelta)
+ def to_perioddelta(self, freq) -> "TimedeltaIndex":
+ from pandas.core.indexes.api import TimedeltaIndex
+
+ arr = self._data.to_perioddelta(freq)
+ return TimedeltaIndex._simple_new(arr, name=self.name)
+
+ @doc(DatetimeArray.to_julian_date)
+ def to_julian_date(self) -> "Float64Index":
+ from pandas.core.indexes.api import Float64Index
+
+ arr = self._data.to_julian_date()
+ return Float64Index._simple_new(arr, name=self.name)
+
+ @doc(DatetimeArray.isocalendar)
+ def isocalendar(self) -> "DataFrame":
+ df = self._data.isocalendar()
+ return df.set_index(self)
# --------------------------------------------------------------------
# Constructors
| xref #32100, https://github.com/pandas-dev/pandas/issues/31160#issuecomment-701244665 | https://api.github.com/repos/pandas-dev/pandas/pulls/36742 | 2020-09-30T14:26:27Z | 2020-10-01T01:23:33Z | 2020-10-01T01:23:33Z | 2020-10-01T09:06:17Z |
TST: honor encoding in read_fwf for memory-mapped files | diff --git a/pandas/tests/io/parser/test_read_fwf.py b/pandas/tests/io/parser/test_read_fwf.py
index 13519154f82b8..d45317aaa3458 100644
--- a/pandas/tests/io/parser/test_read_fwf.py
+++ b/pandas/tests/io/parser/test_read_fwf.py
@@ -634,3 +634,24 @@ def test_binary_mode():
df = pd.read_fwf(file)
file.seek(0)
tm.assert_frame_equal(df, df_reference)
+
+
+@pytest.mark.parametrize("memory_map", [True, False])
+def test_encoding_mmap(memory_map):
+ """
+ encoding should be working, even when using a memory-mapped file.
+
+ GH 23254.
+ """
+ encoding = "iso8859_1"
+ data = BytesIO(" 1 A Ä 2\n".encode(encoding))
+ df = pd.read_fwf(
+ data,
+ header=None,
+ widths=[2, 2, 2, 2],
+ encoding=encoding,
+ memory_map=memory_map,
+ )
+ data.seek(0)
+ df_reference = pd.DataFrame([[1, "A", "Ä", 2]])
+ tm.assert_frame_equal(df, df_reference)
| - [x] closes #23254
- [x] tests added / passed
- [x] passes `black pandas`
- [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [x] whatsnew entry: not needed
`encoding` was already working for memory-mapped files in `read_fwf`.
| https://api.github.com/repos/pandas-dev/pandas/pulls/36737 | 2020-09-30T06:14:33Z | 2020-09-30T17:22:42Z | 2020-09-30T17:22:42Z | 2020-09-30T17:25:34Z |
TST: read binary file objects with read_fwf | diff --git a/pandas/tests/io/parser/test_read_fwf.py b/pandas/tests/io/parser/test_read_fwf.py
index 127d0dc4c9829..13519154f82b8 100644
--- a/pandas/tests/io/parser/test_read_fwf.py
+++ b/pandas/tests/io/parser/test_read_fwf.py
@@ -6,6 +6,7 @@
from datetime import datetime
from io import BytesIO, StringIO
+from pathlib import Path
import numpy as np
import pytest
@@ -614,3 +615,22 @@ def test_fwf_compression(compression_only, infer):
result = read_fwf(path, **kwargs)
tm.assert_frame_equal(result, expected)
+
+
+def test_binary_mode():
+ """
+ read_fwf supports opening files in binary mode.
+
+ GH 18035.
+ """
+ data = """aas aas aas
+bba bab b a"""
+ df_reference = pd.DataFrame(
+ [["bba", "bab", "b a"]], columns=["aas", "aas.1", "aas.2"], index=[0]
+ )
+ with tm.ensure_clean() as path:
+ Path(path).write_text(data)
+ with open(path, "rb") as file:
+ df = pd.read_fwf(file)
+ file.seek(0)
+ tm.assert_frame_equal(df, df_reference)
| - [x] closes #18035
- [x] tests added / passed
- [x] passes `black pandas`
- [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [x] whatsnew entry: not needed
`read_fwf` has already supported reading binary file objects. Added the test case from the issue.
| https://api.github.com/repos/pandas-dev/pandas/pulls/36735 | 2020-09-30T05:50:52Z | 2020-09-30T13:10:05Z | 2020-09-30T13:10:04Z | 2020-09-30T13:10:08Z |
DOC: Format more code blocks | diff --git a/doc/source/user_guide/io.rst b/doc/source/user_guide/io.rst
index fc5aad12cd5e8..e483cebf71614 100644
--- a/doc/source/user_guide/io.rst
+++ b/doc/source/user_guide/io.rst
@@ -135,12 +135,10 @@ usecols : list-like or callable, default ``None``
import pandas as pd
from io import StringIO
- data = ('col1,col2,col3\n'
- 'a,b,1\n'
- 'a,b,2\n'
- 'c,d,3')
+
+ data = "col1,col2,col3\na,b,1\na,b,2\nc,d,3"
pd.read_csv(StringIO(data))
- pd.read_csv(StringIO(data), usecols=lambda x: x.upper() in ['COL1', 'COL3'])
+ pd.read_csv(StringIO(data), usecols=lambda x: x.upper() in ["COL1", "COL3"])
Using this parameter results in much faster parsing time and lower memory usage.
squeeze : boolean, default ``False``
@@ -181,10 +179,7 @@ skiprows : list-like or integer, default ``None``
.. ipython:: python
- data = ('col1,col2,col3\n'
- 'a,b,1\n'
- 'a,b,2\n'
- 'c,d,3')
+ data = "col1,col2,col3\na,b,1\na,b,2\nc,d,3"
pd.read_csv(StringIO(data))
pd.read_csv(StringIO(data), skiprows=lambda x: x % 2 != 0)
@@ -365,17 +360,14 @@ columns:
.. ipython:: python
import numpy as np
- data = ('a,b,c,d\n'
- '1,2,3,4\n'
- '5,6,7,8\n'
- '9,10,11')
+
+ data = "a,b,c,d\n1,2,3,4\n5,6,7,8\n9,10,11"
print(data)
df = pd.read_csv(StringIO(data), dtype=object)
df
- df['a'][0]
- df = pd.read_csv(StringIO(data),
- dtype={'b': object, 'c': np.float64, 'd': 'Int64'})
+ df["a"][0]
+ df = pd.read_csv(StringIO(data), dtype={"b": object, "c": np.float64, "d": "Int64"})
df.dtypes
Fortunately, pandas offers more than one way to ensure that your column(s)
@@ -390,14 +382,10 @@ of :func:`~pandas.read_csv`:
.. ipython:: python
- data = ("col_1\n"
- "1\n"
- "2\n"
- "'A'\n"
- "4.22")
- df = pd.read_csv(StringIO(data), converters={'col_1': str})
+ data = "col_1\n1\n2\n'A'\n4.22"
+ df = pd.read_csv(StringIO(data), converters={"col_1": str})
df
- df['col_1'].apply(type).value_counts()
+ df["col_1"].apply(type).value_counts()
Or you can use the :func:`~pandas.to_numeric` function to coerce the
dtypes after reading in the data,
@@ -405,9 +393,9 @@ dtypes after reading in the data,
.. ipython:: python
df2 = pd.read_csv(StringIO(data))
- df2['col_1'] = pd.to_numeric(df2['col_1'], errors='coerce')
+ df2["col_1"] = pd.to_numeric(df2["col_1"], errors="coerce")
df2
- df2['col_1'].apply(type).value_counts()
+ df2["col_1"].apply(type).value_counts()
which will convert all valid parsing to floats, leaving the invalid parsing
as ``NaN``.
@@ -429,12 +417,12 @@ worth trying.
.. ipython:: python
:okwarning:
- col_1 = list(range(500000)) + ['a', 'b'] + list(range(500000))
- df = pd.DataFrame({'col_1': col_1})
- df.to_csv('foo.csv')
- mixed_df = pd.read_csv('foo.csv')
- mixed_df['col_1'].apply(type).value_counts()
- mixed_df['col_1'].dtype
+ col_1 = list(range(500000)) + ["a", "b"] + list(range(500000))
+ df = pd.DataFrame({"col_1": col_1})
+ df.to_csv("foo.csv")
+ mixed_df = pd.read_csv("foo.csv")
+ mixed_df["col_1"].apply(type).value_counts()
+ mixed_df["col_1"].dtype
will result with ``mixed_df`` containing an ``int`` dtype for certain chunks
of the column, and ``str`` for others due to the mixed dtypes from the
@@ -445,7 +433,8 @@ worth trying.
:suppress:
import os
- os.remove('foo.csv')
+
+ os.remove("foo.csv")
.. _io.categorical:
@@ -457,21 +446,18 @@ Specifying categorical dtype
.. ipython:: python
- data = ('col1,col2,col3\n'
- 'a,b,1\n'
- 'a,b,2\n'
- 'c,d,3')
+ data = "col1,col2,col3\na,b,1\na,b,2\nc,d,3"
pd.read_csv(StringIO(data))
pd.read_csv(StringIO(data)).dtypes
- pd.read_csv(StringIO(data), dtype='category').dtypes
+ pd.read_csv(StringIO(data), dtype="category").dtypes
Individual columns can be parsed as a ``Categorical`` using a dict
specification:
.. ipython:: python
- pd.read_csv(StringIO(data), dtype={'col1': 'category'}).dtypes
+ pd.read_csv(StringIO(data), dtype={"col1": "category"}).dtypes
Specifying ``dtype='category'`` will result in an unordered ``Categorical``
whose ``categories`` are the unique values observed in the data. For more
@@ -482,16 +468,17 @@ that column's ``dtype``.
.. ipython:: python
from pandas.api.types import CategoricalDtype
- dtype = CategoricalDtype(['d', 'c', 'b', 'a'], ordered=True)
- pd.read_csv(StringIO(data), dtype={'col1': dtype}).dtypes
+
+ dtype = CategoricalDtype(["d", "c", "b", "a"], ordered=True)
+ pd.read_csv(StringIO(data), dtype={"col1": dtype}).dtypes
When using ``dtype=CategoricalDtype``, "unexpected" values outside of
``dtype.categories`` are treated as missing values.
.. ipython:: python
- dtype = CategoricalDtype(['a', 'b', 'd']) # No 'c'
- pd.read_csv(StringIO(data), dtype={'col1': dtype}).col1
+ dtype = CategoricalDtype(["a", "b", "d"]) # No 'c'
+ pd.read_csv(StringIO(data), dtype={"col1": dtype}).col1
This matches the behavior of :meth:`Categorical.set_categories`.
@@ -507,11 +494,11 @@ This matches the behavior of :meth:`Categorical.set_categories`.
.. ipython:: python
- df = pd.read_csv(StringIO(data), dtype='category')
+ df = pd.read_csv(StringIO(data), dtype="category")
df.dtypes
- df['col3']
- df['col3'].cat.categories = pd.to_numeric(df['col3'].cat.categories)
- df['col3']
+ df["col3"]
+ df["col3"].cat.categories = pd.to_numeric(df["col3"].cat.categories)
+ df["col3"]
Naming and using columns
@@ -527,10 +514,7 @@ used as the column names:
.. ipython:: python
- data = ('a,b,c\n'
- '1,2,3\n'
- '4,5,6\n'
- '7,8,9')
+ data = "a,b,c\n1,2,3\n4,5,6\n7,8,9"
print(data)
pd.read_csv(StringIO(data))
@@ -541,19 +525,15 @@ any):
.. ipython:: python
print(data)
- pd.read_csv(StringIO(data), names=['foo', 'bar', 'baz'], header=0)
- pd.read_csv(StringIO(data), names=['foo', 'bar', 'baz'], header=None)
+ pd.read_csv(StringIO(data), names=["foo", "bar", "baz"], header=0)
+ pd.read_csv(StringIO(data), names=["foo", "bar", "baz"], header=None)
If the header is in a row other than the first, pass the row number to
``header``. This will skip the preceding rows:
.. ipython:: python
- data = ('skip this skip it\n'
- 'a,b,c\n'
- '1,2,3\n'
- '4,5,6\n'
- '7,8,9')
+ data = "skip this skip it\na,b,c\n1,2,3\n4,5,6\n7,8,9"
pd.read_csv(StringIO(data), header=1)
.. note::
@@ -574,9 +554,7 @@ distinguish between them so as to prevent overwriting data:
.. ipython:: python
- data = ('a,b,a\n'
- '0,1,2\n'
- '3,4,5')
+ data = "a,b,a\n0,1,2\n3,4,5"
pd.read_csv(StringIO(data))
There is no more duplicate data because ``mangle_dupe_cols=True`` by default,
@@ -613,18 +591,18 @@ file, either using the column names, position numbers or a callable:
.. ipython:: python
- data = 'a,b,c,d\n1,2,3,foo\n4,5,6,bar\n7,8,9,baz'
+ data = "a,b,c,d\n1,2,3,foo\n4,5,6,bar\n7,8,9,baz"
pd.read_csv(StringIO(data))
- pd.read_csv(StringIO(data), usecols=['b', 'd'])
+ pd.read_csv(StringIO(data), usecols=["b", "d"])
pd.read_csv(StringIO(data), usecols=[0, 2, 3])
- pd.read_csv(StringIO(data), usecols=lambda x: x.upper() in ['A', 'C'])
+ pd.read_csv(StringIO(data), usecols=lambda x: x.upper() in ["A", "C"])
The ``usecols`` argument can also be used to specify which columns not to
use in the final result:
.. ipython:: python
- pd.read_csv(StringIO(data), usecols=lambda x: x not in ['a', 'c'])
+ pd.read_csv(StringIO(data), usecols=lambda x: x not in ["a", "c"])
In this case, the callable is specifying that we exclude the "a" and "c"
columns from the output.
@@ -642,26 +620,15 @@ be ignored. By default, completely blank lines will be ignored as well.
.. ipython:: python
- data = ('\n'
- 'a,b,c\n'
- ' \n'
- '# commented line\n'
- '1,2,3\n'
- '\n'
- '4,5,6')
+ data = "\na,b,c\n \n# commented line\n1,2,3\n\n4,5,6"
print(data)
- pd.read_csv(StringIO(data), comment='#')
+ pd.read_csv(StringIO(data), comment="#")
If ``skip_blank_lines=False``, then ``read_csv`` will not ignore blank lines:
.. ipython:: python
- data = ('a,b,c\n'
- '\n'
- '1,2,3\n'
- '\n'
- '\n'
- '4,5,6')
+ data = "a,b,c\n\n1,2,3\n\n\n4,5,6"
pd.read_csv(StringIO(data), skip_blank_lines=False)
.. warning::
@@ -672,32 +639,28 @@ If ``skip_blank_lines=False``, then ``read_csv`` will not ignore blank lines:
.. ipython:: python
- data = ('#comment\n'
- 'a,b,c\n'
- 'A,B,C\n'
- '1,2,3')
- pd.read_csv(StringIO(data), comment='#', header=1)
- data = ('A,B,C\n'
- '#comment\n'
- 'a,b,c\n'
- '1,2,3')
- pd.read_csv(StringIO(data), comment='#', skiprows=2)
+ data = "#comment\na,b,c\nA,B,C\n1,2,3"
+ pd.read_csv(StringIO(data), comment="#", header=1)
+ data = "A,B,C\n#comment\na,b,c\n1,2,3"
+ pd.read_csv(StringIO(data), comment="#", skiprows=2)
If both ``header`` and ``skiprows`` are specified, ``header`` will be
relative to the end of ``skiprows``. For example:
.. ipython:: python
- data = ('# empty\n'
- '# second empty line\n'
- '# third emptyline\n'
- 'X,Y,Z\n'
- '1,2,3\n'
- 'A,B,C\n'
- '1,2.,4.\n'
- '5.,NaN,10.0\n')
+ data = (
+ "# empty\n"
+ "# second empty line\n"
+ "# third emptyline\n"
+ "X,Y,Z\n"
+ "1,2,3\n"
+ "A,B,C\n"
+ "1,2.,4.\n"
+ "5.,NaN,10.0\n"
+ )
print(data)
- pd.read_csv(StringIO(data), comment='#', skiprows=4, header=1)
+ pd.read_csv(StringIO(data), comment="#", skiprows=4, header=1)
.. _io.comments:
@@ -709,36 +672,38 @@ Sometimes comments or meta data may be included in a file:
.. ipython:: python
:suppress:
- data = ("ID,level,category\n"
- "Patient1,123000,x # really unpleasant\n"
- "Patient2,23000,y # wouldn't take his medicine\n"
- "Patient3,1234018,z # awesome")
+ data = (
+ "ID,level,category\n"
+ "Patient1,123000,x # really unpleasant\n"
+ "Patient2,23000,y # wouldn't take his medicine\n"
+ "Patient3,1234018,z # awesome"
+ )
- with open('tmp.csv', 'w') as fh:
+ with open("tmp.csv", "w") as fh:
fh.write(data)
.. ipython:: python
- print(open('tmp.csv').read())
+ print(open("tmp.csv").read())
By default, the parser includes the comments in the output:
.. ipython:: python
- df = pd.read_csv('tmp.csv')
+ df = pd.read_csv("tmp.csv")
df
We can suppress the comments using the ``comment`` keyword:
.. ipython:: python
- df = pd.read_csv('tmp.csv', comment='#')
+ df = pd.read_csv("tmp.csv", comment="#")
df
.. ipython:: python
:suppress:
- os.remove('tmp.csv')
+ os.remove("tmp.csv")
.. _io.unicode:
@@ -751,13 +716,12 @@ result in byte strings being decoded to unicode in the result:
.. ipython:: python
from io import BytesIO
- data = (b'word,length\n'
- b'Tr\xc3\xa4umen,7\n'
- b'Gr\xc3\xbc\xc3\x9fe,5')
- data = data.decode('utf8').encode('latin-1')
- df = pd.read_csv(BytesIO(data), encoding='latin-1')
+
+ data = b"word,length\n" b"Tr\xc3\xa4umen,7\n" b"Gr\xc3\xbc\xc3\x9fe,5"
+ data = data.decode("utf8").encode("latin-1")
+ df = pd.read_csv(BytesIO(data), encoding="latin-1")
df
- df['word'][1]
+ df["word"][1]
Some formats which encode all characters as multiple bytes, like UTF-16, won't
parse correctly at all without specifying the encoding. `Full list of Python
@@ -774,16 +738,12 @@ first column will be used as the ``DataFrame``'s row names:
.. ipython:: python
- data = ('a,b,c\n'
- '4,apple,bat,5.7\n'
- '8,orange,cow,10')
+ data = "a,b,c\n4,apple,bat,5.7\n8,orange,cow,10"
pd.read_csv(StringIO(data))
.. ipython:: python
- data = ('index,a,b,c\n'
- '4,apple,bat,5.7\n'
- '8,orange,cow,10')
+ data = "index,a,b,c\n4,apple,bat,5.7\n8,orange,cow,10"
pd.read_csv(StringIO(data), index_col=0)
Ordinarily, you can achieve this behavior using the ``index_col`` option.
@@ -794,9 +754,7 @@ index column inference and discard the last column, pass ``index_col=False``:
.. ipython:: python
- data = ('a,b,c\n'
- '4,apple,bat,\n'
- '8,orange,cow,')
+ data = "a,b,c\n4,apple,bat,\n8,orange,cow,"
print(data)
pd.read_csv(StringIO(data))
pd.read_csv(StringIO(data), index_col=False)
@@ -806,12 +764,10 @@ If a subset of data is being parsed using the ``usecols`` option, the
.. ipython:: python
- data = ('a,b,c\n'
- '4,apple,bat,\n'
- '8,orange,cow,')
+ data = "a,b,c\n4,apple,bat,\n8,orange,cow,"
print(data)
- pd.read_csv(StringIO(data), usecols=['b', 'c'])
- pd.read_csv(StringIO(data), usecols=['b', 'c'], index_col=0)
+ pd.read_csv(StringIO(data), usecols=["b", "c"])
+ pd.read_csv(StringIO(data), usecols=["b", "c"], index_col=0)
.. _io.parse_dates:
@@ -831,14 +787,14 @@ The simplest case is to just pass in ``parse_dates=True``:
.. ipython:: python
:suppress:
- f = open('foo.csv', 'w')
- f.write('date,A,B,C\n20090101,a,1,2\n20090102,b,3,4\n20090103,c,4,5')
+ f = open("foo.csv", "w")
+ f.write("date,A,B,C\n20090101,a,1,2\n20090102,b,3,4\n20090103,c,4,5")
f.close()
.. ipython:: python
# Use a column as an index, and parse it as dates.
- df = pd.read_csv('foo.csv', index_col=0, parse_dates=True)
+ df = pd.read_csv("foo.csv", index_col=0, parse_dates=True)
df
# These are Python datetime objects
@@ -856,20 +812,22 @@ column names:
.. ipython:: python
:suppress:
- data = ("KORD,19990127, 19:00:00, 18:56:00, 0.8100\n"
- "KORD,19990127, 20:00:00, 19:56:00, 0.0100\n"
- "KORD,19990127, 21:00:00, 20:56:00, -0.5900\n"
- "KORD,19990127, 21:00:00, 21:18:00, -0.9900\n"
- "KORD,19990127, 22:00:00, 21:56:00, -0.5900\n"
- "KORD,19990127, 23:00:00, 22:56:00, -0.5900")
+ data = (
+ "KORD,19990127, 19:00:00, 18:56:00, 0.8100\n"
+ "KORD,19990127, 20:00:00, 19:56:00, 0.0100\n"
+ "KORD,19990127, 21:00:00, 20:56:00, -0.5900\n"
+ "KORD,19990127, 21:00:00, 21:18:00, -0.9900\n"
+ "KORD,19990127, 22:00:00, 21:56:00, -0.5900\n"
+ "KORD,19990127, 23:00:00, 22:56:00, -0.5900"
+ )
- with open('tmp.csv', 'w') as fh:
+ with open("tmp.csv", "w") as fh:
fh.write(data)
.. ipython:: python
- print(open('tmp.csv').read())
- df = pd.read_csv('tmp.csv', header=None, parse_dates=[[1, 2], [1, 3]])
+ print(open("tmp.csv").read())
+ df = pd.read_csv("tmp.csv", header=None, parse_dates=[[1, 2], [1, 3]])
df
By default the parser removes the component date columns, but you can choose
@@ -877,8 +835,9 @@ to retain them via the ``keep_date_col`` keyword:
.. ipython:: python
- df = pd.read_csv('tmp.csv', header=None, parse_dates=[[1, 2], [1, 3]],
- keep_date_col=True)
+ df = pd.read_csv(
+ "tmp.csv", header=None, parse_dates=[[1, 2], [1, 3]], keep_date_col=True
+ )
df
Note that if you wish to combine multiple columns into a single date column, a
@@ -891,8 +850,8 @@ You can also use a dict to specify custom name columns:
.. ipython:: python
- date_spec = {'nominal': [1, 2], 'actual': [1, 3]}
- df = pd.read_csv('tmp.csv', header=None, parse_dates=date_spec)
+ date_spec = {"nominal": [1, 2], "actual": [1, 3]}
+ df = pd.read_csv("tmp.csv", header=None, parse_dates=date_spec)
df
It is important to remember that if multiple text columns are to be parsed into
@@ -903,9 +862,10 @@ data columns:
.. ipython:: python
- date_spec = {'nominal': [1, 2], 'actual': [1, 3]}
- df = pd.read_csv('tmp.csv', header=None, parse_dates=date_spec,
- index_col=0) # index is the nominal column
+ date_spec = {"nominal": [1, 2], "actual": [1, 3]}
+ df = pd.read_csv(
+ "tmp.csv", header=None, parse_dates=date_spec, index_col=0
+ ) # index is the nominal column
df
.. note::
@@ -929,8 +889,9 @@ take full advantage of the flexibility of the date parsing API:
.. ipython:: python
- df = pd.read_csv('tmp.csv', header=None, parse_dates=date_spec,
- date_parser=pd.to_datetime)
+ df = pd.read_csv(
+ "tmp.csv", header=None, parse_dates=date_spec, date_parser=pd.to_datetime
+ )
df
Pandas will try to call the ``date_parser`` function in three different ways. If
@@ -957,7 +918,7 @@ Note that performance-wise, you should try these methods of parsing dates in ord
.. ipython:: python
:suppress:
- os.remove('tmp.csv')
+ os.remove("tmp.csv")
.. _io.csv.mixed_timezones:
@@ -976,17 +937,20 @@ an object-dtype column with strings, even with ``parse_dates``.
a
2000-01-01T00:00:00+05:00
2000-01-01T00:00:00+06:00"""
- df = pd.read_csv(StringIO(content), parse_dates=['a'])
- df['a']
+ df = pd.read_csv(StringIO(content), parse_dates=["a"])
+ df["a"]
To parse the mixed-timezone values as a datetime column, pass a partially-applied
:func:`to_datetime` with ``utc=True`` as the ``date_parser``.
.. ipython:: python
- df = pd.read_csv(StringIO(content), parse_dates=['a'],
- date_parser=lambda col: pd.to_datetime(col, utc=True))
- df['a']
+ df = pd.read_csv(
+ StringIO(content),
+ parse_dates=["a"],
+ date_parser=lambda col: pd.to_datetime(col, utc=True),
+ )
+ df["a"]
.. _io.dayfirst:
@@ -1022,14 +986,13 @@ Note that ``infer_datetime_format`` is sensitive to ``dayfirst``. With
.. ipython:: python
# Try to infer the format for the index column
- df = pd.read_csv('foo.csv', index_col=0, parse_dates=True,
- infer_datetime_format=True)
+ df = pd.read_csv("foo.csv", index_col=0, parse_dates=True, infer_datetime_format=True)
df
.. ipython:: python
:suppress:
- os.remove('foo.csv')
+ os.remove("foo.csv")
International date formats
++++++++++++++++++++++++++
@@ -1040,19 +1003,16 @@ DD/MM/YYYY instead. For convenience, a ``dayfirst`` keyword is provided:
.. ipython:: python
:suppress:
- data = ("date,value,cat\n"
- "1/6/2000,5,a\n"
- "2/6/2000,10,b\n"
- "3/6/2000,15,c")
- with open('tmp.csv', 'w') as fh:
+ data = "date,value,cat\n1/6/2000,5,a\n2/6/2000,10,b\n3/6/2000,15,c"
+ with open("tmp.csv", "w") as fh:
fh.write(data)
.. ipython:: python
- print(open('tmp.csv').read())
+ print(open("tmp.csv").read())
- pd.read_csv('tmp.csv', parse_dates=[0])
- pd.read_csv('tmp.csv', dayfirst=True, parse_dates=[0])
+ pd.read_csv("tmp.csv", parse_dates=[0])
+ pd.read_csv("tmp.csv", dayfirst=True, parse_dates=[0])
Writing CSVs to binary file objects
+++++++++++++++++++++++++++++++++++
@@ -1084,14 +1044,16 @@ writing to a file). For example:
.. ipython:: python
- val = '0.3066101993807095471566981359501369297504425048828125'
- data = 'a,b,c\n1,2,{0}'.format(val)
- abs(pd.read_csv(StringIO(data), engine='c',
- float_precision=None)['c'][0] - float(val))
- abs(pd.read_csv(StringIO(data), engine='c',
- float_precision='high')['c'][0] - float(val))
- abs(pd.read_csv(StringIO(data), engine='c',
- float_precision='round_trip')['c'][0] - float(val))
+ val = "0.3066101993807095471566981359501369297504425048828125"
+ data = "a,b,c\n1,2,{0}".format(val)
+ abs(pd.read_csv(StringIO(data), engine="c", float_precision=None)["c"][0] - float(val))
+ abs(
+ pd.read_csv(StringIO(data), engine="c", float_precision="high")["c"][0] - float(val)
+ )
+ abs(
+ pd.read_csv(StringIO(data), engine="c", float_precision="round_trip")["c"][0]
+ - float(val)
+ )
.. _io.thousands:
@@ -1106,20 +1068,22 @@ correctly:
.. ipython:: python
:suppress:
- data = ("ID|level|category\n"
- "Patient1|123,000|x\n"
- "Patient2|23,000|y\n"
- "Patient3|1,234,018|z")
+ data = (
+ "ID|level|category\n"
+ "Patient1|123,000|x\n"
+ "Patient2|23,000|y\n"
+ "Patient3|1,234,018|z"
+ )
- with open('tmp.csv', 'w') as fh:
+ with open("tmp.csv", "w") as fh:
fh.write(data)
By default, numbers with a thousands separator will be parsed as strings:
.. ipython:: python
- print(open('tmp.csv').read())
- df = pd.read_csv('tmp.csv', sep='|')
+ print(open("tmp.csv").read())
+ df = pd.read_csv("tmp.csv", sep="|")
df
df.level.dtype
@@ -1128,8 +1092,8 @@ The ``thousands`` keyword allows integers to be parsed correctly:
.. ipython:: python
- print(open('tmp.csv').read())
- df = pd.read_csv('tmp.csv', sep='|', thousands=',')
+ print(open("tmp.csv").read())
+ df = pd.read_csv("tmp.csv", sep="|", thousands=",")
df
df.level.dtype
@@ -1137,7 +1101,7 @@ The ``thousands`` keyword allows integers to be parsed correctly:
.. ipython:: python
:suppress:
- os.remove('tmp.csv')
+ os.remove("tmp.csv")
.. _io.na_values:
@@ -1162,7 +1126,7 @@ Let us consider some examples:
.. code-block:: python
- pd.read_csv('path_to_file.csv', na_values=[5])
+ pd.read_csv("path_to_file.csv", na_values=[5])
In the example above ``5`` and ``5.0`` will be recognized as ``NaN``, in
addition to the defaults. A string will first be interpreted as a numerical
@@ -1170,19 +1134,19 @@ addition to the defaults. A string will first be interpreted as a numerical
.. code-block:: python
- pd.read_csv('path_to_file.csv', keep_default_na=False, na_values=[""])
+ pd.read_csv("path_to_file.csv", keep_default_na=False, na_values=[""])
Above, only an empty field will be recognized as ``NaN``.
.. code-block:: python
- pd.read_csv('path_to_file.csv', keep_default_na=False, na_values=["NA", "0"])
+ pd.read_csv("path_to_file.csv", keep_default_na=False, na_values=["NA", "0"])
Above, both ``NA`` and ``0`` as strings are ``NaN``.
.. code-block:: python
- pd.read_csv('path_to_file.csv', na_values=["Nope"])
+ pd.read_csv("path_to_file.csv", na_values=["Nope"])
The default values, in addition to the string ``"Nope"``, are recognized as
``NaN``.
@@ -1205,19 +1169,16 @@ as a ``Series``:
.. ipython:: python
:suppress:
- data = ("level\n"
- "Patient1,123000\n"
- "Patient2,23000\n"
- "Patient3,1234018")
+ data = "level\nPatient1,123000\nPatient2,23000\nPatient3,1234018"
- with open('tmp.csv', 'w') as fh:
+ with open("tmp.csv", "w") as fh:
fh.write(data)
.. ipython:: python
- print(open('tmp.csv').read())
+ print(open("tmp.csv").read())
- output = pd.read_csv('tmp.csv', squeeze=True)
+ output = pd.read_csv("tmp.csv", squeeze=True)
output
type(output)
@@ -1225,7 +1186,7 @@ as a ``Series``:
.. ipython:: python
:suppress:
- os.remove('tmp.csv')
+ os.remove("tmp.csv")
.. _io.boolean:
@@ -1239,12 +1200,10 @@ options as follows:
.. ipython:: python
- data = ('a,b,c\n'
- '1,Yes,2\n'
- '3,No,4')
+ data = "a,b,c\n1,Yes,2\n3,No,4"
print(data)
pd.read_csv(StringIO(data))
- pd.read_csv(StringIO(data), true_values=['Yes'], false_values=['No'])
+ pd.read_csv(StringIO(data), true_values=["Yes"], false_values=["No"])
.. _io.bad_lines:
@@ -1258,10 +1217,7 @@ too many fields will raise an error by default:
.. ipython:: python
:okexcept:
- data = ('a,b,c\n'
- '1,2,3\n'
- '4,5,6,7\n'
- '8,9,10')
+ data = "a,b,c\n1,2,3\n4,5,6,7\n8,9,10"
pd.read_csv(StringIO(data))
You can elect to skip bad lines:
@@ -1301,9 +1257,7 @@ or a :class:`python:csv.Dialect` instance.
.. ipython:: python
:suppress:
- data = ('label1,label2,label3\n'
- 'index1,"a,c,e\n'
- 'index2,b,d,f')
+ data = "label1,label2,label3\n" 'index1,"a,c,e\n' "index2,b,d,f"
Suppose you had data with unenclosed quotes:
@@ -1321,6 +1275,7 @@ We can get around this using ``dialect``:
:okwarning:
import csv
+
dia = csv.excel()
dia.quoting = csv.QUOTE_NONE
pd.read_csv(StringIO(data), dialect=dia)
@@ -1329,15 +1284,15 @@ All of the dialect options can be specified separately by keyword arguments:
.. ipython:: python
- data = 'a,b,c~1,2,3~4,5,6'
- pd.read_csv(StringIO(data), lineterminator='~')
+ data = "a,b,c~1,2,3~4,5,6"
+ pd.read_csv(StringIO(data), lineterminator="~")
Another common dialect option is ``skipinitialspace``, to skip any whitespace
after a delimiter:
.. ipython:: python
- data = 'a, b, c\n1, 2, 3\n4, 5, 6'
+ data = "a, b, c\n1, 2, 3\n4, 5, 6"
print(data)
pd.read_csv(StringIO(data), skipinitialspace=True)
@@ -1359,7 +1314,7 @@ should pass the ``escapechar`` option:
data = 'a,b\n"hello, \\"Bob\\", nice to see you",5'
print(data)
- pd.read_csv(StringIO(data), escapechar='\\')
+ pd.read_csv(StringIO(data), escapechar="\\")
.. _io.fwf_reader:
.. _io.fwf:
@@ -1386,12 +1341,14 @@ a different usage of the ``delimiter`` parameter:
.. ipython:: python
:suppress:
- f = open('bar.csv', 'w')
- data1 = ("id8141 360.242940 149.910199 11950.7\n"
- "id1594 444.953632 166.985655 11788.4\n"
- "id1849 364.136849 183.628767 11806.2\n"
- "id1230 413.836124 184.375703 11916.8\n"
- "id1948 502.953953 173.237159 12468.3")
+ f = open("bar.csv", "w")
+ data1 = (
+ "id8141 360.242940 149.910199 11950.7\n"
+ "id1594 444.953632 166.985655 11788.4\n"
+ "id1849 364.136849 183.628767 11806.2\n"
+ "id1230 413.836124 184.375703 11916.8\n"
+ "id1948 502.953953 173.237159 12468.3"
+ )
f.write(data1)
f.close()
@@ -1399,7 +1356,7 @@ Consider a typical fixed-width data file:
.. ipython:: python
- print(open('bar.csv').read())
+ print(open("bar.csv").read())
In order to parse this file into a ``DataFrame``, we simply need to supply the
column specifications to the ``read_fwf`` function along with the file name:
@@ -1408,7 +1365,7 @@ column specifications to the ``read_fwf`` function along with the file name:
# Column specifications are a list of half-intervals
colspecs = [(0, 6), (8, 20), (21, 33), (34, 43)]
- df = pd.read_fwf('bar.csv', colspecs=colspecs, header=None, index_col=0)
+ df = pd.read_fwf("bar.csv", colspecs=colspecs, header=None, index_col=0)
df
Note how the parser automatically picks column names X.<column number> when
@@ -1419,7 +1376,7 @@ column widths for contiguous columns:
# Widths are a list of integers
widths = [6, 14, 13, 10]
- df = pd.read_fwf('bar.csv', widths=widths, header=None)
+ df = pd.read_fwf("bar.csv", widths=widths, header=None)
df
The parser will take care of extra white spaces around the columns
@@ -1432,7 +1389,7 @@ is whitespace).
.. ipython:: python
- df = pd.read_fwf('bar.csv', header=None, index_col=0)
+ df = pd.read_fwf("bar.csv", header=None, index_col=0)
df
``read_fwf`` supports the ``dtype`` parameter for specifying the types of
@@ -1440,13 +1397,13 @@ parsed columns to be different from the inferred type.
.. ipython:: python
- pd.read_fwf('bar.csv', header=None, index_col=0).dtypes
- pd.read_fwf('bar.csv', header=None, dtype={2: 'object'}).dtypes
+ pd.read_fwf("bar.csv", header=None, index_col=0).dtypes
+ pd.read_fwf("bar.csv", header=None, dtype={2: "object"}).dtypes
.. ipython:: python
:suppress:
- os.remove('bar.csv')
+ os.remove("bar.csv")
Indexes
@@ -1458,8 +1415,8 @@ Files with an "implicit" index column
.. ipython:: python
:suppress:
- f = open('foo.csv', 'w')
- f.write('A,B,C\n20090101,a,1,2\n20090102,b,3,4\n20090103,c,4,5')
+ f = open("foo.csv", "w")
+ f.write("A,B,C\n20090101,a,1,2\n20090102,b,3,4\n20090103,c,4,5")
f.close()
Consider a file with one less entry in the header than the number of data
@@ -1467,27 +1424,27 @@ column:
.. ipython:: python
- print(open('foo.csv').read())
+ print(open("foo.csv").read())
In this special case, ``read_csv`` assumes that the first column is to be used
as the index of the ``DataFrame``:
.. ipython:: python
- pd.read_csv('foo.csv')
+ pd.read_csv("foo.csv")
Note that the dates weren't automatically parsed. In that case you would need
to do as before:
.. ipython:: python
- df = pd.read_csv('foo.csv', parse_dates=True)
+ df = pd.read_csv("foo.csv", parse_dates=True)
df.index
.. ipython:: python
:suppress:
- os.remove('foo.csv')
+ os.remove("foo.csv")
Reading an index with a ``MultiIndex``
@@ -1499,7 +1456,7 @@ Suppose you have data indexed by two columns:
.. ipython:: python
- print(open('data/mindex_ex.csv').read())
+ print(open("data/mindex_ex.csv").read())
The ``index_col`` argument to ``read_csv`` can take a list of
column numbers to turn multiple columns into a ``MultiIndex`` for the index of the
@@ -1523,10 +1480,11 @@ rows will skip the intervening rows.
.. ipython:: python
from pandas._testing import makeCustomDataframe as mkdf
+
df = mkdf(5, 3, r_idx_nlevels=2, c_idx_nlevels=4)
- df.to_csv('mi.csv')
- print(open('mi.csv').read())
- pd.read_csv('mi.csv', header=[0, 1, 2, 3], index_col=[0, 1])
+ df.to_csv("mi.csv")
+ print(open("mi.csv").read())
+ pd.read_csv("mi.csv", header=[0, 1, 2, 3], index_col=[0, 1])
``read_csv`` is also able to interpret a more common format
of multi-columns indices.
@@ -1535,14 +1493,14 @@ of multi-columns indices.
:suppress:
data = ",a,a,a,b,c,c\n,q,r,s,t,u,v\none,1,2,3,4,5,6\ntwo,7,8,9,10,11,12"
- fh = open('mi2.csv', 'w')
+ fh = open("mi2.csv", "w")
fh.write(data)
fh.close()
.. ipython:: python
- print(open('mi2.csv').read())
- pd.read_csv('mi2.csv', header=[0, 1], index_col=0)
+ print(open("mi2.csv").read())
+ pd.read_csv("mi2.csv", header=[0, 1], index_col=0)
Note: If an ``index_col`` is not specified (e.g. you don't have an index, or wrote it
with ``df.to_csv(..., index=False)``), then any ``names`` on the columns index will be *lost*.
@@ -1550,8 +1508,8 @@ with ``df.to_csv(..., index=False)``, then any ``names`` on the columns index wi
.. ipython:: python
:suppress:
- os.remove('mi.csv')
- os.remove('mi2.csv')
+ os.remove("mi.csv")
+ os.remove("mi2.csv")
.. _io.sniff:
@@ -1566,13 +1524,13 @@ class of the csv module. For this, you have to specify ``sep=None``.
:suppress:
df = pd.DataFrame(np.random.randn(10, 4))
- df.to_csv('tmp.sv', sep='|')
- df.to_csv('tmp2.sv', sep=':')
+ df.to_csv("tmp.sv", sep="|")
+ df.to_csv("tmp2.sv", sep=":")
.. ipython:: python
- print(open('tmp2.sv').read())
- pd.read_csv('tmp2.sv', sep=None, engine='python')
+ print(open("tmp2.sv").read())
+ pd.read_csv("tmp2.sv", sep=None, engine="python")
.. _io.multiple_files:
@@ -1593,8 +1551,8 @@ rather than reading the entire file into memory, such as the following:
.. ipython:: python
- print(open('tmp.sv').read())
- table = pd.read_csv('tmp.sv', sep='|')
+ print(open("tmp.sv").read())
+ table = pd.read_csv("tmp.sv", sep="|")
table
@@ -1603,7 +1561,7 @@ value will be an iterable object of type ``TextFileReader``:
.. ipython:: python
- reader = pd.read_csv('tmp.sv', sep='|', chunksize=4)
+ reader = pd.read_csv("tmp.sv", sep="|", chunksize=4)
reader
for chunk in reader:
@@ -1614,14 +1572,14 @@ Specifying ``iterator=True`` will also return the ``TextFileReader`` object:
.. ipython:: python
- reader = pd.read_csv('tmp.sv', sep='|', iterator=True)
+ reader = pd.read_csv("tmp.sv", sep="|", iterator=True)
reader.get_chunk(5)
.. ipython:: python
:suppress:
- os.remove('tmp.sv')
- os.remove('tmp2.sv')
+ os.remove("tmp.sv")
+ os.remove("tmp2.sv")
Specifying the parser engine
''''''''''''''''''''''''''''
@@ -1649,8 +1607,7 @@ functions - the following example shows reading a CSV file:
.. code-block:: python
- df = pd.read_csv('https://download.bls.gov/pub/time.series/cu/cu.item',
- sep='\t')
+ df = pd.read_csv("https://download.bls.gov/pub/time.series/cu/cu.item", sep="\t")
All URLs which are not local files or HTTP(s) are handled by
`fsspec`_, if installed, and its various filesystem implementations
@@ -1662,7 +1619,7 @@ S3 URLs require the `s3fs
.. code-block:: python
- df = pd.read_json('s3://pandas-test/adatafile.json')
+ df = pd.read_json("s3://pandas-test/adatafile.json")
When dealing with remote storage systems, you might need
extra configuration with environment variables or config files in
@@ -1683,9 +1640,11 @@ specifying an anonymous connection, such as
.. code-block:: python
- pd.read_csv("s3://ncei-wcsd-archive/data/processed/SH1305/18kHz/SaKe2013"
- "-D20130523-T080854_to_SaKe2013-D20130523-T085643.csv",
- storage_options={"anon": True})
+ pd.read_csv(
+ "s3://ncei-wcsd-archive/data/processed/SH1305/18kHz/SaKe2013"
+ "-D20130523-T080854_to_SaKe2013-D20130523-T085643.csv",
+ storage_options={"anon": True},
+ )
``fsspec`` also allows complex URLs, for accessing data in compressed
archives, local caching of files, and more. To locally cache the above
@@ -1693,9 +1652,11 @@ example, you would modify the call to
.. code-block:: python
- pd.read_csv("simplecache::s3://ncei-wcsd-archive/data/processed/SH1305/18kHz/"
- "SaKe2013-D20130523-T080854_to_SaKe2013-D20130523-T085643.csv",
- storage_options={"s3": {"anon": True}})
+ pd.read_csv(
+ "simplecache::s3://ncei-wcsd-archive/data/processed/SH1305/18kHz/"
+ "SaKe2013-D20130523-T080854_to_SaKe2013-D20130523-T085643.csv",
+ storage_options={"s3": {"anon": True}},
+ )
where we specify that the "anon" parameter is meant for the "s3" part of
the implementation, not to the caching implementation. Note that this caches to a temporary
@@ -1819,7 +1780,7 @@ Note ``NaN``'s, ``NaT``'s and ``None`` will be converted to ``null`` and ``datet
.. ipython:: python
- dfj = pd.DataFrame(np.random.randn(5, 2), columns=list('AB'))
+ dfj = pd.DataFrame(np.random.randn(5, 2), columns=list("AB"))
json = dfj.to_json()
json
@@ -1831,10 +1792,13 @@ file / string. Consider the following ``DataFrame`` and ``Series``:
.. ipython:: python
- dfjo = pd.DataFrame(dict(A=range(1, 4), B=range(4, 7), C=range(7, 10)),
- columns=list('ABC'), index=list('xyz'))
+ dfjo = pd.DataFrame(
+ dict(A=range(1, 4), B=range(4, 7), C=range(7, 10)),
+ columns=list("ABC"),
+ index=list("xyz"),
+ )
dfjo
- sjo = pd.Series(dict(x=15, y=16, z=17), name='D')
+ sjo = pd.Series(dict(x=15, y=16, z=17), name="D")
sjo
**Column oriented** (the default for ``DataFrame``) serializes the data as
@@ -1894,24 +1858,24 @@ Writing in ISO date format:
.. ipython:: python
- dfd = pd.DataFrame(np.random.randn(5, 2), columns=list('AB'))
- dfd['date'] = pd.Timestamp('20130101')
+ dfd = pd.DataFrame(np.random.randn(5, 2), columns=list("AB"))
+ dfd["date"] = pd.Timestamp("20130101")
dfd = dfd.sort_index(1, ascending=False)
- json = dfd.to_json(date_format='iso')
+ json = dfd.to_json(date_format="iso")
json
Writing in ISO date format, with microseconds:
.. ipython:: python
- json = dfd.to_json(date_format='iso', date_unit='us')
+ json = dfd.to_json(date_format="iso", date_unit="us")
json
Epoch timestamps, in seconds:
.. ipython:: python
- json = dfd.to_json(date_format='epoch', date_unit='s')
+ json = dfd.to_json(date_format="epoch", date_unit="s")
json
Writing to a file, with a date index and a date column:
@@ -1919,13 +1883,13 @@ Writing to a file, with a date index and a date column:
.. ipython:: python
dfj2 = dfj.copy()
- dfj2['date'] = pd.Timestamp('20130101')
- dfj2['ints'] = list(range(5))
- dfj2['bools'] = True
- dfj2.index = pd.date_range('20130101', periods=5)
- dfj2.to_json('test.json')
+ dfj2["date"] = pd.Timestamp("20130101")
+ dfj2["ints"] = list(range(5))
+ dfj2["bools"] = True
+ dfj2.index = pd.date_range("20130101", periods=5)
+ dfj2.to_json("test.json")
- with open('test.json') as fh:
+ with open("test.json") as fh:
print(fh.read())
Fallback behavior
@@ -2060,26 +2024,27 @@ Reading from a file:
.. ipython:: python
- pd.read_json('test.json')
+ pd.read_json("test.json")
Don't convert any data (but still convert axes and dates):
.. ipython:: python
- pd.read_json('test.json', dtype=object).dtypes
+ pd.read_json("test.json", dtype=object).dtypes
Specify dtypes for conversion:
.. ipython:: python
- pd.read_json('test.json', dtype={'A': 'float32', 'bools': 'int8'}).dtypes
+ pd.read_json("test.json", dtype={"A": "float32", "bools": "int8"}).dtypes
Preserve string indices:
.. ipython:: python
- si = pd.DataFrame(np.zeros((4, 4)), columns=list(range(4)),
- index=[str(i) for i in range(4)])
+ si = pd.DataFrame(
+ np.zeros((4, 4)), columns=list(range(4)), index=[str(i) for i in range(4)]
+ )
si
si.index
si.columns
@@ -2094,10 +2059,10 @@ Dates written in nanoseconds need to be read back in nanoseconds:
.. ipython:: python
- json = dfj2.to_json(date_unit='ns')
+ json = dfj2.to_json(date_unit="ns")
# Try to parse timestamps as milliseconds -> Won't Work
- dfju = pd.read_json(json, date_unit='ms')
+ dfju = pd.read_json(json, date_unit="ms")
dfju
# Let pandas detect the correct precision
@@ -2105,7 +2070,7 @@ Dates written in nanoseconds need to be read back in nanoseconds:
dfju
# Or specify that all timestamps are in nanoseconds
- dfju = pd.read_json(json, date_unit='ns')
+ dfju = pd.read_json(json, date_unit="ns")
dfju
The Numpy parameter
@@ -2127,7 +2092,7 @@ data:
randfloats = np.random.uniform(-100, 1000, 10000)
randfloats.shape = (1000, 10)
- dffloats = pd.DataFrame(randfloats, columns=list('ABCDEFGHIJ'))
+ dffloats = pd.DataFrame(randfloats, columns=list("ABCDEFGHIJ"))
jsonfloats = dffloats.to_json()
@@ -2174,7 +2139,7 @@ The speedup is less noticeable for smaller datasets:
.. ipython:: python
:suppress:
- os.remove('test.json')
+ os.remove("test.json")
.. _io.json_normalize:
@@ -2186,38 +2151,54 @@ into a flat table.
.. ipython:: python
- data = [{'id': 1, 'name': {'first': 'Coleen', 'last': 'Volk'}},
- {'name': {'given': 'Mose', 'family': 'Regner'}},
- {'id': 2, 'name': 'Faye Raker'}]
+ data = [
+ {"id": 1, "name": {"first": "Coleen", "last": "Volk"}},
+ {"name": {"given": "Mose", "family": "Regner"}},
+ {"id": 2, "name": "Faye Raker"},
+ ]
pd.json_normalize(data)
.. ipython:: python
- data = [{'state': 'Florida',
- 'shortname': 'FL',
- 'info': {'governor': 'Rick Scott'},
- 'county': [{'name': 'Dade', 'population': 12345},
- {'name': 'Broward', 'population': 40000},
- {'name': 'Palm Beach', 'population': 60000}]},
- {'state': 'Ohio',
- 'shortname': 'OH',
- 'info': {'governor': 'John Kasich'},
- 'county': [{'name': 'Summit', 'population': 1234},
- {'name': 'Cuyahoga', 'population': 1337}]}]
-
- pd.json_normalize(data, 'county', ['state', 'shortname', ['info', 'governor']])
+ data = [
+ {
+ "state": "Florida",
+ "shortname": "FL",
+ "info": {"governor": "Rick Scott"},
+ "county": [
+ {"name": "Dade", "population": 12345},
+ {"name": "Broward", "population": 40000},
+ {"name": "Palm Beach", "population": 60000},
+ ],
+ },
+ {
+ "state": "Ohio",
+ "shortname": "OH",
+ "info": {"governor": "John Kasich"},
+ "county": [
+ {"name": "Summit", "population": 1234},
+ {"name": "Cuyahoga", "population": 1337},
+ ],
+ },
+ ]
+
+ pd.json_normalize(data, "county", ["state", "shortname", ["info", "governor"]])
The ``max_level`` parameter provides more control over the level at which to end normalization.
With ``max_level=1``, the following snippet normalizes only to the first nesting level of the provided dict.
.. ipython:: python
- data = [{'CreatedBy': {'Name': 'User001'},
- 'Lookup': {'TextField': 'Some text',
- 'UserField': {'Id': 'ID001',
- 'Name': 'Name001'}},
- 'Image': {'a': 'b'}
- }]
+ data = [
+ {
+ "CreatedBy": {"Name": "User001"},
+ "Lookup": {
+ "TextField": "Some text",
+ "UserField": {"Id": "ID001", "Name": "Name001"},
+ },
+ "Image": {"a": "b"},
+ }
+ ]
pd.json_normalize(data, max_level=1)
.. _io.jsonl:
@@ -2232,13 +2213,13 @@ For line-delimited json files, pandas can also return an iterator which reads in
.. ipython:: python
- jsonl = '''
+ jsonl = """
{"a": 1, "b": 2}
{"a": 3, "b": 4}
- '''
+ """
df = pd.read_json(jsonl, lines=True)
df
- df.to_json(orient='records', lines=True)
+ df.to_json(orient="records", lines=True)
# reader is an iterator that returns ``chunksize`` lines each iteration
reader = pd.read_json(StringIO(jsonl), lines=True, chunksize=1)
@@ -2258,12 +2239,16 @@ a JSON string with two fields, ``schema`` and ``data``.
.. ipython:: python
- df = pd.DataFrame({'A': [1, 2, 3],
- 'B': ['a', 'b', 'c'],
- 'C': pd.date_range('2016-01-01', freq='d', periods=3)},
- index=pd.Index(range(3), name='idx'))
+ df = pd.DataFrame(
+ {
+ "A": [1, 2, 3],
+ "B": ["a", "b", "c"],
+ "C": pd.date_range("2016-01-01", freq="d", periods=3),
+ },
+ index=pd.Index(range(3), name="idx"),
+ )
df
- df.to_json(orient='table', date_format="iso")
+ df.to_json(orient="table", date_format="iso")
The ``schema`` field contains the ``fields`` key, which itself contains
a list of column name to type pairs, including the ``Index`` or ``MultiIndex``
@@ -2302,7 +2287,8 @@ A few notes on the generated table schema:
.. ipython:: python
from pandas.io.json import build_table_schema
- s = pd.Series(pd.date_range('2016', periods=4))
+
+ s = pd.Series(pd.date_range("2016", periods=4))
build_table_schema(s)
* datetimes with a timezone (before serializing), include an additional field
@@ -2310,8 +2296,7 @@ A few notes on the generated table schema:
.. ipython:: python
- s_tz = pd.Series(pd.date_range('2016', periods=12,
- tz='US/Central'))
+ s_tz = pd.Series(pd.date_range("2016", periods=12, tz="US/Central"))
build_table_schema(s_tz)
* Periods are converted to timestamps before serialization, and so have the
@@ -2320,8 +2305,7 @@ A few notes on the generated table schema:
.. ipython:: python
- s_per = pd.Series(1, index=pd.period_range('2016', freq='A-DEC',
- periods=4))
+ s_per = pd.Series(1, index=pd.period_range("2016", freq="A-DEC", periods=4))
build_table_schema(s_per)
* Categoricals use the ``any`` type and an ``enum`` constraint listing
@@ -2329,7 +2313,7 @@ A few notes on the generated table schema:
.. ipython:: python
- s_cat = pd.Series(pd.Categorical(['a', 'b', 'a']))
+ s_cat = pd.Series(pd.Categorical(["a", "b", "a"]))
build_table_schema(s_cat)
* A ``primaryKey`` field, containing an array of labels, is included
@@ -2345,8 +2329,7 @@ A few notes on the generated table schema:
.. ipython:: python
- s_multi = pd.Series(1, index=pd.MultiIndex.from_product([('a', 'b'),
- (0, 1)]))
+ s_multi = pd.Series(1, index=pd.MultiIndex.from_product([("a", "b"), (0, 1)]))
build_table_schema(s_multi)
* The default naming roughly follows these rules:
@@ -2366,16 +2349,20 @@ round-trippable manner.
.. ipython:: python
- df = pd.DataFrame({'foo': [1, 2, 3, 4],
- 'bar': ['a', 'b', 'c', 'd'],
- 'baz': pd.date_range('2018-01-01', freq='d', periods=4),
- 'qux': pd.Categorical(['a', 'b', 'c', 'c'])
- }, index=pd.Index(range(4), name='idx'))
+ df = pd.DataFrame(
+ {
+ "foo": [1, 2, 3, 4],
+ "bar": ["a", "b", "c", "d"],
+ "baz": pd.date_range("2018-01-01", freq="d", periods=4),
+ "qux": pd.Categorical(["a", "b", "c", "c"]),
+ },
+ index=pd.Index(range(4), name="idx"),
+ )
df
df.dtypes
- df.to_json('test.json', orient='table')
- new_df = pd.read_json('test.json', orient='table')
+ df.to_json("test.json", orient="table")
+ new_df = pd.read_json("test.json", orient="table")
new_df
new_df.dtypes
@@ -2387,15 +2374,15 @@ indicate missing values and the subsequent read cannot distinguish the intent.
.. ipython:: python
:okwarning:
- df.index.name = 'index'
- df.to_json('test.json', orient='table')
- new_df = pd.read_json('test.json', orient='table')
+ df.index.name = "index"
+ df.to_json("test.json", orient="table")
+ new_df = pd.read_json("test.json", orient="table")
print(new_df.index.name)
.. ipython:: python
:suppress:
- os.remove('test.json')
+ os.remove("test.json")
.. _Table Schema: https://specs.frictionlessdata.io/table-schema/
@@ -2425,7 +2412,7 @@ Read a URL with no options:
.. ipython:: python
- url = 'https://www.fdic.gov/bank/individual/failed/banklist.html'
+ url = "https://www.fdic.gov/bank/individual/failed/banklist.html"
dfs = pd.read_html(url)
dfs
@@ -2440,11 +2427,11 @@ as a string:
.. ipython:: python
:suppress:
- file_path = os.path.abspath(os.path.join('source', '_static', 'banklist.html'))
+ file_path = os.path.abspath(os.path.join("source", "_static", "banklist.html"))
.. ipython:: python
- with open(file_path, 'r') as f:
+ with open(file_path, "r") as f:
dfs = pd.read_html(f.read())
dfs
@@ -2452,7 +2439,7 @@ You can even pass in an instance of ``StringIO`` if you so desire:
.. ipython:: python
- with open(file_path, 'r') as f:
+ with open(file_path, "r") as f:
sio = StringIO(f.read())
dfs = pd.read_html(sio)
@@ -2471,7 +2458,7 @@ Read a URL and match a table that contains specific text:
.. code-block:: python
- match = 'Metcalf Bank'
+ match = "Metcalf Bank"
df_list = pd.read_html(url, match=match)
Specify a header row (by default ``<th>`` or ``<td>`` elements located within a
@@ -2506,15 +2493,15 @@ Specify an HTML attribute:
.. code-block:: python
- dfs1 = pd.read_html(url, attrs={'id': 'table'})
- dfs2 = pd.read_html(url, attrs={'class': 'sortable'})
+ dfs1 = pd.read_html(url, attrs={"id": "table"})
+ dfs2 = pd.read_html(url, attrs={"class": "sortable"})
print(np.array_equal(dfs1[0], dfs2[0])) # Should be True
Specify values that should be converted to NaN:
.. code-block:: python
- dfs = pd.read_html(url, na_values=['No Acquirer'])
+ dfs = pd.read_html(url, na_values=["No Acquirer"])
Specify whether to keep the default set of NaN values:
@@ -2529,22 +2516,21 @@ columns to strings.
.. code-block:: python
- url_mcc = 'https://en.wikipedia.org/wiki/Mobile_country_code'
- dfs = pd.read_html(url_mcc, match='Telekom Albania', header=0,
- converters={'MNC': str})
+ url_mcc = "https://en.wikipedia.org/wiki/Mobile_country_code"
+ dfs = pd.read_html(url_mcc, match="Telekom Albania", header=0, converters={"MNC": str})
Use some combination of the above:
.. code-block:: python
- dfs = pd.read_html(url, match='Metcalf Bank', index_col=0)
+ dfs = pd.read_html(url, match="Metcalf Bank", index_col=0)
Read in pandas ``to_html`` output (with some loss of floating point precision):
.. code-block:: python
df = pd.DataFrame(np.random.randn(2, 2))
- s = df.to_html(float_format='{0:.40g}'.format)
+ s = df.to_html(float_format="{0:.40g}".format)
dfin = pd.read_html(s, index_col=0)
The ``lxml`` backend will raise an error on a failed parse if that is the only
@@ -2554,13 +2540,13 @@ for example, the function expects a sequence of strings. You may use:
.. code-block:: python
- dfs = pd.read_html(url, 'Metcalf Bank', index_col=0, flavor=['lxml'])
+ dfs = pd.read_html(url, "Metcalf Bank", index_col=0, flavor=["lxml"])
Or you could pass ``flavor='lxml'`` without a list:
.. code-block:: python
- dfs = pd.read_html(url, 'Metcalf Bank', index_col=0, flavor='lxml')
+ dfs = pd.read_html(url, "Metcalf Bank", index_col=0, flavor="lxml")
However, if you have bs4 and html5lib installed and pass ``None`` or ``['lxml',
'bs4']`` then the parse will most likely succeed. Note that *as soon as a parse
@@ -2568,7 +2554,7 @@ succeeds, the function will return*.
.. code-block:: python
- dfs = pd.read_html(url, 'Metcalf Bank', index_col=0, flavor=['lxml', 'bs4'])
+ dfs = pd.read_html(url, "Metcalf Bank", index_col=0, flavor=["lxml", "bs4"])
.. _io.html:
@@ -2590,8 +2576,8 @@ in the method ``to_string`` described above.
:suppress:
def write_html(df, filename, *args, **kwargs):
- static = os.path.abspath(os.path.join('source', '_static'))
- with open(os.path.join(static, filename + '.html'), 'w') as f:
+ static = os.path.abspath(os.path.join("source", "_static"))
+ with open(os.path.join(static, filename + ".html"), "w") as f:
df.to_html(f, *args, **kwargs)
.. ipython:: python
@@ -2603,7 +2589,7 @@ in the method ``to_string`` described above.
.. ipython:: python
:suppress:
- write_html(df, 'basic')
+ write_html(df, "basic")
HTML:
@@ -2619,7 +2605,7 @@ The ``columns`` argument will limit the columns shown:
.. ipython:: python
:suppress:
- write_html(df, 'columns', columns=[0])
+ write_html(df, "columns", columns=[0])
HTML:
@@ -2631,12 +2617,12 @@ point values:
.. ipython:: python
- print(df.to_html(float_format='{0:.10f}'.format))
+ print(df.to_html(float_format="{0:.10f}".format))
.. ipython:: python
:suppress:
- write_html(df, 'float_format', float_format='{0:.10f}'.format)
+ write_html(df, "float_format", float_format="{0:.10f}".format)
HTML:
@@ -2653,7 +2639,7 @@ off:
.. ipython:: python
:suppress:
- write_html(df, 'nobold', bold_rows=False)
+ write_html(df, "nobold", bold_rows=False)
.. raw:: html
:file: ../_static/nobold.html
@@ -2664,7 +2650,7 @@ table CSS classes. Note that these classes are *appended* to the existing
.. ipython:: python
- print(df.to_html(classes=['awesome_table_class', 'even_more_awesome_class']))
+ print(df.to_html(classes=["awesome_table_class", "even_more_awesome_class"]))
The ``render_links`` argument provides the ability to add hyperlinks to cells
that contain URLs.
@@ -2673,15 +2659,18 @@ that contain URLs.
.. ipython:: python
- url_df = pd.DataFrame({
- 'name': ['Python', 'Pandas'],
- 'url': ['https://www.python.org/', 'https://pandas.pydata.org']})
+ url_df = pd.DataFrame(
+ {
+ "name": ["Python", "Pandas"],
+ "url": ["https://www.python.org/", "https://pandas.pydata.org"],
+ }
+ )
print(url_df.to_html(render_links=True))
.. ipython:: python
:suppress:
- write_html(url_df, 'render_links', render_links=True)
+ write_html(url_df, "render_links", render_links=True)
HTML:
@@ -2694,14 +2683,14 @@ Finally, the ``escape`` argument allows you to control whether the
.. ipython:: python
- df = pd.DataFrame({'a': list('&<>'), 'b': np.random.randn(3)})
+ df = pd.DataFrame({"a": list("&<>"), "b": np.random.randn(3)})
.. ipython:: python
:suppress:
- write_html(df, 'escape')
- write_html(df, 'noescape', escape=False)
+ write_html(df, "escape")
+ write_html(df, "noescape", escape=False)
Escaped:
@@ -2828,7 +2817,7 @@ file, and the ``sheet_name`` indicating which sheet to parse.
.. code-block:: python
# Returns a DataFrame
- pd.read_excel('path_to_file.xls', sheet_name='Sheet1')
+ pd.read_excel("path_to_file.xls", sheet_name="Sheet1")
.. _io.excel.excelfile_class:
@@ -2843,16 +2832,16 @@ read into memory only once.
.. code-block:: python
- xlsx = pd.ExcelFile('path_to_file.xls')
- df = pd.read_excel(xlsx, 'Sheet1')
+ xlsx = pd.ExcelFile("path_to_file.xls")
+ df = pd.read_excel(xlsx, "Sheet1")
The ``ExcelFile`` class can also be used as a context manager.
.. code-block:: python
- with pd.ExcelFile('path_to_file.xls') as xls:
- df1 = pd.read_excel(xls, 'Sheet1')
- df2 = pd.read_excel(xls, 'Sheet2')
+ with pd.ExcelFile("path_to_file.xls") as xls:
+ df1 = pd.read_excel(xls, "Sheet1")
+ df2 = pd.read_excel(xls, "Sheet2")
The ``sheet_names`` property will generate
a list of the sheet names in the file.
@@ -2864,10 +2853,9 @@ different parameters:
data = {}
# For when Sheet1's format differs from Sheet2
- with pd.ExcelFile('path_to_file.xls') as xls:
- data['Sheet1'] = pd.read_excel(xls, 'Sheet1', index_col=None,
- na_values=['NA'])
- data['Sheet2'] = pd.read_excel(xls, 'Sheet2', index_col=1)
+ with pd.ExcelFile("path_to_file.xls") as xls:
+ data["Sheet1"] = pd.read_excel(xls, "Sheet1", index_col=None, na_values=["NA"])
+ data["Sheet2"] = pd.read_excel(xls, "Sheet2", index_col=1)
Note that if the same parsing parameters are used for all sheets, a list
of sheet names can simply be passed to ``read_excel`` with no loss in performance.
@@ -2876,15 +2864,14 @@ of sheet names can simply be passed to ``read_excel`` with no loss in performanc
# using the ExcelFile class
data = {}
- with pd.ExcelFile('path_to_file.xls') as xls:
- data['Sheet1'] = pd.read_excel(xls, 'Sheet1', index_col=None,
- na_values=['NA'])
- data['Sheet2'] = pd.read_excel(xls, 'Sheet2', index_col=None,
- na_values=['NA'])
+ with pd.ExcelFile("path_to_file.xls") as xls:
+ data["Sheet1"] = pd.read_excel(xls, "Sheet1", index_col=None, na_values=["NA"])
+ data["Sheet2"] = pd.read_excel(xls, "Sheet2", index_col=None, na_values=["NA"])
# equivalent using the read_excel function
- data = pd.read_excel('path_to_file.xls', ['Sheet1', 'Sheet2'],
- index_col=None, na_values=['NA'])
+ data = pd.read_excel(
+ "path_to_file.xls", ["Sheet1", "Sheet2"], index_col=None, na_values=["NA"]
+ )
``ExcelFile`` can also be called with a ``xlrd.book.Book`` object
as a parameter. This allows the user to control how the excel file is read.
@@ -2894,10 +2881,11 @@ with ``on_demand=True``.
.. code-block:: python
import xlrd
- xlrd_book = xlrd.open_workbook('path_to_file.xls', on_demand=True)
+
+ xlrd_book = xlrd.open_workbook("path_to_file.xls", on_demand=True)
with pd.ExcelFile(xlrd_book) as xls:
- df1 = pd.read_excel(xls, 'Sheet1')
- df2 = pd.read_excel(xls, 'Sheet2')
+ df1 = pd.read_excel(xls, "Sheet1")
+ df2 = pd.read_excel(xls, "Sheet2")
.. _io.excel.specifying_sheets:
@@ -2919,35 +2907,35 @@ Specifying sheets
.. code-block:: python
# Returns a DataFrame
- pd.read_excel('path_to_file.xls', 'Sheet1', index_col=None, na_values=['NA'])
+ pd.read_excel("path_to_file.xls", "Sheet1", index_col=None, na_values=["NA"])
Using the sheet index:
.. code-block:: python
# Returns a DataFrame
- pd.read_excel('path_to_file.xls', 0, index_col=None, na_values=['NA'])
+ pd.read_excel("path_to_file.xls", 0, index_col=None, na_values=["NA"])
Using all default values:
.. code-block:: python
# Returns a DataFrame
- pd.read_excel('path_to_file.xls')
+ pd.read_excel("path_to_file.xls")
Using None to get all sheets:
.. code-block:: python
# Returns a dictionary of DataFrames
- pd.read_excel('path_to_file.xls', sheet_name=None)
+ pd.read_excel("path_to_file.xls", sheet_name=None)
Using a list to get multiple sheets:
.. code-block:: python
# Returns the 1st and 4th sheet, as a dictionary of DataFrames.
- pd.read_excel('path_to_file.xls', sheet_name=['Sheet1', 3])
+ pd.read_excel("path_to_file.xls", sheet_name=["Sheet1", 3])
``read_excel`` can read more than one sheet, by setting ``sheet_name`` to either
a list of sheet names, a list of sheet positions, or ``None`` to read all sheets.
@@ -2968,10 +2956,12 @@ For example, to read in a ``MultiIndex`` index without names:
.. ipython:: python
- df = pd.DataFrame({'a': [1, 2, 3, 4], 'b': [5, 6, 7, 8]},
- index=pd.MultiIndex.from_product([['a', 'b'], ['c', 'd']]))
- df.to_excel('path_to_file.xlsx')
- df = pd.read_excel('path_to_file.xlsx', index_col=[0, 1])
+ df = pd.DataFrame(
+ {"a": [1, 2, 3, 4], "b": [5, 6, 7, 8]},
+ index=pd.MultiIndex.from_product([["a", "b"], ["c", "d"]]),
+ )
+ df.to_excel("path_to_file.xlsx")
+ df = pd.read_excel("path_to_file.xlsx", index_col=[0, 1])
df
If the index has level names, they will be parsed as well, using the same
@@ -2979,9 +2969,9 @@ parameters.
.. ipython:: python
- df.index = df.index.set_names(['lvl1', 'lvl2'])
- df.to_excel('path_to_file.xlsx')
- df = pd.read_excel('path_to_file.xlsx', index_col=[0, 1])
+ df.index = df.index.set_names(["lvl1", "lvl2"])
+ df.to_excel("path_to_file.xlsx")
+ df = pd.read_excel("path_to_file.xlsx", index_col=[0, 1])
df
@@ -2990,16 +2980,15 @@ should be passed to ``index_col`` and ``header``:
.. ipython:: python
- df.columns = pd.MultiIndex.from_product([['a'], ['b', 'd']],
- names=['c1', 'c2'])
- df.to_excel('path_to_file.xlsx')
- df = pd.read_excel('path_to_file.xlsx', index_col=[0, 1], header=[0, 1])
+ df.columns = pd.MultiIndex.from_product([["a"], ["b", "d"]], names=["c1", "c2"])
+ df.to_excel("path_to_file.xlsx")
+ df = pd.read_excel("path_to_file.xlsx", index_col=[0, 1], header=[0, 1])
df
.. ipython:: python
:suppress:
- os.remove('path_to_file.xlsx')
+ os.remove("path_to_file.xlsx")
Parsing specific columns
@@ -3018,14 +3007,14 @@ You can specify a comma-delimited set of Excel columns and ranges as a string:
.. code-block:: python
- pd.read_excel('path_to_file.xls', 'Sheet1', usecols='A,C:E')
+ pd.read_excel("path_to_file.xls", "Sheet1", usecols="A,C:E")
If ``usecols`` is a list of integers, then it is assumed to be the file column
indices to be parsed.
.. code-block:: python
- pd.read_excel('path_to_file.xls', 'Sheet1', usecols=[0, 2, 3])
+ pd.read_excel("path_to_file.xls", "Sheet1", usecols=[0, 2, 3])
Element order is ignored, so ``usecols=[0, 1]`` is the same as ``[1, 0]``.
@@ -3037,7 +3026,7 @@ document header row(s). Those strings define which columns will be parsed:
.. code-block:: python
- pd.read_excel('path_to_file.xls', 'Sheet1', usecols=['foo', 'bar'])
+ pd.read_excel("path_to_file.xls", "Sheet1", usecols=["foo", "bar"])
Element order is ignored, so ``usecols=['baz', 'joe']`` is the same as ``['joe', 'baz']``.
@@ -3048,7 +3037,7 @@ the column names, returning names where the callable function evaluates to ``Tru
.. code-block:: python
- pd.read_excel('path_to_file.xls', 'Sheet1', usecols=lambda x: x.isalpha())
+ pd.read_excel("path_to_file.xls", "Sheet1", usecols=lambda x: x.isalpha())
Parsing dates
+++++++++++++
@@ -3060,7 +3049,7 @@ use the ``parse_dates`` keyword to parse those strings to datetimes:
.. code-block:: python
- pd.read_excel('path_to_file.xls', 'Sheet1', parse_dates=['date_strings'])
+ pd.read_excel("path_to_file.xls", "Sheet1", parse_dates=["date_strings"])
Cell converters
@@ -3071,7 +3060,7 @@ option. For instance, to convert a column to boolean:
.. code-block:: python
- pd.read_excel('path_to_file.xls', 'Sheet1', converters={'MyBools': bool})
+ pd.read_excel("path_to_file.xls", "Sheet1", converters={"MyBools": bool})
This option handles missing values and treats exceptions in the converters
as missing data. Transformations are applied cell by cell rather than to the
@@ -3086,7 +3075,7 @@ missing data to recover integer dtype:
return int(x) if x else -1
- pd.read_excel('path_to_file.xls', 'Sheet1', converters={'MyInts': cfun})
+ pd.read_excel("path_to_file.xls", "Sheet1", converters={"MyInts": cfun})
Dtype specifications
++++++++++++++++++++
@@ -3098,7 +3087,7 @@ no type inference, use the type ``str`` or ``object``.
.. code-block:: python
- pd.read_excel('path_to_file.xls', dtype={'MyInts': 'int64', 'MyText': str})
+ pd.read_excel("path_to_file.xls", dtype={"MyInts": "int64", "MyText": str})
.. _io.excel_writer:
@@ -3116,7 +3105,7 @@ written. For example:
.. code-block:: python
- df.to_excel('path_to_file.xlsx', sheet_name='Sheet1')
+ df.to_excel("path_to_file.xlsx", sheet_name="Sheet1")
Files with a ``.xls`` extension will be written using ``xlwt`` and those with a
``.xlsx`` extension will be written using ``xlsxwriter`` (if available) or
@@ -3129,16 +3118,16 @@ row instead of the first. You can place it in the first row by setting the
.. code-block:: python
- df.to_excel('path_to_file.xlsx', index_label='label', merge_cells=False)
+ df.to_excel("path_to_file.xlsx", index_label="label", merge_cells=False)
In order to write separate ``DataFrames`` to separate sheets in a single Excel file,
one can pass an :class:`~pandas.io.excel.ExcelWriter`.
.. code-block:: python
- with pd.ExcelWriter('path_to_file.xlsx') as writer:
- df1.to_excel(writer, sheet_name='Sheet1')
- df2.to_excel(writer, sheet_name='Sheet2')
+ with pd.ExcelWriter("path_to_file.xlsx") as writer:
+ df1.to_excel(writer, sheet_name="Sheet1")
+ df2.to_excel(writer, sheet_name="Sheet2")
.. note::
@@ -3164,8 +3153,8 @@ Pandas supports writing Excel files to buffer-like objects such as ``StringIO``
bio = BytesIO()
# By setting the 'engine' in the ExcelWriter constructor.
- writer = pd.ExcelWriter(bio, engine='xlsxwriter')
- df.to_excel(writer, sheet_name='Sheet1')
+ writer = pd.ExcelWriter(bio, engine="xlsxwriter")
+ df.to_excel(writer, sheet_name="Sheet1")
# Save the workbook
writer.save()
@@ -3214,16 +3203,17 @@ argument to ``to_excel`` and to ``ExcelWriter``. The built-in engines are:
.. code-block:: python
# By setting the 'engine' in the DataFrame 'to_excel()' methods.
- df.to_excel('path_to_file.xlsx', sheet_name='Sheet1', engine='xlsxwriter')
+ df.to_excel("path_to_file.xlsx", sheet_name="Sheet1", engine="xlsxwriter")
# By setting the 'engine' in the ExcelWriter constructor.
- writer = pd.ExcelWriter('path_to_file.xlsx', engine='xlsxwriter')
+ writer = pd.ExcelWriter("path_to_file.xlsx", engine="xlsxwriter")
# Or via pandas configuration.
from pandas import options # noqa: E402
- options.io.excel.xlsx.writer = 'xlsxwriter'
- df.to_excel('path_to_file.xlsx', sheet_name='Sheet1')
+ options.io.excel.xlsx.writer = "xlsxwriter"
+
+ df.to_excel("path_to_file.xlsx", sheet_name="Sheet1")
.. _io.excel.style:
@@ -3254,7 +3244,7 @@ OpenDocument spreadsheets match what can be done for `Excel files`_ using
.. code-block:: python
# Returns a DataFrame
- pd.read_excel('path_to_file.ods', engine='odf')
+ pd.read_excel("path_to_file.ods", engine="odf")
.. note::
@@ -3277,7 +3267,7 @@ in files and will return floats instead.
.. code-block:: python
# Returns a DataFrame
- pd.read_excel('path_to_file.xlsb', engine='pyxlsb')
+ pd.read_excel("path_to_file.xlsb", engine="pyxlsb")
.. note::
@@ -3353,7 +3343,7 @@ All pandas objects are equipped with ``to_pickle`` methods which use Python's
.. ipython:: python
df
- df.to_pickle('foo.pkl')
+ df.to_pickle("foo.pkl")
The ``read_pickle`` function in the ``pandas`` namespace can be used to load
any pickled pandas object (or any other pickled object) from file:
@@ -3361,12 +3351,12 @@ any pickled pandas object (or any other pickled object) from file:
.. ipython:: python
- pd.read_pickle('foo.pkl')
+ pd.read_pickle("foo.pkl")
.. ipython:: python
:suppress:
- os.remove('foo.pkl')
+ os.remove("foo.pkl")
.. warning::
@@ -3400,10 +3390,13 @@ the underlying compression library.
.. ipython:: python
- df = pd.DataFrame({
- 'A': np.random.randn(1000),
- 'B': 'foo',
- 'C': pd.date_range('20130101', periods=1000, freq='s')})
+ df = pd.DataFrame(
+ {
+ "A": np.random.randn(1000),
+ "B": "foo",
+ "C": pd.date_range("20130101", periods=1000, freq="s"),
+ }
+ )
df
Using an explicit compression type:
@@ -3438,10 +3431,7 @@ Passing options to the compression protocol in order to speed up compression:
.. ipython:: python
- df.to_pickle(
- "data.pkl.gz",
- compression={"method": "gzip", 'compresslevel': 1}
- )
+ df.to_pickle("data.pkl.gz", compression={"method": "gzip", "compresslevel": 1})
.. ipython:: python
:suppress:
@@ -3462,11 +3452,13 @@ Example pyarrow usage:
.. code-block:: python
- >>> import pandas as pd
- >>> import pyarrow as pa
- >>> df = pd.DataFrame({'A': [1, 2, 3]})
- >>> context = pa.default_serialization_context()
- >>> df_bytestring = context.serialize(df).to_buffer().to_pybytes()
+ import pandas as pd
+ import pyarrow as pa
+
+ df = pd.DataFrame({"A": [1, 2, 3]})
+
+ context = pa.default_serialization_context()
+ df_bytestring = context.serialize(df).to_buffer().to_pybytes()
For documentation on pyarrow, see `here <https://arrow.apache.org/docs/python/index.html>`__.
@@ -3492,11 +3484,11 @@ for some advanced strategies
:suppress:
:okexcept:
- os.remove('store.h5')
+ os.remove("store.h5")
.. ipython:: python
- store = pd.HDFStore('store.h5')
+ store = pd.HDFStore("store.h5")
print(store)
Objects can be written to the file just like adding key-value pairs to a
@@ -3504,15 +3496,14 @@ dict:
.. ipython:: python
- index = pd.date_range('1/1/2000', periods=8)
- s = pd.Series(np.random.randn(5), index=['a', 'b', 'c', 'd', 'e'])
- df = pd.DataFrame(np.random.randn(8, 3), index=index,
- columns=['A', 'B', 'C'])
+ index = pd.date_range("1/1/2000", periods=8)
+ s = pd.Series(np.random.randn(5), index=["a", "b", "c", "d", "e"])
+ df = pd.DataFrame(np.random.randn(8, 3), index=index, columns=["A", "B", "C"])
# store.put('s', s) is an equivalent method
- store['s'] = s
+ store["s"] = s
- store['df'] = df
+ store["df"] = df
store
@@ -3521,7 +3512,7 @@ In a current or later Python session, you can retrieve stored objects:
.. ipython:: python
# store.get('df') is an equivalent method
- store['df']
+ store["df"]
# dotted (attribute) access provides get as well
store.df
@@ -3531,7 +3522,7 @@ Deletion of the object specified by the key:
.. ipython:: python
# store.remove('df') is an equivalent method
- del store['df']
+ del store["df"]
store
@@ -3544,14 +3535,14 @@ Closing a Store and using a context manager:
store.is_open
# Working with, and automatically closing the store using a context manager
- with pd.HDFStore('store.h5') as store:
+ with pd.HDFStore("store.h5") as store:
store.keys()
.. ipython:: python
:suppress:
store.close()
- os.remove('store.h5')
+ os.remove("store.h5")
@@ -3563,15 +3554,15 @@ similar to how ``read_csv`` and ``to_csv`` work.
.. ipython:: python
- df_tl = pd.DataFrame({'A': list(range(5)), 'B': list(range(5))})
- df_tl.to_hdf('store_tl.h5', 'table', append=True)
- pd.read_hdf('store_tl.h5', 'table', where=['index>2'])
+ df_tl = pd.DataFrame({"A": list(range(5)), "B": list(range(5))})
+ df_tl.to_hdf("store_tl.h5", "table", append=True)
+ pd.read_hdf("store_tl.h5", "table", where=["index>2"])
.. ipython:: python
:suppress:
:okexcept:
- os.remove('store_tl.h5')
+ os.remove("store_tl.h5")
HDFStore will by default not drop rows that are all missing. This behavior can be changed by setting ``dropna=True``.
@@ -3579,24 +3570,23 @@ HDFStore will by default not drop rows that are all missing. This behavior can b
.. ipython:: python
- df_with_missing = pd.DataFrame({'col1': [0, np.nan, 2],
- 'col2': [1, np.nan, np.nan]})
+ df_with_missing = pd.DataFrame({"col1": [0, np.nan, 2], "col2": [1, np.nan, np.nan]})
df_with_missing
- df_with_missing.to_hdf('file.h5', 'df_with_missing',
- format='table', mode='w')
+ df_with_missing.to_hdf("file.h5", "df_with_missing", format="table", mode="w")
- pd.read_hdf('file.h5', 'df_with_missing')
+ pd.read_hdf("file.h5", "df_with_missing")
- df_with_missing.to_hdf('file.h5', 'df_with_missing',
- format='table', mode='w', dropna=True)
- pd.read_hdf('file.h5', 'df_with_missing')
+ df_with_missing.to_hdf(
+ "file.h5", "df_with_missing", format="table", mode="w", dropna=True
+ )
+ pd.read_hdf("file.h5", "df_with_missing")
.. ipython:: python
:suppress:
- os.remove('file.h5')
+ os.remove("file.h5")
.. _io.hdf5-fixed:
@@ -3642,21 +3632,21 @@ enable ``put/append/to_hdf`` to by default store in the ``table`` format.
:suppress:
:okexcept:
- os.remove('store.h5')
+ os.remove("store.h5")
.. ipython:: python
- store = pd.HDFStore('store.h5')
+ store = pd.HDFStore("store.h5")
df1 = df[0:4]
df2 = df[4:]
# append data (creates a table automatically)
- store.append('df', df1)
- store.append('df', df2)
+ store.append("df", df1)
+ store.append("df", df2)
store
# select the entire object
- store.select('df')
+ store.select("df")
# the type of stored data
store.root.df._v_attrs.pandas_type
@@ -3679,16 +3669,16 @@ everything in the sub-store and **below**, so be *careful*.
.. ipython:: python
- store.put('foo/bar/bah', df)
- store.append('food/orange', df)
- store.append('food/apple', df)
+ store.put("foo/bar/bah", df)
+ store.append("food/orange", df)
+ store.append("food/apple", df)
store
# a list of keys are returned
store.keys()
# remove all nodes under this level
- store.remove('food')
+ store.remove("food")
store
@@ -3702,10 +3692,10 @@ will yield a tuple for each group key along with the relative keys of its conten
for (path, subgroups, subkeys) in store.walk():
for subgroup in subgroups:
- print('GROUP: {}/{}'.format(path, subgroup))
+ print("GROUP: {}/{}".format(path, subgroup))
for subkey in subkeys:
- key = '/'.join([path, subkey])
- print('KEY: {}'.format(key))
+ key = "/".join([path, subkey])
+ print("KEY: {}".format(key))
print(store.get(key))
@@ -3729,7 +3719,7 @@ will yield a tuple for each group key along with the relative keys of its conten
.. ipython:: python
- store['foo/bar/bah']
+ store["foo/bar/bah"]
.. _io.hdf5-types:
@@ -3753,19 +3743,22 @@ defaults to ``nan``.
.. ipython:: python
- df_mixed = pd.DataFrame({'A': np.random.randn(8),
- 'B': np.random.randn(8),
- 'C': np.array(np.random.randn(8), dtype='float32'),
- 'string': 'string',
- 'int': 1,
- 'bool': True,
- 'datetime64': pd.Timestamp('20010102')},
- index=list(range(8)))
- df_mixed.loc[df_mixed.index[3:5],
- ['A', 'B', 'string', 'datetime64']] = np.nan
+ df_mixed = pd.DataFrame(
+ {
+ "A": np.random.randn(8),
+ "B": np.random.randn(8),
+ "C": np.array(np.random.randn(8), dtype="float32"),
+ "string": "string",
+ "int": 1,
+ "bool": True,
+ "datetime64": pd.Timestamp("20010102"),
+ },
+ index=list(range(8)),
+ )
+ df_mixed.loc[df_mixed.index[3:5], ["A", "B", "string", "datetime64"]] = np.nan
- store.append('df_mixed', df_mixed, min_itemsize={'values': 50})
- df_mixed1 = store.select('df_mixed')
+ store.append("df_mixed", df_mixed, min_itemsize={"values": 50})
+ df_mixed1 = store.select("df_mixed")
df_mixed1
df_mixed1.dtypes.value_counts()
@@ -3780,20 +3773,19 @@ storing/selecting from homogeneous index ``DataFrames``.
.. ipython:: python
- index = pd.MultiIndex(levels=[['foo', 'bar', 'baz', 'qux'],
- ['one', 'two', 'three']],
- codes=[[0, 0, 0, 1, 1, 2, 2, 3, 3, 3],
- [0, 1, 2, 0, 1, 1, 2, 0, 1, 2]],
- names=['foo', 'bar'])
- df_mi = pd.DataFrame(np.random.randn(10, 3), index=index,
- columns=['A', 'B', 'C'])
+ index = pd.MultiIndex(
+ levels=[["foo", "bar", "baz", "qux"], ["one", "two", "three"]],
+ codes=[[0, 0, 0, 1, 1, 2, 2, 3, 3, 3], [0, 1, 2, 0, 1, 1, 2, 0, 1, 2]],
+ names=["foo", "bar"],
+ )
+ df_mi = pd.DataFrame(np.random.randn(10, 3), index=index, columns=["A", "B", "C"])
df_mi
- store.append('df_mi', df_mi)
- store.select('df_mi')
+ store.append("df_mi", df_mi)
+ store.select("df_mi")
# the levels are automatically included as data columns
- store.select('df_mi', 'foo=bar')
+ store.select("df_mi", "foo=bar")
.. note::
   The ``index`` keyword is reserved and cannot be used as a level name.
@@ -3870,7 +3862,7 @@ The right-hand side of the sub-expression (after a comparison operator) can be:
.. code-block:: python
string = "HolyMoly'"
- store.select('df', 'index == string')
+ store.select("df", "index == string")
instead of this
@@ -3887,7 +3879,7 @@ The right-hand side of the sub-expression (after a comparison operator) can be:
.. code-block:: python
- store.select('df', 'index == %r' % string)
+ store.select("df", "index == %r" % string)
which will quote ``string``.
@@ -3896,21 +3888,24 @@ Here are some examples:
.. ipython:: python
- dfq = pd.DataFrame(np.random.randn(10, 4), columns=list('ABCD'),
- index=pd.date_range('20130101', periods=10))
- store.append('dfq', dfq, format='table', data_columns=True)
+ dfq = pd.DataFrame(
+ np.random.randn(10, 4),
+ columns=list("ABCD"),
+ index=pd.date_range("20130101", periods=10),
+ )
+ store.append("dfq", dfq, format="table", data_columns=True)
Use boolean expressions, with in-line function evaluation.
.. ipython:: python
- store.select('dfq', "index>pd.Timestamp('20130104') & columns=['A', 'B']")
+ store.select("dfq", "index>pd.Timestamp('20130104') & columns=['A', 'B']")
Use inline column reference.
.. ipython:: python
- store.select('dfq', where="A>0 or C>0")
+ store.select("dfq", where="A>0 or C>0")
The ``columns`` keyword can be supplied to select a list of columns to be
returned, this is equivalent to passing a
@@ -3918,7 +3913,7 @@ returned, this is equivalent to passing a
.. ipython:: python
- store.select('df', "columns=['A', 'B']")
+ store.select("df", "columns=['A', 'B']")
``start`` and ``stop`` parameters can be specified to limit the total search
space. These are in terms of the total number of rows in a table.
@@ -3944,14 +3939,19 @@ specified in the format: ``<float>(<unit>)``, where float may be signed (and fra
.. ipython:: python
from datetime import timedelta
- dftd = pd.DataFrame({'A': pd.Timestamp('20130101'),
- 'B': [pd.Timestamp('20130101') + timedelta(days=i,
- seconds=10)
- for i in range(10)]})
- dftd['C'] = dftd['A'] - dftd['B']
+
+ dftd = pd.DataFrame(
+ {
+ "A": pd.Timestamp("20130101"),
+ "B": [
+ pd.Timestamp("20130101") + timedelta(days=i, seconds=10) for i in range(10)
+ ],
+ }
+ )
+ dftd["C"] = dftd["A"] - dftd["B"]
dftd
- store.append('dftd', dftd, data_columns=True)
- store.select('dftd', "C<'-3.5D'")
+ store.append("dftd", dftd, data_columns=True)
+ store.select("dftd", "C<'-3.5D'")
.. _io.query_multi:
@@ -3963,7 +3963,7 @@ Selecting from a ``MultiIndex`` can be achieved by using the name of the level.
.. ipython:: python
df_mi.index.names
- store.select('df_mi', "foo=baz and bar=two")
+ store.select("df_mi", "foo=baz and bar=two")
If the ``MultiIndex`` levels names are ``None``, the levels are automatically made available via
the ``level_n`` keyword with ``n`` the level of the ``MultiIndex`` you want to select from.
@@ -3974,8 +3974,7 @@ the ``level_n`` keyword with ``n`` the level of the ``MultiIndex`` you want to s
levels=[["foo", "bar", "baz", "qux"], ["one", "two", "three"]],
codes=[[0, 0, 0, 1, 1, 2, 2, 3, 3, 3], [0, 1, 2, 0, 1, 1, 2, 0, 1, 2]],
)
- df_mi_2 = pd.DataFrame(np.random.randn(10, 3),
- index=index, columns=["A", "B", "C"])
+ df_mi_2 = pd.DataFrame(np.random.randn(10, 3), index=index, columns=["A", "B", "C"])
df_mi_2
store.append("df_mi_2", df_mi_2)
@@ -4006,7 +4005,7 @@ indexed dimension as the ``where``.
i.optlevel, i.kind
# change an index by passing new parameters
- store.create_table_index('df', optlevel=9, kind='full')
+ store.create_table_index("df", optlevel=9, kind="full")
i = store.root.df.table.cols.index.index
i.optlevel, i.kind
@@ -4014,20 +4013,20 @@ Oftentimes when appending large amounts of data to a store, it is useful to turn
.. ipython:: python
- df_1 = pd.DataFrame(np.random.randn(10, 2), columns=list('AB'))
- df_2 = pd.DataFrame(np.random.randn(10, 2), columns=list('AB'))
+ df_1 = pd.DataFrame(np.random.randn(10, 2), columns=list("AB"))
+ df_2 = pd.DataFrame(np.random.randn(10, 2), columns=list("AB"))
- st = pd.HDFStore('appends.h5', mode='w')
- st.append('df', df_1, data_columns=['B'], index=False)
- st.append('df', df_2, data_columns=['B'], index=False)
- st.get_storer('df').table
+ st = pd.HDFStore("appends.h5", mode="w")
+ st.append("df", df_1, data_columns=["B"], index=False)
+ st.append("df", df_2, data_columns=["B"], index=False)
+ st.get_storer("df").table
Then create the index when finished appending.
.. ipython:: python
- st.create_table_index('df', columns=['B'], optlevel=9, kind='full')
- st.get_storer('df').table
+ st.create_table_index("df", columns=["B"], optlevel=9, kind="full")
+ st.get_storer("df").table
st.close()
@@ -4035,7 +4034,7 @@ Then create the index when finished appending.
:suppress:
:okexcept:
- os.remove('appends.h5')
+ os.remove("appends.h5")
See `here <https://stackoverflow.com/questions/17893370/ptrepack-sortby-needs-full-index>`__ for how to create a completely-sorted-index (CSI) on an existing store.
@@ -4054,22 +4053,22 @@ be ``data_columns``.
.. ipython:: python
df_dc = df.copy()
- df_dc['string'] = 'foo'
- df_dc.loc[df_dc.index[4:6], 'string'] = np.nan
- df_dc.loc[df_dc.index[7:9], 'string'] = 'bar'
- df_dc['string2'] = 'cool'
- df_dc.loc[df_dc.index[1:3], ['B', 'C']] = 1.0
+ df_dc["string"] = "foo"
+ df_dc.loc[df_dc.index[4:6], "string"] = np.nan
+ df_dc.loc[df_dc.index[7:9], "string"] = "bar"
+ df_dc["string2"] = "cool"
+ df_dc.loc[df_dc.index[1:3], ["B", "C"]] = 1.0
df_dc
# on-disk operations
- store.append('df_dc', df_dc, data_columns=['B', 'C', 'string', 'string2'])
- store.select('df_dc', where='B > 0')
+ store.append("df_dc", df_dc, data_columns=["B", "C", "string", "string2"])
+ store.select("df_dc", where="B > 0")
# getting creative
- store.select('df_dc', 'B > 0 & C > 0 & string == foo')
+ store.select("df_dc", "B > 0 & C > 0 & string == foo")
# this is in-memory version of this type of selection
- df_dc[(df_dc.B > 0) & (df_dc.C > 0) & (df_dc.string == 'foo')]
+ df_dc[(df_dc.B > 0) & (df_dc.C > 0) & (df_dc.string == "foo")]
# we have automagically created this index and the B/C/string/string2
# columns are stored separately as ``PyTables`` columns
@@ -4090,7 +4089,7 @@ The default is 50,000 rows returned in a chunk.
.. ipython:: python
- for df in store.select('df', chunksize=3):
+ for df in store.select("df", chunksize=3):
print(df)
.. note::
@@ -4100,7 +4099,7 @@ The default is 50,000 rows returned in a chunk.
.. code-block:: python
- for df in pd.read_hdf('store.h5', 'df', chunksize=3):
+ for df in pd.read_hdf("store.h5", "df", chunksize=3):
print(df)
Note, that the chunksize keyword applies to the **source** rows. So if you
@@ -4112,18 +4111,20 @@ chunks.
.. ipython:: python
- dfeq = pd.DataFrame({'number': np.arange(1, 11)})
+ dfeq = pd.DataFrame({"number": np.arange(1, 11)})
dfeq
- store.append('dfeq', dfeq, data_columns=['number'])
+ store.append("dfeq", dfeq, data_columns=["number"])
+
def chunks(l, n):
- return [l[i:i + n] for i in range(0, len(l), n)]
+ return [l[i: i + n] for i in range(0, len(l), n)]
+
evens = [2, 4, 6, 8, 10]
- coordinates = store.select_as_coordinates('dfeq', 'number=evens')
+ coordinates = store.select_as_coordinates("dfeq", "number=evens")
for c in chunks(coordinates, 2):
- print(store.select('dfeq', where=c))
+ print(store.select("dfeq", where=c))
Advanced queries
++++++++++++++++
@@ -4138,8 +4139,8 @@ These do not currently accept the ``where`` selector.
.. ipython:: python
- store.select_column('df_dc', 'index')
- store.select_column('df_dc', 'string')
+ store.select_column("df_dc", "index")
+ store.select_column("df_dc", "string")
.. _io.hdf5-selecting_coordinates:
@@ -4152,12 +4153,13 @@ Sometimes you want to get the coordinates (a.k.a the index locations) of your qu
.. ipython:: python
- df_coord = pd.DataFrame(np.random.randn(1000, 2),
- index=pd.date_range('20000101', periods=1000))
- store.append('df_coord', df_coord)
- c = store.select_as_coordinates('df_coord', 'index > 20020101')
+ df_coord = pd.DataFrame(
+ np.random.randn(1000, 2), index=pd.date_range("20000101", periods=1000)
+ )
+ store.append("df_coord", df_coord)
+ c = store.select_as_coordinates("df_coord", "index > 20020101")
c
- store.select('df_coord', where=c)
+ store.select("df_coord", where=c)
.. _io.hdf5-where_mask:
@@ -4170,12 +4172,13 @@ a datetimeindex which are 5.
.. ipython:: python
- df_mask = pd.DataFrame(np.random.randn(1000, 2),
- index=pd.date_range('20000101', periods=1000))
- store.append('df_mask', df_mask)
- c = store.select_column('df_mask', 'index')
+ df_mask = pd.DataFrame(
+ np.random.randn(1000, 2), index=pd.date_range("20000101", periods=1000)
+ )
+ store.append("df_mask", df_mask)
+ c = store.select_column("df_mask", "index")
where = c[pd.DatetimeIndex(c).month == 5].index
- store.select('df_mask', where=where)
+ store.select("df_mask", where=where)
Storer object
^^^^^^^^^^^^^
@@ -4186,7 +4189,7 @@ of rows in an object.
.. ipython:: python
- store.get_storer('df_dc').nrows
+ store.get_storer("df_dc").nrows
Multiple table queries
@@ -4219,24 +4222,26 @@ results.
.. ipython:: python
- df_mt = pd.DataFrame(np.random.randn(8, 6),
- index=pd.date_range('1/1/2000', periods=8),
- columns=['A', 'B', 'C', 'D', 'E', 'F'])
- df_mt['foo'] = 'bar'
- df_mt.loc[df_mt.index[1], ('A', 'B')] = np.nan
+ df_mt = pd.DataFrame(
+ np.random.randn(8, 6),
+ index=pd.date_range("1/1/2000", periods=8),
+ columns=["A", "B", "C", "D", "E", "F"],
+ )
+ df_mt["foo"] = "bar"
+ df_mt.loc[df_mt.index[1], ("A", "B")] = np.nan
# you can also create the tables individually
- store.append_to_multiple({'df1_mt': ['A', 'B'], 'df2_mt': None},
- df_mt, selector='df1_mt')
+ store.append_to_multiple(
+ {"df1_mt": ["A", "B"], "df2_mt": None}, df_mt, selector="df1_mt"
+ )
store
# individual tables were created
- store.select('df1_mt')
- store.select('df2_mt')
+ store.select("df1_mt")
+ store.select("df2_mt")
# as a multiple
- store.select_as_multiple(['df1_mt', 'df2_mt'], where=['A>0', 'B>0'],
- selector='df1_mt')
+ store.select_as_multiple(["df1_mt", "df2_mt"], where=["A>0", "B>0"], selector="df1_mt")
Delete from a table
@@ -4345,14 +4350,15 @@ Enable compression for all objects within the file:
.. code-block:: python
- store_compressed = pd.HDFStore('store_compressed.h5', complevel=9,
- complib='blosc:blosclz')
+ store_compressed = pd.HDFStore(
+ "store_compressed.h5", complevel=9, complib="blosc:blosclz"
+ )
Or on-the-fly compression (this only applies to tables) in stores where compression is not enabled:
.. code-block:: python
- store.append('df', df, complib='zlib', complevel=5)
+ store.append("df", df, complib="zlib", complevel=5)
.. _io.hdf5-ptrepack:
@@ -4441,13 +4447,14 @@ stored in a more efficient manner.
.. ipython:: python
- dfcat = pd.DataFrame({'A': pd.Series(list('aabbcdba')).astype('category'),
- 'B': np.random.randn(8)})
+ dfcat = pd.DataFrame(
+ {"A": pd.Series(list("aabbcdba")).astype("category"), "B": np.random.randn(8)}
+ )
dfcat
dfcat.dtypes
- cstore = pd.HDFStore('cats.h5', mode='w')
- cstore.append('dfcat', dfcat, format='table', data_columns=['A'])
- result = cstore.select('dfcat', where="A in ['b', 'c']")
+ cstore = pd.HDFStore("cats.h5", mode="w")
+ cstore.append("dfcat", dfcat, format="table", data_columns=["A"])
+ result = cstore.select("dfcat", where="A in ['b', 'c']")
result
result.dtypes
@@ -4456,7 +4463,7 @@ stored in a more efficient manner.
:okexcept:
cstore.close()
- os.remove('cats.h5')
+ os.remove("cats.h5")
String columns
@@ -4483,17 +4490,17 @@ Passing a ``min_itemsize`` dict will cause all passed columns to be created as *
.. ipython:: python
- dfs = pd.DataFrame({'A': 'foo', 'B': 'bar'}, index=list(range(5)))
+ dfs = pd.DataFrame({"A": "foo", "B": "bar"}, index=list(range(5)))
dfs
# A and B have a size of 30
- store.append('dfs', dfs, min_itemsize=30)
- store.get_storer('dfs').table
+ store.append("dfs", dfs, min_itemsize=30)
+ store.get_storer("dfs").table
# A is created as a data_column with a size of 30
# B is size is calculated
- store.append('dfs2', dfs, min_itemsize={'A': 30})
- store.get_storer('dfs2').table
+ store.append("dfs2", dfs, min_itemsize={"A": 30})
+ store.get_storer("dfs2").table
**nan_rep**
@@ -4502,15 +4509,15 @@ You could inadvertently turn an actual ``nan`` value into a missing value.
.. ipython:: python
- dfss = pd.DataFrame({'A': ['foo', 'bar', 'nan']})
+ dfss = pd.DataFrame({"A": ["foo", "bar", "nan"]})
dfss
- store.append('dfss', dfss)
- store.select('dfss')
+ store.append("dfss", dfss)
+ store.select("dfss")
# here you need to specify a different nan rep
- store.append('dfss2', dfss, nan_rep='_nan_')
- store.select('dfss2')
+ store.append("dfss2", dfss, nan_rep="_nan_")
+ store.select("dfss2")
.. _io.external_compatibility:
@@ -4529,21 +4536,25 @@ It is possible to write an ``HDFStore`` object that can easily be imported into
.. ipython:: python
- df_for_r = pd.DataFrame({"first": np.random.rand(100),
- "second": np.random.rand(100),
- "class": np.random.randint(0, 2, (100, ))},
- index=range(100))
+ df_for_r = pd.DataFrame(
+ {
+ "first": np.random.rand(100),
+ "second": np.random.rand(100),
+ "class": np.random.randint(0, 2, (100,)),
+ },
+ index=range(100),
+ )
df_for_r.head()
- store_export = pd.HDFStore('export.h5')
- store_export.append('df_for_r', df_for_r, data_columns=df_dc.columns)
+ store_export = pd.HDFStore("export.h5")
+ store_export.append("df_for_r", df_for_r, data_columns=df_dc.columns)
store_export
.. ipython:: python
:suppress:
store_export.close()
- os.remove('export.h5')
+ os.remove("export.h5")
In R this file can be read into a ``data.frame`` object using the ``rhdf5``
library. The following example function reads the corresponding column names
@@ -4630,7 +4641,7 @@ Performance
:suppress:
store.close()
- os.remove('store.h5')
+ os.remove("store.h5")
.. _io.feather:
@@ -4660,21 +4671,26 @@ See the `Full Documentation <https://github.com/wesm/feather>`__.
:suppress:
import warnings
+
# This can be removed once building with pyarrow >=0.15.0
warnings.filterwarnings("ignore", "The Sparse", FutureWarning)
.. ipython:: python
- df = pd.DataFrame({'a': list('abc'),
- 'b': list(range(1, 4)),
- 'c': np.arange(3, 6).astype('u1'),
- 'd': np.arange(4.0, 7.0, dtype='float64'),
- 'e': [True, False, True],
- 'f': pd.Categorical(list('abc')),
- 'g': pd.date_range('20130101', periods=3),
- 'h': pd.date_range('20130101', periods=3, tz='US/Eastern'),
- 'i': pd.date_range('20130101', periods=3, freq='ns')})
+ df = pd.DataFrame(
+ {
+ "a": list("abc"),
+ "b": list(range(1, 4)),
+ "c": np.arange(3, 6).astype("u1"),
+ "d": np.arange(4.0, 7.0, dtype="float64"),
+ "e": [True, False, True],
+ "f": pd.Categorical(list("abc")),
+ "g": pd.date_range("20130101", periods=3),
+ "h": pd.date_range("20130101", periods=3, tz="US/Eastern"),
+ "i": pd.date_range("20130101", periods=3, freq="ns"),
+ }
+ )
df
df.dtypes
@@ -4683,13 +4699,13 @@ Write to a feather file.
.. ipython:: python
- df.to_feather('example.feather')
+ df.to_feather("example.feather")
Read from a feather file.
.. ipython:: python
- result = pd.read_feather('example.feather')
+ result = pd.read_feather("example.feather")
result
# we preserve dtypes
@@ -4698,7 +4714,7 @@ Read from a feather file.
.. ipython:: python
:suppress:
- os.remove('example.feather')
+ os.remove("example.feather")
.. _io.parquet:
@@ -4743,15 +4759,19 @@ See the documentation for `pyarrow <https://arrow.apache.org/docs/python/>`__ an
.. ipython:: python
- df = pd.DataFrame({'a': list('abc'),
- 'b': list(range(1, 4)),
- 'c': np.arange(3, 6).astype('u1'),
- 'd': np.arange(4.0, 7.0, dtype='float64'),
- 'e': [True, False, True],
- 'f': pd.date_range('20130101', periods=3),
- 'g': pd.date_range('20130101', periods=3, tz='US/Eastern'),
- 'h': pd.Categorical(list('abc')),
- 'i': pd.Categorical(list('abc'), ordered=True)})
+ df = pd.DataFrame(
+ {
+ "a": list("abc"),
+ "b": list(range(1, 4)),
+ "c": np.arange(3, 6).astype("u1"),
+ "d": np.arange(4.0, 7.0, dtype="float64"),
+ "e": [True, False, True],
+ "f": pd.date_range("20130101", periods=3),
+ "g": pd.date_range("20130101", periods=3, tz="US/Eastern"),
+ "h": pd.Categorical(list("abc")),
+ "i": pd.Categorical(list("abc"), ordered=True),
+ }
+ )
df
df.dtypes
@@ -4761,15 +4781,15 @@ Write to a parquet file.
.. ipython:: python
:okwarning:
- df.to_parquet('example_pa.parquet', engine='pyarrow')
- df.to_parquet('example_fp.parquet', engine='fastparquet')
+ df.to_parquet("example_pa.parquet", engine="pyarrow")
+ df.to_parquet("example_fp.parquet", engine="fastparquet")
Read from a parquet file.
.. ipython:: python
- result = pd.read_parquet('example_fp.parquet', engine='fastparquet')
- result = pd.read_parquet('example_pa.parquet', engine='pyarrow')
+ result = pd.read_parquet("example_fp.parquet", engine="fastparquet")
+ result = pd.read_parquet("example_pa.parquet", engine="pyarrow")
result.dtypes
@@ -4777,18 +4797,16 @@ Read only certain columns of a parquet file.
.. ipython:: python
- result = pd.read_parquet('example_fp.parquet',
- engine='fastparquet', columns=['a', 'b'])
- result = pd.read_parquet('example_pa.parquet',
- engine='pyarrow', columns=['a', 'b'])
+ result = pd.read_parquet("example_fp.parquet", engine="fastparquet", columns=["a", "b"])
+ result = pd.read_parquet("example_pa.parquet", engine="pyarrow", columns=["a", "b"])
result.dtypes
.. ipython:: python
:suppress:
- os.remove('example_pa.parquet')
- os.remove('example_fp.parquet')
+ os.remove("example_pa.parquet")
+ os.remove("example_fp.parquet")
Handling indexes
@@ -4799,8 +4817,8 @@ more columns in the output file. Thus, this code:
.. ipython:: python
- df = pd.DataFrame({'a': [1, 2], 'b': [3, 4]})
- df.to_parquet('test.parquet', engine='pyarrow')
+ df = pd.DataFrame({"a": [1, 2], "b": [3, 4]})
+ df.to_parquet("test.parquet", engine="pyarrow")
creates a parquet file with *three* columns if you use ``pyarrow`` for serialization:
``a``, ``b``, and ``__index_level_0__``. If you're using ``fastparquet``, the
@@ -4815,7 +4833,7 @@ If you want to omit a dataframe's indexes when writing, pass ``index=False`` to
.. ipython:: python
- df.to_parquet('test.parquet', index=False)
+ df.to_parquet("test.parquet", index=False)
This creates a parquet file with just the two expected columns, ``a`` and ``b``.
If your ``DataFrame`` has a custom index, you won't get it back when you load
@@ -4827,7 +4845,7 @@ underlying engine's default behavior.
.. ipython:: python
:suppress:
- os.remove('test.parquet')
+ os.remove("test.parquet")
Partitioning Parquet files
@@ -4839,9 +4857,8 @@ Parquet supports partitioning of data based on the values of one or more columns
.. ipython:: python
- df = pd.DataFrame({'a': [0, 0, 1, 1], 'b': [0, 1, 0, 1]})
- df.to_parquet(path='test', engine='pyarrow',
- partition_cols=['a'], compression=None)
+ df = pd.DataFrame({"a": [0, 0, 1, 1], "b": [0, 1, 0, 1]})
+ df.to_parquet(path="test", engine="pyarrow", partition_cols=["a"], compression=None)
The ``path`` specifies the parent directory to which data will be saved.
The ``partition_cols`` are the column names by which the dataset will be partitioned.
@@ -4863,8 +4880,9 @@ The above example creates a partitioned dataset that may look like:
:suppress:
from shutil import rmtree
+
try:
- rmtree('test')
+ rmtree("test")
except OSError:
pass
@@ -4932,15 +4950,16 @@ below and the SQLAlchemy `documentation <https://docs.sqlalchemy.org/en/latest/c
.. ipython:: python
from sqlalchemy import create_engine
+
# Create your engine.
- engine = create_engine('sqlite:///:memory:')
+ engine = create_engine("sqlite:///:memory:")
If you want to manage your own connections you can pass one of those instead:
.. code-block:: python
with engine.connect() as conn, conn.begin():
- data = pd.read_sql_table('data', conn)
+ data = pd.read_sql_table("data", conn)
Writing DataFrames
''''''''''''''''''
@@ -4963,17 +4982,20 @@ the database using :func:`~pandas.DataFrame.to_sql`.
:suppress:
import datetime
- c = ['id', 'Date', 'Col_1', 'Col_2', 'Col_3']
- d = [(26, datetime.datetime(2010, 10, 18), 'X', 27.5, True),
- (42, datetime.datetime(2010, 10, 19), 'Y', -12.5, False),
- (63, datetime.datetime(2010, 10, 20), 'Z', 5.73, True)]
+
+ c = ["id", "Date", "Col_1", "Col_2", "Col_3"]
+ d = [
+ (26, datetime.datetime(2010, 10, 18), "X", 27.5, True),
+ (42, datetime.datetime(2010, 10, 19), "Y", -12.5, False),
+ (63, datetime.datetime(2010, 10, 20), "Z", 5.73, True),
+ ]
data = pd.DataFrame(d, columns=c)
.. ipython:: python
data
- data.to_sql('data', engine)
+ data.to_sql("data", engine)
With some databases, writing large DataFrames can result in errors due to
packet size limitations being exceeded. This can be avoided by setting the
@@ -4982,7 +5004,7 @@ writes ``data`` to the database in batches of 1000 rows at a time:
.. ipython:: python
- data.to_sql('data_chunked', engine, chunksize=1000)
+ data.to_sql("data_chunked", engine, chunksize=1000)
SQL data types
++++++++++++++
@@ -5001,7 +5023,8 @@ default ``Text`` type for string columns:
.. ipython:: python
from sqlalchemy.types import String
- data.to_sql('data_dtype', engine, dtype={'Col_1': String})
+
+ data.to_sql("data_dtype", engine, dtype={"Col_1": String})
.. note::
@@ -5119,7 +5142,7 @@ table name and optionally a subset of columns to read.
.. ipython:: python
- pd.read_sql_table('data', engine)
+ pd.read_sql_table("data", engine)
.. note::
@@ -5138,23 +5161,22 @@ and specify a subset of columns to be read.
.. ipython:: python
- pd.read_sql_table('data', engine, index_col='id')
- pd.read_sql_table('data', engine, columns=['Col_1', 'Col_2'])
+ pd.read_sql_table("data", engine, index_col="id")
+ pd.read_sql_table("data", engine, columns=["Col_1", "Col_2"])
And you can explicitly force columns to be parsed as dates:
.. ipython:: python
- pd.read_sql_table('data', engine, parse_dates=['Date'])
+ pd.read_sql_table("data", engine, parse_dates=["Date"])
If needed you can explicitly specify a format string, or a dict of arguments
to pass to :func:`pandas.to_datetime`:
.. code-block:: python
- pd.read_sql_table('data', engine, parse_dates={'Date': '%Y-%m-%d'})
- pd.read_sql_table('data', engine,
- parse_dates={'Date': {'format': '%Y-%m-%d %H:%M:%S'}})
+ pd.read_sql_table("data", engine, parse_dates={"Date": "%Y-%m-%d"})
+ pd.read_sql_table("data", engine, parse_dates={"Date": {"format": "%Y-%m-%d %H:%M:%S"}})
You can check if a table exists using :func:`~pandas.io.sql.has_table`
@@ -5169,8 +5191,8 @@ have schema's). For example:
.. code-block:: python
- df.to_sql('table', engine, schema='other_schema')
- pd.read_sql_table('table', engine, schema='other_schema')
+ df.to_sql("table", engine, schema="other_schema")
+ pd.read_sql_table("table", engine, schema="other_schema")
Querying
''''''''
@@ -5182,7 +5204,7 @@ which are database-agnostic.
.. ipython:: python
- pd.read_sql_query('SELECT * FROM data', engine)
+ pd.read_sql_query("SELECT * FROM data", engine)
Of course, you can specify a more "complex" query.
@@ -5195,13 +5217,12 @@ Specifying this will return an iterator through chunks of the query result:
.. ipython:: python
- df = pd.DataFrame(np.random.randn(20, 3), columns=list('abc'))
- df.to_sql('data_chunks', engine, index=False)
+ df = pd.DataFrame(np.random.randn(20, 3), columns=list("abc"))
+ df.to_sql("data_chunks", engine, index=False)
.. ipython:: python
- for chunk in pd.read_sql_query("SELECT * FROM data_chunks",
- engine, chunksize=5):
+ for chunk in pd.read_sql_query("SELECT * FROM data_chunks", engine, chunksize=5):
print(chunk)
You can also run a plain query without creating a ``DataFrame`` with
@@ -5213,9 +5234,11 @@ variant appropriate for your database.
.. code-block:: python
from pandas.io import sql
- sql.execute('SELECT * FROM table_name', engine)
- sql.execute('INSERT INTO table_name VALUES(?, ?, ?)', engine,
- params=[('id', 1, 12.2, True)])
+
+ sql.execute("SELECT * FROM table_name", engine)
+ sql.execute(
+ "INSERT INTO table_name VALUES(?, ?, ?)", engine, params=[("id", 1, 12.2, True)]
+ )
Engine connection examples
@@ -5229,20 +5252,20 @@ connecting to.
from sqlalchemy import create_engine
- engine = create_engine('postgresql://scott:tiger@localhost:5432/mydatabase')
+ engine = create_engine("postgresql://scott:tiger@localhost:5432/mydatabase")
- engine = create_engine('mysql+mysqldb://scott:tiger@localhost/foo')
+ engine = create_engine("mysql+mysqldb://scott:tiger@localhost/foo")
- engine = create_engine('oracle://scott:tiger@127.0.0.1:1521/sidname')
+ engine = create_engine("oracle://scott:tiger@127.0.0.1:1521/sidname")
- engine = create_engine('mssql+pyodbc://mydsn')
+ engine = create_engine("mssql+pyodbc://mydsn")
# sqlite://<nohostname>/<path>
# where <path> is relative:
- engine = create_engine('sqlite:///foo.db')
+ engine = create_engine("sqlite:///foo.db")
# or absolute, starting with a slash:
- engine = create_engine('sqlite:////absolute/path/to/foo.db')
+ engine = create_engine("sqlite:////absolute/path/to/foo.db")
For more information see the examples the SQLAlchemy `documentation <https://docs.sqlalchemy.org/en/latest/core/engines.html>`__
@@ -5257,21 +5280,25 @@ Use :func:`sqlalchemy.text` to specify query parameters in a backend-neutral way
.. ipython:: python
import sqlalchemy as sa
- pd.read_sql(sa.text('SELECT * FROM data where Col_1=:col1'),
- engine, params={'col1': 'X'})
+
+ pd.read_sql(
+ sa.text("SELECT * FROM data where Col_1=:col1"), engine, params={"col1": "X"}
+ )
If you have an SQLAlchemy description of your database you can express where conditions using SQLAlchemy expressions
.. ipython:: python
metadata = sa.MetaData()
- data_table = sa.Table('data', metadata,
- sa.Column('index', sa.Integer),
- sa.Column('Date', sa.DateTime),
- sa.Column('Col_1', sa.String),
- sa.Column('Col_2', sa.Float),
- sa.Column('Col_3', sa.Boolean),
- )
+ data_table = sa.Table(
+ "data",
+ metadata,
+ sa.Column("index", sa.Integer),
+ sa.Column("Date", sa.DateTime),
+ sa.Column("Col_1", sa.String),
+ sa.Column("Col_2", sa.Float),
+ sa.Column("Col_3", sa.Boolean),
+ )
pd.read_sql(sa.select([data_table]).where(data_table.c.Col_3 is True), engine)
@@ -5280,8 +5307,9 @@ You can combine SQLAlchemy expressions with parameters passed to :func:`read_sql
.. ipython:: python
import datetime as dt
- expr = sa.select([data_table]).where(data_table.c.Date > sa.bindparam('date'))
- pd.read_sql(expr, engine, params={'date': dt.datetime(2010, 10, 18)})
+
+ expr = sa.select([data_table]).where(data_table.c.Date > sa.bindparam("date"))
+ pd.read_sql(expr, engine, params={"date": dt.datetime(2010, 10, 18)})
Sqlite fallback
@@ -5296,13 +5324,14 @@ You can create connections like so:
.. code-block:: python
import sqlite3
- con = sqlite3.connect(':memory:')
+
+ con = sqlite3.connect(":memory:")
And then issue the following queries:
.. code-block:: python
- data.to_sql('data', con)
+ data.to_sql("data", con)
pd.read_sql_query("SELECT * FROM data", con)
@@ -5339,8 +5368,8 @@ into a .dta file. The format version of this file is always 115 (Stata 12).
.. ipython:: python
- df = pd.DataFrame(np.random.randn(10, 2), columns=list('AB'))
- df.to_stata('stata.dta')
+ df = pd.DataFrame(np.random.randn(10, 2), columns=list("AB"))
+ df.to_stata("stata.dta")
*Stata* data files have limited data type support; only strings with
244 or fewer characters, ``int8``, ``int16``, ``int32``, ``float32``
@@ -5390,7 +5419,7 @@ be used to read the file incrementally.
.. ipython:: python
- pd.read_stata('stata.dta')
+ pd.read_stata("stata.dta")
Specifying a ``chunksize`` yields a
:class:`~pandas.io.stata.StataReader` instance that can be used to
@@ -5399,7 +5428,7 @@ object can be used as an iterator.
.. ipython:: python
- reader = pd.read_stata('stata.dta', chunksize=3)
+ reader = pd.read_stata("stata.dta", chunksize=3)
for df in reader:
print(df.shape)
@@ -5409,7 +5438,7 @@ For more fine-grained control, use ``iterator=True`` and specify
.. ipython:: python
- reader = pd.read_stata('stata.dta', iterator=True)
+ reader = pd.read_stata("stata.dta", iterator=True)
chunk1 = reader.read(5)
chunk2 = reader.read(5)
@@ -5441,7 +5470,7 @@ values will have ``object`` data type.
.. ipython:: python
:suppress:
- os.remove('stata.dta')
+ os.remove("stata.dta")
.. _io.stata-categorical:
@@ -5513,7 +5542,7 @@ Read a SAS7BDAT file:
.. code-block:: python
- df = pd.read_sas('sas_data.sas7bdat')
+ df = pd.read_sas("sas_data.sas7bdat")
Obtain an iterator and read an XPORT file 100,000 lines at a time:
@@ -5522,7 +5551,8 @@ Obtain an iterator and read an XPORT file 100,000 lines at a time:
def do_something(chunk):
pass
- rdr = pd.read_sas('sas_xport.xpt', chunk=100000)
+
+ rdr = pd.read_sas("sas_xport.xpt", chunk=100000)
for chunk in rdr:
do_something(chunk)
@@ -5556,15 +5586,14 @@ Read an SPSS file:
.. code-block:: python
- df = pd.read_spss('spss_data.sav')
+ df = pd.read_spss("spss_data.sav")
Extract a subset of columns contained in ``usecols`` from an SPSS file and
avoid converting categorical columns into ``pd.Categorical``:
.. code-block:: python
- df = pd.read_spss('spss_data.sav', usecols=['foo', 'bar'],
- convert_categoricals=False)
+ df = pd.read_spss("spss_data.sav", usecols=["foo", "bar"], convert_categoricals=False)
More information about the SAV and ZSAV file formats is available here_.
@@ -5622,78 +5651,99 @@ Given the next test set:
import os
sz = 1000000
- df = pd.DataFrame({'A': np.random.randn(sz), 'B': [1] * sz})
+ df = pd.DataFrame({"A": np.random.randn(sz), "B": [1] * sz})
sz = 1000000
np.random.seed(42)
- df = pd.DataFrame({'A': np.random.randn(sz), 'B': [1] * sz})
+ df = pd.DataFrame({"A": np.random.randn(sz), "B": [1] * sz})
+
def test_sql_write(df):
- if os.path.exists('test.sql'):
- os.remove('test.sql')
- sql_db = sqlite3.connect('test.sql')
- df.to_sql(name='test_table', con=sql_db)
+ if os.path.exists("test.sql"):
+ os.remove("test.sql")
+ sql_db = sqlite3.connect("test.sql")
+ df.to_sql(name="test_table", con=sql_db)
sql_db.close()
+
def test_sql_read():
- sql_db = sqlite3.connect('test.sql')
+ sql_db = sqlite3.connect("test.sql")
pd.read_sql_query("select * from test_table", sql_db)
sql_db.close()
+
def test_hdf_fixed_write(df):
- df.to_hdf('test_fixed.hdf', 'test', mode='w')
+ df.to_hdf("test_fixed.hdf", "test", mode="w")
+
def test_hdf_fixed_read():
- pd.read_hdf('test_fixed.hdf', 'test')
+ pd.read_hdf("test_fixed.hdf", "test")
+
def test_hdf_fixed_write_compress(df):
- df.to_hdf('test_fixed_compress.hdf', 'test', mode='w', complib='blosc')
+ df.to_hdf("test_fixed_compress.hdf", "test", mode="w", complib="blosc")
+
def test_hdf_fixed_read_compress():
- pd.read_hdf('test_fixed_compress.hdf', 'test')
+ pd.read_hdf("test_fixed_compress.hdf", "test")
+
def test_hdf_table_write(df):
- df.to_hdf('test_table.hdf', 'test', mode='w', format='table')
+ df.to_hdf("test_table.hdf", "test", mode="w", format="table")
+
def test_hdf_table_read():
- pd.read_hdf('test_table.hdf', 'test')
+ pd.read_hdf("test_table.hdf", "test")
+
def test_hdf_table_write_compress(df):
- df.to_hdf('test_table_compress.hdf', 'test', mode='w',
- complib='blosc', format='table')
+ df.to_hdf(
+ "test_table_compress.hdf", "test", mode="w", complib="blosc", format="table"
+ )
+
def test_hdf_table_read_compress():
- pd.read_hdf('test_table_compress.hdf', 'test')
+ pd.read_hdf("test_table_compress.hdf", "test")
+
def test_csv_write(df):
- df.to_csv('test.csv', mode='w')
+ df.to_csv("test.csv", mode="w")
+
def test_csv_read():
- pd.read_csv('test.csv', index_col=0)
+ pd.read_csv("test.csv", index_col=0)
+
def test_feather_write(df):
- df.to_feather('test.feather')
+ df.to_feather("test.feather")
+
def test_feather_read():
- pd.read_feather('test.feather')
+ pd.read_feather("test.feather")
+
def test_pickle_write(df):
- df.to_pickle('test.pkl')
+ df.to_pickle("test.pkl")
+
def test_pickle_read():
- pd.read_pickle('test.pkl')
+ pd.read_pickle("test.pkl")
+
def test_pickle_write_compress(df):
- df.to_pickle('test.pkl.compress', compression='xz')
+ df.to_pickle("test.pkl.compress", compression="xz")
+
def test_pickle_read_compress():
- pd.read_pickle('test.pkl.compress', compression='xz')
+ pd.read_pickle("test.pkl.compress", compression="xz")
+
def test_parquet_write(df):
- df.to_parquet('test.parquet')
+ df.to_parquet("test.parquet")
+
def test_parquet_read():
- pd.read_parquet('test.parquet')
+ pd.read_parquet("test.parquet")
When writing, the top-three functions in terms of speed are ``test_feather_write``, ``test_hdf_fixed_write`` and ``test_hdf_fixed_write_compress``.
diff --git a/doc/source/user_guide/text.rst b/doc/source/user_guide/text.rst
index dd6ac37d88f08..2ada09117273d 100644
--- a/doc/source/user_guide/text.rst
+++ b/doc/source/user_guide/text.rst
@@ -46,20 +46,20 @@ infer a list of strings to
.. ipython:: python
- pd.Series(['a', 'b', 'c'])
+ pd.Series(["a", "b", "c"])
To explicitly request ``string`` dtype, specify the ``dtype``
.. ipython:: python
- pd.Series(['a', 'b', 'c'], dtype="string")
- pd.Series(['a', 'b', 'c'], dtype=pd.StringDtype())
+ pd.Series(["a", "b", "c"], dtype="string")
+ pd.Series(["a", "b", "c"], dtype=pd.StringDtype())
Or ``astype`` after the ``Series`` or ``DataFrame`` is created
.. ipython:: python
- s = pd.Series(['a', 'b', 'c'])
+ s = pd.Series(["a", "b", "c"])
s
s.astype("string")
@@ -71,7 +71,7 @@ it will be converted to ``string`` dtype:
.. ipython:: python
- s = pd.Series(['a', 2, np.nan], dtype="string")
+ s = pd.Series(["a", 2, np.nan], dtype="string")
s
type(s[1])
@@ -147,15 +147,16 @@ the equivalent (scalar) built-in string methods:
.. ipython:: python
- s = pd.Series(['A', 'B', 'C', 'Aaba', 'Baca', np.nan, 'CABA', 'dog', 'cat'],
- dtype="string")
+ s = pd.Series(
+ ["A", "B", "C", "Aaba", "Baca", np.nan, "CABA", "dog", "cat"], dtype="string"
+ )
s.str.lower()
s.str.upper()
s.str.len()
.. ipython:: python
- idx = pd.Index([' jack', 'jill ', ' jesse ', 'frank'])
+ idx = pd.Index([" jack", "jill ", " jesse ", "frank"])
idx.str.strip()
idx.str.lstrip()
idx.str.rstrip()
@@ -166,8 +167,9 @@ leading or trailing whitespace:
.. ipython:: python
- df = pd.DataFrame(np.random.randn(3, 2),
- columns=[' Column A ', ' Column B '], index=range(3))
+ df = pd.DataFrame(
+ np.random.randn(3, 2), columns=[" Column A ", " Column B "], index=range(3)
+ )
df
Since ``df.columns`` is an Index object, we can use the ``.str`` accessor
@@ -183,7 +185,7 @@ and replacing any remaining whitespaces with underscores:
.. ipython:: python
- df.columns = df.columns.str.strip().str.lower().str.replace(' ', '_')
+ df.columns = df.columns.str.strip().str.lower().str.replace(" ", "_")
df
.. note::
@@ -221,21 +223,21 @@ Methods like ``split`` return a Series of lists:
.. ipython:: python
- s2 = pd.Series(['a_b_c', 'c_d_e', np.nan, 'f_g_h'], dtype="string")
- s2.str.split('_')
+ s2 = pd.Series(["a_b_c", "c_d_e", np.nan, "f_g_h"], dtype="string")
+ s2.str.split("_")
Elements in the split lists can be accessed using ``get`` or ``[]`` notation:
.. ipython:: python
- s2.str.split('_').str.get(1)
- s2.str.split('_').str[1]
+ s2.str.split("_").str.get(1)
+ s2.str.split("_").str[1]
It is easy to expand this to return a DataFrame using ``expand``.
.. ipython:: python
- s2.str.split('_', expand=True)
+ s2.str.split("_", expand=True)
When original ``Series`` has :class:`StringDtype`, the output columns will all
be :class:`StringDtype` as well.
@@ -244,25 +246,25 @@ It is also possible to limit the number of splits:
.. ipython:: python
- s2.str.split('_', expand=True, n=1)
+ s2.str.split("_", expand=True, n=1)
``rsplit`` is similar to ``split`` except it works in the reverse direction,
i.e., from the end of the string to the beginning of the string:
.. ipython:: python
- s2.str.rsplit('_', expand=True, n=1)
+ s2.str.rsplit("_", expand=True, n=1)
``replace`` by default replaces `regular expressions
<https://docs.python.org/3/library/re.html>`__:
.. ipython:: python
- s3 = pd.Series(['A', 'B', 'C', 'Aaba', 'Baca',
- '', np.nan, 'CABA', 'dog', 'cat'],
- dtype="string")
+ s3 = pd.Series(
+ ["A", "B", "C", "Aaba", "Baca", "", np.nan, "CABA", "dog", "cat"], dtype="string"
+ )
s3
- s3.str.replace('^.a|dog', 'XX-XX ', case=False)
+ s3.str.replace("^.a|dog", "XX-XX ", case=False)
Some caution must be taken to keep regular expressions in mind! For example, the
following code will cause trouble because of the regular expression meaning of
@@ -271,16 +273,16 @@ following code will cause trouble because of the regular expression meaning of
.. ipython:: python
# Consider the following badly formatted financial data
- dollars = pd.Series(['12', '-$10', '$10,000'], dtype="string")
+ dollars = pd.Series(["12", "-$10", "$10,000"], dtype="string")
# This does what you'd naively expect:
- dollars.str.replace('$', '')
+ dollars.str.replace("$", "")
# But this doesn't:
- dollars.str.replace('-$', '-')
+ dollars.str.replace("-$", "-")
# We need to escape the special character (for >1 len patterns)
- dollars.str.replace(r'-\$', '-')
+ dollars.str.replace(r"-\$", "-")
If you do want literal replacement of a string (equivalent to
:meth:`str.replace`), you can set the optional ``regex`` parameter to
@@ -290,8 +292,8 @@ and ``repl`` must be strings:
.. ipython:: python
# These lines are equivalent
- dollars.str.replace(r'-\$', '-')
- dollars.str.replace('-$', '-', regex=False)
+ dollars.str.replace(r"-\$", "-")
+ dollars.str.replace("-$", "-", regex=False)
The ``replace`` method can also take a callable as replacement. It is called
on every ``pat`` using :func:`re.sub`. The callable should expect one
@@ -300,22 +302,24 @@ positional argument (a regex object) and return a string.
.. ipython:: python
# Reverse every lowercase alphabetic word
- pat = r'[a-z]+'
+ pat = r"[a-z]+"
+
def repl(m):
return m.group(0)[::-1]
- pd.Series(['foo 123', 'bar baz', np.nan],
- dtype="string").str.replace(pat, repl)
+
+ pd.Series(["foo 123", "bar baz", np.nan], dtype="string").str.replace(pat, repl)
# Using regex groups
pat = r"(?P<one>\w+) (?P<two>\w+) (?P<three>\w+)"
+
def repl(m):
- return m.group('two').swapcase()
+ return m.group("two").swapcase()
- pd.Series(['Foo Bar Baz', np.nan],
- dtype="string").str.replace(pat, repl)
+
+ pd.Series(["Foo Bar Baz", np.nan], dtype="string").str.replace(pat, repl)
The ``replace`` method also accepts a compiled regular expression object
from :func:`re.compile` as a pattern. All flags should be included in the
@@ -324,8 +328,9 @@ compiled regular expression object.
.. ipython:: python
import re
- regex_pat = re.compile(r'^.a|dog', flags=re.IGNORECASE)
- s3.str.replace(regex_pat, 'XX-XX ')
+
+ regex_pat = re.compile(r"^.a|dog", flags=re.IGNORECASE)
+ s3.str.replace(regex_pat, "XX-XX ")
Including a ``flags`` argument when calling ``replace`` with a compiled
regular expression object will raise a ``ValueError``.
@@ -352,8 +357,8 @@ The content of a ``Series`` (or ``Index``) can be concatenated:
.. ipython:: python
- s = pd.Series(['a', 'b', 'c', 'd'], dtype="string")
- s.str.cat(sep=',')
+ s = pd.Series(["a", "b", "c", "d"], dtype="string")
+ s.str.cat(sep=",")
If not specified, the keyword ``sep`` for the separator defaults to the empty string, ``sep=''``:
@@ -365,9 +370,9 @@ By default, missing values are ignored. Using ``na_rep``, they can be given a re
.. ipython:: python
- t = pd.Series(['a', 'b', np.nan, 'd'], dtype="string")
- t.str.cat(sep=',')
- t.str.cat(sep=',', na_rep='-')
+ t = pd.Series(["a", "b", np.nan, "d"], dtype="string")
+ t.str.cat(sep=",")
+ t.str.cat(sep=",", na_rep="-")
Concatenating a Series and something list-like into a Series
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
@@ -376,14 +381,14 @@ The first argument to :meth:`~Series.str.cat` can be a list-like object, provide
.. ipython:: python
- s.str.cat(['A', 'B', 'C', 'D'])
+ s.str.cat(["A", "B", "C", "D"])
Missing values on either side will result in missing values in the result as well, *unless* ``na_rep`` is specified:
.. ipython:: python
s.str.cat(t)
- s.str.cat(t, na_rep='-')
+ s.str.cat(t, na_rep="-")
Concatenating a Series and something array-like into a Series
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
@@ -395,7 +400,7 @@ The parameter ``others`` can also be two-dimensional. In this case, the number o
d = pd.concat([t, s], axis=1)
s
d
- s.str.cat(d, na_rep='-')
+ s.str.cat(d, na_rep="-")
Concatenating a Series and an indexed object into a Series, with alignment
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
@@ -406,12 +411,11 @@ the ``join``-keyword.
.. ipython:: python
:okwarning:
- u = pd.Series(['b', 'd', 'a', 'c'], index=[1, 3, 0, 2],
- dtype="string")
+ u = pd.Series(["b", "d", "a", "c"], index=[1, 3, 0, 2], dtype="string")
s
u
s.str.cat(u)
- s.str.cat(u, join='left')
+ s.str.cat(u, join="left")
.. warning::
@@ -423,12 +427,11 @@ In particular, alignment also means that the different lengths do not need to co
.. ipython:: python
- v = pd.Series(['z', 'a', 'b', 'd', 'e'], index=[-1, 0, 1, 3, 4],
- dtype="string")
+ v = pd.Series(["z", "a", "b", "d", "e"], index=[-1, 0, 1, 3, 4], dtype="string")
s
v
- s.str.cat(v, join='left', na_rep='-')
- s.str.cat(v, join='outer', na_rep='-')
+ s.str.cat(v, join="left", na_rep="-")
+ s.str.cat(v, join="outer", na_rep="-")
The same alignment can be used when ``others`` is a ``DataFrame``:
@@ -437,7 +440,7 @@ The same alignment can be used when ``others`` is a ``DataFrame``:
f = d.loc[[3, 2, 1, 0], :]
s
f
- s.str.cat(f, join='left', na_rep='-')
+ s.str.cat(f, join="left", na_rep="-")
Concatenating a Series and many objects into a Series
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
@@ -449,7 +452,7 @@ can be combined in a list-like container (including iterators, ``dict``-views, e
s
u
- s.str.cat([u, u.to_numpy()], join='left')
+ s.str.cat([u, u.to_numpy()], join="left")
All elements without an index (e.g. ``np.ndarray``) within the passed list-like must match in length to the calling ``Series`` (or ``Index``),
but ``Series`` and ``Index`` may have arbitrary length (as long as alignment is not disabled with ``join=None``):
@@ -457,7 +460,7 @@ but ``Series`` and ``Index`` may have arbitrary length (as long as alignment is
.. ipython:: python
v
- s.str.cat([v, u, u.to_numpy()], join='outer', na_rep='-')
+ s.str.cat([v, u, u.to_numpy()], join="outer", na_rep="-")
If using ``join='right'`` on a list-like of ``others`` that contains different indexes,
the union of these indexes will be used as the basis for the final concatenation:
@@ -466,7 +469,7 @@ the union of these indexes will be used as the basis for the final concatenation
u.loc[[3]]
v.loc[[-1, 0]]
- s.str.cat([u.loc[[3]], v.loc[[-1, 0]]], join='right', na_rep='-')
+ s.str.cat([u.loc[[3]], v.loc[[-1, 0]]], join="right", na_rep="-")
Indexing with ``.str``
----------------------
@@ -479,9 +482,9 @@ of the string, the result will be a ``NaN``.
.. ipython:: python
- s = pd.Series(['A', 'B', 'C', 'Aaba', 'Baca', np.nan,
- 'CABA', 'dog', 'cat'],
- dtype="string")
+ s = pd.Series(
+ ["A", "B", "C", "Aaba", "Baca", np.nan, "CABA", "dog", "cat"], dtype="string"
+ )
s.str[0]
s.str[1]
@@ -512,8 +515,7 @@ DataFrame with one column per group.
.. ipython:: python
- pd.Series(['a1', 'b2', 'c3'],
- dtype="string").str.extract(r'([ab])(\d)', expand=False)
+ pd.Series(["a1", "b2", "c3"], dtype="string").str.extract(r"([ab])(\d)", expand=False)
Elements that do not match return a row filled with ``NaN``. Thus, a
Series of messy strings can be "converted" into a like-indexed Series
@@ -526,16 +528,15 @@ Named groups like
.. ipython:: python
- pd.Series(['a1', 'b2', 'c3'],
- dtype="string").str.extract(r'(?P<letter>[ab])(?P<digit>\d)',
- expand=False)
+ pd.Series(["a1", "b2", "c3"], dtype="string").str.extract(
+ r"(?P<letter>[ab])(?P<digit>\d)", expand=False
+ )
and optional groups like
.. ipython:: python
- pd.Series(['a1', 'b2', '3'],
- dtype="string").str.extract(r'([ab])?(\d)', expand=False)
+ pd.Series(["a1", "b2", "3"], dtype="string").str.extract(r"([ab])?(\d)", expand=False)
can also be used. Note that any capture group names in the regular
expression will be used for column names; otherwise capture group
@@ -546,23 +547,20 @@ with one column if ``expand=True``.
.. ipython:: python
- pd.Series(['a1', 'b2', 'c3'],
- dtype="string").str.extract(r'[ab](\d)', expand=True)
+ pd.Series(["a1", "b2", "c3"], dtype="string").str.extract(r"[ab](\d)", expand=True)
It returns a Series if ``expand=False``.
.. ipython:: python
- pd.Series(['a1', 'b2', 'c3'],
- dtype="string").str.extract(r'[ab](\d)', expand=False)
+ pd.Series(["a1", "b2", "c3"], dtype="string").str.extract(r"[ab](\d)", expand=False)
Calling on an ``Index`` with a regex with exactly one capture group
returns a ``DataFrame`` with one column if ``expand=True``.
.. ipython:: python
- s = pd.Series(["a1", "b2", "c3"], ["A11", "B22", "C33"],
- dtype="string")
+ s = pd.Series(["a1", "b2", "c3"], ["A11", "B22", "C33"], dtype="string")
s
s.index.str.extract("(?P<letter>[a-zA-Z])", expand=True)
@@ -607,10 +605,9 @@ Unlike ``extract`` (which returns only the first match),
.. ipython:: python
- s = pd.Series(["a1a2", "b1", "c1"], index=["A", "B", "C"],
- dtype="string")
+ s = pd.Series(["a1a2", "b1", "c1"], index=["A", "B", "C"], dtype="string")
s
- two_groups = '(?P<letter>[a-z])(?P<digit>[0-9])'
+ two_groups = "(?P<letter>[a-z])(?P<digit>[0-9])"
s.str.extract(two_groups, expand=True)
the ``extractall`` method returns every match. The result of
@@ -626,7 +623,7 @@ When each subject string in the Series has exactly one match,
.. ipython:: python
- s = pd.Series(['a3', 'b3', 'c2'], dtype="string")
+ s = pd.Series(["a3", "b3", "c2"], dtype="string")
s
then ``extractall(pat).xs(0, level='match')`` gives the same result as
@@ -657,23 +654,20 @@ You can check whether elements contain a pattern:
.. ipython:: python
- pattern = r'[0-9][a-z]'
- pd.Series(['1', '2', '3a', '3b', '03c', '4dx'],
- dtype="string").str.contains(pattern)
+ pattern = r"[0-9][a-z]"
+ pd.Series(["1", "2", "3a", "3b", "03c", "4dx"], dtype="string").str.contains(pattern)
Or whether elements match a pattern:
.. ipython:: python
- pd.Series(['1', '2', '3a', '3b', '03c', '4dx'],
- dtype="string").str.match(pattern)
+ pd.Series(["1", "2", "3a", "3b", "03c", "4dx"], dtype="string").str.match(pattern)
.. versionadded:: 1.1.0
.. ipython:: python
- pd.Series(['1', '2', '3a', '3b', '03c', '4dx'],
- dtype="string").str.fullmatch(pattern)
+ pd.Series(["1", "2", "3a", "3b", "03c", "4dx"], dtype="string").str.fullmatch(pattern)
.. note::
@@ -695,9 +689,10 @@ True or False:
.. ipython:: python
- s4 = pd.Series(['A', 'B', 'C', 'Aaba', 'Baca', np.nan, 'CABA', 'dog', 'cat'],
- dtype="string")
- s4.str.contains('A', na=False)
+ s4 = pd.Series(
+ ["A", "B", "C", "Aaba", "Baca", np.nan, "CABA", "dog", "cat"], dtype="string"
+ )
+ s4.str.contains("A", na=False)
.. _text.indicator:
@@ -709,15 +704,15 @@ For example if they are separated by a ``'|'``:
.. ipython:: python
- s = pd.Series(['a', 'a|b', np.nan, 'a|c'], dtype="string")
- s.str.get_dummies(sep='|')
+ s = pd.Series(["a", "a|b", np.nan, "a|c"], dtype="string")
+ s.str.get_dummies(sep="|")
String ``Index`` also supports ``get_dummies`` which returns a ``MultiIndex``.
.. ipython:: python
- idx = pd.Index(['a', 'a|b', np.nan, 'a|c'])
- idx.str.get_dummies(sep='|')
+ idx = pd.Index(["a", "a|b", np.nan, "a|c"])
+ idx.str.get_dummies(sep="|")
See also :func:`~pandas.get_dummies`.
| Actually, contrary to https://github.com/pandas-dev/pandas/pull/36700#issuecomment-700957831 there is quite a lot more clean up to do (so maybe could make for some good first issues @simonjayhawkins), it's just that other docs contain blocks that blacken-docs can't parse (e.g., raw output or ipython code containing magic commands). A workaround is to comment them out, parse the doc, and then uncomment (the commented blocks would then have to be edited manually).
Is it possible to render blocks like this as pure code in rst which are allowed to raise?
```python
.. code-block:: python
>>> pd.DataFrame(np.random.randn(10, 2)).to_hdf('test_fixed.h5', 'df')
>>> pd.read_hdf('test_fixed.h5', 'df', where='index>5')
TypeError: cannot pass a where specification when reading a fixed format.
this store must be selected in its entirety
``` | https://api.github.com/repos/pandas-dev/pandas/pulls/36734 | 2020-09-30T01:01:17Z | 2020-10-01T01:14:22Z | 2020-10-01T01:14:21Z | 2020-10-01T12:13:43Z |
CI: disable ARM build | diff --git a/.travis.yml b/.travis.yml
index 81cd461dd2c87..2ef8e0e03aaf8 100644
--- a/.travis.yml
+++ b/.travis.yml
@@ -46,16 +46,16 @@ matrix:
- env:
- JOB="3.7" ENV_FILE="ci/deps/travis-37.yaml" PATTERN="(not slow and not network and not clipboard)"
- - arch: arm64
- env:
- - JOB="3.7, arm64" PYTEST_WORKERS=1 ENV_FILE="ci/deps/travis-37-arm64.yaml" PATTERN="(not slow and not network and not clipboard and not arm_slow)"
-
- env:
- JOB="3.7, locale" ENV_FILE="ci/deps/travis-37-locale.yaml" PATTERN="((not slow and not network and not clipboard) or (single and db))" LOCALE_OVERRIDE="zh_CN.UTF-8" SQL="1"
services:
- mysql
- postgresql
+ - arch: arm64
+ env:
+ - JOB="3.7, arm64" PYTEST_WORKERS=1 ENV_FILE="ci/deps/travis-37-arm64.yaml" PATTERN="(not slow and not network and not clipboard and not arm_slow)"
+
- env:
# Enabling Deprecations when running tests
# PANDAS_TESTING_MODE="deprecate" causes DeprecationWarning messages to be displayed in the logs
@@ -65,6 +65,12 @@ matrix:
- mysql
- postgresql
+ allow_failures:
+ # Moved to allowed_failures 2020-09-29 due to timeouts https://github.com/pandas-dev/pandas/issues/36719
+ - arch: arm64
+ env:
+ - JOB="3.7, arm64" PYTEST_WORKERS=1 ENV_FILE="ci/deps/travis-37-arm64.yaml" PATTERN="(not slow and not network and not clipboard and not arm_slow)"
+
before_install:
- echo "before_install"
| - [ ] closes #xxxx
- [ ] tests added / passed
- [ ] passes `black pandas`
- [ ] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [ ] whatsnew entry
xref #36719 | https://api.github.com/repos/pandas-dev/pandas/pulls/36733 | 2020-09-30T00:55:39Z | 2020-09-30T19:55:14Z | 2020-09-30T19:55:14Z | 2020-09-30T20:59:15Z |
EA: tighten TimedeltaArray._from_sequence signature | diff --git a/pandas/core/arrays/timedeltas.py b/pandas/core/arrays/timedeltas.py
index 6ca57e7872910..c97c7da375fd4 100644
--- a/pandas/core/arrays/timedeltas.py
+++ b/pandas/core/arrays/timedeltas.py
@@ -214,6 +214,19 @@ def _simple_new(
@classmethod
def _from_sequence(
+ cls, data, dtype=TD64NS_DTYPE, copy: bool = False
+ ) -> "TimedeltaArray":
+ if dtype:
+ _validate_td64_dtype(dtype)
+
+ data, inferred_freq = sequence_to_td64ns(data, copy=copy, unit=None)
+ freq, _ = dtl.validate_inferred_freq(None, inferred_freq, False)
+
+ result = cls._simple_new(data, freq=freq)
+ return result
+
+ @classmethod
+ def _from_sequence_not_strict(
cls,
data,
dtype=TD64NS_DTYPE,
diff --git a/pandas/core/indexes/timedeltas.py b/pandas/core/indexes/timedeltas.py
index 854c4e33eca01..858387f2e1600 100644
--- a/pandas/core/indexes/timedeltas.py
+++ b/pandas/core/indexes/timedeltas.py
@@ -153,7 +153,7 @@ def __new__(
# - Cases checked above all return/raise before reaching here - #
- tdarr = TimedeltaArray._from_sequence(
+ tdarr = TimedeltaArray._from_sequence_not_strict(
data, freq=freq, unit=unit, dtype=dtype, copy=copy
)
return cls._simple_new(tdarr, name=name)
| - [ ] closes #xxxx
- [ ] tests added / passed
- [ ] passes `black pandas`
- [ ] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [ ] whatsnew entry
xref #36718 | https://api.github.com/repos/pandas-dev/pandas/pulls/36731 | 2020-09-29T22:16:24Z | 2020-10-02T23:14:04Z | 2020-10-02T23:14:03Z | 2020-10-12T08:50:21Z |
[BUG]: Rolling selected too large windows with PeriodIndex | diff --git a/doc/source/whatsnew/v1.2.0.rst b/doc/source/whatsnew/v1.2.0.rst
index 25fac48397c68..77cf6d61ff7e6 100644
--- a/doc/source/whatsnew/v1.2.0.rst
+++ b/doc/source/whatsnew/v1.2.0.rst
@@ -407,6 +407,7 @@ Groupby/resample/rolling
- Bug in :meth:`DataFrameGroupBy.apply` raising error with ``np.nan`` group(s) when ``dropna=False`` (:issue:`35889`)
- Bug in :meth:`Rolling.sum()` returned wrong values when dtypes where mixed between float and integer and axis was equal to one (:issue:`20649`, :issue:`35596`)
- Bug in :meth:`Rolling.count` returned ``np.nan`` with :class:`pandas.api.indexers.FixedForwardWindowIndexer` as window, ``min_periods=0`` and only missing values in window (:issue:`35579`)
+- Bug where :class:`pandas.core.window.Rolling` produces incorrect window sizes when using a ``PeriodIndex`` (:issue:`34225`)
Reshaping
^^^^^^^^^
diff --git a/pandas/core/window/rolling.py b/pandas/core/window/rolling.py
index f207ea4cd67d4..39f1839ba559d 100644
--- a/pandas/core/window/rolling.py
+++ b/pandas/core/window/rolling.py
@@ -1932,7 +1932,6 @@ def validate(self):
):
self._validate_monotonic()
- freq = self._validate_freq()
# we don't allow center
if self.center:
@@ -1943,7 +1942,7 @@ def validate(self):
# this will raise ValueError on non-fixed freqs
self.win_freq = self.window
- self.window = freq.nanos
+ self.window = self._determine_window_length()
self.win_type = "freq"
# min_periods must be an integer
@@ -1963,6 +1962,16 @@ def validate(self):
"closed only implemented for datetimelike and offset based windows"
)
+ def _determine_window_length(self) -> Union[int, float]:
+ """
+ Calculate freq for PeriodIndexes based on Index freq. Can not use
+ nanos, because asi8 of PeriodIndex is not in nanos
+ """
+ freq = self._validate_freq()
+ if isinstance(self._on, ABCPeriodIndex):
+ return freq.nanos / (self._on.freq.nanos / self._on.freq.n)
+ return freq.nanos
+
def _validate_monotonic(self):
"""
Validate monotonic (increasing or decreasing).
diff --git a/pandas/tests/window/test_rolling.py b/pandas/tests/window/test_rolling.py
index 5ed5e99db8ab4..eaee276c7a388 100644
--- a/pandas/tests/window/test_rolling.py
+++ b/pandas/tests/window/test_rolling.py
@@ -837,3 +837,34 @@ def test_rolling_on_df_transposed():
result = df.T.rolling(min_periods=1, window=2).sum().T
tm.assert_frame_equal(result, expected)
+
+
+@pytest.mark.parametrize(
+ ("index", "window"),
+ [
+ (
+ pd.period_range(start="2020-01-01 08:00", end="2020-01-01 08:08", freq="T"),
+ "2T",
+ ),
+ (
+ pd.period_range(
+ start="2020-01-01 08:00", end="2020-01-01 12:00", freq="30T"
+ ),
+ "1h",
+ ),
+ ],
+)
+@pytest.mark.parametrize(
+ ("func", "values"),
+ [
+ ("min", [np.nan, 0, 0, 1, 2, 3, 4, 5, 6]),
+ ("max", [np.nan, 0, 1, 2, 3, 4, 5, 6, 7]),
+ ("sum", [np.nan, 0, 1, 3, 5, 7, 9, 11, 13]),
+ ],
+)
+def test_rolling_period_index(index, window, func, values):
+ # GH: 34225
+ ds = pd.Series([0, 1, 2, 3, 4, 5, 6, 7, 8], index=index)
+ result = getattr(ds.rolling(window, closed="left"), func)()
+ expected = pd.Series(values, index=index)
+ tm.assert_series_equal(result, expected)
| - [x] closes #34225
- [x] tests added / passed
- [x] passes `black pandas`
- [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [x] whatsnew entry
``asi8`` of PeriodIndex is not in nanoseconds, so we can not set ``self.window=freq.nanos`` | https://api.github.com/repos/pandas-dev/pandas/pulls/36730 | 2020-09-29T22:13:38Z | 2020-10-03T00:59:34Z | 2020-10-03T00:59:34Z | 2023-04-27T19:52:09Z |
PERF: using murmur hash for float64 khash-tables | diff --git a/asv_bench/benchmarks/hash_functions.py b/asv_bench/benchmarks/hash_functions.py
new file mode 100644
index 0000000000000..17bf434acf38a
--- /dev/null
+++ b/asv_bench/benchmarks/hash_functions.py
@@ -0,0 +1,164 @@
+import numpy as np
+
+import pandas as pd
+
+
+class IsinAlmostFullWithRandomInt:
+ params = [
+ [np.float64, np.int64, np.uint64, np.object],
+ range(10, 21),
+ ]
+ param_names = ["dtype", "exponent"]
+
+ def setup(self, dtype, exponent):
+ M = 3 * 2 ** (exponent - 2)
+ # 0.77-the maximal share of occupied buckets
+ np.random.seed(42)
+ self.s = pd.Series(np.random.randint(0, M, M)).astype(dtype)
+ self.values = np.random.randint(0, M, M).astype(dtype)
+ self.values_outside = self.values + M
+
+ def time_isin(self, dtype, exponent):
+ self.s.isin(self.values)
+
+ def time_isin_outside(self, dtype, exponent):
+ self.s.isin(self.values_outside)
+
+
+class IsinWithRandomFloat:
+ params = [
+ [np.float64, np.object],
+ [
+ 1_300,
+ 2_000,
+ 7_000,
+ 8_000,
+ 70_000,
+ 80_000,
+ 750_000,
+ 900_000,
+ ],
+ ]
+ param_names = ["dtype", "M"]
+
+ def setup(self, dtype, M):
+ np.random.seed(42)
+ self.values = np.random.rand(M)
+ self.s = pd.Series(self.values).astype(dtype)
+ np.random.shuffle(self.values)
+ self.values_outside = self.values + 0.1
+
+ def time_isin(self, dtype, M):
+ self.s.isin(self.values)
+
+ def time_isin_outside(self, dtype, M):
+ self.s.isin(self.values_outside)
+
+
+class IsinWithArangeSorted:
+ params = [
+ [np.float64, np.int64, np.uint64, np.object],
+ [
+ 1_000,
+ 2_000,
+ 8_000,
+ 100_000,
+ 1_000_000,
+ ],
+ ]
+ param_names = ["dtype", "M"]
+
+ def setup(self, dtype, M):
+ self.s = pd.Series(np.arange(M)).astype(dtype)
+ self.values = np.arange(M).astype(dtype)
+
+ def time_isin(self, dtype, M):
+ self.s.isin(self.values)
+
+
+class IsinWithArange:
+ params = [
+ [np.float64, np.int64, np.uint64, np.object],
+ [
+ 1_000,
+ 2_000,
+ 8_000,
+ ],
+ [-2, 0, 2],
+ ]
+ param_names = ["dtype", "M", "offset_factor"]
+
+ def setup(self, dtype, M, offset_factor):
+ offset = int(M * offset_factor)
+ np.random.seed(42)
+ tmp = pd.Series(np.random.randint(offset, M + offset, 10 ** 6))
+ self.s = tmp.astype(dtype)
+ self.values = np.arange(M).astype(dtype)
+
+ def time_isin(self, dtype, M, offset_factor):
+ self.s.isin(self.values)
+
+
+class Float64GroupIndex:
+ # GH28303
+ def setup(self):
+ self.df = pd.date_range(
+ start="1/1/2018", end="1/2/2018", periods=1e6
+ ).to_frame()
+ self.group_index = np.round(self.df.index.astype(int) / 1e9)
+
+ def time_groupby(self):
+ self.df.groupby(self.group_index).last()
+
+
+class UniqueAndFactorizeArange:
+ params = range(4, 16)
+ param_names = ["exponent"]
+
+ def setup(self, exponent):
+ a = np.arange(10 ** 4, dtype="float64")
+ self.a2 = (a + 10 ** exponent).repeat(100)
+
+ def time_factorize(self, exponent):
+ pd.factorize(self.a2)
+
+ def time_unique(self, exponent):
+ pd.unique(self.a2)
+
+
+class NumericSeriesIndexing:
+
+ params = [
+ (pd.Int64Index, pd.UInt64Index, pd.Float64Index),
+ (10 ** 4, 10 ** 5, 5 * 10 ** 5, 10 ** 6, 5 * 10 ** 6),
+ ]
+ param_names = ["index_dtype", "N"]
+
+ def setup(self, index, N):
+ vals = np.array(list(range(55)) + [54] + list(range(55, N - 1)))
+ indices = index(vals)
+ self.data = pd.Series(np.arange(N), index=indices)
+
+ def time_loc_slice(self, index, N):
+ # trigger building of mapping
+ self.data.loc[:800]
+
+
+class NumericSeriesIndexingShuffled:
+
+ params = [
+ (pd.Int64Index, pd.UInt64Index, pd.Float64Index),
+ (10 ** 4, 10 ** 5, 5 * 10 ** 5, 10 ** 6, 5 * 10 ** 6),
+ ]
+ param_names = ["index_dtype", "N"]
+
+ def setup(self, index, N):
+ vals = np.array(list(range(55)) + [54] + list(range(55, N - 1)))
+ np.random.seed(42)
+ np.random.shuffle(vals)
+ indices = index(vals)
+ self.data = pd.Series(np.arange(N), index=indices)
+
+ def time_loc_slice(self, index, N):
+ # trigger building of mapping
+ self.data.loc[:800]
diff --git a/doc/source/whatsnew/v1.2.0.rst b/doc/source/whatsnew/v1.2.0.rst
index 09cb024cbd95c..54ca602ff3c86 100644
--- a/doc/source/whatsnew/v1.2.0.rst
+++ b/doc/source/whatsnew/v1.2.0.rst
@@ -376,6 +376,7 @@ Performance improvements
- Reduced peak memory usage in :meth:`DataFrame.to_pickle` when using ``protocol=5`` in python 3.8+ (:issue:`34244`)
- faster ``dir`` calls when many index labels, e.g. ``dir(ser)`` (:issue:`37450`)
- Performance improvement in :class:`ExpandingGroupby` (:issue:`37064`)
+- Performance improvement in :meth:`pd.DataFrame.groupby` for ``float`` ``dtype`` (:issue:`28303`), changes of the underlying hash-function can lead to changes in float based indexes sort ordering for ties (e.g. :meth:`pd.Index.value_counts`)
.. ---------------------------------------------------------------------------
diff --git a/pandas/_libs/src/klib/khash.h b/pandas/_libs/src/klib/khash.h
index 916838d1e9584..61a4e80ea8cbc 100644
--- a/pandas/_libs/src/klib/khash.h
+++ b/pandas/_libs/src/klib/khash.h
@@ -143,10 +143,86 @@ typedef khint_t khiter_t;
#define __ac_set_isboth_false(flag, i) __ac_set_isempty_false(flag, i)
#define __ac_set_isdel_true(flag, i) ((void)0)
+
+// specializations of https://github.com/aappleby/smhasher/blob/master/src/MurmurHash2.cpp
+khint32_t PANDAS_INLINE murmur2_32to32(khint32_t k){
+ const khint32_t SEED = 0xc70f6907UL;
+ // 'm' and 'r' are mixing constants generated offline.
+ // They're not really 'magic', they just happen to work well.
+ const khint32_t M_32 = 0x5bd1e995;
+ const int R_32 = 24;
+
+ // Initialize the hash to a 'random' value
+ khint32_t h = SEED ^ 4;
+
+ //handle 4 bytes:
+ k *= M_32;
+ k ^= k >> R_32;
+ k *= M_32;
+
+ h *= M_32;
+ h ^= k;
+
+ // Do a few final mixes of the hash to ensure the "last few
+ // bytes" are well-incorporated. (Really needed here?)
+ h ^= h >> 13;
+ h *= M_32;
+ h ^= h >> 15;
+ return h;
+}
+
+// it is possible to have a special x64-version, which would need less operations, but
+// using 32bit version always has also some benifits:
+// - one code for 32bit and 64bit builds
+// - the same case for 32bit and 64bit builds
+// - no performance difference could be measured compared to a possible x64-version
+
+khint32_t PANDAS_INLINE murmur2_32_32to32(khint32_t k1, khint32_t k2){
+ const khint32_t SEED = 0xc70f6907UL;
+ // 'm' and 'r' are mixing constants generated offline.
+ // They're not really 'magic', they just happen to work well.
+ const khint32_t M_32 = 0x5bd1e995;
+ const int R_32 = 24;
+
+ // Initialize the hash to a 'random' value
+ khint32_t h = SEED ^ 4;
+
+ //handle first 4 bytes:
+ k1 *= M_32;
+ k1 ^= k1 >> R_32;
+ k1 *= M_32;
+
+ h *= M_32;
+ h ^= k1;
+
+ //handle second 4 bytes:
+ k2 *= M_32;
+ k2 ^= k2 >> R_32;
+ k2 *= M_32;
+
+ h *= M_32;
+ h ^= k2;
+
+ // Do a few final mixes of the hash to ensure the "last few
+ // bytes" are well-incorporated.
+ h ^= h >> 13;
+ h *= M_32;
+ h ^= h >> 15;
+ return h;
+}
+
+khint32_t PANDAS_INLINE murmur2_64to32(khint64_t k){
+ khint32_t k1 = (khint32_t)k;
+ khint32_t k2 = (khint32_t)(k >> 32);
+
+ return murmur2_32_32to32(k1, k2);
+}
+
+
#ifdef KHASH_LINEAR
#define __ac_inc(k, m) 1
#else
-#define __ac_inc(k, m) (((k)>>3 ^ (k)<<3) | 1) & (m)
+#define __ac_inc(k, m) (murmur2_32to32(k) | 1) & (m)
#endif
#define __ac_fsize(m) ((m) < 32? 1 : (m)>>5)
diff --git a/pandas/_libs/src/klib/khash_python.h b/pandas/_libs/src/klib/khash_python.h
index 2b46d30c3adb6..aebc229abddd2 100644
--- a/pandas/_libs/src/klib/khash_python.h
+++ b/pandas/_libs/src/klib/khash_python.h
@@ -13,25 +13,31 @@
// is 64 bits the truncation causes collission issues. Given all that, we use our own
// simple hash, viewing the double bytes as an int64 and using khash's default
// hash for 64 bit integers.
-// GH 13436
+// GH 13436 showed that _Py_HashDouble doesn't work well with khash
+// GH 28303 showed, that the simple xoring-version isn't good enough
+// See GH 36729 for evaluation of the currently used murmur2-hash version
+
khint64_t PANDAS_INLINE asint64(double key) {
- khint64_t val;
- memcpy(&val, &key, sizeof(double));
- return val;
+ khint64_t val;
+ memcpy(&val, &key, sizeof(double));
+ return val;
}
-// correct for all inputs but not -0.0 and NaNs
-#define kh_float64_hash_func_0_NAN(key) (khint32_t)((asint64(key))>>33^(asint64(key))^(asint64(key))<<11)
-
-// correct for all inputs but not NaNs
-#define kh_float64_hash_func_NAN(key) ((key) == 0.0 ? \
- kh_float64_hash_func_0_NAN(0.0) : \
- kh_float64_hash_func_0_NAN(key))
+#define ZERO_HASH 0
+#define NAN_HASH 0
-// correct for all
-#define kh_float64_hash_func(key) ((key) != (key) ? \
- kh_float64_hash_func_NAN(Py_NAN) : \
- kh_float64_hash_func_NAN(key))
+khint32_t PANDAS_INLINE kh_float64_hash_func(double val){
+ // 0.0 and -0.0 should have the same hash:
+ if (val == 0.0){
+ return ZERO_HASH;
+ }
+ // all nans should have the same hash:
+ if ( val!=val ){
+ return NAN_HASH;
+ }
+ khint64_t as_int = asint64(val);
+ return murmur2_64to32(as_int);
+}
#define kh_float64_hash_equal(a, b) ((a) == (b) || ((b) != (b) && (a) != (a)))
diff --git a/pandas/core/base.py b/pandas/core/base.py
index 4760b92ad5fec..b3366cca37617 100644
--- a/pandas/core/base.py
+++ b/pandas/core/base.py
@@ -982,9 +982,9 @@ def value_counts(
>>> index = pd.Index([3, 1, 2, 3, 4, np.nan])
>>> index.value_counts()
3.0 2
- 1.0 1
2.0 1
4.0 1
+ 1.0 1
dtype: int64
With `normalize` set to `True`, returns the relative frequency by
@@ -993,9 +993,9 @@ def value_counts(
>>> s = pd.Series([3, 1, 2, 3, 4, np.nan])
>>> s.value_counts(normalize=True)
3.0 0.4
- 1.0 0.2
2.0 0.2
4.0 0.2
+ 1.0 0.2
dtype: float64
**bins**
@@ -1017,10 +1017,10 @@ def value_counts(
>>> s.value_counts(dropna=False)
3.0 2
- 1.0 1
2.0 1
- 4.0 1
NaN 1
+ 4.0 1
+ 1.0 1
dtype: int64
"""
result = value_counts(
diff --git a/pandas/tests/base/test_value_counts.py b/pandas/tests/base/test_value_counts.py
index 1a6cba1ace35f..e9713e38f9874 100644
--- a/pandas/tests/base/test_value_counts.py
+++ b/pandas/tests/base/test_value_counts.py
@@ -232,18 +232,14 @@ def test_value_counts_datetime64(index_or_series):
# with NaT
s = df["dt"].copy()
- s = klass(list(s.values) + [pd.NaT])
+ s = klass(list(s.values) + [pd.NaT] * 4)
result = s.value_counts()
assert result.index.dtype == "datetime64[ns]"
tm.assert_series_equal(result, expected_s)
result = s.value_counts(dropna=False)
- # GH 35922. NaN-like now sorts to the beginning of duplicate counts
- idx = pd.to_datetime(
- ["2010-01-01 00:00:00", "2008-09-09 00:00:00", pd.NaT, "2009-01-01 00:00:00"]
- )
- expected_s = Series([3, 2, 1, 1], index=idx)
+ expected_s = pd.concat([Series([4], index=DatetimeIndex([pd.NaT])), expected_s])
tm.assert_series_equal(result, expected_s)
unique = s.unique()
diff --git a/pandas/tests/test_algos.py b/pandas/tests/test_algos.py
index 88286448de900..34b7d0e73e914 100644
--- a/pandas/tests/test_algos.py
+++ b/pandas/tests/test_algos.py
@@ -1173,12 +1173,12 @@ def test_dropna(self):
)
tm.assert_series_equal(
- Series([True, True, False, None]).value_counts(dropna=True),
- Series([2, 1], index=[True, False]),
+ Series([True] * 3 + [False] * 2 + [None] * 5).value_counts(dropna=True),
+ Series([3, 2], index=[True, False]),
)
tm.assert_series_equal(
- Series([True, True, False, None]).value_counts(dropna=False),
- Series([2, 1, 1], index=[True, np.nan, False]),
+ Series([True] * 5 + [False] * 3 + [None] * 2).value_counts(dropna=False),
+ Series([5, 3, 2], index=[True, False, np.nan]),
)
tm.assert_series_equal(
Series([10.3, 5.0, 5.0]).value_counts(dropna=True),
@@ -1194,26 +1194,24 @@ def test_dropna(self):
Series([2, 1], index=[5.0, 10.3]),
)
- # 32-bit linux has a different ordering
- if IS64:
- result = Series([10.3, 5.0, 5.0, None]).value_counts(dropna=False)
- expected = Series([2, 1, 1], index=[5.0, np.nan, 10.3])
- tm.assert_series_equal(result, expected)
+ result = Series([10.3, 10.3, 5.0, 5.0, 5.0, None]).value_counts(dropna=False)
+ expected = Series([3, 2, 1], index=[5.0, 10.3, np.nan])
+ tm.assert_series_equal(result, expected)
def test_value_counts_normalized(self):
# GH12558
- s = Series([1, 2, np.nan, np.nan, np.nan])
+ s = Series([1] * 2 + [2] * 3 + [np.nan] * 5)
dtypes = (np.float64, object, "M8[ns]")
for t in dtypes:
s_typed = s.astype(t)
result = s_typed.value_counts(normalize=True, dropna=False)
expected = Series(
- [0.6, 0.2, 0.2], index=Series([np.nan, 1.0, 2.0], dtype=t)
+ [0.5, 0.3, 0.2], index=Series([np.nan, 2.0, 1.0], dtype=t)
)
tm.assert_series_equal(result, expected)
result = s_typed.value_counts(normalize=True, dropna=True)
- expected = Series([0.5, 0.5], index=Series([1.0, 2.0], dtype=t))
+ expected = Series([0.6, 0.4], index=Series([2.0, 1.0], dtype=t))
tm.assert_series_equal(result, expected)
def test_value_counts_uint64(self):
| - [x] closes #28303
- [x] tests added / passed
- [x] passes `black pandas`
- [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [x] whatsnew entry
The currently used hash-function can lead to many collisions (see #28303 or [this comment](https://github.com/pandas-dev/pandas/pull/36611#issuecomment-699551943)) for series like 0.0, 1.0, 2.0, ... n.
This PR uses a specialization (for a simple double-value) of [murmur2-hash](https://github.com/aappleby/smhasher/blob/61a0530f28277f2e850bfc39600ce61d02b518de/src/MurmurHash2.cpp#L37), which is used in [stdc++](https://github.com/gcc-mirror/gcc/blob/41d6b10e96a1de98e90a7c0378437c3255814b16/libstdc%2B%2B-v3/libsupc%2B%2B/hash_bytes.cc#L25) and [libc++](https://github.com/llvm/llvm-project/blob/1cfde143e82aeb47cffba436ba7b5302d8e14193/libcxx/include/utility#L977) and more or less state of the art.
An alternative would be to use Python's `_Py_HashDouble`, but because it has the property: `hash(1.0)=1`, `hash(2.0)=2` and so on: there is no desirable [avalanche effect ](https://en.wikipedia.org/wiki/Avalanche_effect), which is not an issue for Python's `dict` implementation, but problematic for khash as it uses a different strategy for collision handling. See #13436 as `_Py_HashDouble` was replaced through the simple hash-function used until now. | https://api.github.com/repos/pandas-dev/pandas/pulls/36729 | 2020-09-29T21:24:54Z | 2020-11-14T16:15:26Z | 2020-11-14T16:15:26Z | 2020-11-14T16:16:15Z |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.