Column schema (name : dtype : value/length range)

id               : int32   : 0 to 252k
repo             : string  : lengths 7 to 55
path             : string  : lengths 4 to 127
func_name        : string  : lengths 1 to 88
original_string  : string  : lengths 75 to 19.8k
language         : string  : 1 class ('python')
code             : string  : lengths 75 to 19.8k
code_tokens      : list
docstring        : string  : lengths 3 to 17.3k
docstring_tokens : list
sha              : string  : lengths 40 to 40
url              : string  : lengths 87 to 242
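For orientation, here is a minimal sketch of how records with this schema could be loaded and inspected, assuming the dump has been exported as JSON Lines (the file name is hypothetical):

import json

# Hypothetical export of the records below, one JSON object per line.
with open("code_records.jsonl") as fh:
    records = [json.loads(line) for line in fh]

for rec in records:
    # Each record pairs a function's source code with its docstring,
    # token lists, and provenance metadata (repo, path, sha, url).
    print(rec["id"], rec["repo"], rec["func_name"], len(rec["code_tokens"]))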
19,500
pandas-dev/pandas
pandas/core/dtypes/inference.py
is_nested_list_like
def is_nested_list_like(obj):
    """
    Check if the object is list-like, and that all of its elements
    are also list-like.

    .. versionadded:: 0.20.0

    Parameters
    ----------
    obj : The object to check

    Returns
    -------
    is_list_like : bool
        Whether `obj` has list-like properties.

    Examples
    --------
    >>> is_nested_list_like([[1, 2, 3]])
    True
    >>> is_nested_list_like([{1, 2, 3}, {1, 2, 3}])
    True
    >>> is_nested_list_like(["foo"])
    False
    >>> is_nested_list_like([])
    False
    >>> is_nested_list_like([[1, 2, 3], 1])
    False

    Notes
    -----
    This won't reliably detect whether a consumable iterator (e.g.
    a generator) is a nested-list-like without consuming the iterator.
    To avoid consuming it, we always return False if the outer container
    doesn't define `__len__`.

    See Also
    --------
    is_list_like
    """
    return (is_list_like(obj) and hasattr(obj, '__len__') and
            len(obj) > 0 and all(is_list_like(item) for item in obj))
python
[ "def", "is_nested_list_like", "(", "obj", ")", ":", "return", "(", "is_list_like", "(", "obj", ")", "and", "hasattr", "(", "obj", ",", "'__len__'", ")", "and", "len", "(", "obj", ")", ">", "0", "and", "all", "(", "is_list_like", "(", "item", ")", "for", "item", "in", "obj", ")", ")" ]
[ "Check", "if", "the", "object", "is", "list", "-", "like", "and", "that", "all", "of", "its", "elements", "are", "also", "list", "-", "like", "." ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/dtypes/inference.py#L329-L370
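A small sketch of the generator caveat from the Notes above, importing from the module path given in this record:

from pandas.core.dtypes.inference import is_nested_list_like

print(is_nested_list_like([[1, 2], [3, 4]]))       # True
# A generator of lists has no __len__, so the function returns False
# rather than consuming the iterator to inspect its elements.
print(is_nested_list_like([x] for x in range(3)))  # False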
19,501
pandas-dev/pandas
pandas/core/dtypes/inference.py
is_dict_like
def is_dict_like(obj):
    """
    Check if the object is dict-like.

    Parameters
    ----------
    obj : The object to check

    Returns
    -------
    is_dict_like : bool
        Whether `obj` has dict-like properties.

    Examples
    --------
    >>> is_dict_like({1: 2})
    True
    >>> is_dict_like([1, 2, 3])
    False
    >>> is_dict_like(dict)
    False
    >>> is_dict_like(dict())
    True
    """
    dict_like_attrs = ("__getitem__", "keys", "__contains__")
    return (all(hasattr(obj, attr) for attr in dict_like_attrs)
            # [GH 25196] exclude classes
            and not isinstance(obj, type))
python
[ "def", "is_dict_like", "(", "obj", ")", ":", "dict_like_attrs", "=", "(", "\"__getitem__\"", ",", "\"keys\"", ",", "\"__contains__\"", ")", "return", "(", "all", "(", "hasattr", "(", "obj", ",", "attr", ")", "for", "attr", "in", "dict_like_attrs", ")", "# [GH 25196] exclude classes", "and", "not", "isinstance", "(", "obj", ",", "type", ")", ")" ]
[ "Check", "if", "the", "object", "is", "dict", "-", "like", "." ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/dtypes/inference.py#L373-L400
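Because the check is pure duck typing over three attributes, any object providing them passes; a minimal sketch with a hypothetical class:

from pandas.core.dtypes.inference import is_dict_like

class FrozenMapping:
    # Hypothetical read-only mapping with just enough dict-like surface.
    def __init__(self, data):
        self._data = dict(data)

    def __getitem__(self, key):
        return self._data[key]

    def keys(self):
        return self._data.keys()

    def __contains__(self, key):
        return key in self._data

print(is_dict_like(FrozenMapping({"a": 1})))  # True
print(is_dict_like(FrozenMapping))            # False: classes excluded (GH 25196)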
19,502
pandas-dev/pandas
pandas/core/dtypes/inference.py
is_sequence
def is_sequence(obj):
    """
    Check if the object is a sequence of objects.
    String types are not included as sequences here.

    Parameters
    ----------
    obj : The object to check

    Returns
    -------
    is_sequence : bool
        Whether `obj` is a sequence of objects.

    Examples
    --------
    >>> l = [1, 2, 3]
    >>>
    >>> is_sequence(l)
    True
    >>> is_sequence(iter(l))
    False
    """
    try:
        iter(obj)  # Can iterate over it.
        len(obj)   # Has a length associated with it.
        return not isinstance(obj, (str, bytes))
    except (TypeError, AttributeError):
        return False
python
[ "def", "is_sequence", "(", "obj", ")", ":", "try", ":", "iter", "(", "obj", ")", "# Can iterate over it.", "len", "(", "obj", ")", "# Has a length associated with it.", "return", "not", "isinstance", "(", "obj", ",", "(", "str", ",", "bytes", ")", ")", "except", "(", "TypeError", ",", "AttributeError", ")", ":", "return", "False" ]
[ "Check", "if", "the", "object", "is", "a", "sequence", "of", "objects", ".", "String", "types", "are", "not", "included", "as", "sequences", "here", "." ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/dtypes/inference.py#L462-L491
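The probe-and-screen logic above means a `range` qualifies while strings and iterators do not; for instance:

from pandas.core.dtypes.inference import is_sequence

print(is_sequence(range(5)))      # True: iterable and has a length
print(is_sequence("abc"))         # False: string types are excluded
print(is_sequence(iter([1, 2])))  # False: len() raises TypeError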
19,503
pandas-dev/pandas
pandas/core/indexes/datetimes.py
date_range
def date_range(start=None, end=None, periods=None, freq=None, tz=None,
               normalize=False, name=None, closed=None, **kwargs):
    """
    Return a fixed frequency DatetimeIndex.

    Parameters
    ----------
    start : str or datetime-like, optional
        Left bound for generating dates.
    end : str or datetime-like, optional
        Right bound for generating dates.
    periods : integer, optional
        Number of periods to generate.
    freq : str or DateOffset, default 'D'
        Frequency strings can have multiples, e.g. '5H'. See
        :ref:`here <timeseries.offset_aliases>` for a list of
        frequency aliases.
    tz : str or tzinfo, optional
        Time zone name for returning localized DatetimeIndex, for example
        'Asia/Hong_Kong'. By default, the resulting DatetimeIndex is
        timezone-naive.
    normalize : bool, default False
        Normalize start/end dates to midnight before generating date range.
    name : str, default None
        Name of the resulting DatetimeIndex.
    closed : {None, 'left', 'right'}, optional
        Make the interval closed with respect to the given frequency to
        the 'left', 'right', or both sides (None, the default).
    **kwargs
        For compatibility. Has no effect on the result.

    Returns
    -------
    rng : DatetimeIndex

    See Also
    --------
    DatetimeIndex : An immutable container for datetimes.
    timedelta_range : Return a fixed frequency TimedeltaIndex.
    period_range : Return a fixed frequency PeriodIndex.
    interval_range : Return a fixed frequency IntervalIndex.

    Notes
    -----
    Of the four parameters ``start``, ``end``, ``periods``, and ``freq``,
    exactly three must be specified. If ``freq`` is omitted, the resulting
    ``DatetimeIndex`` will have ``periods`` linearly spaced elements between
    ``start`` and ``end`` (closed on both sides).

    To learn more about the frequency strings, please see `this link
    <http://pandas.pydata.org/pandas-docs/stable/timeseries.html#offset-aliases>`__.

    Examples
    --------
    **Specifying the values**

    The next four examples generate the same `DatetimeIndex`, but vary
    the combination of `start`, `end` and `periods`.

    Specify `start` and `end`, with the default daily frequency.

    >>> pd.date_range(start='1/1/2018', end='1/08/2018')
    DatetimeIndex(['2018-01-01', '2018-01-02', '2018-01-03', '2018-01-04',
                   '2018-01-05', '2018-01-06', '2018-01-07', '2018-01-08'],
                  dtype='datetime64[ns]', freq='D')

    Specify `start` and `periods`, the number of periods (days).

    >>> pd.date_range(start='1/1/2018', periods=8)
    DatetimeIndex(['2018-01-01', '2018-01-02', '2018-01-03', '2018-01-04',
                   '2018-01-05', '2018-01-06', '2018-01-07', '2018-01-08'],
                  dtype='datetime64[ns]', freq='D')

    Specify `end` and `periods`, the number of periods (days).

    >>> pd.date_range(end='1/1/2018', periods=8)
    DatetimeIndex(['2017-12-25', '2017-12-26', '2017-12-27', '2017-12-28',
                   '2017-12-29', '2017-12-30', '2017-12-31', '2018-01-01'],
                  dtype='datetime64[ns]', freq='D')

    Specify `start`, `end`, and `periods`; the frequency is generated
    automatically (linearly spaced).

    >>> pd.date_range(start='2018-04-24', end='2018-04-27', periods=3)
    DatetimeIndex(['2018-04-24 00:00:00', '2018-04-25 12:00:00',
                   '2018-04-27 00:00:00'],
                  dtype='datetime64[ns]', freq=None)

    **Other Parameters**

    Changed the `freq` (frequency) to ``'M'`` (month end frequency).

    >>> pd.date_range(start='1/1/2018', periods=5, freq='M')
    DatetimeIndex(['2018-01-31', '2018-02-28', '2018-03-31', '2018-04-30',
                   '2018-05-31'],
                  dtype='datetime64[ns]', freq='M')

    Multiples are allowed

    >>> pd.date_range(start='1/1/2018', periods=5, freq='3M')
    DatetimeIndex(['2018-01-31', '2018-04-30', '2018-07-31', '2018-10-31',
                   '2019-01-31'],
                  dtype='datetime64[ns]', freq='3M')

    `freq` can also be specified as an Offset object.

    >>> pd.date_range(start='1/1/2018', periods=5, freq=pd.offsets.MonthEnd(3))
    DatetimeIndex(['2018-01-31', '2018-04-30', '2018-07-31', '2018-10-31',
                   '2019-01-31'],
                  dtype='datetime64[ns]', freq='3M')

    Specify `tz` to set the timezone.

    >>> pd.date_range(start='1/1/2018', periods=5, tz='Asia/Tokyo')
    DatetimeIndex(['2018-01-01 00:00:00+09:00', '2018-01-02 00:00:00+09:00',
                   '2018-01-03 00:00:00+09:00', '2018-01-04 00:00:00+09:00',
                   '2018-01-05 00:00:00+09:00'],
                  dtype='datetime64[ns, Asia/Tokyo]', freq='D')

    `closed` controls whether to include `start` and `end` that are on the
    boundary. The default includes boundary points on either end.

    >>> pd.date_range(start='2017-01-01', end='2017-01-04', closed=None)
    DatetimeIndex(['2017-01-01', '2017-01-02', '2017-01-03', '2017-01-04'],
                  dtype='datetime64[ns]', freq='D')

    Use ``closed='left'`` to exclude `end` if it falls on the boundary.

    >>> pd.date_range(start='2017-01-01', end='2017-01-04', closed='left')
    DatetimeIndex(['2017-01-01', '2017-01-02', '2017-01-03'],
                  dtype='datetime64[ns]', freq='D')

    Use ``closed='right'`` to exclude `start` if it falls on the boundary.

    >>> pd.date_range(start='2017-01-01', end='2017-01-04', closed='right')
    DatetimeIndex(['2017-01-02', '2017-01-03', '2017-01-04'],
                  dtype='datetime64[ns]', freq='D')
    """
    if freq is None and com._any_none(periods, start, end):
        freq = 'D'

    dtarr = DatetimeArray._generate_range(
        start=start, end=end, periods=periods,
        freq=freq, tz=tz, normalize=normalize,
        closed=closed, **kwargs)
    return DatetimeIndex._simple_new(
        dtarr, tz=dtarr.tz, freq=dtarr.freq, name=name)
python
[ "def", "date_range", "(", "start", "=", "None", ",", "end", "=", "None", ",", "periods", "=", "None", ",", "freq", "=", "None", ",", "tz", "=", "None", ",", "normalize", "=", "False", ",", "name", "=", "None", ",", "closed", "=", "None", ",", "*", "*", "kwargs", ")", ":", "if", "freq", "is", "None", "and", "com", ".", "_any_none", "(", "periods", ",", "start", ",", "end", ")", ":", "freq", "=", "'D'", "dtarr", "=", "DatetimeArray", ".", "_generate_range", "(", "start", "=", "start", ",", "end", "=", "end", ",", "periods", "=", "periods", ",", "freq", "=", "freq", ",", "tz", "=", "tz", ",", "normalize", "=", "normalize", ",", "closed", "=", "closed", ",", "*", "*", "kwargs", ")", "return", "DatetimeIndex", ".", "_simple_new", "(", "dtarr", ",", "tz", "=", "dtarr", ".", "tz", ",", "freq", "=", "dtarr", ".", "freq", ",", "name", "=", "name", ")" ]
[ "Return", "a", "fixed", "frequency", "DatetimeIndex", "." ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/indexes/datetimes.py#L1398-L1545
19,504
pandas-dev/pandas
pandas/core/indexes/datetimes.py
bdate_range
def bdate_range(start=None, end=None, periods=None, freq='B', tz=None,
                normalize=True, name=None, weekmask=None, holidays=None,
                closed=None, **kwargs):
    """
    Return a fixed frequency DatetimeIndex, with business day as the default
    frequency.

    Parameters
    ----------
    start : string or datetime-like, default None
        Left bound for generating dates.
    end : string or datetime-like, default None
        Right bound for generating dates.
    periods : integer, default None
        Number of periods to generate.
    freq : string or DateOffset, default 'B' (business daily)
        Frequency strings can have multiples, e.g. '5H'.
    tz : string or None
        Time zone name for returning localized DatetimeIndex, for example
        Asia/Beijing.
    normalize : bool, default False
        Normalize start/end dates to midnight before generating date range.
    name : string, default None
        Name of the resulting DatetimeIndex.
    weekmask : string or None, default None
        Weekmask of valid business days, passed to ``numpy.busdaycalendar``,
        only used when custom frequency strings are passed.  The default
        value None is equivalent to 'Mon Tue Wed Thu Fri'.

        .. versionadded:: 0.21.0

    holidays : list-like or None, default None
        Dates to exclude from the set of valid business days, passed to
        ``numpy.busdaycalendar``, only used when custom frequency strings
        are passed.

        .. versionadded:: 0.21.0

    closed : string, default None
        Make the interval closed with respect to the given frequency to
        the 'left', 'right', or both sides (None).
    **kwargs
        For compatibility. Has no effect on the result.

    Returns
    -------
    DatetimeIndex

    Notes
    -----
    Of the four parameters: ``start``, ``end``, ``periods``, and ``freq``,
    exactly three must be specified.  Specifying ``freq`` is a requirement
    for ``bdate_range``.  Use ``date_range`` if specifying ``freq`` is not
    desired.

    To learn more about the frequency strings, please see `this link
    <http://pandas.pydata.org/pandas-docs/stable/timeseries.html#offset-aliases>`__.

    Examples
    --------
    Note how the two weekend days are skipped in the result.

    >>> pd.bdate_range(start='1/1/2018', end='1/08/2018')
    DatetimeIndex(['2018-01-01', '2018-01-02', '2018-01-03', '2018-01-04',
                   '2018-01-05', '2018-01-08'],
                  dtype='datetime64[ns]', freq='B')
    """
    if freq is None:
        msg = 'freq must be specified for bdate_range; use date_range instead'
        raise TypeError(msg)

    if is_string_like(freq) and freq.startswith('C'):
        try:
            weekmask = weekmask or 'Mon Tue Wed Thu Fri'
            freq = prefix_mapping[freq](holidays=holidays,
                                        weekmask=weekmask)
        except (KeyError, TypeError):
            msg = 'invalid custom frequency string: {freq}'.format(freq=freq)
            raise ValueError(msg)
    elif holidays or weekmask:
        msg = ('a custom frequency string is required when holidays or '
               'weekmask are passed, got frequency {freq}').format(freq=freq)
        raise ValueError(msg)

    return date_range(start=start, end=end, periods=periods,
                      freq=freq, tz=tz, normalize=normalize, name=name,
                      closed=closed, **kwargs)
python
[ "def", "bdate_range", "(", "start", "=", "None", ",", "end", "=", "None", ",", "periods", "=", "None", ",", "freq", "=", "'B'", ",", "tz", "=", "None", ",", "normalize", "=", "True", ",", "name", "=", "None", ",", "weekmask", "=", "None", ",", "holidays", "=", "None", ",", "closed", "=", "None", ",", "*", "*", "kwargs", ")", ":", "if", "freq", "is", "None", ":", "msg", "=", "'freq must be specified for bdate_range; use date_range instead'", "raise", "TypeError", "(", "msg", ")", "if", "is_string_like", "(", "freq", ")", "and", "freq", ".", "startswith", "(", "'C'", ")", ":", "try", ":", "weekmask", "=", "weekmask", "or", "'Mon Tue Wed Thu Fri'", "freq", "=", "prefix_mapping", "[", "freq", "]", "(", "holidays", "=", "holidays", ",", "weekmask", "=", "weekmask", ")", "except", "(", "KeyError", ",", "TypeError", ")", ":", "msg", "=", "'invalid custom frequency string: {freq}'", ".", "format", "(", "freq", "=", "freq", ")", "raise", "ValueError", "(", "msg", ")", "elif", "holidays", "or", "weekmask", ":", "msg", "=", "(", "'a custom frequency string is required when holidays or '", "'weekmask are passed, got frequency {freq}'", ")", ".", "format", "(", "freq", "=", "freq", ")", "raise", "ValueError", "(", "msg", ")", "return", "date_range", "(", "start", "=", "start", ",", "end", "=", "end", ",", "periods", "=", "periods", ",", "freq", "=", "freq", ",", "tz", "=", "tz", ",", "normalize", "=", "normalize", ",", "name", "=", "name", ",", "closed", "=", "closed", ",", "*", "*", "kwargs", ")" ]
[ "Return", "a", "fixed", "frequency", "DatetimeIndex", "with", "business", "day", "as", "the", "default", "frequency" ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/indexes/datetimes.py#L1548-L1633
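A short sketch of the custom-frequency branch above: passing a string starting with 'C' routes `weekmask`/`holidays` into a CustomBusinessDay offset.

import pandas as pd

# 2018-01-05 is a Friday; the weekmask keeps Mon/Wed/Fri only.
idx = pd.bdate_range(start="2018-01-01", end="2018-01-12",
                     freq="C", weekmask="Mon Wed Fri",
                     holidays=["2018-01-05"])
print(idx)  # 2018-01-01, 01-03, 01-08, 01-10, 01-12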
19,505
pandas-dev/pandas
pandas/core/indexes/datetimes.py
cdate_range
def cdate_range(start=None, end=None, periods=None, freq='C', tz=None,
                normalize=True, name=None, closed=None, **kwargs):
    """
    Return a fixed frequency DatetimeIndex, with CustomBusinessDay as the
    default frequency.

    .. deprecated:: 0.21.0

    Parameters
    ----------
    start : string or datetime-like, default None
        Left bound for generating dates
    end : string or datetime-like, default None
        Right bound for generating dates
    periods : integer, default None
        Number of periods to generate
    freq : string or DateOffset, default 'C' (CustomBusinessDay)
        Frequency strings can have multiples, e.g. '5H'
    tz : string, default None
        Time zone name for returning localized DatetimeIndex, for example
        Asia/Beijing
    normalize : bool, default False
        Normalize start/end dates to midnight before generating date range
    name : string, default None
        Name of the resulting DatetimeIndex
    weekmask : string, Default 'Mon Tue Wed Thu Fri'
        weekmask of valid business days, passed to ``numpy.busdaycalendar``
    holidays : list
        list/array of dates to exclude from the set of valid business days,
        passed to ``numpy.busdaycalendar``
    closed : string, default None
        Make the interval closed with respect to the given frequency to
        the 'left', 'right', or both sides (None)

    Notes
    -----
    Of the three parameters: ``start``, ``end``, and ``periods``, exactly
    two must be specified.

    To learn more about the frequency strings, please see `this link
    <http://pandas.pydata.org/pandas-docs/stable/timeseries.html#offset-aliases>`__.

    Returns
    -------
    rng : DatetimeIndex
    """
    warnings.warn("cdate_range is deprecated and will be removed in a future "
                  "version, instead use pd.bdate_range(..., freq='{freq}')"
                  .format(freq=freq), FutureWarning, stacklevel=2)

    if freq == 'C':
        holidays = kwargs.pop('holidays', [])
        weekmask = kwargs.pop('weekmask', 'Mon Tue Wed Thu Fri')
        freq = CDay(holidays=holidays, weekmask=weekmask)

    return date_range(start=start, end=end, periods=periods, freq=freq,
                      tz=tz, normalize=normalize, name=name,
                      closed=closed, **kwargs)
python
[ "def", "cdate_range", "(", "start", "=", "None", ",", "end", "=", "None", ",", "periods", "=", "None", ",", "freq", "=", "'C'", ",", "tz", "=", "None", ",", "normalize", "=", "True", ",", "name", "=", "None", ",", "closed", "=", "None", ",", "*", "*", "kwargs", ")", ":", "warnings", ".", "warn", "(", "\"cdate_range is deprecated and will be removed in a future \"", "\"version, instead use pd.bdate_range(..., freq='{freq}')\"", ".", "format", "(", "freq", "=", "freq", ")", ",", "FutureWarning", ",", "stacklevel", "=", "2", ")", "if", "freq", "==", "'C'", ":", "holidays", "=", "kwargs", ".", "pop", "(", "'holidays'", ",", "[", "]", ")", "weekmask", "=", "kwargs", ".", "pop", "(", "'weekmask'", ",", "'Mon Tue Wed Thu Fri'", ")", "freq", "=", "CDay", "(", "holidays", "=", "holidays", ",", "weekmask", "=", "weekmask", ")", "return", "date_range", "(", "start", "=", "start", ",", "end", "=", "end", ",", "periods", "=", "periods", ",", "freq", "=", "freq", ",", "tz", "=", "tz", ",", "normalize", "=", "normalize", ",", "name", "=", "name", ",", "closed", "=", "closed", ",", "*", "*", "kwargs", ")" ]
[ "Return", "a", "fixed", "frequency", "DatetimeIndex", "with", "CustomBusinessDay", "as", "the", "default", "frequency" ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/indexes/datetimes.py#L1636-L1693
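As the FutureWarning above suggests, the deprecated call maps directly onto `bdate_range`:

import pandas as pd

# Equivalent to cdate_range's defaults: freq='C' with the standard weekmask.
idx = pd.bdate_range(start="2018-01-01", periods=5, freq="C",
                     weekmask="Mon Tue Wed Thu Fri")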
19,506
pandas-dev/pandas
pandas/core/window.py
_Window._create_blocks
def _create_blocks(self):
    """
    Split data into blocks & return conformed data.
    """

    obj, index = self._convert_freq()
    if index is not None:
        index = self._on

    # filter out the on from the object
    if self.on is not None:
        if obj.ndim == 2:
            obj = obj.reindex(columns=obj.columns.difference([self.on]),
                              copy=False)
    blocks = obj._to_dict_of_blocks(copy=False).values()

    return blocks, obj, index
python
[ "def", "_create_blocks", "(", "self", ")", ":", "obj", ",", "index", "=", "self", ".", "_convert_freq", "(", ")", "if", "index", "is", "not", "None", ":", "index", "=", "self", ".", "_on", "# filter out the on from the object", "if", "self", ".", "on", "is", "not", "None", ":", "if", "obj", ".", "ndim", "==", "2", ":", "obj", "=", "obj", ".", "reindex", "(", "columns", "=", "obj", ".", "columns", ".", "difference", "(", "[", "self", ".", "on", "]", ")", ",", "copy", "=", "False", ")", "blocks", "=", "obj", ".", "_to_dict_of_blocks", "(", "copy", "=", "False", ")", ".", "values", "(", ")", "return", "blocks", ",", "obj", ",", "index" ]
[ "Split", "data", "into", "blocks", "&", "return", "conformed", "data", "." ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/window.py#L99-L115
19,507
pandas-dev/pandas
pandas/core/window.py
_Window._get_index
def _get_index(self, index=None):
    """
    Return index as ndarrays.

    Returns
    -------
    tuple of (index, index_as_ndarray)
    """

    if self.is_freq_type:
        if index is None:
            index = self._on
        return index, index.asi8
    return index, index
python
[ "def", "_get_index", "(", "self", ",", "index", "=", "None", ")", ":", "if", "self", ".", "is_freq_type", ":", "if", "index", "is", "None", ":", "index", "=", "self", ".", "_on", "return", "index", ",", "index", ".", "asi8", "return", "index", ",", "index" ]
[ "Return", "index", "as", "ndarrays", "." ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/window.py#L174-L187
19,508
pandas-dev/pandas
pandas/core/window.py
_Window._wrap_result
def _wrap_result(self, result, block=None, obj=None):
    """
    Wrap a single result.
    """

    if obj is None:
        obj = self._selected_obj
    index = obj.index

    if isinstance(result, np.ndarray):

        # coerce if necessary
        if block is not None:
            if is_timedelta64_dtype(block.values.dtype):
                from pandas import to_timedelta
                result = to_timedelta(
                    result.ravel(), unit='ns').values.reshape(result.shape)

        if result.ndim == 1:
            from pandas import Series
            return Series(result, index, name=obj.name)

        return type(obj)(result, index=index, columns=block.columns)
    return result
python
[ "def", "_wrap_result", "(", "self", ",", "result", ",", "block", "=", "None", ",", "obj", "=", "None", ")", ":", "if", "obj", "is", "None", ":", "obj", "=", "self", ".", "_selected_obj", "index", "=", "obj", ".", "index", "if", "isinstance", "(", "result", ",", "np", ".", "ndarray", ")", ":", "# coerce if necessary", "if", "block", "is", "not", "None", ":", "if", "is_timedelta64_dtype", "(", "block", ".", "values", ".", "dtype", ")", ":", "from", "pandas", "import", "to_timedelta", "result", "=", "to_timedelta", "(", "result", ".", "ravel", "(", ")", ",", "unit", "=", "'ns'", ")", ".", "values", ".", "reshape", "(", "result", ".", "shape", ")", "if", "result", ".", "ndim", "==", "1", ":", "from", "pandas", "import", "Series", "return", "Series", "(", "result", ",", "index", ",", "name", "=", "obj", ".", "name", ")", "return", "type", "(", "obj", ")", "(", "result", ",", "index", "=", "index", ",", "columns", "=", "block", ".", "columns", ")", "return", "result" ]
[ "Wrap", "a", "single", "result", "." ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/window.py#L219-L242
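The timedelta branch above round-trips nanosecond integers through `to_timedelta`; in isolation that coercion looks like this:

import numpy as np
import pandas as pd

# One second and two seconds, expressed as nanosecond integers.
raw = np.array([1_000_000_000, 2_000_000_000], dtype="int64")
td = pd.to_timedelta(raw, unit="ns")
print(td)  # 0 days 00:00:01, 0 days 00:00:02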
19,509
pandas-dev/pandas
pandas/core/window.py
_Window._wrap_results
def _wrap_results(self, results, blocks, obj):
    """
    Wrap the results.

    Parameters
    ----------
    results : list of ndarrays
    blocks : list of blocks
    obj : conformed data (may be resampled)
    """

    from pandas import Series, concat
    from pandas.core.index import ensure_index

    final = []
    for result, block in zip(results, blocks):

        result = self._wrap_result(result, block=block, obj=obj)
        if result.ndim == 1:
            return result
        final.append(result)

    # if we have an 'on' column
    # we want to put it back into the results
    # in the same location
    columns = self._selected_obj.columns
    if self.on is not None and not self._on.equals(obj.index):

        name = self._on.name
        final.append(Series(self._on, index=obj.index, name=name))

        if self._selection is not None:

            selection = ensure_index(self._selection)

            # need to reorder to include original location of
            # the on column (if its not already there)
            if name not in selection:
                columns = self.obj.columns
                indexer = columns.get_indexer(selection.tolist() + [name])
                columns = columns.take(sorted(indexer))

    if not len(final):
        return obj.astype('float64')
    return concat(final, axis=1).reindex(columns=columns, copy=False)
python
[ "def", "_wrap_results", "(", "self", ",", "results", ",", "blocks", ",", "obj", ")", ":", "from", "pandas", "import", "Series", ",", "concat", "from", "pandas", ".", "core", ".", "index", "import", "ensure_index", "final", "=", "[", "]", "for", "result", ",", "block", "in", "zip", "(", "results", ",", "blocks", ")", ":", "result", "=", "self", ".", "_wrap_result", "(", "result", ",", "block", "=", "block", ",", "obj", "=", "obj", ")", "if", "result", ".", "ndim", "==", "1", ":", "return", "result", "final", ".", "append", "(", "result", ")", "# if we have an 'on' column", "# we want to put it back into the results", "# in the same location", "columns", "=", "self", ".", "_selected_obj", ".", "columns", "if", "self", ".", "on", "is", "not", "None", "and", "not", "self", ".", "_on", ".", "equals", "(", "obj", ".", "index", ")", ":", "name", "=", "self", ".", "_on", ".", "name", "final", ".", "append", "(", "Series", "(", "self", ".", "_on", ",", "index", "=", "obj", ".", "index", ",", "name", "=", "name", ")", ")", "if", "self", ".", "_selection", "is", "not", "None", ":", "selection", "=", "ensure_index", "(", "self", ".", "_selection", ")", "# need to reorder to include original location of", "# the on column (if its not already there)", "if", "name", "not", "in", "selection", ":", "columns", "=", "self", ".", "obj", ".", "columns", "indexer", "=", "columns", ".", "get_indexer", "(", "selection", ".", "tolist", "(", ")", "+", "[", "name", "]", ")", "columns", "=", "columns", ".", "take", "(", "sorted", "(", "indexer", ")", ")", "if", "not", "len", "(", "final", ")", ":", "return", "obj", ".", "astype", "(", "'float64'", ")", "return", "concat", "(", "final", ",", "axis", "=", "1", ")", ".", "reindex", "(", "columns", "=", "columns", ",", "copy", "=", "False", ")" ]
[ "Wrap", "the", "results", "." ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/window.py#L244-L288
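The on-column bookkeeping above is what lets a time-based rolling keep its `on` column in the output; a quick sketch through the public API:

import pandas as pd

df = pd.DataFrame({
    "t": pd.date_range("2018-01-01", periods=5, freq="D"),
    "x": [1.0, 2.0, 3.0, 4.0, 5.0],
})
# 't' is consumed as the rolling axis, then _wrap_results puts it
# back into the result in its original column position.
print(df.rolling("2D", on="t").sum())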
19,510
pandas-dev/pandas
pandas/core/window.py
_Window._center_window
def _center_window(self, result, window):
    """
    Center the result in the window.
    """
    if self.axis > result.ndim - 1:
        raise ValueError("Requested axis is larger than no. of argument "
                         "dimensions")

    offset = _offset(window, True)
    if offset > 0:
        if isinstance(result, (ABCSeries, ABCDataFrame)):
            result = result.slice_shift(-offset, axis=self.axis)
        else:
            lead_indexer = [slice(None)] * result.ndim
            lead_indexer[self.axis] = slice(offset, None)
            result = np.copy(result[tuple(lead_indexer)])
    return result
python
[ "def", "_center_window", "(", "self", ",", "result", ",", "window", ")", ":", "if", "self", ".", "axis", ">", "result", ".", "ndim", "-", "1", ":", "raise", "ValueError", "(", "\"Requested axis is larger then no. of argument \"", "\"dimensions\"", ")", "offset", "=", "_offset", "(", "window", ",", "True", ")", "if", "offset", ">", "0", ":", "if", "isinstance", "(", "result", ",", "(", "ABCSeries", ",", "ABCDataFrame", ")", ")", ":", "result", "=", "result", ".", "slice_shift", "(", "-", "offset", ",", "axis", "=", "self", ".", "axis", ")", "else", ":", "lead_indexer", "=", "[", "slice", "(", "None", ")", "]", "*", "result", ".", "ndim", "lead_indexer", "[", "self", ".", "axis", "]", "=", "slice", "(", "offset", ",", "None", ")", "result", "=", "np", ".", "copy", "(", "result", "[", "tuple", "(", "lead_indexer", ")", "]", ")", "return", "result" ]
[ "Center", "the", "result", "in", "the", "window", "." ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/window.py#L290-L306
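Through the public API, the shift computed by `_offset(window, True)` shows up as the difference between a trailing and a centered window:

import pandas as pd

s = pd.Series(range(5), dtype=float)
print(s.rolling(3).mean().tolist())               # [nan, nan, 1.0, 2.0, 3.0]
# center=True shifts the result left by _offset(3, True) positions:
print(s.rolling(3, center=True).mean().tolist())  # [nan, 1.0, 2.0, 3.0, nan]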
19,511
pandas-dev/pandas
pandas/core/window.py
Window._prep_window
def _prep_window(self, **kwargs):
    """
    Provide validation for the window type; return the window
    once it has been validated.
    """
    window = self._get_window()
    if isinstance(window, (list, tuple, np.ndarray)):
        return com.asarray_tuplesafe(window).astype(float)
    elif is_integer(window):
        import scipy.signal as sig

        # the below may pop from kwargs
        def _validate_win_type(win_type, kwargs):
            arg_map = {'kaiser': ['beta'],
                       'gaussian': ['std'],
                       'general_gaussian': ['power', 'width'],
                       'slepian': ['width']}
            if win_type in arg_map:
                return tuple([win_type] + _pop_args(win_type,
                                                    arg_map[win_type],
                                                    kwargs))
            return win_type

        def _pop_args(win_type, arg_names, kwargs):
            msg = '%s window requires %%s' % win_type
            all_args = []
            for n in arg_names:
                if n not in kwargs:
                    raise ValueError(msg % n)
                all_args.append(kwargs.pop(n))
            return all_args

        win_type = _validate_win_type(self.win_type, kwargs)
        # GH #15662. `False` makes symmetric window, rather than periodic.
        return sig.get_window(win_type, window, False).astype(float)
python
[ "def", "_prep_window", "(", "self", ",", "*", "*", "kwargs", ")", ":", "window", "=", "self", ".", "_get_window", "(", ")", "if", "isinstance", "(", "window", ",", "(", "list", ",", "tuple", ",", "np", ".", "ndarray", ")", ")", ":", "return", "com", ".", "asarray_tuplesafe", "(", "window", ")", ".", "astype", "(", "float", ")", "elif", "is_integer", "(", "window", ")", ":", "import", "scipy", ".", "signal", "as", "sig", "# the below may pop from kwargs", "def", "_validate_win_type", "(", "win_type", ",", "kwargs", ")", ":", "arg_map", "=", "{", "'kaiser'", ":", "[", "'beta'", "]", ",", "'gaussian'", ":", "[", "'std'", "]", ",", "'general_gaussian'", ":", "[", "'power'", ",", "'width'", "]", ",", "'slepian'", ":", "[", "'width'", "]", "}", "if", "win_type", "in", "arg_map", ":", "return", "tuple", "(", "[", "win_type", "]", "+", "_pop_args", "(", "win_type", ",", "arg_map", "[", "win_type", "]", ",", "kwargs", ")", ")", "return", "win_type", "def", "_pop_args", "(", "win_type", ",", "arg_names", ",", "kwargs", ")", ":", "msg", "=", "'%s window requires %%s'", "%", "win_type", "all_args", "=", "[", "]", "for", "n", "in", "arg_names", ":", "if", "n", "not", "in", "kwargs", ":", "raise", "ValueError", "(", "msg", "%", "n", ")", "all_args", ".", "append", "(", "kwargs", ".", "pop", "(", "n", ")", ")", "return", "all_args", "win_type", "=", "_validate_win_type", "(", "self", ".", "win_type", ",", "kwargs", ")", "# GH #15662. `False` makes symmetric window, rather than periodic.", "return", "sig", ".", "get_window", "(", "win_type", ",", "window", ",", "False", ")", ".", "astype", "(", "float", ")" ]
[ "Provide", "validation", "for", "our", "window", "type", "return", "the", "window", "we", "have", "already", "been", "validated", "." ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/window.py#L609-L644
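The `arg_map` above is why some window types demand extra keyword arguments; for example (SciPy must be installed, since the method imports `scipy.signal`):

import pandas as pd

s = pd.Series(range(10), dtype=float)
# 'gaussian' is in arg_map, so _pop_args insists on a `std` kwarg;
# omitting it raises ValueError("gaussian window requires std").
print(s.rolling(5, win_type="gaussian").mean(std=1.0))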
19,512
pandas-dev/pandas
pandas/core/window.py
Window._apply_window
def _apply_window(self, mean=True, **kwargs):
    """
    Applies a moving window of type ``window_type`` on the data.

    Parameters
    ----------
    mean : bool, default True
        If True computes weighted mean, else weighted sum

    Returns
    -------
    y : same type as input argument
    """
    window = self._prep_window(**kwargs)
    center = self.center

    blocks, obj, index = self._create_blocks()
    results = []
    for b in blocks:
        try:
            values = self._prep_values(b.values)
        except TypeError:
            results.append(b.values.copy())
            continue

        if values.size == 0:
            results.append(values.copy())
            continue

        offset = _offset(window, center)
        additional_nans = np.array([np.NaN] * offset)

        def f(arg, *args, **kwargs):
            minp = _use_window(self.min_periods, len(window))
            return libwindow.roll_window(np.concatenate((arg,
                                                         additional_nans))
                                         if center else arg, window, minp,
                                         avg=mean)

        result = np.apply_along_axis(f, self.axis, values)

        if center:
            result = self._center_window(result, window)
        results.append(result)

    return self._wrap_results(results, blocks, obj)
python
def _apply_window(self, mean=True, **kwargs): """ Applies a moving window of type ``window_type`` on the data. Parameters ---------- mean : bool, default True If True computes weighted mean, else weighted sum Returns ------- y : same type as input argument """ window = self._prep_window(**kwargs) center = self.center blocks, obj, index = self._create_blocks() results = [] for b in blocks: try: values = self._prep_values(b.values) except TypeError: results.append(b.values.copy()) continue if values.size == 0: results.append(values.copy()) continue offset = _offset(window, center) additional_nans = np.array([np.NaN] * offset) def f(arg, *args, **kwargs): minp = _use_window(self.min_periods, len(window)) return libwindow.roll_window(np.concatenate((arg, additional_nans)) if center else arg, window, minp, avg=mean) result = np.apply_along_axis(f, self.axis, values) if center: result = self._center_window(result, window) results.append(result) return self._wrap_results(results, blocks, obj)
[ "def", "_apply_window", "(", "self", ",", "mean", "=", "True", ",", "*", "*", "kwargs", ")", ":", "window", "=", "self", ".", "_prep_window", "(", "*", "*", "kwargs", ")", "center", "=", "self", ".", "center", "blocks", ",", "obj", ",", "index", "=", "self", ".", "_create_blocks", "(", ")", "results", "=", "[", "]", "for", "b", "in", "blocks", ":", "try", ":", "values", "=", "self", ".", "_prep_values", "(", "b", ".", "values", ")", "except", "TypeError", ":", "results", ".", "append", "(", "b", ".", "values", ".", "copy", "(", ")", ")", "continue", "if", "values", ".", "size", "==", "0", ":", "results", ".", "append", "(", "values", ".", "copy", "(", ")", ")", "continue", "offset", "=", "_offset", "(", "window", ",", "center", ")", "additional_nans", "=", "np", ".", "array", "(", "[", "np", ".", "NaN", "]", "*", "offset", ")", "def", "f", "(", "arg", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "minp", "=", "_use_window", "(", "self", ".", "min_periods", ",", "len", "(", "window", ")", ")", "return", "libwindow", ".", "roll_window", "(", "np", ".", "concatenate", "(", "(", "arg", ",", "additional_nans", ")", ")", "if", "center", "else", "arg", ",", "window", ",", "minp", ",", "avg", "=", "mean", ")", "result", "=", "np", ".", "apply_along_axis", "(", "f", ",", "self", ".", "axis", ",", "values", ")", "if", "center", ":", "result", "=", "self", ".", "_center_window", "(", "result", ",", "window", ")", "results", ".", "append", "(", "result", ")", "return", "self", ".", "_wrap_results", "(", "results", ",", "blocks", ",", "obj", ")" ]
Applies a moving window of type ``window_type`` on the data. Parameters ---------- mean : bool, default True If True computes weighted mean, else weighted sum Returns ------- y : same type as input argument
[ "Applies", "a", "moving", "window", "of", "type", "window_type", "on", "the", "data", "." ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/window.py#L646-L692
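Editor's note: for a window type that takes no extra keywords, the weighted mean computed by _apply_window is reachable directly (illustrative sketch, assuming SciPy is installed):

import numpy as np
import pandas as pd

s = pd.Series(np.arange(6, dtype=float))
# .mean() on a win_type rolling computes the weighted mean (mean=True);
# .sum() would compute the weighted sum (mean=False).
print(s.rolling(window=3, win_type='triang').mean())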
19,513
pandas-dev/pandas
pandas/core/window.py
_GroupByMixin._apply
def _apply(self, func, name, window=None, center=None, check_minp=None, **kwargs): """ Dispatch to apply; we are stripping all of the _apply kwargs and performing the original function call on the grouped object. """ def f(x, name=name, *args): x = self._shallow_copy(x) if isinstance(name, str): return getattr(x, name)(*args, **kwargs) return x.apply(name, *args, **kwargs) return self._groupby.apply(f)
python
def _apply(self, func, name, window=None, center=None, check_minp=None, **kwargs): """ Dispatch to apply; we are stripping all of the _apply kwargs and performing the original function call on the grouped object. """ def f(x, name=name, *args): x = self._shallow_copy(x) if isinstance(name, str): return getattr(x, name)(*args, **kwargs) return x.apply(name, *args, **kwargs) return self._groupby.apply(f)
[ "def", "_apply", "(", "self", ",", "func", ",", "name", ",", "window", "=", "None", ",", "center", "=", "None", ",", "check_minp", "=", "None", ",", "*", "*", "kwargs", ")", ":", "def", "f", "(", "x", ",", "name", "=", "name", ",", "*", "args", ")", ":", "x", "=", "self", ".", "_shallow_copy", "(", "x", ")", "if", "isinstance", "(", "name", ",", "str", ")", ":", "return", "getattr", "(", "x", ",", "name", ")", "(", "*", "args", ",", "*", "*", "kwargs", ")", "return", "x", ".", "apply", "(", "name", ",", "*", "args", ",", "*", "*", "kwargs", ")", "return", "self", ".", "_groupby", ".", "apply", "(", "f", ")" ]
Dispatch to apply; we are stripping all of the _apply kwargs and performing the original function call on the grouped object.
[ "Dispatch", "to", "apply", ";", "we", "are", "stripping", "all", "of", "the", "_apply", "kwargs", "and", "performing", "the", "original", "function", "call", "on", "the", "grouped", "object", "." ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/window.py#L782-L797
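Editor's note: what the dispatch buys you in practice is that each group gets its own window, instead of one window rolling across group boundaries (our sketch, not from the record):

import pandas as pd

df = pd.DataFrame({'g': ['a', 'a', 'b', 'b'], 'x': [1.0, 2.0, 3.0, 4.0]})
# 'sum' is re-applied per group via the groupby apply shown above.
print(df.groupby('g')['x'].rolling(2).sum())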
19,514
pandas-dev/pandas
pandas/core/window.py
_Rolling._apply
def _apply(self, func, name=None, window=None, center=None, check_minp=None, **kwargs): """ Rolling statistical measure using supplied function. Designed to be used with passed-in Cython array-based functions. Parameters ---------- func : str/callable to apply name : str, optional name of this function window : int/array, default to _get_window() center : bool, default to self.center check_minp : function, default to _use_window Returns ------- y : type of input """ if center is None: center = self.center if window is None: window = self._get_window() if check_minp is None: check_minp = _use_window blocks, obj, index = self._create_blocks() index, indexi = self._get_index(index=index) results = [] for b in blocks: values = self._prep_values(b.values) if values.size == 0: results.append(values.copy()) continue # if we have a string function name, wrap it if isinstance(func, str): cfunc = getattr(libwindow, func, None) if cfunc is None: raise ValueError("we do not support this function " "in libwindow.{func}".format(func=func)) def func(arg, window, min_periods=None, closed=None): minp = check_minp(min_periods, window) # ensure we are only rolling on floats arg = ensure_float64(arg) return cfunc(arg, window, minp, indexi, closed, **kwargs) # calculation function if center: offset = _offset(window, center) additional_nans = np.array([np.NaN] * offset) def calc(x): return func(np.concatenate((x, additional_nans)), window, min_periods=self.min_periods, closed=self.closed) else: def calc(x): return func(x, window, min_periods=self.min_periods, closed=self.closed) with np.errstate(all='ignore'): if values.ndim > 1: result = np.apply_along_axis(calc, self.axis, values) else: result = calc(values) if center: result = self._center_window(result, window) results.append(result) return self._wrap_results(results, blocks, obj)
python
def _apply(self, func, name=None, window=None, center=None, check_minp=None, **kwargs): """ Rolling statistical measure using supplied function. Designed to be used with passed-in Cython array-based functions. Parameters ---------- func : str/callable to apply name : str, optional name of this function window : int/array, default to _get_window() center : bool, default to self.center check_minp : function, default to _use_window Returns ------- y : type of input """ if center is None: center = self.center if window is None: window = self._get_window() if check_minp is None: check_minp = _use_window blocks, obj, index = self._create_blocks() index, indexi = self._get_index(index=index) results = [] for b in blocks: values = self._prep_values(b.values) if values.size == 0: results.append(values.copy()) continue # if we have a string function name, wrap it if isinstance(func, str): cfunc = getattr(libwindow, func, None) if cfunc is None: raise ValueError("we do not support this function " "in libwindow.{func}".format(func=func)) def func(arg, window, min_periods=None, closed=None): minp = check_minp(min_periods, window) # ensure we are only rolling on floats arg = ensure_float64(arg) return cfunc(arg, window, minp, indexi, closed, **kwargs) # calculation function if center: offset = _offset(window, center) additional_nans = np.array([np.NaN] * offset) def calc(x): return func(np.concatenate((x, additional_nans)), window, min_periods=self.min_periods, closed=self.closed) else: def calc(x): return func(x, window, min_periods=self.min_periods, closed=self.closed) with np.errstate(all='ignore'): if values.ndim > 1: result = np.apply_along_axis(calc, self.axis, values) else: result = calc(values) if center: result = self._center_window(result, window) results.append(result) return self._wrap_results(results, blocks, obj)
[ "def", "_apply", "(", "self", ",", "func", ",", "name", "=", "None", ",", "window", "=", "None", ",", "center", "=", "None", ",", "check_minp", "=", "None", ",", "*", "*", "kwargs", ")", ":", "if", "center", "is", "None", ":", "center", "=", "self", ".", "center", "if", "window", "is", "None", ":", "window", "=", "self", ".", "_get_window", "(", ")", "if", "check_minp", "is", "None", ":", "check_minp", "=", "_use_window", "blocks", ",", "obj", ",", "index", "=", "self", ".", "_create_blocks", "(", ")", "index", ",", "indexi", "=", "self", ".", "_get_index", "(", "index", "=", "index", ")", "results", "=", "[", "]", "for", "b", "in", "blocks", ":", "values", "=", "self", ".", "_prep_values", "(", "b", ".", "values", ")", "if", "values", ".", "size", "==", "0", ":", "results", ".", "append", "(", "values", ".", "copy", "(", ")", ")", "continue", "# if we have a string function name, wrap it", "if", "isinstance", "(", "func", ",", "str", ")", ":", "cfunc", "=", "getattr", "(", "libwindow", ",", "func", ",", "None", ")", "if", "cfunc", "is", "None", ":", "raise", "ValueError", "(", "\"we do not support this function \"", "\"in libwindow.{func}\"", ".", "format", "(", "func", "=", "func", ")", ")", "def", "func", "(", "arg", ",", "window", ",", "min_periods", "=", "None", ",", "closed", "=", "None", ")", ":", "minp", "=", "check_minp", "(", "min_periods", ",", "window", ")", "# ensure we are only rolling on floats", "arg", "=", "ensure_float64", "(", "arg", ")", "return", "cfunc", "(", "arg", ",", "window", ",", "minp", ",", "indexi", ",", "closed", ",", "*", "*", "kwargs", ")", "# calculation function", "if", "center", ":", "offset", "=", "_offset", "(", "window", ",", "center", ")", "additional_nans", "=", "np", ".", "array", "(", "[", "np", ".", "NaN", "]", "*", "offset", ")", "def", "calc", "(", "x", ")", ":", "return", "func", "(", "np", ".", "concatenate", "(", "(", "x", ",", "additional_nans", ")", ")", ",", "window", ",", "min_periods", "=", "self", ".", "min_periods", ",", "closed", "=", "self", ".", "closed", ")", "else", ":", "def", "calc", "(", "x", ")", ":", "return", "func", "(", "x", ",", "window", ",", "min_periods", "=", "self", ".", "min_periods", ",", "closed", "=", "self", ".", "closed", ")", "with", "np", ".", "errstate", "(", "all", "=", "'ignore'", ")", ":", "if", "values", ".", "ndim", ">", "1", ":", "result", "=", "np", ".", "apply_along_axis", "(", "calc", ",", "self", ".", "axis", ",", "values", ")", "else", ":", "result", "=", "calc", "(", "values", ")", "if", "center", ":", "result", "=", "self", ".", "_center_window", "(", "result", ",", "window", ")", "results", ".", "append", "(", "result", ")", "return", "self", ".", "_wrap_results", "(", "results", ",", "blocks", ",", "obj", ")" ]
Rolling statistical measure using supplied function. Designed to be used with passed-in Cython array-based functions. Parameters ---------- func : str/callable to apply name : str, optional name of this function window : int/array, default to _get_window() center : bool, default to self.center check_minp : function, default to _use_window Returns ------- y : type of input
[ "Rolling", "statistical", "measure", "using", "supplied", "function", "." ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/window.py#L806-L884
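Editor's note: the center handling above (pad with NaNs, compute, then re-center) is visible from the result alignment (sketch, ours):

import pandas as pd

s = pd.Series([1.0, 2.0, 3.0, 4.0, 5.0])
print(s.rolling(3).mean())               # result labeled at the window's end
print(s.rolling(3, center=True).mean())  # same values, shifted to the middle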
19,515
pandas-dev/pandas
pandas/core/window.py
Rolling._validate_monotonic
def _validate_monotonic(self): """ Validate on is_monotonic. """ if not self._on.is_monotonic: formatted = self.on or 'index' raise ValueError("{0} must be " "monotonic".format(formatted))
python
def _validate_monotonic(self): """ Validate on is_monotonic. """ if not self._on.is_monotonic: formatted = self.on or 'index' raise ValueError("{0} must be " "monotonic".format(formatted))
[ "def", "_validate_monotonic", "(", "self", ")", ":", "if", "not", "self", ".", "_on", ".", "is_monotonic", ":", "formatted", "=", "self", ".", "on", "or", "'index'", "raise", "ValueError", "(", "\"{0} must be \"", "\"monotonic\"", ".", "format", "(", "formatted", ")", ")" ]
Validate that the ``on`` column or the index is monotonic.
[ "Validate", "on", "is_monotonic", "." ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/window.py#L1602-L1609
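Editor's note: a minimal reproduction of the check, assuming a datetime column passed via ``on`` (our sketch; the exact message comes from the format string above):

import pandas as pd

df = pd.DataFrame({'t': pd.to_datetime(['2019-01-02', '2019-01-01']),
                   'x': [1.0, 2.0]})
try:
    df.rolling('1D', on='t').sum()
except ValueError as err:
    print(err)  # "t must be monotonic"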
19,516
pandas-dev/pandas
pandas/core/window.py
Rolling._validate_freq
def _validate_freq(self): """ Validate & return window frequency. """ from pandas.tseries.frequencies import to_offset try: return to_offset(self.window) except (TypeError, ValueError): raise ValueError("passed window {0} is not " "compatible with a datetimelike " "index".format(self.window))
python
def _validate_freq(self): """ Validate & return window frequency. """ from pandas.tseries.frequencies import to_offset try: return to_offset(self.window) except (TypeError, ValueError): raise ValueError("passed window {0} is not " "compatible with a datetimelike " "index".format(self.window))
[ "def", "_validate_freq", "(", "self", ")", ":", "from", "pandas", ".", "tseries", ".", "frequencies", "import", "to_offset", "try", ":", "return", "to_offset", "(", "self", ".", "window", ")", "except", "(", "TypeError", ",", "ValueError", ")", ":", "raise", "ValueError", "(", "\"passed window {0} is not \"", "\"compatible with a datetimelike \"", "\"index\"", ".", "format", "(", "self", ".", "window", ")", ")" ]
Validate & return window frequency.
[ "Validate", "&", "return", "window", "frequency", "." ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/window.py#L1611-L1621
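Editor's note: to_offset is the same converter pandas uses for offset aliases everywhere else, so a string window such as '2s' parses cleanly (sketch, ours):

from pandas.tseries.frequencies import to_offset

print(to_offset('2s'))  # <2 * Seconds>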
19,517
pandas-dev/pandas
pandas/core/window.py
Expanding._get_window
def _get_window(self, other=None): """ Get the window length over which to perform some operation. Parameters ---------- other : object, default None The other object that is involved in the operation. Such an object is involved for operations like covariance. Returns ------- window : int The window length. """ axis = self.obj._get_axis(self.axis) length = len(axis) + (other is not None) * len(axis) other = self.min_periods or -1 return max(length, other)
python
def _get_window(self, other=None): """ Get the window length over which to perform some operation. Parameters ---------- other : object, default None The other object that is involved in the operation. Such an object is involved for operations like covariance. Returns ------- window : int The window length. """ axis = self.obj._get_axis(self.axis) length = len(axis) + (other is not None) * len(axis) other = self.min_periods or -1 return max(length, other)
[ "def", "_get_window", "(", "self", ",", "other", "=", "None", ")", ":", "axis", "=", "self", ".", "obj", ".", "_get_axis", "(", "self", ".", "axis", ")", "length", "=", "len", "(", "axis", ")", "+", "(", "other", "is", "not", "None", ")", "*", "len", "(", "axis", ")", "other", "=", "self", ".", "min_periods", "or", "-", "1", "return", "max", "(", "length", ",", "other", ")" ]
Get the window length over which to perform some operation. Parameters ---------- other : object, default None The other object that is involved in the operation. Such an object is involved for operations like covariance. Returns ------- window : int The window length.
[ "Get", "the", "window", "length", "over", "which", "to", "perform", "some", "operation", "." ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/window.py#L1888-L1907
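Editor's note: the length arithmetic above is why an expanding window behaves like a rolling window spanning the whole axis (sketch, ours):

import pandas as pd

s = pd.Series([1.0, 2.0, 3.0, 4.0])
print(s.expanding(min_periods=2).sum())
print(s.rolling(window=len(s), min_periods=2).sum())  # identical result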
19,518
pandas-dev/pandas
pandas/core/window.py
EWM._apply
def _apply(self, func, **kwargs): """ Rolling statistical measure using supplied function. Designed to be used with passed-in Cython array-based functions. Parameters ---------- func : str/callable to apply Returns ------- y : same type as input argument """ blocks, obj, index = self._create_blocks() results = [] for b in blocks: try: values = self._prep_values(b.values) except TypeError: results.append(b.values.copy()) continue if values.size == 0: results.append(values.copy()) continue # if we have a string function name, wrap it if isinstance(func, str): cfunc = getattr(libwindow, func, None) if cfunc is None: raise ValueError("we do not support this function " "in libwindow.{func}".format(func=func)) def func(arg): return cfunc(arg, self.com, int(self.adjust), int(self.ignore_na), int(self.min_periods)) results.append(np.apply_along_axis(func, self.axis, values)) return self._wrap_results(results, blocks, obj)
python
def _apply(self, func, **kwargs): """ Rolling statistical measure using supplied function. Designed to be used with passed-in Cython array-based functions. Parameters ---------- func : str/callable to apply Returns ------- y : same type as input argument """ blocks, obj, index = self._create_blocks() results = [] for b in blocks: try: values = self._prep_values(b.values) except TypeError: results.append(b.values.copy()) continue if values.size == 0: results.append(values.copy()) continue # if we have a string function name, wrap it if isinstance(func, str): cfunc = getattr(libwindow, func, None) if cfunc is None: raise ValueError("we do not support this function " "in libwindow.{func}".format(func=func)) def func(arg): return cfunc(arg, self.com, int(self.adjust), int(self.ignore_na), int(self.min_periods)) results.append(np.apply_along_axis(func, self.axis, values)) return self._wrap_results(results, blocks, obj)
[ "def", "_apply", "(", "self", ",", "func", ",", "*", "*", "kwargs", ")", ":", "blocks", ",", "obj", ",", "index", "=", "self", ".", "_create_blocks", "(", ")", "results", "=", "[", "]", "for", "b", "in", "blocks", ":", "try", ":", "values", "=", "self", ".", "_prep_values", "(", "b", ".", "values", ")", "except", "TypeError", ":", "results", ".", "append", "(", "b", ".", "values", ".", "copy", "(", ")", ")", "continue", "if", "values", ".", "size", "==", "0", ":", "results", ".", "append", "(", "values", ".", "copy", "(", ")", ")", "continue", "# if we have a string function name, wrap it", "if", "isinstance", "(", "func", ",", "str", ")", ":", "cfunc", "=", "getattr", "(", "libwindow", ",", "func", ",", "None", ")", "if", "cfunc", "is", "None", ":", "raise", "ValueError", "(", "\"we do not support this function \"", "\"in libwindow.{func}\"", ".", "format", "(", "func", "=", "func", ")", ")", "def", "func", "(", "arg", ")", ":", "return", "cfunc", "(", "arg", ",", "self", ".", "com", ",", "int", "(", "self", ".", "adjust", ")", ",", "int", "(", "self", ".", "ignore_na", ")", ",", "int", "(", "self", ".", "min_periods", ")", ")", "results", ".", "append", "(", "np", ".", "apply_along_axis", "(", "func", ",", "self", ".", "axis", ",", "values", ")", ")", "return", "self", ".", "_wrap_results", "(", "results", ",", "blocks", ",", "obj", ")" ]
Rolling statistical measure using supplied function. Designed to be used with passed-in Cython array-based functions. Parameters ---------- func : str/callable to apply Returns ------- y : same type as input argument
[ "Rolling", "statistical", "measure", "using", "supplied", "function", ".", "Designed", "to", "be", "used", "with", "passed", "-", "in", "Cython", "array", "-", "based", "functions", "." ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/window.py#L2269-L2308
19,519
pandas-dev/pandas
pandas/core/window.py
EWM.mean
def mean(self, *args, **kwargs): """ Exponential weighted moving average. Parameters ---------- *args, **kwargs Arguments and keyword arguments to be passed into func. """ nv.validate_window_func('mean', args, kwargs) return self._apply('ewma', **kwargs)
python
def mean(self, *args, **kwargs): """ Exponential weighted moving average. Parameters ---------- *args, **kwargs Arguments and keyword arguments to be passed into func. """ nv.validate_window_func('mean', args, kwargs) return self._apply('ewma', **kwargs)
[ "def", "mean", "(", "self", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "nv", ".", "validate_window_func", "(", "'mean'", ",", "args", ",", "kwargs", ")", "return", "self", ".", "_apply", "(", "'ewma'", ",", "*", "*", "kwargs", ")" ]
Exponential weighted moving average. Parameters ---------- *args, **kwargs Arguments and keyword arguments to be passed into func.
[ "Exponential", "weighted", "moving", "average", "." ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/window.py#L2312-L2322
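Editor's note: a small hand check of the adjusted weighting (our sketch; adjust=True is the default):

import pandas as pd

s = pd.Series([1.0, 2.0, 3.0])
# com=0.5 gives alpha = 1 / (1 + com) = 2/3; with adjust=True the weights
# on [x0, x1, x2] at t=2 are [(1-a)**2, (1-a), 1], normalized.
print(s.ewm(com=0.5).mean())
a = 2 / 3
w = [(1 - a) ** 2, (1 - a), 1.0]
print(sum(wi * xi for wi, xi in zip(w, [1.0, 2.0, 3.0])) / sum(w))  # 2.6153...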
19,520
pandas-dev/pandas
pandas/core/window.py
EWM.std
def std(self, bias=False, *args, **kwargs): """ Exponential weighted moving stddev. """ nv.validate_window_func('std', args, kwargs) return _zsqrt(self.var(bias=bias, **kwargs))
python
def std(self, bias=False, *args, **kwargs): """ Exponential weighted moving stddev. """ nv.validate_window_func('std', args, kwargs) return _zsqrt(self.var(bias=bias, **kwargs))
[ "def", "std", "(", "self", ",", "bias", "=", "False", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "nv", ".", "validate_window_func", "(", "'std'", ",", "args", ",", "kwargs", ")", "return", "_zsqrt", "(", "self", ".", "var", "(", "bias", "=", "bias", ",", "*", "*", "kwargs", ")", ")" ]
Exponential weighted moving stddev.
[ "Exponential", "weighted", "moving", "stddev", "." ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/window.py#L2327-L2332
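Editor's note: since std is defined as _zsqrt of var, squaring it recovers the variance (sketch, ours):

import numpy as np
import pandas as pd

s = pd.Series(np.random.default_rng(0).standard_normal(20))
std = s.ewm(span=5).std()
var = s.ewm(span=5).var()
print(np.allclose(std.dropna() ** 2, var.dropna()))  # True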
19,521
pandas-dev/pandas
pandas/core/window.py
EWM.var
def var(self, bias=False, *args, **kwargs): """ Exponential weighted moving variance. """ nv.validate_window_func('var', args, kwargs) def f(arg): return libwindow.ewmcov(arg, arg, self.com, int(self.adjust), int(self.ignore_na), int(self.min_periods), int(bias)) return self._apply(f, **kwargs)
python
def var(self, bias=False, *args, **kwargs): """ Exponential weighted moving variance. """ nv.validate_window_func('var', args, kwargs) def f(arg): return libwindow.ewmcov(arg, arg, self.com, int(self.adjust), int(self.ignore_na), int(self.min_periods), int(bias)) return self._apply(f, **kwargs)
[ "def", "var", "(", "self", ",", "bias", "=", "False", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "nv", ".", "validate_window_func", "(", "'var'", ",", "args", ",", "kwargs", ")", "def", "f", "(", "arg", ")", ":", "return", "libwindow", ".", "ewmcov", "(", "arg", ",", "arg", ",", "self", ".", "com", ",", "int", "(", "self", ".", "adjust", ")", ",", "int", "(", "self", ".", "ignore_na", ")", ",", "int", "(", "self", ".", "min_periods", ")", ",", "int", "(", "bias", ")", ")", "return", "self", ".", "_apply", "(", "f", ",", "*", "*", "kwargs", ")" ]
Exponential weighted moving variance.
[ "Exponential", "weighted", "moving", "variance", "." ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/window.py#L2339-L2350
19,522
pandas-dev/pandas
pandas/core/window.py
EWM.cov
def cov(self, other=None, pairwise=None, bias=False, **kwargs): """ Exponential weighted sample covariance. """ if other is None: other = self._selected_obj # only default unset pairwise = True if pairwise is None else pairwise other = self._shallow_copy(other) def _get_cov(X, Y): X = self._shallow_copy(X) Y = self._shallow_copy(Y) cov = libwindow.ewmcov(X._prep_values(), Y._prep_values(), self.com, int(self.adjust), int(self.ignore_na), int(self.min_periods), int(bias)) return X._wrap_result(cov) return _flex_binary_moment(self._selected_obj, other._selected_obj, _get_cov, pairwise=bool(pairwise))
python
def cov(self, other=None, pairwise=None, bias=False, **kwargs): """ Exponential weighted sample covariance. """ if other is None: other = self._selected_obj # only default unset pairwise = True if pairwise is None else pairwise other = self._shallow_copy(other) def _get_cov(X, Y): X = self._shallow_copy(X) Y = self._shallow_copy(Y) cov = libwindow.ewmcov(X._prep_values(), Y._prep_values(), self.com, int(self.adjust), int(self.ignore_na), int(self.min_periods), int(bias)) return X._wrap_result(cov) return _flex_binary_moment(self._selected_obj, other._selected_obj, _get_cov, pairwise=bool(pairwise))
[ "def", "cov", "(", "self", ",", "other", "=", "None", ",", "pairwise", "=", "None", ",", "bias", "=", "False", ",", "*", "*", "kwargs", ")", ":", "if", "other", "is", "None", ":", "other", "=", "self", ".", "_selected_obj", "# only default unset", "pairwise", "=", "True", "if", "pairwise", "is", "None", "else", "pairwise", "other", "=", "self", ".", "_shallow_copy", "(", "other", ")", "def", "_get_cov", "(", "X", ",", "Y", ")", ":", "X", "=", "self", ".", "_shallow_copy", "(", "X", ")", "Y", "=", "self", ".", "_shallow_copy", "(", "Y", ")", "cov", "=", "libwindow", ".", "ewmcov", "(", "X", ".", "_prep_values", "(", ")", ",", "Y", ".", "_prep_values", "(", ")", ",", "self", ".", "com", ",", "int", "(", "self", ".", "adjust", ")", ",", "int", "(", "self", ".", "ignore_na", ")", ",", "int", "(", "self", ".", "min_periods", ")", ",", "int", "(", "bias", ")", ")", "return", "X", ".", "_wrap_result", "(", "cov", ")", "return", "_flex_binary_moment", "(", "self", ".", "_selected_obj", ",", "other", ".", "_selected_obj", ",", "_get_cov", ",", "pairwise", "=", "bool", "(", "pairwise", ")", ")" ]
Exponential weighted sample covariance.
[ "Exponential", "weighted", "sample", "covariance", "." ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/window.py#L2355-L2375
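Editor's note: with no ``other`` and ``pairwise`` left unset, the pairwise branch produces the full covariance matrix at each time step, stacked into a MultiIndexed result (ours):

import pandas as pd

df = pd.DataFrame({'a': [1.0, 2.0, 3.0, 4.0], 'b': [4.0, 3.0, 2.0, 1.0]})
print(df.ewm(span=3).cov())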
19,523
pandas-dev/pandas
pandas/core/window.py
EWM.corr
def corr(self, other=None, pairwise=None, **kwargs): """ Exponential weighted sample correlation. """ if other is None: other = self._selected_obj # only default unset pairwise = True if pairwise is None else pairwise other = self._shallow_copy(other) def _get_corr(X, Y): X = self._shallow_copy(X) Y = self._shallow_copy(Y) def _cov(x, y): return libwindow.ewmcov(x, y, self.com, int(self.adjust), int(self.ignore_na), int(self.min_periods), 1) x_values = X._prep_values() y_values = Y._prep_values() with np.errstate(all='ignore'): cov = _cov(x_values, y_values) x_var = _cov(x_values, x_values) y_var = _cov(y_values, y_values) corr = cov / _zsqrt(x_var * y_var) return X._wrap_result(corr) return _flex_binary_moment(self._selected_obj, other._selected_obj, _get_corr, pairwise=bool(pairwise))
python
def corr(self, other=None, pairwise=None, **kwargs): """ Exponential weighted sample correlation. """ if other is None: other = self._selected_obj # only default unset pairwise = True if pairwise is None else pairwise other = self._shallow_copy(other) def _get_corr(X, Y): X = self._shallow_copy(X) Y = self._shallow_copy(Y) def _cov(x, y): return libwindow.ewmcov(x, y, self.com, int(self.adjust), int(self.ignore_na), int(self.min_periods), 1) x_values = X._prep_values() y_values = Y._prep_values() with np.errstate(all='ignore'): cov = _cov(x_values, y_values) x_var = _cov(x_values, x_values) y_var = _cov(y_values, y_values) corr = cov / _zsqrt(x_var * y_var) return X._wrap_result(corr) return _flex_binary_moment(self._selected_obj, other._selected_obj, _get_corr, pairwise=bool(pairwise))
[ "def", "corr", "(", "self", ",", "other", "=", "None", ",", "pairwise", "=", "None", ",", "*", "*", "kwargs", ")", ":", "if", "other", "is", "None", ":", "other", "=", "self", ".", "_selected_obj", "# only default unset", "pairwise", "=", "True", "if", "pairwise", "is", "None", "else", "pairwise", "other", "=", "self", ".", "_shallow_copy", "(", "other", ")", "def", "_get_corr", "(", "X", ",", "Y", ")", ":", "X", "=", "self", ".", "_shallow_copy", "(", "X", ")", "Y", "=", "self", ".", "_shallow_copy", "(", "Y", ")", "def", "_cov", "(", "x", ",", "y", ")", ":", "return", "libwindow", ".", "ewmcov", "(", "x", ",", "y", ",", "self", ".", "com", ",", "int", "(", "self", ".", "adjust", ")", ",", "int", "(", "self", ".", "ignore_na", ")", ",", "int", "(", "self", ".", "min_periods", ")", ",", "1", ")", "x_values", "=", "X", ".", "_prep_values", "(", ")", "y_values", "=", "Y", ".", "_prep_values", "(", ")", "with", "np", ".", "errstate", "(", "all", "=", "'ignore'", ")", ":", "cov", "=", "_cov", "(", "x_values", ",", "y_values", ")", "x_var", "=", "_cov", "(", "x_values", ",", "x_values", ")", "y_var", "=", "_cov", "(", "y_values", ",", "y_values", ")", "corr", "=", "cov", "/", "_zsqrt", "(", "x_var", "*", "y_var", ")", "return", "X", ".", "_wrap_result", "(", "corr", ")", "return", "_flex_binary_moment", "(", "self", ".", "_selected_obj", ",", "other", ".", "_selected_obj", ",", "_get_corr", ",", "pairwise", "=", "bool", "(", "pairwise", ")", ")" ]
Exponential weighted sample correlation.
[ "Exponential", "weighted", "sample", "correlation", "." ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/window.py#L2380-L2410
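Editor's note: perfectly anti-correlated inputs make the cov / sqrt(x_var * y_var) normalization easy to eyeball (ours):

import pandas as pd

a = pd.Series([1.0, 2.0, 3.0, 4.0])
b = pd.Series([4.0, 3.0, 2.0, 1.0])
print(a.ewm(span=3).corr(b))  # -1.0 once enough observations accumulate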
19,524
pandas-dev/pandas
pandas/core/panel.py
_ensure_like_indices
def _ensure_like_indices(time, panels): """ Makes sure that time and panels are conformable. """ n_time = len(time) n_panel = len(panels) u_panels = np.unique(panels) # this sorts! u_time = np.unique(time) if len(u_time) == n_time: time = np.tile(u_time, len(u_panels)) if len(u_panels) == n_panel: panels = np.repeat(u_panels, len(u_time)) return time, panels
python
def _ensure_like_indices(time, panels): """ Makes sure that time and panels are conformable. """ n_time = len(time) n_panel = len(panels) u_panels = np.unique(panels) # this sorts! u_time = np.unique(time) if len(u_time) == n_time: time = np.tile(u_time, len(u_panels)) if len(u_panels) == n_panel: panels = np.repeat(u_panels, len(u_time)) return time, panels
[ "def", "_ensure_like_indices", "(", "time", ",", "panels", ")", ":", "n_time", "=", "len", "(", "time", ")", "n_panel", "=", "len", "(", "panels", ")", "u_panels", "=", "np", ".", "unique", "(", "panels", ")", "# this sorts!", "u_time", "=", "np", ".", "unique", "(", "time", ")", "if", "len", "(", "u_time", ")", "==", "n_time", ":", "time", "=", "np", ".", "tile", "(", "u_time", ",", "len", "(", "u_panels", ")", ")", "if", "len", "(", "u_panels", ")", "==", "n_panel", ":", "panels", "=", "np", ".", "repeat", "(", "u_panels", ",", "len", "(", "u_time", ")", ")", "return", "time", ",", "panels" ]
Makes sure that time and panels are conformable.
[ "Makes", "sure", "that", "time", "and", "panels", "are", "conformable", "." ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/panel.py#L45-L57
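Editor's note: the tile/repeat conforming, shown directly in NumPy (ours; note that np.unique sorts, as the inline comment warns):

import numpy as np

time = np.array([1960, 1961, 1962])
panels = np.array(['B', 'A'])
print(np.tile(np.unique(time), 2))      # [1960 1961 1962 1960 1961 1962]
print(np.repeat(np.unique(panels), 3))  # ['A' 'A' 'A' 'B' 'B' 'B']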
19,525
pandas-dev/pandas
pandas/core/panel.py
panel_index
def panel_index(time, panels, names=None): """ Returns a multi-index suitable for a panel-like DataFrame. Parameters ---------- time : array-like Time index, does not have to repeat panels : array-like Panel index, does not have to repeat names : list, optional List containing the names of the indices Returns ------- multi_index : MultiIndex Time index is the first level, the panels are the second level. Examples -------- >>> years = range(1960,1963) >>> panels = ['A', 'B', 'C'] >>> panel_idx = panel_index(years, panels) >>> panel_idx MultiIndex([(1960, 'A'), (1961, 'A'), (1962, 'A'), (1960, 'B'), (1961, 'B'), (1962, 'B'), (1960, 'C'), (1961, 'C'), (1962, 'C')], dtype=object) or >>> years = np.repeat(range(1960,1963), 3) >>> panels = np.tile(['A', 'B', 'C'], 3) >>> panel_idx = panel_index(years, panels) >>> panel_idx MultiIndex([(1960, 'A'), (1960, 'B'), (1960, 'C'), (1961, 'A'), (1961, 'B'), (1961, 'C'), (1962, 'A'), (1962, 'B'), (1962, 'C')], dtype=object) """ if names is None: names = ['time', 'panel'] time, panels = _ensure_like_indices(time, panels) return MultiIndex.from_arrays([time, panels], sortorder=None, names=names)
python
def panel_index(time, panels, names=None): """ Returns a multi-index suitable for a panel-like DataFrame. Parameters ---------- time : array-like Time index, does not have to repeat panels : array-like Panel index, does not have to repeat names : list, optional List containing the names of the indices Returns ------- multi_index : MultiIndex Time index is the first level, the panels are the second level. Examples -------- >>> years = range(1960,1963) >>> panels = ['A', 'B', 'C'] >>> panel_idx = panel_index(years, panels) >>> panel_idx MultiIndex([(1960, 'A'), (1961, 'A'), (1962, 'A'), (1960, 'B'), (1961, 'B'), (1962, 'B'), (1960, 'C'), (1961, 'C'), (1962, 'C')], dtype=object) or >>> years = np.repeat(range(1960,1963), 3) >>> panels = np.tile(['A', 'B', 'C'], 3) >>> panel_idx = panel_index(years, panels) >>> panel_idx MultiIndex([(1960, 'A'), (1960, 'B'), (1960, 'C'), (1961, 'A'), (1961, 'B'), (1961, 'C'), (1962, 'A'), (1962, 'B'), (1962, 'C')], dtype=object) """ if names is None: names = ['time', 'panel'] time, panels = _ensure_like_indices(time, panels) return MultiIndex.from_arrays([time, panels], sortorder=None, names=names)
[ "def", "panel_index", "(", "time", ",", "panels", ",", "names", "=", "None", ")", ":", "if", "names", "is", "None", ":", "names", "=", "[", "'time'", ",", "'panel'", "]", "time", ",", "panels", "=", "_ensure_like_indices", "(", "time", ",", "panels", ")", "return", "MultiIndex", ".", "from_arrays", "(", "[", "time", ",", "panels", "]", ",", "sortorder", "=", "None", ",", "names", "=", "names", ")" ]
Returns a multi-index suitable for a panel-like DataFrame. Parameters ---------- time : array-like Time index, does not have to repeat panels : array-like Panel index, does not have to repeat names : list, optional List containing the names of the indices Returns ------- multi_index : MultiIndex Time index is the first level, the panels are the second level. Examples -------- >>> years = range(1960,1963) >>> panels = ['A', 'B', 'C'] >>> panel_idx = panel_index(years, panels) >>> panel_idx MultiIndex([(1960, 'A'), (1961, 'A'), (1962, 'A'), (1960, 'B'), (1961, 'B'), (1962, 'B'), (1960, 'C'), (1961, 'C'), (1962, 'C')], dtype=object) or >>> years = np.repeat(range(1960,1963), 3) >>> panels = np.tile(['A', 'B', 'C'], 3) >>> panel_idx = panel_index(years, panels) >>> panel_idx MultiIndex([(1960, 'A'), (1960, 'B'), (1960, 'C'), (1961, 'A'), (1961, 'B'), (1961, 'C'), (1962, 'A'), (1962, 'B'), (1962, 'C')], dtype=object)
[ "Returns", "a", "multi", "-", "index", "suitable", "for", "a", "panel", "-", "like", "DataFrame", "." ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/panel.py#L60-L101
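Editor's note: Panel itself was removed in pandas 1.0, but the same index is available via MultiIndex directly (our equivalent sketch, matching the second docstring example):

import pandas as pd

idx = pd.MultiIndex.from_product([range(1960, 1963), ['A', 'B', 'C']],
                                 names=['time', 'panel'])
print(idx[:3])  # (1960, 'A'), (1960, 'B'), (1960, 'C')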
19,526
pandas-dev/pandas
pandas/core/panel.py
Panel.from_dict
def from_dict(cls, data, intersect=False, orient='items', dtype=None): """ Construct Panel from dict of DataFrame objects. Parameters ---------- data : dict {field : DataFrame} intersect : boolean Intersect indexes of input DataFrames orient : {'items', 'minor'}, default 'items' The "orientation" of the data. If the keys of the passed dict should be the items of the result panel, pass 'items' (default). Otherwise if the columns of the values of the passed DataFrame objects should be the items (which in the case of mixed-dtype data you should do), instead pass 'minor' dtype : dtype, default None Data type to force, otherwise infer Returns ------- Panel """ from collections import defaultdict orient = orient.lower() if orient == 'minor': new_data = defaultdict(OrderedDict) for col, df in data.items(): for item, s in df.items(): new_data[item][col] = s data = new_data elif orient != 'items': # pragma: no cover raise ValueError('Orientation must be one of {items, minor}.') d = cls._homogenize_dict(cls, data, intersect=intersect, dtype=dtype) ks = list(d['data'].keys()) if not isinstance(d['data'], OrderedDict): ks = list(sorted(ks)) d[cls._info_axis_name] = Index(ks) return cls(**d)
python
def from_dict(cls, data, intersect=False, orient='items', dtype=None): """ Construct Panel from dict of DataFrame objects. Parameters ---------- data : dict {field : DataFrame} intersect : boolean Intersect indexes of input DataFrames orient : {'items', 'minor'}, default 'items' The "orientation" of the data. If the keys of the passed dict should be the items of the result panel, pass 'items' (default). Otherwise if the columns of the values of the passed DataFrame objects should be the items (which in the case of mixed-dtype data you should do), instead pass 'minor' dtype : dtype, default None Data type to force, otherwise infer Returns ------- Panel """ from collections import defaultdict orient = orient.lower() if orient == 'minor': new_data = defaultdict(OrderedDict) for col, df in data.items(): for item, s in df.items(): new_data[item][col] = s data = new_data elif orient != 'items': # pragma: no cover raise ValueError('Orientation must be one of {items, minor}.') d = cls._homogenize_dict(cls, data, intersect=intersect, dtype=dtype) ks = list(d['data'].keys()) if not isinstance(d['data'], OrderedDict): ks = list(sorted(ks)) d[cls._info_axis_name] = Index(ks) return cls(**d)
[ "def", "from_dict", "(", "cls", ",", "data", ",", "intersect", "=", "False", ",", "orient", "=", "'items'", ",", "dtype", "=", "None", ")", ":", "from", "collections", "import", "defaultdict", "orient", "=", "orient", ".", "lower", "(", ")", "if", "orient", "==", "'minor'", ":", "new_data", "=", "defaultdict", "(", "OrderedDict", ")", "for", "col", ",", "df", "in", "data", ".", "items", "(", ")", ":", "for", "item", ",", "s", "in", "df", ".", "items", "(", ")", ":", "new_data", "[", "item", "]", "[", "col", "]", "=", "s", "data", "=", "new_data", "elif", "orient", "!=", "'items'", ":", "# pragma: no cover", "raise", "ValueError", "(", "'Orientation must be one of {items, minor}.'", ")", "d", "=", "cls", ".", "_homogenize_dict", "(", "cls", ",", "data", ",", "intersect", "=", "intersect", ",", "dtype", "=", "dtype", ")", "ks", "=", "list", "(", "d", "[", "'data'", "]", ".", "keys", "(", ")", ")", "if", "not", "isinstance", "(", "d", "[", "'data'", "]", ",", "OrderedDict", ")", ":", "ks", "=", "list", "(", "sorted", "(", "ks", ")", ")", "d", "[", "cls", ".", "_info_axis_name", "]", "=", "Index", "(", "ks", ")", "return", "cls", "(", "*", "*", "d", ")" ]
Construct Panel from dict of DataFrame objects. Parameters ---------- data : dict {field : DataFrame} intersect : boolean Intersect indexes of input DataFrames orient : {'items', 'minor'}, default 'items' The "orientation" of the data. If the keys of the passed dict should be the items of the result panel, pass 'items' (default). Otherwise if the columns of the values of the passed DataFrame objects should be the items (which in the case of mixed-dtype data you should do), instead pass 'minor' dtype : dtype, default None Data type to force, otherwise infer Returns ------- Panel
[ "Construct", "Panel", "from", "dict", "of", "DataFrame", "objects", "." ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/panel.py#L239-L279
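Editor's note: on modern pandas the dict-of-DataFrames idea maps onto concat, whose keys play the role of the items axis (our approximation, not a drop-in replacement):

import pandas as pd

data = {'one': pd.DataFrame({'a': [1, 2]}), 'two': pd.DataFrame({'a': [3, 4]})}
wide = pd.concat(data, names=['item'])  # dict keys become the outer index level
print(wide)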
19,527
pandas-dev/pandas
pandas/core/panel.py
Panel.to_excel
def to_excel(self, path, na_rep='', engine=None, **kwargs): """ Write each DataFrame in Panel to a separate excel sheet. Parameters ---------- path : string or ExcelWriter object File path or existing ExcelWriter na_rep : string, default '' Missing data representation engine : string, default None write engine to use - you can also set this via the options ``io.excel.xlsx.writer``, ``io.excel.xls.writer``, and ``io.excel.xlsm.writer``. Other Parameters ---------------- float_format : string, default None Format string for floating point numbers cols : sequence, optional Columns to write header : boolean or list of string, default True Write out column names. If a list of string is given it is assumed to be aliases for the column names index : boolean, default True Write row names (index) index_label : string or sequence, default None Column label for index column(s) if desired. If None is given, and `header` and `index` are True, then the index names are used. A sequence should be given if the DataFrame uses MultiIndex. startrow : upper left cell row to dump data frame startcol : upper left cell column to dump data frame Notes ----- Keyword arguments (and na_rep) are passed to the ``to_excel`` method for each DataFrame written. """ from pandas.io.excel import ExcelWriter if isinstance(path, str): writer = ExcelWriter(path, engine=engine) else: writer = path kwargs['na_rep'] = na_rep for item, df in self.iteritems(): name = str(item) df.to_excel(writer, name, **kwargs) writer.save()
python
def to_excel(self, path, na_rep='', engine=None, **kwargs): """ Write each DataFrame in Panel to a separate excel sheet. Parameters ---------- path : string or ExcelWriter object File path or existing ExcelWriter na_rep : string, default '' Missing data representation engine : string, default None write engine to use - you can also set this via the options ``io.excel.xlsx.writer``, ``io.excel.xls.writer``, and ``io.excel.xlsm.writer``. Other Parameters ---------------- float_format : string, default None Format string for floating point numbers cols : sequence, optional Columns to write header : boolean or list of string, default True Write out column names. If a list of string is given it is assumed to be aliases for the column names index : boolean, default True Write row names (index) index_label : string or sequence, default None Column label for index column(s) if desired. If None is given, and `header` and `index` are True, then the index names are used. A sequence should be given if the DataFrame uses MultiIndex. startrow : upper left cell row to dump data frame startcol : upper left cell column to dump data frame Notes ----- Keyword arguments (and na_rep) are passed to the ``to_excel`` method for each DataFrame written. """ from pandas.io.excel import ExcelWriter if isinstance(path, str): writer = ExcelWriter(path, engine=engine) else: writer = path kwargs['na_rep'] = na_rep for item, df in self.iteritems(): name = str(item) df.to_excel(writer, name, **kwargs) writer.save()
[ "def", "to_excel", "(", "self", ",", "path", ",", "na_rep", "=", "''", ",", "engine", "=", "None", ",", "*", "*", "kwargs", ")", ":", "from", "pandas", ".", "io", ".", "excel", "import", "ExcelWriter", "if", "isinstance", "(", "path", ",", "str", ")", ":", "writer", "=", "ExcelWriter", "(", "path", ",", "engine", "=", "engine", ")", "else", ":", "writer", "=", "path", "kwargs", "[", "'na_rep'", "]", "=", "na_rep", "for", "item", ",", "df", "in", "self", ".", "iteritems", "(", ")", ":", "name", "=", "str", "(", "item", ")", "df", ".", "to_excel", "(", "writer", ",", "name", ",", "*", "*", "kwargs", ")", "writer", ".", "save", "(", ")" ]
Write each DataFrame in Panel to a separate excel sheet. Parameters ---------- path : string or ExcelWriter object File path or existing ExcelWriter na_rep : string, default '' Missing data representation engine : string, default None write engine to use - you can also set this via the options ``io.excel.xlsx.writer``, ``io.excel.xls.writer``, and ``io.excel.xlsm.writer``. Other Parameters ---------------- float_format : string, default None Format string for floating point numbers cols : sequence, optional Columns to write header : boolean or list of string, default True Write out column names. If a list of string is given it is assumed to be aliases for the column names index : boolean, default True Write row names (index) index_label : string or sequence, default None Column label for index column(s) if desired. If None is given, and `header` and `index` are True, then the index names are used. A sequence should be given if the DataFrame uses MultiIndex. startrow : upper left cell row to dump data frame startcol : upper left cell column to dump data frame Notes ----- Keyword arguments (and na_rep) are passed to the ``to_excel`` method for each DataFrame written.
[ "Write", "each", "DataFrame", "in", "Panel", "to", "a", "separate", "excel", "sheet", "." ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/panel.py#L409-L458
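Editor's note: the one-sheet-per-frame loop survives with plain DataFrames (ours; assumes an Excel engine such as openpyxl is installed, and 'frames.xlsx' is a placeholder path):

import pandas as pd

frames = {'first': pd.DataFrame({'a': [1, 2]}),
          'second': pd.DataFrame({'b': [3, 4]})}
with pd.ExcelWriter('frames.xlsx') as writer:
    for name, df in frames.items():
        df.to_excel(writer, sheet_name=name, na_rep='')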
19,528
pandas-dev/pandas
pandas/core/panel.py
Panel._unpickle_panel_compat
def _unpickle_panel_compat(self, state): # pragma: no cover """ Unpickle the panel. """ from pandas.io.pickle import _unpickle_array _unpickle = _unpickle_array vals, items, major, minor = state items = _unpickle(items) major = _unpickle(major) minor = _unpickle(minor) values = _unpickle(vals) wp = Panel(values, items, major, minor) self._data = wp._data
python
def _unpickle_panel_compat(self, state): # pragma: no cover """ Unpickle the panel. """ from pandas.io.pickle import _unpickle_array _unpickle = _unpickle_array vals, items, major, minor = state items = _unpickle(items) major = _unpickle(major) minor = _unpickle(minor) values = _unpickle(vals) wp = Panel(values, items, major, minor) self._data = wp._data
[ "def", "_unpickle_panel_compat", "(", "self", ",", "state", ")", ":", "# pragma: no cover", "from", "pandas", ".", "io", ".", "pickle", "import", "_unpickle_array", "_unpickle", "=", "_unpickle_array", "vals", ",", "items", ",", "major", ",", "minor", "=", "state", "items", "=", "_unpickle", "(", "items", ")", "major", "=", "_unpickle", "(", "major", ")", "minor", "=", "_unpickle", "(", "minor", ")", "values", "=", "_unpickle", "(", "vals", ")", "wp", "=", "Panel", "(", "values", ",", "items", ",", "major", ",", "minor", ")", "self", ".", "_data", "=", "wp", ".", "_data" ]
Unpickle the panel.
[ "Unpickle", "the", "panel", "." ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/panel.py#L615-L629
19,529
pandas-dev/pandas
pandas/core/panel.py
Panel.conform
def conform(self, frame, axis='items'): """ Conform input DataFrame to align with chosen axis pair. Parameters ---------- frame : DataFrame axis : {'items', 'major', 'minor'} Axis the input corresponds to. E.g., if axis='major', then the frame's columns would be items, and the index would be values of the minor axis Returns ------- DataFrame """ axes = self._get_plane_axes(axis) return frame.reindex(**self._extract_axes_for_slice(self, axes))
python
def conform(self, frame, axis='items'): """ Conform input DataFrame to align with chosen axis pair. Parameters ---------- frame : DataFrame axis : {'items', 'major', 'minor'} Axis the input corresponds to. E.g., if axis='major', then the frame's columns would be items, and the index would be values of the minor axis Returns ------- DataFrame """ axes = self._get_plane_axes(axis) return frame.reindex(**self._extract_axes_for_slice(self, axes))
[ "def", "conform", "(", "self", ",", "frame", ",", "axis", "=", "'items'", ")", ":", "axes", "=", "self", ".", "_get_plane_axes", "(", "axis", ")", "return", "frame", ".", "reindex", "(", "*", "*", "self", ".", "_extract_axes_for_slice", "(", "self", ",", "axes", ")", ")" ]
Conform input DataFrame to align with chosen axis pair. Parameters ---------- frame : DataFrame axis : {'items', 'major', 'minor'} Axis the input corresponds to. E.g., if axis='major', then the frame's columns would be items, and the index would be values of the minor axis Returns ------- DataFrame
[ "Conform", "input", "DataFrame", "to", "align", "with", "chosen", "axis", "pair", "." ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/panel.py#L631-L649
19,530
pandas-dev/pandas
pandas/core/panel.py
Panel.round
def round(self, decimals=0, *args, **kwargs): """ Round each value in Panel to a specified number of decimal places. .. versionadded:: 0.18.0 Parameters ---------- decimals : int Number of decimal places to round to (default: 0). If decimals is negative, it specifies the number of positions to the left of the decimal point. Returns ------- Panel object See Also -------- numpy.around """ nv.validate_round(args, kwargs) if is_integer(decimals): result = np.apply_along_axis(np.round, 0, self.values) return self._wrap_result(result, axis=0) raise TypeError("decimals must be an integer")
python
def round(self, decimals=0, *args, **kwargs): """ Round each value in Panel to a specified number of decimal places. .. versionadded:: 0.18.0 Parameters ---------- decimals : int Number of decimal places to round to (default: 0). If decimals is negative, it specifies the number of positions to the left of the decimal point. Returns ------- Panel object See Also -------- numpy.around """ nv.validate_round(args, kwargs) if is_integer(decimals): result = np.apply_along_axis(np.round, 0, self.values) return self._wrap_result(result, axis=0) raise TypeError("decimals must be an integer")
[ "def", "round", "(", "self", ",", "decimals", "=", "0", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "nv", ".", "validate_round", "(", "args", ",", "kwargs", ")", "if", "is_integer", "(", "decimals", ")", ":", "result", "=", "np", ".", "apply_along_axis", "(", "np", ".", "round", ",", "0", ",", "self", ".", "values", ")", "return", "self", ".", "_wrap_result", "(", "result", ",", "axis", "=", "0", ")", "raise", "TypeError", "(", "\"decimals must be an integer\"", ")" ]
Round each value in Panel to a specified number of decimal places. .. versionadded:: 0.18.0 Parameters ---------- decimals : int Number of decimal places to round to (default: 0). If decimals is negative, it specifies the number of positions to the left of the decimal point. Returns ------- Panel object See Also -------- numpy.around
[ "Round", "each", "value", "in", "Panel", "to", "a", "specified", "number", "of", "decimal", "places", "." ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/panel.py#L657-L683
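Editor's note: negative decimals round to the left of the decimal point, exactly as np.round does (ours):

import numpy as np

print(np.round(1234.5678, 2))   # 1234.57
print(np.round(1234.5678, -2))  # 1200.0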
19,531
pandas-dev/pandas
pandas/core/panel.py
Panel.dropna
def dropna(self, axis=0, how='any', inplace=False): """ Drop 2D from panel, holding passed axis constant. Parameters ---------- axis : int, default 0 Axis to hold constant. E.g. axis=1 will drop major_axis entries having a certain amount of NA data how : {'all', 'any'}, default 'any' 'any': one or more values are NA in the DataFrame along the axis. For 'all' they all must be. inplace : bool, default False If True, do operation inplace and return None. Returns ------- dropped : Panel """ axis = self._get_axis_number(axis) values = self.values mask = notna(values) for ax in reversed(sorted(set(range(self._AXIS_LEN)) - {axis})): mask = mask.sum(ax) per_slice = np.prod(values.shape[:axis] + values.shape[axis + 1:]) if how == 'all': cond = mask > 0 else: cond = mask == per_slice new_ax = self._get_axis(axis)[cond] result = self.reindex_axis(new_ax, axis=axis) if inplace: self._update_inplace(result) else: return result
python
def dropna(self, axis=0, how='any', inplace=False): """ Drop 2D from panel, holding passed axis constant. Parameters ---------- axis : int, default 0 Axis to hold constant. E.g. axis=1 will drop major_axis entries having a certain amount of NA data how : {'all', 'any'}, default 'any' 'any': one or more values are NA in the DataFrame along the axis. For 'all' they all must be. inplace : bool, default False If True, do operation inplace and return None. Returns ------- dropped : Panel """ axis = self._get_axis_number(axis) values = self.values mask = notna(values) for ax in reversed(sorted(set(range(self._AXIS_LEN)) - {axis})): mask = mask.sum(ax) per_slice = np.prod(values.shape[:axis] + values.shape[axis + 1:]) if how == 'all': cond = mask > 0 else: cond = mask == per_slice new_ax = self._get_axis(axis)[cond] result = self.reindex_axis(new_ax, axis=axis) if inplace: self._update_inplace(result) else: return result
[ "def", "dropna", "(", "self", ",", "axis", "=", "0", ",", "how", "=", "'any'", ",", "inplace", "=", "False", ")", ":", "axis", "=", "self", ".", "_get_axis_number", "(", "axis", ")", "values", "=", "self", ".", "values", "mask", "=", "notna", "(", "values", ")", "for", "ax", "in", "reversed", "(", "sorted", "(", "set", "(", "range", "(", "self", ".", "_AXIS_LEN", ")", ")", "-", "{", "axis", "}", ")", ")", ":", "mask", "=", "mask", ".", "sum", "(", "ax", ")", "per_slice", "=", "np", ".", "prod", "(", "values", ".", "shape", "[", ":", "axis", "]", "+", "values", ".", "shape", "[", "axis", "+", "1", ":", "]", ")", "if", "how", "==", "'all'", ":", "cond", "=", "mask", ">", "0", "else", ":", "cond", "=", "mask", "==", "per_slice", "new_ax", "=", "self", ".", "_get_axis", "(", "axis", ")", "[", "cond", "]", "result", "=", "self", ".", "reindex_axis", "(", "new_ax", ",", "axis", "=", "axis", ")", "if", "inplace", ":", "self", ".", "_update_inplace", "(", "result", ")", "else", ":", "return", "result" ]
Drop 2D slices from the panel, holding the passed axis constant.

Parameters
----------
axis : int, default 0
    Axis to hold constant. E.g. axis=1 will drop major_axis entries
    having a certain amount of NA data
how : {'all', 'any'}, default 'any'
    'any': one or more values are NA in the DataFrame along the axis. For
    'all' they all must be.
inplace : bool, default False
    If True, do operation inplace and return None.

Returns
-------
dropped : Panel
[ "Drop", "2D", "from", "panel", "holding", "passed", "axis", "constant", "." ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/panel.py#L694-L733
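Editor's note: the 'any'/'all' semantics, demonstrated on the 2-D case that Panel generalizes (ours):

import numpy as np
import pandas as pd

df = pd.DataFrame({'a': [1.0, np.nan], 'b': [np.nan, np.nan]})
print(df.dropna(axis=1, how='any'))  # drops both 'a' and 'b'
print(df.dropna(axis=1, how='all'))  # drops only 'b'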
19,532
pandas-dev/pandas
pandas/core/panel.py
Panel.xs
def xs(self, key, axis=1): """ Return slice of panel along selected axis. Parameters ---------- key : object Label axis : {'items', 'major', 'minor}, default 1/'major' Returns ------- y : ndim(self)-1 Notes ----- xs is only for getting, not setting values. MultiIndex Slicers is a generic way to get/set values on any level or levels and is a superset of xs functionality, see :ref:`MultiIndex Slicers <advanced.mi_slicers>` """ axis = self._get_axis_number(axis) if axis == 0: return self[key] self._consolidate_inplace() axis_number = self._get_axis_number(axis) new_data = self._data.xs(key, axis=axis_number, copy=False) result = self._construct_return_type(new_data) copy = new_data.is_mixed_type result._set_is_copy(self, copy=copy) return result
python
def xs(self, key, axis=1): """ Return slice of panel along selected axis. Parameters ---------- key : object Label axis : {'items', 'major', 'minor}, default 1/'major' Returns ------- y : ndim(self)-1 Notes ----- xs is only for getting, not setting values. MultiIndex Slicers is a generic way to get/set values on any level or levels and is a superset of xs functionality, see :ref:`MultiIndex Slicers <advanced.mi_slicers>` """ axis = self._get_axis_number(axis) if axis == 0: return self[key] self._consolidate_inplace() axis_number = self._get_axis_number(axis) new_data = self._data.xs(key, axis=axis_number, copy=False) result = self._construct_return_type(new_data) copy = new_data.is_mixed_type result._set_is_copy(self, copy=copy) return result
[ "def", "xs", "(", "self", ",", "key", ",", "axis", "=", "1", ")", ":", "axis", "=", "self", ".", "_get_axis_number", "(", "axis", ")", "if", "axis", "==", "0", ":", "return", "self", "[", "key", "]", "self", ".", "_consolidate_inplace", "(", ")", "axis_number", "=", "self", ".", "_get_axis_number", "(", "axis", ")", "new_data", "=", "self", ".", "_data", ".", "xs", "(", "key", ",", "axis", "=", "axis_number", ",", "copy", "=", "False", ")", "result", "=", "self", ".", "_construct_return_type", "(", "new_data", ")", "copy", "=", "new_data", ".", "is_mixed_type", "result", ".", "_set_is_copy", "(", "self", ",", "copy", "=", "copy", ")", "return", "result" ]
Return slice of panel along selected axis.

Parameters
----------
key : object
    Label
axis : {'items', 'major', 'minor'}, default 1/'major'

Returns
-------
y : ndim(self)-1

Notes
-----
xs is only for getting, not setting values.

MultiIndex Slicers is a generic way to get/set values on any level or
levels and is a superset of xs functionality, see
:ref:`MultiIndex Slicers <advanced.mi_slicers>`
[ "Return", "slice", "of", "panel", "along", "selected", "axis", "." ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/panel.py#L834-L866
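A short sketch of xs on each of the three axes, again assuming a pre-0.25 pandas with Panel available:

import numpy as np
import pandas as pd

p = pd.Panel(np.arange(24.).reshape(2, 3, 4), items=['i0', 'i1'])

p.xs('i0', axis='items')   # same as p['i0']: a 3x4 DataFrame
p.xs(0, axis='major')      # 2D slice with the first major label held fixed
p.xs(1, axis='minor')      # 2D slice with one minor label held fixed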
19,533
pandas-dev/pandas
pandas/core/panel.py
Panel._apply_2d
def _apply_2d(self, func, axis): """ Handle 2-d slices, equiv to iterating over the other axis. """ ndim = self.ndim axis = [self._get_axis_number(a) for a in axis] # construct slabs, in 2-d this is a DataFrame result indexer_axis = list(range(ndim)) for a in axis: indexer_axis.remove(a) indexer_axis = indexer_axis[0] slicer = [slice(None, None)] * ndim ax = self._get_axis(indexer_axis) results = [] for i, e in enumerate(ax): slicer[indexer_axis] = i sliced = self.iloc[tuple(slicer)] obj = func(sliced) results.append((e, obj)) return self._construct_return_type(dict(results))
python
def _apply_2d(self, func, axis): """ Handle 2-d slices, equiv to iterating over the other axis. """ ndim = self.ndim axis = [self._get_axis_number(a) for a in axis] # construct slabs, in 2-d this is a DataFrame result indexer_axis = list(range(ndim)) for a in axis: indexer_axis.remove(a) indexer_axis = indexer_axis[0] slicer = [slice(None, None)] * ndim ax = self._get_axis(indexer_axis) results = [] for i, e in enumerate(ax): slicer[indexer_axis] = i sliced = self.iloc[tuple(slicer)] obj = func(sliced) results.append((e, obj)) return self._construct_return_type(dict(results))
[ "def", "_apply_2d", "(", "self", ",", "func", ",", "axis", ")", ":", "ndim", "=", "self", ".", "ndim", "axis", "=", "[", "self", ".", "_get_axis_number", "(", "a", ")", "for", "a", "in", "axis", "]", "# construct slabs, in 2-d this is a DataFrame result", "indexer_axis", "=", "list", "(", "range", "(", "ndim", ")", ")", "for", "a", "in", "axis", ":", "indexer_axis", ".", "remove", "(", "a", ")", "indexer_axis", "=", "indexer_axis", "[", "0", "]", "slicer", "=", "[", "slice", "(", "None", ",", "None", ")", "]", "*", "ndim", "ax", "=", "self", ".", "_get_axis", "(", "indexer_axis", ")", "results", "=", "[", "]", "for", "i", ",", "e", "in", "enumerate", "(", "ax", ")", ":", "slicer", "[", "indexer_axis", "]", "=", "i", "sliced", "=", "self", ".", "iloc", "[", "tuple", "(", "slicer", ")", "]", "obj", "=", "func", "(", "sliced", ")", "results", ".", "append", "(", "(", "e", ",", "obj", ")", ")", "return", "self", ".", "_construct_return_type", "(", "dict", "(", "results", ")", ")" ]
Handle 2-d slices, equiv to iterating over the other axis.
[ "Handle", "2", "-", "d", "slices", "equiv", "to", "iterating", "over", "the", "other", "axis", "." ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/panel.py#L1115-L1139
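The slab iteration in _apply_2d is a general pattern, not Panel-specific; here is a standalone numpy sketch of the same idea (apply_over_slabs is a hypothetical name, not a pandas API):

import numpy as np

def apply_over_slabs(arr, func, keep_axes):
    # keep the axes handed to func; iterate over the single remaining one
    other = [ax for ax in range(arr.ndim) if ax not in keep_axes][0]
    slicer = [slice(None)] * arr.ndim
    results = {}
    for i in range(arr.shape[other]):
        slicer[other] = i
        results[i] = func(arr[tuple(slicer)])
    return results

apply_over_slabs(np.arange(24).reshape(2, 3, 4), np.sum, keep_axes=[1, 2])
# -> {0: 66, 1: 210}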
19,534
pandas-dev/pandas
pandas/core/panel.py
Panel._construct_return_type
def _construct_return_type(self, result, axes=None): """ Return the type for the ndim of the result. """ ndim = getattr(result, 'ndim', None) # need to assume they are the same if ndim is None: if isinstance(result, dict): ndim = getattr(list(result.values())[0], 'ndim', 0) # have a dict, so top-level is +1 dim if ndim != 0: ndim += 1 # scalar if ndim == 0: return Series(result) # same as self elif self.ndim == ndim: # return the construction dictionary for these axes if axes is None: return self._constructor(result) return self._constructor(result, **self._construct_axes_dict()) # sliced elif self.ndim == ndim + 1: if axes is None: return self._constructor_sliced(result) return self._constructor_sliced( result, **self._extract_axes_for_slice(self, axes)) raise ValueError('invalid _construct_return_type [self->{self}] ' '[result->{result}]'.format(self=self, result=result))
python
def _construct_return_type(self, result, axes=None): """ Return the type for the ndim of the result. """ ndim = getattr(result, 'ndim', None) # need to assume they are the same if ndim is None: if isinstance(result, dict): ndim = getattr(list(result.values())[0], 'ndim', 0) # have a dict, so top-level is +1 dim if ndim != 0: ndim += 1 # scalar if ndim == 0: return Series(result) # same as self elif self.ndim == ndim: # return the construction dictionary for these axes if axes is None: return self._constructor(result) return self._constructor(result, **self._construct_axes_dict()) # sliced elif self.ndim == ndim + 1: if axes is None: return self._constructor_sliced(result) return self._constructor_sliced( result, **self._extract_axes_for_slice(self, axes)) raise ValueError('invalid _construct_return_type [self->{self}] ' '[result->{result}]'.format(self=self, result=result))
[ "def", "_construct_return_type", "(", "self", ",", "result", ",", "axes", "=", "None", ")", ":", "ndim", "=", "getattr", "(", "result", ",", "'ndim'", ",", "None", ")", "# need to assume they are the same", "if", "ndim", "is", "None", ":", "if", "isinstance", "(", "result", ",", "dict", ")", ":", "ndim", "=", "getattr", "(", "list", "(", "result", ".", "values", "(", ")", ")", "[", "0", "]", ",", "'ndim'", ",", "0", ")", "# have a dict, so top-level is +1 dim", "if", "ndim", "!=", "0", ":", "ndim", "+=", "1", "# scalar", "if", "ndim", "==", "0", ":", "return", "Series", "(", "result", ")", "# same as self", "elif", "self", ".", "ndim", "==", "ndim", ":", "# return the construction dictionary for these axes", "if", "axes", "is", "None", ":", "return", "self", ".", "_constructor", "(", "result", ")", "return", "self", ".", "_constructor", "(", "result", ",", "*", "*", "self", ".", "_construct_axes_dict", "(", ")", ")", "# sliced", "elif", "self", ".", "ndim", "==", "ndim", "+", "1", ":", "if", "axes", "is", "None", ":", "return", "self", ".", "_constructor_sliced", "(", "result", ")", "return", "self", ".", "_constructor_sliced", "(", "result", ",", "*", "*", "self", ".", "_extract_axes_for_slice", "(", "self", ",", "axes", ")", ")", "raise", "ValueError", "(", "'invalid _construct_return_type [self->{self}] '", "'[result->{result}]'", ".", "format", "(", "self", "=", "self", ",", "result", "=", "result", ")", ")" ]
Return the type for the ndim of the result.
[ "Return", "the", "type", "for", "the", "ndim", "of", "the", "result", "." ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/panel.py#L1173-L1207
19,535
pandas-dev/pandas
pandas/core/panel.py
Panel.count
def count(self, axis='major'): """ Return number of observations over requested axis. Parameters ---------- axis : {'items', 'major', 'minor'} or {0, 1, 2} Returns ------- count : DataFrame """ i = self._get_axis_number(axis) values = self.values mask = np.isfinite(values) result = mask.sum(axis=i, dtype='int64') return self._wrap_result(result, axis)
python
def count(self, axis='major'): """ Return number of observations over requested axis. Parameters ---------- axis : {'items', 'major', 'minor'} or {0, 1, 2} Returns ------- count : DataFrame """ i = self._get_axis_number(axis) values = self.values mask = np.isfinite(values) result = mask.sum(axis=i, dtype='int64') return self._wrap_result(result, axis)
[ "def", "count", "(", "self", ",", "axis", "=", "'major'", ")", ":", "i", "=", "self", ".", "_get_axis_number", "(", "axis", ")", "values", "=", "self", ".", "values", "mask", "=", "np", ".", "isfinite", "(", "values", ")", "result", "=", "mask", ".", "sum", "(", "axis", "=", "i", ",", "dtype", "=", "'int64'", ")", "return", "self", ".", "_wrap_result", "(", "result", ",", "axis", ")" ]
Return number of observations over requested axis. Parameters ---------- axis : {'items', 'major', 'minor'} or {0, 1, 2} Returns ------- count : DataFrame
[ "Return", "number", "of", "observations", "over", "requested", "axis", "." ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/panel.py#L1288-L1306
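Panel.count above is just a finite-mask reduction over the requested axis; the core can be reproduced with plain numpy (note that np.isfinite also treats +/-inf as missing, a quirk of this implementation):

import numpy as np

values = np.array([[1.0, np.nan, 3.0],
                   [np.nan, np.nan, 6.0]])
mask = np.isfinite(values)
mask.sum(axis=0, dtype='int64')   # array([1, 0, 2]): non-NA count per column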
19,536
pandas-dev/pandas
pandas/core/panel.py
Panel.shift
def shift(self, periods=1, freq=None, axis='major'): """ Shift index by desired number of periods with an optional time freq. The shifted data will not include the dropped periods and the shifted axis will be smaller than the original. This is different from the behavior of DataFrame.shift() Parameters ---------- periods : int Number of periods to move, can be positive or negative freq : DateOffset, timedelta, or time rule string, optional axis : {'items', 'major', 'minor'} or {0, 1, 2} Returns ------- shifted : Panel """ if freq: return self.tshift(periods, freq, axis=axis) return super().slice_shift(periods, axis=axis)
python
def shift(self, periods=1, freq=None, axis='major'): """ Shift index by desired number of periods with an optional time freq. The shifted data will not include the dropped periods and the shifted axis will be smaller than the original. This is different from the behavior of DataFrame.shift() Parameters ---------- periods : int Number of periods to move, can be positive or negative freq : DateOffset, timedelta, or time rule string, optional axis : {'items', 'major', 'minor'} or {0, 1, 2} Returns ------- shifted : Panel """ if freq: return self.tshift(periods, freq, axis=axis) return super().slice_shift(periods, axis=axis)
[ "def", "shift", "(", "self", ",", "periods", "=", "1", ",", "freq", "=", "None", ",", "axis", "=", "'major'", ")", ":", "if", "freq", ":", "return", "self", ".", "tshift", "(", "periods", ",", "freq", ",", "axis", "=", "axis", ")", "return", "super", "(", ")", ".", "slice_shift", "(", "periods", ",", "axis", "=", "axis", ")" ]
Shift index by desired number of periods with an optional time freq. The shifted data will not include the dropped periods and the shifted axis will be smaller than the original. This is different from the behavior of DataFrame.shift() Parameters ---------- periods : int Number of periods to move, can be positive or negative freq : DateOffset, timedelta, or time rule string, optional axis : {'items', 'major', 'minor'} or {0, 1, 2} Returns ------- shifted : Panel
[ "Shift", "index", "by", "desired", "number", "of", "periods", "with", "an", "optional", "time", "freq", "." ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/panel.py#L1308-L1330
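The shrinking-axis semantics come from slice_shift, which Panel.shift delegates to; the contrast with ordinary shift is easiest to see on a Series (a sketch against a pandas version where slice_shift exists):

import pandas as pd

s = pd.Series([1.0, 2.0, 3.0], index=list('abc'))
s.shift(1)        # a: NaN, b: 1.0, c: 2.0 -- length kept, NaN introduced
s.slice_shift(1)  # b: 1.0, c: 2.0         -- axis shrinks, no NaN added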
19,537
pandas-dev/pandas
pandas/core/panel.py
Panel.join
def join(self, other, how='left', lsuffix='', rsuffix=''): """ Join items with other Panel either on major and minor axes column. Parameters ---------- other : Panel or list of Panels Index should be similar to one of the columns in this one how : {'left', 'right', 'outer', 'inner'} How to handle indexes of the two objects. Default: 'left' for joining on index, None otherwise * left: use calling frame's index * right: use input frame's index * outer: form union of indexes * inner: use intersection of indexes lsuffix : string Suffix to use from left frame's overlapping columns rsuffix : string Suffix to use from right frame's overlapping columns Returns ------- joined : Panel """ from pandas.core.reshape.concat import concat if isinstance(other, Panel): join_major, join_minor = self._get_join_index(other, how) this = self.reindex(major=join_major, minor=join_minor) other = other.reindex(major=join_major, minor=join_minor) merged_data = this._data.merge(other._data, lsuffix, rsuffix) return self._constructor(merged_data) else: if lsuffix or rsuffix: raise ValueError('Suffixes not supported when passing ' 'multiple panels') if how == 'left': how = 'outer' join_axes = [self.major_axis, self.minor_axis] elif how == 'right': raise ValueError('Right join not supported with multiple ' 'panels') else: join_axes = None return concat([self] + list(other), axis=0, join=how, join_axes=join_axes, verify_integrity=True)
python
def join(self, other, how='left', lsuffix='', rsuffix=''): """ Join items with other Panel either on major and minor axes column. Parameters ---------- other : Panel or list of Panels Index should be similar to one of the columns in this one how : {'left', 'right', 'outer', 'inner'} How to handle indexes of the two objects. Default: 'left' for joining on index, None otherwise * left: use calling frame's index * right: use input frame's index * outer: form union of indexes * inner: use intersection of indexes lsuffix : string Suffix to use from left frame's overlapping columns rsuffix : string Suffix to use from right frame's overlapping columns Returns ------- joined : Panel """ from pandas.core.reshape.concat import concat if isinstance(other, Panel): join_major, join_minor = self._get_join_index(other, how) this = self.reindex(major=join_major, minor=join_minor) other = other.reindex(major=join_major, minor=join_minor) merged_data = this._data.merge(other._data, lsuffix, rsuffix) return self._constructor(merged_data) else: if lsuffix or rsuffix: raise ValueError('Suffixes not supported when passing ' 'multiple panels') if how == 'left': how = 'outer' join_axes = [self.major_axis, self.minor_axis] elif how == 'right': raise ValueError('Right join not supported with multiple ' 'panels') else: join_axes = None return concat([self] + list(other), axis=0, join=how, join_axes=join_axes, verify_integrity=True)
[ "def", "join", "(", "self", ",", "other", ",", "how", "=", "'left'", ",", "lsuffix", "=", "''", ",", "rsuffix", "=", "''", ")", ":", "from", "pandas", ".", "core", ".", "reshape", ".", "concat", "import", "concat", "if", "isinstance", "(", "other", ",", "Panel", ")", ":", "join_major", ",", "join_minor", "=", "self", ".", "_get_join_index", "(", "other", ",", "how", ")", "this", "=", "self", ".", "reindex", "(", "major", "=", "join_major", ",", "minor", "=", "join_minor", ")", "other", "=", "other", ".", "reindex", "(", "major", "=", "join_major", ",", "minor", "=", "join_minor", ")", "merged_data", "=", "this", ".", "_data", ".", "merge", "(", "other", ".", "_data", ",", "lsuffix", ",", "rsuffix", ")", "return", "self", ".", "_constructor", "(", "merged_data", ")", "else", ":", "if", "lsuffix", "or", "rsuffix", ":", "raise", "ValueError", "(", "'Suffixes not supported when passing '", "'multiple panels'", ")", "if", "how", "==", "'left'", ":", "how", "=", "'outer'", "join_axes", "=", "[", "self", ".", "major_axis", ",", "self", ".", "minor_axis", "]", "elif", "how", "==", "'right'", ":", "raise", "ValueError", "(", "'Right join not supported with multiple '", "'panels'", ")", "else", ":", "join_axes", "=", "None", "return", "concat", "(", "[", "self", "]", "+", "list", "(", "other", ")", ",", "axis", "=", "0", ",", "join", "=", "how", ",", "join_axes", "=", "join_axes", ",", "verify_integrity", "=", "True", ")" ]
Join items with other Panel either on major and minor axes column. Parameters ---------- other : Panel or list of Panels Index should be similar to one of the columns in this one how : {'left', 'right', 'outer', 'inner'} How to handle indexes of the two objects. Default: 'left' for joining on index, None otherwise * left: use calling frame's index * right: use input frame's index * outer: form union of indexes * inner: use intersection of indexes lsuffix : string Suffix to use from left frame's overlapping columns rsuffix : string Suffix to use from right frame's overlapping columns Returns ------- joined : Panel
[ "Join", "items", "with", "other", "Panel", "either", "on", "major", "and", "minor", "axes", "column", "." ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/panel.py#L1335-L1382
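A hedged sketch of the two join paths, assuming a pre-0.25 pandas; the item names are arbitrary:

import numpy as np
import pandas as pd

left = pd.Panel(np.random.randn(1, 3, 2), items=['x'])
right = pd.Panel(np.random.randn(1, 3, 2), items=['y'])

left.join(right)    # Panel path: items ['x', 'y'], axes aligned per how='left'
left.join([right])  # list path: concat along items ('left' is treated as 'outer')
# overlapping item names would need lsuffix/rsuffix in the single-Panel case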
19,538
pandas-dev/pandas
pandas/core/panel.py
Panel.update
def update(self, other, join='left', overwrite=True, filter_func=None, errors='ignore'): """ Modify Panel in place using non-NA values from other Panel. May also use object coercible to Panel. Will align on items. Parameters ---------- other : Panel, or object coercible to Panel The object from which the caller will be updated. join : {'left', 'right', 'outer', 'inner'}, default 'left' How individual DataFrames are joined. overwrite : bool, default True If True then overwrite values for common keys in the calling Panel. filter_func : callable(1d-array) -> 1d-array<bool>, default None Can choose to replace values other than NA. Return True for values that should be updated. errors : {'raise', 'ignore'}, default 'ignore' If 'raise', will raise an error if a DataFrame and other both contain non-NA data in the same place. .. versionchanged:: 0.24.0 Changed from `raise_conflict=False|True` to `errors='ignore'|'raise'`. See Also -------- DataFrame.update : Similar method for DataFrames. dict.update : Similar method for dictionaries. """ if not isinstance(other, self._constructor): other = self._constructor(other) axis_name = self._info_axis_name axis_values = self._info_axis other = other.reindex(**{axis_name: axis_values}) for frame in axis_values: self[frame].update(other[frame], join=join, overwrite=overwrite, filter_func=filter_func, errors=errors)
python
def update(self, other, join='left', overwrite=True, filter_func=None, errors='ignore'): """ Modify Panel in place using non-NA values from other Panel. May also use object coercible to Panel. Will align on items. Parameters ---------- other : Panel, or object coercible to Panel The object from which the caller will be updated. join : {'left', 'right', 'outer', 'inner'}, default 'left' How individual DataFrames are joined. overwrite : bool, default True If True then overwrite values for common keys in the calling Panel. filter_func : callable(1d-array) -> 1d-array<bool>, default None Can choose to replace values other than NA. Return True for values that should be updated. errors : {'raise', 'ignore'}, default 'ignore' If 'raise', will raise an error if a DataFrame and other both contain non-NA data in the same place. .. versionchanged:: 0.24.0 Changed from `raise_conflict=False|True` to `errors='ignore'|'raise'`. See Also -------- DataFrame.update : Similar method for DataFrames. dict.update : Similar method for dictionaries. """ if not isinstance(other, self._constructor): other = self._constructor(other) axis_name = self._info_axis_name axis_values = self._info_axis other = other.reindex(**{axis_name: axis_values}) for frame in axis_values: self[frame].update(other[frame], join=join, overwrite=overwrite, filter_func=filter_func, errors=errors)
[ "def", "update", "(", "self", ",", "other", ",", "join", "=", "'left'", ",", "overwrite", "=", "True", ",", "filter_func", "=", "None", ",", "errors", "=", "'ignore'", ")", ":", "if", "not", "isinstance", "(", "other", ",", "self", ".", "_constructor", ")", ":", "other", "=", "self", ".", "_constructor", "(", "other", ")", "axis_name", "=", "self", ".", "_info_axis_name", "axis_values", "=", "self", ".", "_info_axis", "other", "=", "other", ".", "reindex", "(", "*", "*", "{", "axis_name", ":", "axis_values", "}", ")", "for", "frame", "in", "axis_values", ":", "self", "[", "frame", "]", ".", "update", "(", "other", "[", "frame", "]", ",", "join", "=", "join", ",", "overwrite", "=", "overwrite", ",", "filter_func", "=", "filter_func", ",", "errors", "=", "errors", ")" ]
Modify Panel in place using non-NA values from other Panel. May also use object coercible to Panel. Will align on items. Parameters ---------- other : Panel, or object coercible to Panel The object from which the caller will be updated. join : {'left', 'right', 'outer', 'inner'}, default 'left' How individual DataFrames are joined. overwrite : bool, default True If True then overwrite values for common keys in the calling Panel. filter_func : callable(1d-array) -> 1d-array<bool>, default None Can choose to replace values other than NA. Return True for values that should be updated. errors : {'raise', 'ignore'}, default 'ignore' If 'raise', will raise an error if a DataFrame and other both contain non-NA data in the same place. .. versionchanged:: 0.24.0 Changed from `raise_conflict=False|True` to `errors='ignore'|'raise'`. See Also -------- DataFrame.update : Similar method for DataFrames. dict.update : Similar method for dictionaries.
[ "Modify", "Panel", "in", "place", "using", "non", "-", "NA", "values", "from", "other", "Panel", "." ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/panel.py#L1386-L1426
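A small in-place update sketch, assuming a pre-0.25 pandas with Panel:

import numpy as np
import pandas as pd

p1 = pd.Panel({'item': pd.DataFrame({'a': [1.0, np.nan]})})
p2 = pd.Panel({'item': pd.DataFrame({'a': [9.0, 2.0]})})

p1.update(p2)   # in place; overwrite=True -> column becomes [9.0, 2.0]
# with overwrite=False, only p1's NaN is filled -> [1.0, 2.0]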
19,539
pandas-dev/pandas
pandas/core/panel.py
Panel._extract_axes
def _extract_axes(self, data, axes, **kwargs): """ Return a list of the axis indices. """ return [self._extract_axis(self, data, axis=i, **kwargs) for i, a in enumerate(axes)]
python
def _extract_axes(self, data, axes, **kwargs): """ Return a list of the axis indices. """ return [self._extract_axis(self, data, axis=i, **kwargs) for i, a in enumerate(axes)]
[ "def", "_extract_axes", "(", "self", ",", "data", ",", "axes", ",", "*", "*", "kwargs", ")", ":", "return", "[", "self", ".", "_extract_axis", "(", "self", ",", "data", ",", "axis", "=", "i", ",", "*", "*", "kwargs", ")", "for", "i", ",", "a", "in", "enumerate", "(", "axes", ")", "]" ]
Return a list of the axis indices.
[ "Return", "a", "list", "of", "the", "axis", "indices", "." ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/panel.py#L1443-L1448
19,540
pandas-dev/pandas
pandas/core/panel.py
Panel._extract_axes_for_slice
def _extract_axes_for_slice(self, axes): """ Return the slice dictionary for these axes. """ return {self._AXIS_SLICEMAP[i]: a for i, a in zip(self._AXIS_ORDERS[self._AXIS_LEN - len(axes):], axes)}
python
def _extract_axes_for_slice(self, axes): """ Return the slice dictionary for these axes. """ return {self._AXIS_SLICEMAP[i]: a for i, a in zip(self._AXIS_ORDERS[self._AXIS_LEN - len(axes):], axes)}
[ "def", "_extract_axes_for_slice", "(", "self", ",", "axes", ")", ":", "return", "{", "self", ".", "_AXIS_SLICEMAP", "[", "i", "]", ":", "a", "for", "i", ",", "a", "in", "zip", "(", "self", ".", "_AXIS_ORDERS", "[", "self", ".", "_AXIS_LEN", "-", "len", "(", "axes", ")", ":", "]", ",", "axes", ")", "}" ]
Return the slice dictionary for these axes.
[ "Return", "the", "slice", "dictionary", "for", "these", "axes", "." ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/panel.py#L1451-L1456
19,541
pandas-dev/pandas
pandas/core/sorting.py
decons_obs_group_ids
def decons_obs_group_ids(comp_ids, obs_ids, shape, labels, xnull): """ reconstruct labels from observed group ids Parameters ---------- xnull: boolean, if nulls are excluded; i.e. -1 labels are passed through """ if not xnull: lift = np.fromiter(((a == -1).any() for a in labels), dtype='i8') shape = np.asarray(shape, dtype='i8') + lift if not is_int64_overflow_possible(shape): # obs ids are deconstructable! take the fast route! out = decons_group_index(obs_ids, shape) return out if xnull or not lift.any() \ else [x - y for x, y in zip(out, lift)] i = unique_label_indices(comp_ids) i8copy = lambda a: a.astype('i8', subok=False, copy=True) return [i8copy(lab[i]) for lab in labels]
python
def decons_obs_group_ids(comp_ids, obs_ids, shape, labels, xnull): """ reconstruct labels from observed group ids Parameters ---------- xnull: boolean, if nulls are excluded; i.e. -1 labels are passed through """ if not xnull: lift = np.fromiter(((a == -1).any() for a in labels), dtype='i8') shape = np.asarray(shape, dtype='i8') + lift if not is_int64_overflow_possible(shape): # obs ids are deconstructable! take the fast route! out = decons_group_index(obs_ids, shape) return out if xnull or not lift.any() \ else [x - y for x, y in zip(out, lift)] i = unique_label_indices(comp_ids) i8copy = lambda a: a.astype('i8', subok=False, copy=True) return [i8copy(lab[i]) for lab in labels]
[ "def", "decons_obs_group_ids", "(", "comp_ids", ",", "obs_ids", ",", "shape", ",", "labels", ",", "xnull", ")", ":", "if", "not", "xnull", ":", "lift", "=", "np", ".", "fromiter", "(", "(", "(", "a", "==", "-", "1", ")", ".", "any", "(", ")", "for", "a", "in", "labels", ")", ",", "dtype", "=", "'i8'", ")", "shape", "=", "np", ".", "asarray", "(", "shape", ",", "dtype", "=", "'i8'", ")", "+", "lift", "if", "not", "is_int64_overflow_possible", "(", "shape", ")", ":", "# obs ids are deconstructable! take the fast route!", "out", "=", "decons_group_index", "(", "obs_ids", ",", "shape", ")", "return", "out", "if", "xnull", "or", "not", "lift", ".", "any", "(", ")", "else", "[", "x", "-", "y", "for", "x", ",", "y", "in", "zip", "(", "out", ",", "lift", ")", "]", "i", "=", "unique_label_indices", "(", "comp_ids", ")", "i8copy", "=", "lambda", "a", ":", "a", ".", "astype", "(", "'i8'", ",", "subok", "=", "False", ",", "copy", "=", "True", ")", "return", "[", "i8copy", "(", "lab", "[", "i", "]", ")", "for", "lab", "in", "labels", "]" ]
reconstruct labels from observed group ids Parameters ---------- xnull: boolean, if nulls are excluded; i.e. -1 labels are passed through
[ "reconstruct", "labels", "from", "observed", "group", "ids" ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/sorting.py#L151-L173
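The fast path works because observed group ids are a mixed-radix encoding of the per-level labels; a small round-trip sketch of that encoding (illustrative only, not the pandas internals themselves):

import numpy as np

labels = [np.array([0, 1, 2]), np.array([1, 0, 1])]
shape = (3, 2)   # number of distinct values per level

comp_ids = labels[0] * shape[1] + labels[1]            # encode -> [1, 2, 5]
decoded = [comp_ids // shape[1], comp_ids % shape[1]]  # decode by div/mod
assert all((d == lab).all() for d, lab in zip(decoded, labels))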
19,542
pandas-dev/pandas
pandas/core/computation/engines.py
_check_ne_builtin_clash
def _check_ne_builtin_clash(expr): """Attempt to prevent foot-shooting in a helpful way. Parameters ---------- expr : Expr The expression whose names are checked against the numexpr builtins. """ names = expr.names overlap = names & _ne_builtins if overlap: s = ', '.join(map(repr, overlap)) raise NumExprClobberingError('Variables in expression "{expr}" ' 'overlap with builtins: ({s})' .format(expr=expr, s=s))
python
def _check_ne_builtin_clash(expr): """Attempt to prevent foot-shooting in a helpful way. Parameters ---------- expr : Expr The expression whose names are checked against the numexpr builtins. """ names = expr.names overlap = names & _ne_builtins if overlap: s = ', '.join(map(repr, overlap)) raise NumExprClobberingError('Variables in expression "{expr}" ' 'overlap with builtins: ({s})' .format(expr=expr, s=s))
[ "def", "_check_ne_builtin_clash", "(", "expr", ")", ":", "names", "=", "expr", ".", "names", "overlap", "=", "names", "&", "_ne_builtins", "if", "overlap", ":", "s", "=", "', '", ".", "join", "(", "map", "(", "repr", ",", "overlap", ")", ")", "raise", "NumExprClobberingError", "(", "'Variables in expression \"{expr}\" '", "'overlap with builtins: ({s})'", ".", "format", "(", "expr", "=", "expr", ",", "s", "=", "s", ")", ")" ]
Attempt to prevent foot-shooting in a helpful way. Parameters ---------- expr : Expr The expression whose names are checked against the numexpr builtins.
[ "Attempt", "to", "prevent", "foot", "-", "shooting", "in", "a", "helpful", "way", "." ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/computation/engines.py#L20-L35
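The guard trips when a column name shadows a numexpr builtin; a sketch assuming numexpr is installed:

import pandas as pd

df = pd.DataFrame({'sin': [1.0, 2.0]})
try:
    df.query('sin > 1', engine='numexpr')
except Exception as err:   # NumExprClobberingError in practice
    print(err)             # e.g. ...overlap with builtins: ('sin')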
19,543
pandas-dev/pandas
pandas/core/computation/engines.py
AbstractEngine.evaluate
def evaluate(self): """Run the engine on the expression This method performs alignment which is necessary no matter what engine is being used, thus its implementation is in the base class. Returns ------- obj : object The result of the passed expression. """ if not self._is_aligned: self.result_type, self.aligned_axes = _align(self.expr.terms) # make sure no names in resolvers and locals/globals clash res = self._evaluate() return _reconstruct_object(self.result_type, res, self.aligned_axes, self.expr.terms.return_type)
python
def evaluate(self): """Run the engine on the expression This method performs alignment which is necessary no matter what engine is being used, thus its implementation is in the base class. Returns ------- obj : object The result of the passed expression. """ if not self._is_aligned: self.result_type, self.aligned_axes = _align(self.expr.terms) # make sure no names in resolvers and locals/globals clash res = self._evaluate() return _reconstruct_object(self.result_type, res, self.aligned_axes, self.expr.terms.return_type)
[ "def", "evaluate", "(", "self", ")", ":", "if", "not", "self", ".", "_is_aligned", ":", "self", ".", "result_type", ",", "self", ".", "aligned_axes", "=", "_align", "(", "self", ".", "expr", ".", "terms", ")", "# make sure no names in resolvers and locals/globals clash", "res", "=", "self", ".", "_evaluate", "(", ")", "return", "_reconstruct_object", "(", "self", ".", "result_type", ",", "res", ",", "self", ".", "aligned_axes", ",", "self", ".", "expr", ".", "terms", ".", "return_type", ")" ]
Run the engine on the expression This method performs alignment which is necessary no matter what engine is being used, thus its implementation is in the base class. Returns ------- obj : object The result of the passed expression.
[ "Run", "the", "engine", "on", "the", "expression" ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/computation/engines.py#L55-L72
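The alignment step that evaluate performs before dispatching to an engine is visible from the public pd.eval entry point:

import pandas as pd

s1 = pd.Series([1, 2], index=['a', 'b'])
s2 = pd.Series([10, 20], index=['b', 'c'])
pd.eval('s1 + s2')   # union index: a -> NaN, b -> 12.0, c -> NaN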
19,544
pandas-dev/pandas
pandas/core/internals/blocks.py
get_block_type
def get_block_type(values, dtype=None): """ Find the appropriate Block subclass to use for the given values and dtype. Parameters ---------- values : ndarray-like dtype : numpy or pandas dtype Returns ------- cls : class, subclass of Block """ dtype = dtype or values.dtype vtype = dtype.type if is_sparse(dtype): # Need this first(ish) so that Sparse[datetime] is sparse cls = ExtensionBlock elif is_categorical(values): cls = CategoricalBlock elif issubclass(vtype, np.datetime64): assert not is_datetime64tz_dtype(values) cls = DatetimeBlock elif is_datetime64tz_dtype(values): cls = DatetimeTZBlock elif is_interval_dtype(dtype) or is_period_dtype(dtype): cls = ObjectValuesExtensionBlock elif is_extension_array_dtype(values): cls = ExtensionBlock elif issubclass(vtype, np.floating): cls = FloatBlock elif issubclass(vtype, np.timedelta64): assert issubclass(vtype, np.integer) cls = TimeDeltaBlock elif issubclass(vtype, np.complexfloating): cls = ComplexBlock elif issubclass(vtype, np.integer): cls = IntBlock elif dtype == np.bool_: cls = BoolBlock else: cls = ObjectBlock return cls
python
def get_block_type(values, dtype=None): """ Find the appropriate Block subclass to use for the given values and dtype. Parameters ---------- values : ndarray-like dtype : numpy or pandas dtype Returns ------- cls : class, subclass of Block """ dtype = dtype or values.dtype vtype = dtype.type if is_sparse(dtype): # Need this first(ish) so that Sparse[datetime] is sparse cls = ExtensionBlock elif is_categorical(values): cls = CategoricalBlock elif issubclass(vtype, np.datetime64): assert not is_datetime64tz_dtype(values) cls = DatetimeBlock elif is_datetime64tz_dtype(values): cls = DatetimeTZBlock elif is_interval_dtype(dtype) or is_period_dtype(dtype): cls = ObjectValuesExtensionBlock elif is_extension_array_dtype(values): cls = ExtensionBlock elif issubclass(vtype, np.floating): cls = FloatBlock elif issubclass(vtype, np.timedelta64): assert issubclass(vtype, np.integer) cls = TimeDeltaBlock elif issubclass(vtype, np.complexfloating): cls = ComplexBlock elif issubclass(vtype, np.integer): cls = IntBlock elif dtype == np.bool_: cls = BoolBlock else: cls = ObjectBlock return cls
[ "def", "get_block_type", "(", "values", ",", "dtype", "=", "None", ")", ":", "dtype", "=", "dtype", "or", "values", ".", "dtype", "vtype", "=", "dtype", ".", "type", "if", "is_sparse", "(", "dtype", ")", ":", "# Need this first(ish) so that Sparse[datetime] is sparse", "cls", "=", "ExtensionBlock", "elif", "is_categorical", "(", "values", ")", ":", "cls", "=", "CategoricalBlock", "elif", "issubclass", "(", "vtype", ",", "np", ".", "datetime64", ")", ":", "assert", "not", "is_datetime64tz_dtype", "(", "values", ")", "cls", "=", "DatetimeBlock", "elif", "is_datetime64tz_dtype", "(", "values", ")", ":", "cls", "=", "DatetimeTZBlock", "elif", "is_interval_dtype", "(", "dtype", ")", "or", "is_period_dtype", "(", "dtype", ")", ":", "cls", "=", "ObjectValuesExtensionBlock", "elif", "is_extension_array_dtype", "(", "values", ")", ":", "cls", "=", "ExtensionBlock", "elif", "issubclass", "(", "vtype", ",", "np", ".", "floating", ")", ":", "cls", "=", "FloatBlock", "elif", "issubclass", "(", "vtype", ",", "np", ".", "timedelta64", ")", ":", "assert", "issubclass", "(", "vtype", ",", "np", ".", "integer", ")", "cls", "=", "TimeDeltaBlock", "elif", "issubclass", "(", "vtype", ",", "np", ".", "complexfloating", ")", ":", "cls", "=", "ComplexBlock", "elif", "issubclass", "(", "vtype", ",", "np", ".", "integer", ")", ":", "cls", "=", "IntBlock", "elif", "dtype", "==", "np", ".", "bool_", ":", "cls", "=", "BoolBlock", "else", ":", "cls", "=", "ObjectBlock", "return", "cls" ]
Find the appropriate Block subclass to use for the given values and dtype. Parameters ---------- values : ndarray-like dtype : numpy or pandas dtype Returns ------- cls : class, subclass of Block
[ "Find", "the", "appropriate", "Block", "subclass", "to", "use", "for", "the", "given", "values", "and", "dtype", "." ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/internals/blocks.py#L2987-L3030
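The dtype-to-block mapping can be observed through the BlockManager, though this is a private API whose block order and class names vary across versions; a tentative sketch:

import pandas as pd

df = pd.DataFrame({'i': [1], 'f': [1.0], 'o': ['x']})
print([type(b).__name__ for b in df._data.blocks])
# e.g. ['IntBlock', 'FloatBlock', 'ObjectBlock'] on pandas 0.24-era internals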
19,545
pandas-dev/pandas
pandas/core/internals/blocks.py
_extend_blocks
def _extend_blocks(result, blocks=None): """ return a new extended list of blocks, given the result """ from pandas.core.internals import BlockManager if blocks is None: blocks = [] if isinstance(result, list): for r in result: if isinstance(r, list): blocks.extend(r) else: blocks.append(r) elif isinstance(result, BlockManager): blocks.extend(result.blocks) else: blocks.append(result) return blocks
python
def _extend_blocks(result, blocks=None): """ return a new extended list of blocks, given the result """ from pandas.core.internals import BlockManager if blocks is None: blocks = [] if isinstance(result, list): for r in result: if isinstance(r, list): blocks.extend(r) else: blocks.append(r) elif isinstance(result, BlockManager): blocks.extend(result.blocks) else: blocks.append(result) return blocks
[ "def", "_extend_blocks", "(", "result", ",", "blocks", "=", "None", ")", ":", "from", "pandas", ".", "core", ".", "internals", "import", "BlockManager", "if", "blocks", "is", "None", ":", "blocks", "=", "[", "]", "if", "isinstance", "(", "result", ",", "list", ")", ":", "for", "r", "in", "result", ":", "if", "isinstance", "(", "r", ",", "list", ")", ":", "blocks", ".", "extend", "(", "r", ")", "else", ":", "blocks", ".", "append", "(", "r", ")", "elif", "isinstance", "(", "result", ",", "BlockManager", ")", ":", "blocks", ".", "extend", "(", "result", ".", "blocks", ")", "else", ":", "blocks", ".", "append", "(", "result", ")", "return", "blocks" ]
return a new extended list of blocks, given the result
[ "return", "a", "new", "extended", "blocks", "givin", "the", "result" ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/internals/blocks.py#L3060-L3075
19,546
pandas-dev/pandas
pandas/core/internals/blocks.py
_block_shape
def _block_shape(values, ndim=1, shape=None): """ guarantee the shape of the values to be at least 1-d """ if values.ndim < ndim: if shape is None: shape = values.shape if not is_extension_array_dtype(values): # TODO: https://github.com/pandas-dev/pandas/issues/23023 # block.shape is incorrect for "2D" ExtensionArrays # We can't, and don't need to, reshape. values = values.reshape(tuple((1, ) + shape)) return values
python
def _block_shape(values, ndim=1, shape=None): """ guarantee the shape of the values to be at least 1-d """ if values.ndim < ndim: if shape is None: shape = values.shape if not is_extension_array_dtype(values): # TODO: https://github.com/pandas-dev/pandas/issues/23023 # block.shape is incorrect for "2D" ExtensionArrays # We can't, and don't need to, reshape. values = values.reshape(tuple((1, ) + shape)) return values
[ "def", "_block_shape", "(", "values", ",", "ndim", "=", "1", ",", "shape", "=", "None", ")", ":", "if", "values", ".", "ndim", "<", "ndim", ":", "if", "shape", "is", "None", ":", "shape", "=", "values", ".", "shape", "if", "not", "is_extension_array_dtype", "(", "values", ")", ":", "# TODO: https://github.com/pandas-dev/pandas/issues/23023", "# block.shape is incorrect for \"2D\" ExtensionArrays", "# We can't, and don't need to, reshape.", "values", "=", "values", ".", "reshape", "(", "tuple", "(", "(", "1", ",", ")", "+", "shape", ")", ")", "return", "values" ]
guarantee the shape of the values to be at least 1-d
[ "guarantee", "the", "shape", "of", "the", "values", "to", "be", "at", "least", "1", "d" ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/internals/blocks.py#L3078-L3088
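The promotion is nothing more than prepending a length-1 axis to the existing shape:

import numpy as np

v = np.arange(4)
v2 = v.reshape((1,) + v.shape)   # (4,) -> (1, 4), same underlying data
assert v2.ndim == 2 and (v2[0] == v).all()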
19,547
pandas-dev/pandas
pandas/core/internals/blocks.py
_putmask_smart
def _putmask_smart(v, m, n): """ Return a new ndarray, try to preserve dtype if possible. Parameters ---------- v : `values`, updated in-place (array like) m : `mask`, applies to both sides (array like) n : `new values` either scalar or an array like aligned with `values` Returns ------- values : ndarray with updated values this *may* be a copy of the original See Also -------- ndarray.putmask """ # we cannot use np.asarray() here as we cannot have conversions # that numpy does when numeric are mixed with strings # n should be the length of the mask or a scalar here if not is_list_like(n): n = np.repeat(n, len(m)) elif isinstance(n, np.ndarray) and n.ndim == 0: # numpy scalar n = np.repeat(np.array(n, ndmin=1), len(m)) # see if we are only masking values that, if put, # will work in the current dtype try: nn = n[m] # make sure that we have a nullable type # if we have nulls if not _isna_compat(v, nn[0]): raise ValueError # we ignore ComplexWarning here with warnings.catch_warnings(record=True): warnings.simplefilter("ignore", np.ComplexWarning) nn_at = nn.astype(v.dtype) # avoid invalid dtype comparisons # between numbers & strings # only compare integers/floats # don't compare integers to datetimelikes if (not is_numeric_v_string_like(nn, nn_at) and (is_float_dtype(nn.dtype) or is_integer_dtype(nn.dtype) and is_float_dtype(nn_at.dtype) or is_integer_dtype(nn_at.dtype))): comp = (nn == nn_at) if is_list_like(comp) and comp.all(): nv = v.copy() nv[m] = nn_at return nv except (ValueError, IndexError, TypeError, OverflowError): pass n = np.asarray(n) def _putmask_preserve(nv, n): try: nv[m] = n[m] except (IndexError, ValueError): nv[m] = n return nv # preserves dtype if possible if v.dtype.kind == n.dtype.kind: return _putmask_preserve(v, n) # change the dtype if needed dtype, _ = maybe_promote(n.dtype) if is_extension_type(v.dtype) and is_object_dtype(dtype): v = v.get_values(dtype) else: v = v.astype(dtype) return _putmask_preserve(v, n)
python
def _putmask_smart(v, m, n): """ Return a new ndarray, try to preserve dtype if possible. Parameters ---------- v : `values`, updated in-place (array like) m : `mask`, applies to both sides (array like) n : `new values` either scalar or an array like aligned with `values` Returns ------- values : ndarray with updated values this *may* be a copy of the original See Also -------- ndarray.putmask """ # we cannot use np.asarray() here as we cannot have conversions # that numpy does when numeric are mixed with strings # n should be the length of the mask or a scalar here if not is_list_like(n): n = np.repeat(n, len(m)) elif isinstance(n, np.ndarray) and n.ndim == 0: # numpy scalar n = np.repeat(np.array(n, ndmin=1), len(m)) # see if we are only masking values that, if put, # will work in the current dtype try: nn = n[m] # make sure that we have a nullable type # if we have nulls if not _isna_compat(v, nn[0]): raise ValueError # we ignore ComplexWarning here with warnings.catch_warnings(record=True): warnings.simplefilter("ignore", np.ComplexWarning) nn_at = nn.astype(v.dtype) # avoid invalid dtype comparisons # between numbers & strings # only compare integers/floats # don't compare integers to datetimelikes if (not is_numeric_v_string_like(nn, nn_at) and (is_float_dtype(nn.dtype) or is_integer_dtype(nn.dtype) and is_float_dtype(nn_at.dtype) or is_integer_dtype(nn_at.dtype))): comp = (nn == nn_at) if is_list_like(comp) and comp.all(): nv = v.copy() nv[m] = nn_at return nv except (ValueError, IndexError, TypeError, OverflowError): pass n = np.asarray(n) def _putmask_preserve(nv, n): try: nv[m] = n[m] except (IndexError, ValueError): nv[m] = n return nv # preserves dtype if possible if v.dtype.kind == n.dtype.kind: return _putmask_preserve(v, n) # change the dtype if needed dtype, _ = maybe_promote(n.dtype) if is_extension_type(v.dtype) and is_object_dtype(dtype): v = v.get_values(dtype) else: v = v.astype(dtype) return _putmask_preserve(v, n)
[ "def", "_putmask_smart", "(", "v", ",", "m", ",", "n", ")", ":", "# we cannot use np.asarray() here as we cannot have conversions", "# that numpy does when numeric are mixed with strings", "# n should be the length of the mask or a scalar here", "if", "not", "is_list_like", "(", "n", ")", ":", "n", "=", "np", ".", "repeat", "(", "n", ",", "len", "(", "m", ")", ")", "elif", "isinstance", "(", "n", ",", "np", ".", "ndarray", ")", "and", "n", ".", "ndim", "==", "0", ":", "# numpy scalar", "n", "=", "np", ".", "repeat", "(", "np", ".", "array", "(", "n", ",", "ndmin", "=", "1", ")", ",", "len", "(", "m", ")", ")", "# see if we are only masking values that if putted", "# will work in the current dtype", "try", ":", "nn", "=", "n", "[", "m", "]", "# make sure that we have a nullable type", "# if we have nulls", "if", "not", "_isna_compat", "(", "v", ",", "nn", "[", "0", "]", ")", ":", "raise", "ValueError", "# we ignore ComplexWarning here", "with", "warnings", ".", "catch_warnings", "(", "record", "=", "True", ")", ":", "warnings", ".", "simplefilter", "(", "\"ignore\"", ",", "np", ".", "ComplexWarning", ")", "nn_at", "=", "nn", ".", "astype", "(", "v", ".", "dtype", ")", "# avoid invalid dtype comparisons", "# between numbers & strings", "# only compare integers/floats", "# don't compare integers to datetimelikes", "if", "(", "not", "is_numeric_v_string_like", "(", "nn", ",", "nn_at", ")", "and", "(", "is_float_dtype", "(", "nn", ".", "dtype", ")", "or", "is_integer_dtype", "(", "nn", ".", "dtype", ")", "and", "is_float_dtype", "(", "nn_at", ".", "dtype", ")", "or", "is_integer_dtype", "(", "nn_at", ".", "dtype", ")", ")", ")", ":", "comp", "=", "(", "nn", "==", "nn_at", ")", "if", "is_list_like", "(", "comp", ")", "and", "comp", ".", "all", "(", ")", ":", "nv", "=", "v", ".", "copy", "(", ")", "nv", "[", "m", "]", "=", "nn_at", "return", "nv", "except", "(", "ValueError", ",", "IndexError", ",", "TypeError", ",", "OverflowError", ")", ":", "pass", "n", "=", "np", ".", "asarray", "(", "n", ")", "def", "_putmask_preserve", "(", "nv", ",", "n", ")", ":", "try", ":", "nv", "[", "m", "]", "=", "n", "[", "m", "]", "except", "(", "IndexError", ",", "ValueError", ")", ":", "nv", "[", "m", "]", "=", "n", "return", "nv", "# preserves dtype if possible", "if", "v", ".", "dtype", ".", "kind", "==", "n", ".", "dtype", ".", "kind", ":", "return", "_putmask_preserve", "(", "v", ",", "n", ")", "# change the dtype if needed", "dtype", ",", "_", "=", "maybe_promote", "(", "n", ".", "dtype", ")", "if", "is_extension_type", "(", "v", ".", "dtype", ")", "and", "is_object_dtype", "(", "dtype", ")", ":", "v", "=", "v", ".", "get_values", "(", "dtype", ")", "else", ":", "v", "=", "v", ".", "astype", "(", "dtype", ")", "return", "_putmask_preserve", "(", "v", ",", "n", ")" ]
Return a new ndarray, try to preserve dtype if possible. Parameters ---------- v : `values`, updated in-place (array like) m : `mask`, applies to both sides (array like) n : `new values` either scalar or an array like aligned with `values` Returns ------- values : ndarray with updated values this *may* be a copy of the original See Also -------- ndarray.putmask
[ "Return", "a", "new", "ndarray", "try", "to", "preserve", "dtype", "if", "possible", "." ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/internals/blocks.py#L3140-L3224
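The "try it in the current dtype, otherwise promote first" dance can be sketched with plain numpy (illustrative only, not the exact pandas code path):

import numpy as np

v = np.array([1, 2, 3])               # int64 values
m = np.array([False, True, False])    # mask

n = np.array([8.0, 8.0, 8.0])         # floats that cast losslessly
out = v.copy()
out[m] = n[m].astype(v.dtype)         # dtype preserved: [1, 8, 3], int64

n2 = np.array(['filled'] * 3)         # not castable -> promote first
out2 = v.astype(object)
out2[m] = n2[m]                       # object fallback: [1, 'filled', 3]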
19,548
pandas-dev/pandas
pandas/core/internals/blocks.py
Block._check_ndim
def _check_ndim(self, values, ndim): """ ndim inference and validation. Infers ndim from 'values' if not provided to __init__. Validates that values.ndim and ndim are consistent if and only if the class variable '_validate_ndim' is True. Parameters ---------- values : array-like ndim : int or None Returns ------- ndim : int Raises ------ ValueError : the number of dimensions do not match """ if ndim is None: ndim = values.ndim if self._validate_ndim and values.ndim != ndim: msg = ("Wrong number of dimensions. values.ndim != ndim " "[{} != {}]") raise ValueError(msg.format(values.ndim, ndim)) return ndim
python
def _check_ndim(self, values, ndim): """ ndim inference and validation. Infers ndim from 'values' if not provided to __init__. Validates that values.ndim and ndim are consistent if and only if the class variable '_validate_ndim' is True. Parameters ---------- values : array-like ndim : int or None Returns ------- ndim : int Raises ------ ValueError : the number of dimensions do not match """ if ndim is None: ndim = values.ndim if self._validate_ndim and values.ndim != ndim: msg = ("Wrong number of dimensions. values.ndim != ndim " "[{} != {}]") raise ValueError(msg.format(values.ndim, ndim)) return ndim
[ "def", "_check_ndim", "(", "self", ",", "values", ",", "ndim", ")", ":", "if", "ndim", "is", "None", ":", "ndim", "=", "values", ".", "ndim", "if", "self", ".", "_validate_ndim", "and", "values", ".", "ndim", "!=", "ndim", ":", "msg", "=", "(", "\"Wrong number of dimensions. values.ndim != ndim \"", "\"[{} != {}]\"", ")", "raise", "ValueError", "(", "msg", ".", "format", "(", "values", ".", "ndim", ",", "ndim", ")", ")", "return", "ndim" ]
ndim inference and validation. Infers ndim from 'values' if not provided to __init__. Validates that values.ndim and ndim are consistent if and only if the class variable '_validate_ndim' is True. Parameters ---------- values : array-like ndim : int or None Returns ------- ndim : int Raises ------ ValueError : the number of dimensions do not match
[ "ndim", "inference", "and", "validation", "." ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/internals/blocks.py#L87-L116
19,549
pandas-dev/pandas
pandas/core/internals/blocks.py
Block.is_categorical_astype
def is_categorical_astype(self, dtype): """ validate that the passed dtype is astype-able to categorical; returns a boolean indicating whether dtype is a categorical dtype """ if dtype is Categorical or dtype is CategoricalDtype: # this is a pd.Categorical, but is not # a valid type for astypeing raise TypeError("invalid type {0} for astype".format(dtype)) elif is_categorical_dtype(dtype): return True return False
python
def is_categorical_astype(self, dtype): """ validate that the passed dtype is astype-able to categorical; returns a boolean indicating whether dtype is a categorical dtype """ if dtype is Categorical or dtype is CategoricalDtype: # this is a pd.Categorical, but is not # a valid type for astypeing raise TypeError("invalid type {0} for astype".format(dtype)) elif is_categorical_dtype(dtype): return True return False
[ "def", "is_categorical_astype", "(", "self", ",", "dtype", ")", ":", "if", "dtype", "is", "Categorical", "or", "dtype", "is", "CategoricalDtype", ":", "# this is a pd.Categorical, but is not", "# a valid type for astypeing", "raise", "TypeError", "(", "\"invalid type {0} for astype\"", ".", "format", "(", "dtype", ")", ")", "elif", "is_categorical_dtype", "(", "dtype", ")", ":", "return", "True", "return", "False" ]
validate that the passed dtype is astype-able to categorical; returns a boolean indicating whether dtype is a categorical dtype
[ "validate", "that", "we", "have", "a", "astypeable", "to", "categorical", "returns", "a", "boolean", "if", "we", "are", "a", "categorical" ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/internals/blocks.py#L145-L158
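In user-facing terms the check distinguishes categorical dtype specs from the bare classes; a sketch:

import pandas as pd

s = pd.Series(['a', 'b', 'a'])
s.astype('category')                                  # OK: string dtype spec
s.astype(pd.api.types.CategoricalDtype(['a', 'b']))   # OK: dtype instance
try:
    s.astype(pd.Categorical)                          # the class itself
except TypeError as err:
    print(err)   # rejected, per the guard above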
19,550
pandas-dev/pandas
pandas/core/internals/blocks.py
Block.get_values
def get_values(self, dtype=None): """ return an internal format, currently just the ndarray this is often overridden to handle to_dense like operations """ if is_object_dtype(dtype): return self.values.astype(object) return self.values
python
def get_values(self, dtype=None): """ return an internal format, currently just the ndarray this is often overridden to handle to_dense like operations """ if is_object_dtype(dtype): return self.values.astype(object) return self.values
[ "def", "get_values", "(", "self", ",", "dtype", "=", "None", ")", ":", "if", "is_object_dtype", "(", "dtype", ")", ":", "return", "self", ".", "values", ".", "astype", "(", "object", ")", "return", "self", ".", "values" ]
return an internal format, currently just the ndarray this is often overridden to handle to_dense like operations
[ "return", "an", "internal", "format", "currently", "just", "the", "ndarray", "this", "is", "often", "overridden", "to", "handle", "to_dense", "like", "operations" ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/internals/blocks.py#L174-L181
19,551
pandas-dev/pandas
pandas/core/internals/blocks.py
Block.make_block
def make_block(self, values, placement=None, ndim=None): """ Create a new block, with type inference propagate any values that are not specified """ if placement is None: placement = self.mgr_locs if ndim is None: ndim = self.ndim return make_block(values, placement=placement, ndim=ndim)
python
def make_block(self, values, placement=None, ndim=None): """ Create a new block, with type inference propagate any values that are not specified """ if placement is None: placement = self.mgr_locs if ndim is None: ndim = self.ndim return make_block(values, placement=placement, ndim=ndim)
[ "def", "make_block", "(", "self", ",", "values", ",", "placement", "=", "None", ",", "ndim", "=", "None", ")", ":", "if", "placement", "is", "None", ":", "placement", "=", "self", ".", "mgr_locs", "if", "ndim", "is", "None", ":", "ndim", "=", "self", ".", "ndim", "return", "make_block", "(", "values", ",", "placement", "=", "placement", ",", "ndim", "=", "ndim", ")" ]
Create a new block, with type inference propagate any values that are not specified
[ "Create", "a", "new", "block", "with", "type", "inference", "propagate", "any", "values", "that", "are", "not", "specified" ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/internals/blocks.py#L212-L222
19,552
pandas-dev/pandas
pandas/core/internals/blocks.py
Block.make_block_same_class
def make_block_same_class(self, values, placement=None, ndim=None, dtype=None): """ Wrap given values in a block of same type as self. """ if dtype is not None: # issue 19431 fastparquet is passing this warnings.warn("dtype argument is deprecated, will be removed " "in a future release.", DeprecationWarning) if placement is None: placement = self.mgr_locs return make_block(values, placement=placement, ndim=ndim, klass=self.__class__, dtype=dtype)
python
def make_block_same_class(self, values, placement=None, ndim=None, dtype=None): """ Wrap given values in a block of same type as self. """ if dtype is not None: # issue 19431 fastparquet is passing this warnings.warn("dtype argument is deprecated, will be removed " "in a future release.", DeprecationWarning) if placement is None: placement = self.mgr_locs return make_block(values, placement=placement, ndim=ndim, klass=self.__class__, dtype=dtype)
[ "def", "make_block_same_class", "(", "self", ",", "values", ",", "placement", "=", "None", ",", "ndim", "=", "None", ",", "dtype", "=", "None", ")", ":", "if", "dtype", "is", "not", "None", ":", "# issue 19431 fastparquet is passing this", "warnings", ".", "warn", "(", "\"dtype argument is deprecated, will be removed \"", "\"in a future release.\"", ",", "DeprecationWarning", ")", "if", "placement", "is", "None", ":", "placement", "=", "self", ".", "mgr_locs", "return", "make_block", "(", "values", ",", "placement", "=", "placement", ",", "ndim", "=", "ndim", ",", "klass", "=", "self", ".", "__class__", ",", "dtype", "=", "dtype", ")" ]
Wrap given values in a block of same type as self.
[ "Wrap", "given", "values", "in", "a", "block", "of", "same", "type", "as", "self", "." ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/internals/blocks.py#L224-L234
19,553
pandas-dev/pandas
pandas/core/internals/blocks.py
Block.apply
def apply(self, func, **kwargs): """ apply the function to my values; return a block if we are not one """ with np.errstate(all='ignore'): result = func(self.values, **kwargs) if not isinstance(result, Block): result = self.make_block(values=_block_shape(result, ndim=self.ndim)) return result
python
def apply(self, func, **kwargs): """ apply the function to my values; return a block if we are not one """ with np.errstate(all='ignore'): result = func(self.values, **kwargs) if not isinstance(result, Block): result = self.make_block(values=_block_shape(result, ndim=self.ndim)) return result
[ "def", "apply", "(", "self", ",", "func", ",", "*", "*", "kwargs", ")", ":", "with", "np", ".", "errstate", "(", "all", "=", "'ignore'", ")", ":", "result", "=", "func", "(", "self", ".", "values", ",", "*", "*", "kwargs", ")", "if", "not", "isinstance", "(", "result", ",", "Block", ")", ":", "result", "=", "self", ".", "make_block", "(", "values", "=", "_block_shape", "(", "result", ",", "ndim", "=", "self", ".", "ndim", ")", ")", "return", "result" ]
apply the function to my values; return a block if we are not one
[ "apply", "the", "function", "to", "my", "values", ";", "return", "a", "block", "if", "we", "are", "not", "one" ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/internals/blocks.py#L337-L347
19,554
pandas-dev/pandas
pandas/core/internals/blocks.py
Block.fillna
def fillna(self, value, limit=None, inplace=False, downcast=None): """ fillna on the block with the value. If we fail, then convert to ObjectBlock and try again """ inplace = validate_bool_kwarg(inplace, 'inplace') if not self._can_hold_na: if inplace: return self else: return self.copy() mask = isna(self.values) if limit is not None: if not is_integer(limit): raise ValueError('Limit must be an integer') if limit < 1: raise ValueError('Limit must be greater than 0') if self.ndim > 2: raise NotImplementedError("number of dimensions for 'fillna' " "is currently limited to 2") mask[mask.cumsum(self.ndim - 1) > limit] = False # fillna, but if we cannot coerce, then try again as an ObjectBlock try: values, _ = self._try_coerce_args(self.values, value) blocks = self.putmask(mask, value, inplace=inplace) blocks = [b.make_block(values=self._try_coerce_result(b.values)) for b in blocks] return self._maybe_downcast(blocks, downcast) except (TypeError, ValueError): # we can't process the value, but nothing to do if not mask.any(): return self if inplace else self.copy() # operate column-by-column def f(m, v, i): block = self.coerce_to_target_dtype(value) # slice out our block if i is not None: block = block.getitem_block(slice(i, i + 1)) return block.fillna(value, limit=limit, inplace=inplace, downcast=None) return self.split_and_operate(mask, f, inplace)
python
def fillna(self, value, limit=None, inplace=False, downcast=None): """ fillna on the block with the value. If we fail, then convert to ObjectBlock and try again """ inplace = validate_bool_kwarg(inplace, 'inplace') if not self._can_hold_na: if inplace: return self else: return self.copy() mask = isna(self.values) if limit is not None: if not is_integer(limit): raise ValueError('Limit must be an integer') if limit < 1: raise ValueError('Limit must be greater than 0') if self.ndim > 2: raise NotImplementedError("number of dimensions for 'fillna' " "is currently limited to 2") mask[mask.cumsum(self.ndim - 1) > limit] = False # fillna, but if we cannot coerce, then try again as an ObjectBlock try: values, _ = self._try_coerce_args(self.values, value) blocks = self.putmask(mask, value, inplace=inplace) blocks = [b.make_block(values=self._try_coerce_result(b.values)) for b in blocks] return self._maybe_downcast(blocks, downcast) except (TypeError, ValueError): # we can't process the value, but nothing to do if not mask.any(): return self if inplace else self.copy() # operate column-by-column def f(m, v, i): block = self.coerce_to_target_dtype(value) # slice out our block if i is not None: block = block.getitem_block(slice(i, i + 1)) return block.fillna(value, limit=limit, inplace=inplace, downcast=None) return self.split_and_operate(mask, f, inplace)
[ "def", "fillna", "(", "self", ",", "value", ",", "limit", "=", "None", ",", "inplace", "=", "False", ",", "downcast", "=", "None", ")", ":", "inplace", "=", "validate_bool_kwarg", "(", "inplace", ",", "'inplace'", ")", "if", "not", "self", ".", "_can_hold_na", ":", "if", "inplace", ":", "return", "self", "else", ":", "return", "self", ".", "copy", "(", ")", "mask", "=", "isna", "(", "self", ".", "values", ")", "if", "limit", "is", "not", "None", ":", "if", "not", "is_integer", "(", "limit", ")", ":", "raise", "ValueError", "(", "'Limit must be an integer'", ")", "if", "limit", "<", "1", ":", "raise", "ValueError", "(", "'Limit must be greater than 0'", ")", "if", "self", ".", "ndim", ">", "2", ":", "raise", "NotImplementedError", "(", "\"number of dimensions for 'fillna' \"", "\"is currently limited to 2\"", ")", "mask", "[", "mask", ".", "cumsum", "(", "self", ".", "ndim", "-", "1", ")", ">", "limit", "]", "=", "False", "# fillna, but if we cannot coerce, then try again as an ObjectBlock", "try", ":", "values", ",", "_", "=", "self", ".", "_try_coerce_args", "(", "self", ".", "values", ",", "value", ")", "blocks", "=", "self", ".", "putmask", "(", "mask", ",", "value", ",", "inplace", "=", "inplace", ")", "blocks", "=", "[", "b", ".", "make_block", "(", "values", "=", "self", ".", "_try_coerce_result", "(", "b", ".", "values", ")", ")", "for", "b", "in", "blocks", "]", "return", "self", ".", "_maybe_downcast", "(", "blocks", ",", "downcast", ")", "except", "(", "TypeError", ",", "ValueError", ")", ":", "# we can't process the value, but nothing to do", "if", "not", "mask", ".", "any", "(", ")", ":", "return", "self", "if", "inplace", "else", "self", ".", "copy", "(", ")", "# operate column-by-column", "def", "f", "(", "m", ",", "v", ",", "i", ")", ":", "block", "=", "self", ".", "coerce_to_target_dtype", "(", "value", ")", "# slice out our block", "if", "i", "is", "not", "None", ":", "block", "=", "block", ".", "getitem_block", "(", "slice", "(", "i", ",", "i", "+", "1", ")", ")", "return", "block", ".", "fillna", "(", "value", ",", "limit", "=", "limit", ",", "inplace", "=", "inplace", ",", "downcast", "=", "None", ")", "return", "self", ".", "split_and_operate", "(", "mask", ",", "f", ",", "inplace", ")" ]
fillna on the block with the value. If we fail, then convert to ObjectBlock and try again
[ "fillna", "on", "the", "block", "with", "the", "value", ".", "If", "we", "fail", "then", "convert", "to", "ObjectBlock", "and", "try", "again" ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/internals/blocks.py#L349-L397
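A user-level illustration of the fillna behaviors recorded above (observable outcomes, not a trace of the internal call path):

import numpy as np
import pandas as pd

s = pd.Series([1.0, np.nan, np.nan, 4.0])

# limit caps how many NaNs are filled, matching the
# mask[mask.cumsum(...) > limit] = False step.
print(s.fillna(0.0, limit=1))    # 1.0, 0.0, NaN, 4.0

# An incompatible fill value ends up on an object block, per the
# coerce-and-retry fallback described in the docstring.
print(pd.Series([1.0, np.nan]).fillna('missing').dtype)   # object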
19,555
pandas-dev/pandas
pandas/core/internals/blocks.py
Block.split_and_operate
def split_and_operate(self, mask, f, inplace): """ split the block per-column, and apply the callable f per-column, return a new block for each. Handle masking which will not change a block unless needed. Parameters ---------- mask : 2-d boolean mask f : callable accepting (1d-mask, 1d values, indexer) inplace : boolean Returns ------- list of blocks """ if mask is None: mask = np.ones(self.shape, dtype=bool) new_values = self.values def make_a_block(nv, ref_loc): if isinstance(nv, Block): block = nv elif isinstance(nv, list): block = nv[0] else: # Put back the dimension that was taken from it and make # a block out of the result. try: nv = _block_shape(nv, ndim=self.ndim) except (AttributeError, NotImplementedError): pass block = self.make_block(values=nv, placement=ref_loc) return block # ndim == 1 if self.ndim == 1: if mask.any(): nv = f(mask, new_values, None) else: nv = new_values if inplace else new_values.copy() block = make_a_block(nv, self.mgr_locs) return [block] # ndim > 1 new_blocks = [] for i, ref_loc in enumerate(self.mgr_locs): m = mask[i] v = new_values[i] # need a new block if m.any(): nv = f(m, v, i) else: nv = v if inplace else v.copy() block = make_a_block(nv, [ref_loc]) new_blocks.append(block) return new_blocks
python
def split_and_operate(self, mask, f, inplace): """ split the block per-column, and apply the callable f per-column, return a new block for each. Handle masking which will not change a block unless needed. Parameters ---------- mask : 2-d boolean mask f : callable accepting (1d-mask, 1d values, indexer) inplace : boolean Returns ------- list of blocks """ if mask is None: mask = np.ones(self.shape, dtype=bool) new_values = self.values def make_a_block(nv, ref_loc): if isinstance(nv, Block): block = nv elif isinstance(nv, list): block = nv[0] else: # Put back the dimension that was taken from it and make # a block out of the result. try: nv = _block_shape(nv, ndim=self.ndim) except (AttributeError, NotImplementedError): pass block = self.make_block(values=nv, placement=ref_loc) return block # ndim == 1 if self.ndim == 1: if mask.any(): nv = f(mask, new_values, None) else: nv = new_values if inplace else new_values.copy() block = make_a_block(nv, self.mgr_locs) return [block] # ndim > 1 new_blocks = [] for i, ref_loc in enumerate(self.mgr_locs): m = mask[i] v = new_values[i] # need a new block if m.any(): nv = f(m, v, i) else: nv = v if inplace else v.copy() block = make_a_block(nv, [ref_loc]) new_blocks.append(block) return new_blocks
[ "def", "split_and_operate", "(", "self", ",", "mask", ",", "f", ",", "inplace", ")", ":", "if", "mask", "is", "None", ":", "mask", "=", "np", ".", "ones", "(", "self", ".", "shape", ",", "dtype", "=", "bool", ")", "new_values", "=", "self", ".", "values", "def", "make_a_block", "(", "nv", ",", "ref_loc", ")", ":", "if", "isinstance", "(", "nv", ",", "Block", ")", ":", "block", "=", "nv", "elif", "isinstance", "(", "nv", ",", "list", ")", ":", "block", "=", "nv", "[", "0", "]", "else", ":", "# Put back the dimension that was taken from it and make", "# a block out of the result.", "try", ":", "nv", "=", "_block_shape", "(", "nv", ",", "ndim", "=", "self", ".", "ndim", ")", "except", "(", "AttributeError", ",", "NotImplementedError", ")", ":", "pass", "block", "=", "self", ".", "make_block", "(", "values", "=", "nv", ",", "placement", "=", "ref_loc", ")", "return", "block", "# ndim == 1", "if", "self", ".", "ndim", "==", "1", ":", "if", "mask", ".", "any", "(", ")", ":", "nv", "=", "f", "(", "mask", ",", "new_values", ",", "None", ")", "else", ":", "nv", "=", "new_values", "if", "inplace", "else", "new_values", ".", "copy", "(", ")", "block", "=", "make_a_block", "(", "nv", ",", "self", ".", "mgr_locs", ")", "return", "[", "block", "]", "# ndim > 1", "new_blocks", "=", "[", "]", "for", "i", ",", "ref_loc", "in", "enumerate", "(", "self", ".", "mgr_locs", ")", ":", "m", "=", "mask", "[", "i", "]", "v", "=", "new_values", "[", "i", "]", "# need a new block", "if", "m", ".", "any", "(", ")", ":", "nv", "=", "f", "(", "m", ",", "v", ",", "i", ")", "else", ":", "nv", "=", "v", "if", "inplace", "else", "v", ".", "copy", "(", ")", "block", "=", "make_a_block", "(", "nv", ",", "[", "ref_loc", "]", ")", "new_blocks", ".", "append", "(", "block", ")", "return", "new_blocks" ]
split the block per-column, and apply the callable f per-column, return a new block for each. Handle masking which will not change a block unless needed. Parameters ---------- mask : 2-d boolean mask f : callable accepting (1d-mask, 1d values, indexer) inplace : boolean Returns ------- list of blocks
[ "split", "the", "block", "per", "-", "column", "and", "apply", "the", "callable", "f", "per", "-", "column", "return", "a", "new", "block", "for", "each", ".", "Handle", "masking", "which", "will", "not", "change", "a", "block", "unless", "needed", "." ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/internals/blocks.py#L399-L460
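A self-contained sketch of the split-and-apply pattern documented above, with bare numpy arrays standing in for blocks (the real method wraps each result back into a Block via make_a_block):

import numpy as np

def split_and_operate_sketch(values, mask, f, inplace=False):
    # Apply f(mask_row, values_row, i) row by row, but only where the
    # mask has hits; untouched rows are copied unless inplace is True.
    out = []
    for i, (m, v) in enumerate(zip(mask, values)):
        out.append(f(m, v, i) if m.any() else (v if inplace else v.copy()))
    return out

vals = np.array([[1.0, np.nan], [3.0, 4.0]])
mask = np.isnan(vals)
print(split_and_operate_sketch(vals, mask, lambda m, v, i: np.where(m, 0.0, v)))
# [array([1., 0.]), array([3., 4.])]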
19,556
pandas-dev/pandas
pandas/core/internals/blocks.py
Block.downcast
def downcast(self, dtypes=None): """ try to downcast each item to the dict of dtypes if present """ # turn it off completely if dtypes is False: return self values = self.values # single block handling if self._is_single_block: # try to cast all non-floats here if dtypes is None: dtypes = 'infer' nv = maybe_downcast_to_dtype(values, dtypes) return self.make_block(nv) # ndim > 1 if dtypes is None: return self if not (dtypes == 'infer' or isinstance(dtypes, dict)): raise ValueError("downcast must have a dictionary or 'infer' as " "its argument") # operate column-by-column # this is expensive as it splits the blocks items-by-item def f(m, v, i): if dtypes == 'infer': dtype = 'infer' else: raise AssertionError("dtypes as dict is not supported yet") if dtype is not None: v = maybe_downcast_to_dtype(v, dtype) return v return self.split_and_operate(None, f, False)
python
def downcast(self, dtypes=None): """ try to downcast each item to the dict of dtypes if present """ # turn it off completely if dtypes is False: return self values = self.values # single block handling if self._is_single_block: # try to cast all non-floats here if dtypes is None: dtypes = 'infer' nv = maybe_downcast_to_dtype(values, dtypes) return self.make_block(nv) # ndim > 1 if dtypes is None: return self if not (dtypes == 'infer' or isinstance(dtypes, dict)): raise ValueError("downcast must have a dictionary or 'infer' as " "its argument") # operate column-by-column # this is expensive as it splits the blocks items-by-item def f(m, v, i): if dtypes == 'infer': dtype = 'infer' else: raise AssertionError("dtypes as dict is not supported yet") if dtype is not None: v = maybe_downcast_to_dtype(v, dtype) return v return self.split_and_operate(None, f, False)
[ "def", "downcast", "(", "self", ",", "dtypes", "=", "None", ")", ":", "# turn it off completely", "if", "dtypes", "is", "False", ":", "return", "self", "values", "=", "self", ".", "values", "# single block handling", "if", "self", ".", "_is_single_block", ":", "# try to cast all non-floats here", "if", "dtypes", "is", "None", ":", "dtypes", "=", "'infer'", "nv", "=", "maybe_downcast_to_dtype", "(", "values", ",", "dtypes", ")", "return", "self", ".", "make_block", "(", "nv", ")", "# ndim > 1", "if", "dtypes", "is", "None", ":", "return", "self", "if", "not", "(", "dtypes", "==", "'infer'", "or", "isinstance", "(", "dtypes", ",", "dict", ")", ")", ":", "raise", "ValueError", "(", "\"downcast must have a dictionary or 'infer' as \"", "\"its argument\"", ")", "# operate column-by-column", "# this is expensive as it splits the blocks items-by-item", "def", "f", "(", "m", ",", "v", ",", "i", ")", ":", "if", "dtypes", "==", "'infer'", ":", "dtype", "=", "'infer'", "else", ":", "raise", "AssertionError", "(", "\"dtypes as dict is not supported yet\"", ")", "if", "dtype", "is", "not", "None", ":", "v", "=", "maybe_downcast_to_dtype", "(", "v", ",", "dtype", ")", "return", "v", "return", "self", ".", "split_and_operate", "(", "None", ",", "f", ",", "False", ")" ]
try to downcast each item to the dict of dtypes if present
[ "try", "to", "downcast", "each", "item", "to", "the", "dict", "of", "dtypes", "if", "present" ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/internals/blocks.py#L475-L515
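Block.downcast with dtypes='infer' is internal-only; the closest public analogue is pd.to_numeric's downcast argument, shown here as an illustration of the same idea:

import pandas as pd

s = pd.Series([1.0, 2.0, 3.0])    # float64, but every value is integral
print(pd.to_numeric(s, downcast='integer').dtype)   # int8
print(pd.to_numeric(s, downcast='float').dtype)     # float32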
19,557
pandas-dev/pandas
pandas/core/internals/blocks.py
Block._can_hold_element
def _can_hold_element(self, element): """ require the same dtype as ourselves """ dtype = self.values.dtype.type tipo = maybe_infer_dtype_type(element) if tipo is not None: return issubclass(tipo.type, dtype) return isinstance(element, dtype)
python
def _can_hold_element(self, element): """ require the same dtype as ourselves """ dtype = self.values.dtype.type tipo = maybe_infer_dtype_type(element) if tipo is not None: return issubclass(tipo.type, dtype) return isinstance(element, dtype)
[ "def", "_can_hold_element", "(", "self", ",", "element", ")", ":", "dtype", "=", "self", ".", "values", ".", "dtype", ".", "type", "tipo", "=", "maybe_infer_dtype_type", "(", "element", ")", "if", "tipo", "is", "not", "None", ":", "return", "issubclass", "(", "tipo", ".", "type", ",", "dtype", ")", "return", "isinstance", "(", "element", ",", "dtype", ")" ]
require the same dtype as ourselves
[ "require", "the", "same", "dtype", "as", "ourselves" ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/internals/blocks.py#L643-L649
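A simplified restatement of the check above, using a bare dtype probe where the real code calls maybe_infer_dtype_type:

import numpy as np

def can_hold_element_sketch(values, element):
    # The candidate's numpy type must sit inside the block's own
    # dtype hierarchy, hence "require the same dtype as ourselves".
    dtype = values.dtype.type
    if hasattr(element, 'dtype'):
        return issubclass(element.dtype.type, dtype)
    return isinstance(element, dtype)

vals = np.array([1, 2, 3])                          # int64 values
print(can_hold_element_sketch(vals, np.int64(5)))   # True
print(can_hold_element_sketch(vals, 2.5))           # False: floats don't fit an int block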
19,558
pandas-dev/pandas
pandas/core/internals/blocks.py
Block._try_cast_result
def _try_cast_result(self, result, dtype=None): """ try to cast the result to our original type, we may have roundtripped thru object in the mean-time """ if dtype is None: dtype = self.dtype if self.is_integer or self.is_bool or self.is_datetime: pass elif self.is_float and result.dtype == self.dtype: # protect against a bool/object showing up here if isinstance(dtype, str) and dtype == 'infer': return result if not isinstance(dtype, type): dtype = dtype.type if issubclass(dtype, (np.bool_, np.object_)): if issubclass(dtype, np.bool_): if isna(result).all(): return result.astype(np.bool_) else: result = result.astype(np.object_) result[result == 1] = True result[result == 0] = False return result else: return result.astype(np.object_) return result # may need to change the dtype here return maybe_downcast_to_dtype(result, dtype)
python
def _try_cast_result(self, result, dtype=None): """ try to cast the result to our original type, we may have roundtripped thru object in the mean-time """ if dtype is None: dtype = self.dtype if self.is_integer or self.is_bool or self.is_datetime: pass elif self.is_float and result.dtype == self.dtype: # protect against a bool/object showing up here if isinstance(dtype, str) and dtype == 'infer': return result if not isinstance(dtype, type): dtype = dtype.type if issubclass(dtype, (np.bool_, np.object_)): if issubclass(dtype, np.bool_): if isna(result).all(): return result.astype(np.bool_) else: result = result.astype(np.object_) result[result == 1] = True result[result == 0] = False return result else: return result.astype(np.object_) return result # may need to change the dtype here return maybe_downcast_to_dtype(result, dtype)
[ "def", "_try_cast_result", "(", "self", ",", "result", ",", "dtype", "=", "None", ")", ":", "if", "dtype", "is", "None", ":", "dtype", "=", "self", ".", "dtype", "if", "self", ".", "is_integer", "or", "self", ".", "is_bool", "or", "self", ".", "is_datetime", ":", "pass", "elif", "self", ".", "is_float", "and", "result", ".", "dtype", "==", "self", ".", "dtype", ":", "# protect against a bool/object showing up here", "if", "isinstance", "(", "dtype", ",", "str", ")", "and", "dtype", "==", "'infer'", ":", "return", "result", "if", "not", "isinstance", "(", "dtype", ",", "type", ")", ":", "dtype", "=", "dtype", ".", "type", "if", "issubclass", "(", "dtype", ",", "(", "np", ".", "bool_", ",", "np", ".", "object_", ")", ")", ":", "if", "issubclass", "(", "dtype", ",", "np", ".", "bool_", ")", ":", "if", "isna", "(", "result", ")", ".", "all", "(", ")", ":", "return", "result", ".", "astype", "(", "np", ".", "bool_", ")", "else", ":", "result", "=", "result", ".", "astype", "(", "np", ".", "object_", ")", "result", "[", "result", "==", "1", "]", "=", "True", "result", "[", "result", "==", "0", "]", "=", "False", "return", "result", "else", ":", "return", "result", ".", "astype", "(", "np", ".", "object_", ")", "return", "result", "# may need to change the dtype here", "return", "maybe_downcast_to_dtype", "(", "result", ",", "dtype", ")" ]
try to cast the result to our original type, we may have roundtripped thru object in the mean-time
[ "try", "to", "cast", "the", "result", "to", "our", "original", "type", "we", "may", "have", "roundtripped", "thru", "object", "in", "the", "mean", "-", "time" ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/internals/blocks.py#L651-L682
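A tiny illustration of the bool branch above: a boolean result that round-tripped through object is mapped back onto True/False with the same two masked assignments the method uses (a sketch of the idea only):

import numpy as np

result = np.array([1.0, 0.0, 1.0], dtype=object)   # bool data after an object round-trip
result[result == 1] = True
result[result == 0] = False
print(result)   # [True False True]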
19,559
pandas-dev/pandas
pandas/core/internals/blocks.py
Block.replace
def replace(self, to_replace, value, inplace=False, filter=None, regex=False, convert=True): """replace the to_replace value with value, possible to create new blocks here this is just a call to putmask. regex is not used here. It is used in ObjectBlocks. It is here for API compatibility. """ inplace = validate_bool_kwarg(inplace, 'inplace') original_to_replace = to_replace # try to replace, if we raise an error, convert to ObjectBlock and # retry try: values, to_replace = self._try_coerce_args(self.values, to_replace) mask = missing.mask_missing(values, to_replace) if filter is not None: filtered_out = ~self.mgr_locs.isin(filter) mask[filtered_out.nonzero()[0]] = False blocks = self.putmask(mask, value, inplace=inplace) if convert: blocks = [b.convert(by_item=True, numeric=False, copy=not inplace) for b in blocks] return blocks except (TypeError, ValueError): # GH 22083, TypeError or ValueError occurred within error handling # causes infinite loop. Cast and retry only if not objectblock. if is_object_dtype(self): raise # try again with a compatible block block = self.astype(object) return block.replace(to_replace=original_to_replace, value=value, inplace=inplace, filter=filter, regex=regex, convert=convert)
python
def replace(self, to_replace, value, inplace=False, filter=None, regex=False, convert=True): """replace the to_replace value with value, possible to create new blocks here this is just a call to putmask. regex is not used here. It is used in ObjectBlocks. It is here for API compatibility. """ inplace = validate_bool_kwarg(inplace, 'inplace') original_to_replace = to_replace # try to replace, if we raise an error, convert to ObjectBlock and # retry try: values, to_replace = self._try_coerce_args(self.values, to_replace) mask = missing.mask_missing(values, to_replace) if filter is not None: filtered_out = ~self.mgr_locs.isin(filter) mask[filtered_out.nonzero()[0]] = False blocks = self.putmask(mask, value, inplace=inplace) if convert: blocks = [b.convert(by_item=True, numeric=False, copy=not inplace) for b in blocks] return blocks except (TypeError, ValueError): # GH 22083, TypeError or ValueError occurred within error handling # causes infinite loop. Cast and retry only if not objectblock. if is_object_dtype(self): raise # try again with a compatible block block = self.astype(object) return block.replace(to_replace=original_to_replace, value=value, inplace=inplace, filter=filter, regex=regex, convert=convert)
[ "def", "replace", "(", "self", ",", "to_replace", ",", "value", ",", "inplace", "=", "False", ",", "filter", "=", "None", ",", "regex", "=", "False", ",", "convert", "=", "True", ")", ":", "inplace", "=", "validate_bool_kwarg", "(", "inplace", ",", "'inplace'", ")", "original_to_replace", "=", "to_replace", "# try to replace, if we raise an error, convert to ObjectBlock and", "# retry", "try", ":", "values", ",", "to_replace", "=", "self", ".", "_try_coerce_args", "(", "self", ".", "values", ",", "to_replace", ")", "mask", "=", "missing", ".", "mask_missing", "(", "values", ",", "to_replace", ")", "if", "filter", "is", "not", "None", ":", "filtered_out", "=", "~", "self", ".", "mgr_locs", ".", "isin", "(", "filter", ")", "mask", "[", "filtered_out", ".", "nonzero", "(", ")", "[", "0", "]", "]", "=", "False", "blocks", "=", "self", ".", "putmask", "(", "mask", ",", "value", ",", "inplace", "=", "inplace", ")", "if", "convert", ":", "blocks", "=", "[", "b", ".", "convert", "(", "by_item", "=", "True", ",", "numeric", "=", "False", ",", "copy", "=", "not", "inplace", ")", "for", "b", "in", "blocks", "]", "return", "blocks", "except", "(", "TypeError", ",", "ValueError", ")", ":", "# GH 22083, TypeError or ValueError occurred within error handling", "# causes infinite loop. Cast and retry only if not objectblock.", "if", "is_object_dtype", "(", "self", ")", ":", "raise", "# try again with a compatible block", "block", "=", "self", ".", "astype", "(", "object", ")", "return", "block", ".", "replace", "(", "to_replace", "=", "original_to_replace", ",", "value", "=", "value", ",", "inplace", "=", "inplace", ",", "filter", "=", "filter", ",", "regex", "=", "regex", ",", "convert", "=", "convert", ")" ]
replace the to_replace value with value, possible to create new blocks here this is just a call to putmask. regex is not used here. It is used in ObjectBlocks. It is here for API compatibility.
[ "replace", "the", "to_replace", "value", "with", "value", "possible", "to", "create", "new", "blocks", "here", "this", "is", "just", "a", "call", "to", "putmask", ".", "regex", "is", "not", "used", "here", ".", "It", "is", "used", "in", "ObjectBlocks", ".", "It", "is", "here", "for", "API", "compatibility", "." ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/internals/blocks.py#L731-L769
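The two paths above are visible at the Series level: a compatible replacement stays on the original dtype, while an incompatible one triggers the astype(object) retry:

import pandas as pd

s = pd.Series([1, 2, 3])
print(s.replace(2, 20).dtype)      # int64: handled on the original block
print(s.replace(2, 'two').dtype)   # object: the cast-and-retry path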
19,560
pandas-dev/pandas
pandas/core/internals/blocks.py
Block.setitem
def setitem(self, indexer, value): """Set the value inplace, returning a a maybe different typed block. Parameters ---------- indexer : tuple, list-like, array-like, slice The subset of self.values to set value : object The value being set Returns ------- Block Notes ----- `indexer` is a direct slice/positional indexer. `value` must be a compatible shape. """ # coerce None values, if appropriate if value is None: if self.is_numeric: value = np.nan # coerce if block dtype can store value values = self.values try: values, value = self._try_coerce_args(values, value) # can keep its own dtype if hasattr(value, 'dtype') and is_dtype_equal(values.dtype, value.dtype): dtype = self.dtype else: dtype = 'infer' except (TypeError, ValueError): # current dtype cannot store value, coerce to common dtype find_dtype = False if hasattr(value, 'dtype'): dtype = value.dtype find_dtype = True elif lib.is_scalar(value): if isna(value): # NaN promotion is handled in latter path dtype = False else: dtype, _ = infer_dtype_from_scalar(value, pandas_dtype=True) find_dtype = True else: dtype = 'infer' if find_dtype: dtype = find_common_type([values.dtype, dtype]) if not is_dtype_equal(self.dtype, dtype): b = self.astype(dtype) return b.setitem(indexer, value) # value must be storeable at this moment arr_value = np.array(value) # cast the values to a type that can hold nan (if necessary) if not self._can_hold_element(value): dtype, _ = maybe_promote(arr_value.dtype) values = values.astype(dtype) transf = (lambda x: x.T) if self.ndim == 2 else (lambda x: x) values = transf(values) # length checking check_setitem_lengths(indexer, value, values) def _is_scalar_indexer(indexer): # return True if we are all scalar indexers if arr_value.ndim == 1: if not isinstance(indexer, tuple): indexer = tuple([indexer]) return any(isinstance(idx, np.ndarray) and len(idx) == 0 for idx in indexer) return False def _is_empty_indexer(indexer): # return a boolean if we have an empty indexer if is_list_like(indexer) and not len(indexer): return True if arr_value.ndim == 1: if not isinstance(indexer, tuple): indexer = tuple([indexer]) return any(isinstance(idx, np.ndarray) and len(idx) == 0 for idx in indexer) return False # empty indexers # 8669 (empty) if _is_empty_indexer(indexer): pass # setting a single element for each dim and with a rhs that could # be say a list # GH 6043 elif _is_scalar_indexer(indexer): values[indexer] = value # if we are an exact match (ex-broadcasting), # then use the resultant dtype elif (len(arr_value.shape) and arr_value.shape[0] == values.shape[0] and np.prod(arr_value.shape) == np.prod(values.shape)): values[indexer] = value try: values = values.astype(arr_value.dtype) except ValueError: pass # set else: values[indexer] = value # coerce and try to infer the dtypes of the result values = self._try_coerce_and_cast_result(values, dtype) block = self.make_block(transf(values)) return block
python
def setitem(self, indexer, value): """Set the value inplace, returning a a maybe different typed block. Parameters ---------- indexer : tuple, list-like, array-like, slice The subset of self.values to set value : object The value being set Returns ------- Block Notes ----- `indexer` is a direct slice/positional indexer. `value` must be a compatible shape. """ # coerce None values, if appropriate if value is None: if self.is_numeric: value = np.nan # coerce if block dtype can store value values = self.values try: values, value = self._try_coerce_args(values, value) # can keep its own dtype if hasattr(value, 'dtype') and is_dtype_equal(values.dtype, value.dtype): dtype = self.dtype else: dtype = 'infer' except (TypeError, ValueError): # current dtype cannot store value, coerce to common dtype find_dtype = False if hasattr(value, 'dtype'): dtype = value.dtype find_dtype = True elif lib.is_scalar(value): if isna(value): # NaN promotion is handled in latter path dtype = False else: dtype, _ = infer_dtype_from_scalar(value, pandas_dtype=True) find_dtype = True else: dtype = 'infer' if find_dtype: dtype = find_common_type([values.dtype, dtype]) if not is_dtype_equal(self.dtype, dtype): b = self.astype(dtype) return b.setitem(indexer, value) # value must be storeable at this moment arr_value = np.array(value) # cast the values to a type that can hold nan (if necessary) if not self._can_hold_element(value): dtype, _ = maybe_promote(arr_value.dtype) values = values.astype(dtype) transf = (lambda x: x.T) if self.ndim == 2 else (lambda x: x) values = transf(values) # length checking check_setitem_lengths(indexer, value, values) def _is_scalar_indexer(indexer): # return True if we are all scalar indexers if arr_value.ndim == 1: if not isinstance(indexer, tuple): indexer = tuple([indexer]) return any(isinstance(idx, np.ndarray) and len(idx) == 0 for idx in indexer) return False def _is_empty_indexer(indexer): # return a boolean if we have an empty indexer if is_list_like(indexer) and not len(indexer): return True if arr_value.ndim == 1: if not isinstance(indexer, tuple): indexer = tuple([indexer]) return any(isinstance(idx, np.ndarray) and len(idx) == 0 for idx in indexer) return False # empty indexers # 8669 (empty) if _is_empty_indexer(indexer): pass # setting a single element for each dim and with a rhs that could # be say a list # GH 6043 elif _is_scalar_indexer(indexer): values[indexer] = value # if we are an exact match (ex-broadcasting), # then use the resultant dtype elif (len(arr_value.shape) and arr_value.shape[0] == values.shape[0] and np.prod(arr_value.shape) == np.prod(values.shape)): values[indexer] = value try: values = values.astype(arr_value.dtype) except ValueError: pass # set else: values[indexer] = value # coerce and try to infer the dtypes of the result values = self._try_coerce_and_cast_result(values, dtype) block = self.make_block(transf(values)) return block
[ "def", "setitem", "(", "self", ",", "indexer", ",", "value", ")", ":", "# coerce None values, if appropriate", "if", "value", "is", "None", ":", "if", "self", ".", "is_numeric", ":", "value", "=", "np", ".", "nan", "# coerce if block dtype can store value", "values", "=", "self", ".", "values", "try", ":", "values", ",", "value", "=", "self", ".", "_try_coerce_args", "(", "values", ",", "value", ")", "# can keep its own dtype", "if", "hasattr", "(", "value", ",", "'dtype'", ")", "and", "is_dtype_equal", "(", "values", ".", "dtype", ",", "value", ".", "dtype", ")", ":", "dtype", "=", "self", ".", "dtype", "else", ":", "dtype", "=", "'infer'", "except", "(", "TypeError", ",", "ValueError", ")", ":", "# current dtype cannot store value, coerce to common dtype", "find_dtype", "=", "False", "if", "hasattr", "(", "value", ",", "'dtype'", ")", ":", "dtype", "=", "value", ".", "dtype", "find_dtype", "=", "True", "elif", "lib", ".", "is_scalar", "(", "value", ")", ":", "if", "isna", "(", "value", ")", ":", "# NaN promotion is handled in latter path", "dtype", "=", "False", "else", ":", "dtype", ",", "_", "=", "infer_dtype_from_scalar", "(", "value", ",", "pandas_dtype", "=", "True", ")", "find_dtype", "=", "True", "else", ":", "dtype", "=", "'infer'", "if", "find_dtype", ":", "dtype", "=", "find_common_type", "(", "[", "values", ".", "dtype", ",", "dtype", "]", ")", "if", "not", "is_dtype_equal", "(", "self", ".", "dtype", ",", "dtype", ")", ":", "b", "=", "self", ".", "astype", "(", "dtype", ")", "return", "b", ".", "setitem", "(", "indexer", ",", "value", ")", "# value must be storeable at this moment", "arr_value", "=", "np", ".", "array", "(", "value", ")", "# cast the values to a type that can hold nan (if necessary)", "if", "not", "self", ".", "_can_hold_element", "(", "value", ")", ":", "dtype", ",", "_", "=", "maybe_promote", "(", "arr_value", ".", "dtype", ")", "values", "=", "values", ".", "astype", "(", "dtype", ")", "transf", "=", "(", "lambda", "x", ":", "x", ".", "T", ")", "if", "self", ".", "ndim", "==", "2", "else", "(", "lambda", "x", ":", "x", ")", "values", "=", "transf", "(", "values", ")", "# length checking", "check_setitem_lengths", "(", "indexer", ",", "value", ",", "values", ")", "def", "_is_scalar_indexer", "(", "indexer", ")", ":", "# return True if we are all scalar indexers", "if", "arr_value", ".", "ndim", "==", "1", ":", "if", "not", "isinstance", "(", "indexer", ",", "tuple", ")", ":", "indexer", "=", "tuple", "(", "[", "indexer", "]", ")", "return", "any", "(", "isinstance", "(", "idx", ",", "np", ".", "ndarray", ")", "and", "len", "(", "idx", ")", "==", "0", "for", "idx", "in", "indexer", ")", "return", "False", "def", "_is_empty_indexer", "(", "indexer", ")", ":", "# return a boolean if we have an empty indexer", "if", "is_list_like", "(", "indexer", ")", "and", "not", "len", "(", "indexer", ")", ":", "return", "True", "if", "arr_value", ".", "ndim", "==", "1", ":", "if", "not", "isinstance", "(", "indexer", ",", "tuple", ")", ":", "indexer", "=", "tuple", "(", "[", "indexer", "]", ")", "return", "any", "(", "isinstance", "(", "idx", ",", "np", ".", "ndarray", ")", "and", "len", "(", "idx", ")", "==", "0", "for", "idx", "in", "indexer", ")", "return", "False", "# empty indexers", "# 8669 (empty)", "if", "_is_empty_indexer", "(", "indexer", ")", ":", "pass", "# setting a single element for each dim and with a rhs that could", "# be say a list", "# GH 6043", "elif", "_is_scalar_indexer", "(", "indexer", ")", ":", "values", "[", "indexer", "]", "=", "value", "# if 
we are an exact match (ex-broadcasting),", "# then use the resultant dtype", "elif", "(", "len", "(", "arr_value", ".", "shape", ")", "and", "arr_value", ".", "shape", "[", "0", "]", "==", "values", ".", "shape", "[", "0", "]", "and", "np", ".", "prod", "(", "arr_value", ".", "shape", ")", "==", "np", ".", "prod", "(", "values", ".", "shape", ")", ")", ":", "values", "[", "indexer", "]", "=", "value", "try", ":", "values", "=", "values", ".", "astype", "(", "arr_value", ".", "dtype", ")", "except", "ValueError", ":", "pass", "# set", "else", ":", "values", "[", "indexer", "]", "=", "value", "# coerce and try to infer the dtypes of the result", "values", "=", "self", ".", "_try_coerce_and_cast_result", "(", "values", ",", "dtype", ")", "block", "=", "self", ".", "make_block", "(", "transf", "(", "values", ")", ")", "return", "block" ]
Set the value inplace, returning a possibly different typed block. Parameters ---------- indexer : tuple, list-like, array-like, slice The subset of self.values to set value : object The value being set Returns ------- Block Notes ----- `indexer` is a direct slice/positional indexer. `value` must be a compatible shape.
[ "Set", "the", "value", "inplace", "returning", "a", "possibly", "different", "typed", "block", "." ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/internals/blocks.py#L775-L900
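A user-level view of the common-dtype search in setitem (observable outcomes; exact promotion rules can vary by version):

import pandas as pd

s = pd.Series([1, 2, 3])   # int64 block
s[0] = 10
print(s.dtype)             # int64: the value is storeable as-is

s[0] = 'a'
print(s.dtype)             # object: no common dtype, so astype + retried setitem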
19,561
pandas-dev/pandas
pandas/core/internals/blocks.py
Block.putmask
def putmask(self, mask, new, align=True, inplace=False, axis=0, transpose=False): """ putmask the data to the block; it is possible that we may create a new dtype of block return the resulting block(s) Parameters ---------- mask : the condition to respect new : a ndarray/object align : boolean, perform alignment on other/cond, default is True inplace : perform inplace modification, default is False axis : int transpose : boolean Set to True if self is stored with axes reversed Returns ------- a list of new blocks, the result of the putmask """ new_values = self.values if inplace else self.values.copy() new = getattr(new, 'values', new) mask = getattr(mask, 'values', mask) # if we are passed a scalar None, convert it here if not is_list_like(new) and isna(new) and not self.is_object: new = self.fill_value if self._can_hold_element(new): _, new = self._try_coerce_args(new_values, new) if transpose: new_values = new_values.T # If the default repeat behavior in np.putmask would go in the # wrong direction, then explicitly repeat and reshape new instead if getattr(new, 'ndim', 0) >= 1: if self.ndim - 1 == new.ndim and axis == 1: new = np.repeat( new, new_values.shape[-1]).reshape(self.shape) new = new.astype(new_values.dtype) # we require exact matches between the len of the # values we are setting (or is compat). np.putmask # doesn't check this and will simply truncate / pad # the output, but we want sane error messages # # TODO: this prob needs some better checking # for 2D cases if ((is_list_like(new) and np.any(mask[mask]) and getattr(new, 'ndim', 1) == 1)): if not (mask.shape[-1] == len(new) or mask[mask].shape[-1] == len(new) or len(new) == 1): raise ValueError("cannot assign mismatch " "length to masked array") np.putmask(new_values, mask, new) # maybe upcast me elif mask.any(): if transpose: mask = mask.T if isinstance(new, np.ndarray): new = new.T axis = new_values.ndim - axis - 1 # Pseudo-broadcast if getattr(new, 'ndim', 0) >= 1: if self.ndim - 1 == new.ndim: new_shape = list(new.shape) new_shape.insert(axis, 1) new = new.reshape(tuple(new_shape)) # operate column-by-column def f(m, v, i): if i is None: # ndim==1 case. n = new else: if isinstance(new, np.ndarray): n = np.squeeze(new[i % new.shape[0]]) else: n = np.array(new) # type of the new block dtype, _ = maybe_promote(n.dtype) # we need to explicitly astype here to make a copy n = n.astype(dtype) nv = _putmask_smart(v, m, n) return nv new_blocks = self.split_and_operate(mask, f, inplace) return new_blocks if inplace: return [self] if transpose: new_values = new_values.T return [self.make_block(new_values)]
python
def putmask(self, mask, new, align=True, inplace=False, axis=0, transpose=False): """ putmask the data to the block; it is possible that we may create a new dtype of block return the resulting block(s) Parameters ---------- mask : the condition to respect new : a ndarray/object align : boolean, perform alignment on other/cond, default is True inplace : perform inplace modification, default is False axis : int transpose : boolean Set to True if self is stored with axes reversed Returns ------- a list of new blocks, the result of the putmask """ new_values = self.values if inplace else self.values.copy() new = getattr(new, 'values', new) mask = getattr(mask, 'values', mask) # if we are passed a scalar None, convert it here if not is_list_like(new) and isna(new) and not self.is_object: new = self.fill_value if self._can_hold_element(new): _, new = self._try_coerce_args(new_values, new) if transpose: new_values = new_values.T # If the default repeat behavior in np.putmask would go in the # wrong direction, then explicitly repeat and reshape new instead if getattr(new, 'ndim', 0) >= 1: if self.ndim - 1 == new.ndim and axis == 1: new = np.repeat( new, new_values.shape[-1]).reshape(self.shape) new = new.astype(new_values.dtype) # we require exact matches between the len of the # values we are setting (or is compat). np.putmask # doesn't check this and will simply truncate / pad # the output, but we want sane error messages # # TODO: this prob needs some better checking # for 2D cases if ((is_list_like(new) and np.any(mask[mask]) and getattr(new, 'ndim', 1) == 1)): if not (mask.shape[-1] == len(new) or mask[mask].shape[-1] == len(new) or len(new) == 1): raise ValueError("cannot assign mismatch " "length to masked array") np.putmask(new_values, mask, new) # maybe upcast me elif mask.any(): if transpose: mask = mask.T if isinstance(new, np.ndarray): new = new.T axis = new_values.ndim - axis - 1 # Pseudo-broadcast if getattr(new, 'ndim', 0) >= 1: if self.ndim - 1 == new.ndim: new_shape = list(new.shape) new_shape.insert(axis, 1) new = new.reshape(tuple(new_shape)) # operate column-by-column def f(m, v, i): if i is None: # ndim==1 case. n = new else: if isinstance(new, np.ndarray): n = np.squeeze(new[i % new.shape[0]]) else: n = np.array(new) # type of the new block dtype, _ = maybe_promote(n.dtype) # we need to explicitly astype here to make a copy n = n.astype(dtype) nv = _putmask_smart(v, m, n) return nv new_blocks = self.split_and_operate(mask, f, inplace) return new_blocks if inplace: return [self] if transpose: new_values = new_values.T return [self.make_block(new_values)]
[ "def", "putmask", "(", "self", ",", "mask", ",", "new", ",", "align", "=", "True", ",", "inplace", "=", "False", ",", "axis", "=", "0", ",", "transpose", "=", "False", ")", ":", "new_values", "=", "self", ".", "values", "if", "inplace", "else", "self", ".", "values", ".", "copy", "(", ")", "new", "=", "getattr", "(", "new", ",", "'values'", ",", "new", ")", "mask", "=", "getattr", "(", "mask", ",", "'values'", ",", "mask", ")", "# if we are passed a scalar None, convert it here", "if", "not", "is_list_like", "(", "new", ")", "and", "isna", "(", "new", ")", "and", "not", "self", ".", "is_object", ":", "new", "=", "self", ".", "fill_value", "if", "self", ".", "_can_hold_element", "(", "new", ")", ":", "_", ",", "new", "=", "self", ".", "_try_coerce_args", "(", "new_values", ",", "new", ")", "if", "transpose", ":", "new_values", "=", "new_values", ".", "T", "# If the default repeat behavior in np.putmask would go in the", "# wrong direction, then explicitly repeat and reshape new instead", "if", "getattr", "(", "new", ",", "'ndim'", ",", "0", ")", ">=", "1", ":", "if", "self", ".", "ndim", "-", "1", "==", "new", ".", "ndim", "and", "axis", "==", "1", ":", "new", "=", "np", ".", "repeat", "(", "new", ",", "new_values", ".", "shape", "[", "-", "1", "]", ")", ".", "reshape", "(", "self", ".", "shape", ")", "new", "=", "new", ".", "astype", "(", "new_values", ".", "dtype", ")", "# we require exact matches between the len of the", "# values we are setting (or is compat). np.putmask", "# doesn't check this and will simply truncate / pad", "# the output, but we want sane error messages", "#", "# TODO: this prob needs some better checking", "# for 2D cases", "if", "(", "(", "is_list_like", "(", "new", ")", "and", "np", ".", "any", "(", "mask", "[", "mask", "]", ")", "and", "getattr", "(", "new", ",", "'ndim'", ",", "1", ")", "==", "1", ")", ")", ":", "if", "not", "(", "mask", ".", "shape", "[", "-", "1", "]", "==", "len", "(", "new", ")", "or", "mask", "[", "mask", "]", ".", "shape", "[", "-", "1", "]", "==", "len", "(", "new", ")", "or", "len", "(", "new", ")", "==", "1", ")", ":", "raise", "ValueError", "(", "\"cannot assign mismatch \"", "\"length to masked array\"", ")", "np", ".", "putmask", "(", "new_values", ",", "mask", ",", "new", ")", "# maybe upcast me", "elif", "mask", ".", "any", "(", ")", ":", "if", "transpose", ":", "mask", "=", "mask", ".", "T", "if", "isinstance", "(", "new", ",", "np", ".", "ndarray", ")", ":", "new", "=", "new", ".", "T", "axis", "=", "new_values", ".", "ndim", "-", "axis", "-", "1", "# Pseudo-broadcast", "if", "getattr", "(", "new", ",", "'ndim'", ",", "0", ")", ">=", "1", ":", "if", "self", ".", "ndim", "-", "1", "==", "new", ".", "ndim", ":", "new_shape", "=", "list", "(", "new", ".", "shape", ")", "new_shape", ".", "insert", "(", "axis", ",", "1", ")", "new", "=", "new", ".", "reshape", "(", "tuple", "(", "new_shape", ")", ")", "# operate column-by-column", "def", "f", "(", "m", ",", "v", ",", "i", ")", ":", "if", "i", "is", "None", ":", "# ndim==1 case.", "n", "=", "new", "else", ":", "if", "isinstance", "(", "new", ",", "np", ".", "ndarray", ")", ":", "n", "=", "np", ".", "squeeze", "(", "new", "[", "i", "%", "new", ".", "shape", "[", "0", "]", "]", ")", "else", ":", "n", "=", "np", ".", "array", "(", "new", ")", "# type of the new block", "dtype", ",", "_", "=", "maybe_promote", "(", "n", ".", "dtype", ")", "# we need to explicitly astype here to make a copy", "n", "=", "n", ".", "astype", "(", "dtype", ")", "nv", "=", "_putmask_smart", "(", "v", ",", "m", 
",", "n", ")", "return", "nv", "new_blocks", "=", "self", ".", "split_and_operate", "(", "mask", ",", "f", ",", "inplace", ")", "return", "new_blocks", "if", "inplace", ":", "return", "[", "self", "]", "if", "transpose", ":", "new_values", "=", "new_values", ".", "T", "return", "[", "self", ".", "make_block", "(", "new_values", ")", "]" ]
putmask the data to the block; it is possible that we may create a new dtype of block return the resulting block(s) Parameters ---------- mask : the condition to respect new : a ndarray/object align : boolean, perform alignment on other/cond, default is True inplace : perform inplace modification, default is False axis : int transpose : boolean Set to True if self is stored with axes reversed Returns ------- a list of new blocks, the result of the putmask
[ "putmask", "the", "data", "to", "the", "block", ";", "it", "is", "possible", "that", "we", "may", "create", "a", "new", "dtype", "of", "block" ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/internals/blocks.py#L902-L1012
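Why the explicit length check above exists: raw np.putmask indexes the replacement array by flat position and cycles it, which can silently scramble values rather than raise. A minimal demonstration:

import numpy as np

values = np.array([1.0, 2.0, 3.0, 4.0])
mask = np.array([True, False, True, False])

np.putmask(values, mask, np.array([10.0, 30.0]))
print(values)   # [10.  2. 10.  4.] -- position 2 received new[2 % 2], not new[1]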
19,562
pandas-dev/pandas
pandas/core/internals/blocks.py
Block.coerce_to_target_dtype
def coerce_to_target_dtype(self, other): """ coerce the current block to a dtype compat for other we will return a block, possibly object, and not raise we can also safely try to coerce to the same dtype and will receive the same block """ # if we cannot then coerce to object dtype, _ = infer_dtype_from(other, pandas_dtype=True) if is_dtype_equal(self.dtype, dtype): return self if self.is_bool or is_object_dtype(dtype) or is_bool_dtype(dtype): # we don't upcast to bool return self.astype(object) elif ((self.is_float or self.is_complex) and (is_integer_dtype(dtype) or is_float_dtype(dtype))): # don't coerce float/complex to int return self elif (self.is_datetime or is_datetime64_dtype(dtype) or is_datetime64tz_dtype(dtype)): # not a datetime if not ((is_datetime64_dtype(dtype) or is_datetime64tz_dtype(dtype)) and self.is_datetime): return self.astype(object) # don't upcast timezone with different timezone or no timezone mytz = getattr(self.dtype, 'tz', None) othertz = getattr(dtype, 'tz', None) if str(mytz) != str(othertz): return self.astype(object) raise AssertionError("possible recursion in " "coerce_to_target_dtype: {} {}".format( self, other)) elif (self.is_timedelta or is_timedelta64_dtype(dtype)): # not a timedelta if not (is_timedelta64_dtype(dtype) and self.is_timedelta): return self.astype(object) raise AssertionError("possible recursion in " "coerce_to_target_dtype: {} {}".format( self, other)) try: return self.astype(dtype) except (ValueError, TypeError, OverflowError): pass return self.astype(object)
python
def coerce_to_target_dtype(self, other): """ coerce the current block to a dtype compat for other we will return a block, possibly object, and not raise we can also safely try to coerce to the same dtype and will receive the same block """ # if we cannot then coerce to object dtype, _ = infer_dtype_from(other, pandas_dtype=True) if is_dtype_equal(self.dtype, dtype): return self if self.is_bool or is_object_dtype(dtype) or is_bool_dtype(dtype): # we don't upcast to bool return self.astype(object) elif ((self.is_float or self.is_complex) and (is_integer_dtype(dtype) or is_float_dtype(dtype))): # don't coerce float/complex to int return self elif (self.is_datetime or is_datetime64_dtype(dtype) or is_datetime64tz_dtype(dtype)): # not a datetime if not ((is_datetime64_dtype(dtype) or is_datetime64tz_dtype(dtype)) and self.is_datetime): return self.astype(object) # don't upcast timezone with different timezone or no timezone mytz = getattr(self.dtype, 'tz', None) othertz = getattr(dtype, 'tz', None) if str(mytz) != str(othertz): return self.astype(object) raise AssertionError("possible recursion in " "coerce_to_target_dtype: {} {}".format( self, other)) elif (self.is_timedelta or is_timedelta64_dtype(dtype)): # not a timedelta if not (is_timedelta64_dtype(dtype) and self.is_timedelta): return self.astype(object) raise AssertionError("possible recursion in " "coerce_to_target_dtype: {} {}".format( self, other)) try: return self.astype(dtype) except (ValueError, TypeError, OverflowError): pass return self.astype(object)
[ "def", "coerce_to_target_dtype", "(", "self", ",", "other", ")", ":", "# if we cannot then coerce to object", "dtype", ",", "_", "=", "infer_dtype_from", "(", "other", ",", "pandas_dtype", "=", "True", ")", "if", "is_dtype_equal", "(", "self", ".", "dtype", ",", "dtype", ")", ":", "return", "self", "if", "self", ".", "is_bool", "or", "is_object_dtype", "(", "dtype", ")", "or", "is_bool_dtype", "(", "dtype", ")", ":", "# we don't upcast to bool", "return", "self", ".", "astype", "(", "object", ")", "elif", "(", "(", "self", ".", "is_float", "or", "self", ".", "is_complex", ")", "and", "(", "is_integer_dtype", "(", "dtype", ")", "or", "is_float_dtype", "(", "dtype", ")", ")", ")", ":", "# don't coerce float/complex to int", "return", "self", "elif", "(", "self", ".", "is_datetime", "or", "is_datetime64_dtype", "(", "dtype", ")", "or", "is_datetime64tz_dtype", "(", "dtype", ")", ")", ":", "# not a datetime", "if", "not", "(", "(", "is_datetime64_dtype", "(", "dtype", ")", "or", "is_datetime64tz_dtype", "(", "dtype", ")", ")", "and", "self", ".", "is_datetime", ")", ":", "return", "self", ".", "astype", "(", "object", ")", "# don't upcast timezone with different timezone or no timezone", "mytz", "=", "getattr", "(", "self", ".", "dtype", ",", "'tz'", ",", "None", ")", "othertz", "=", "getattr", "(", "dtype", ",", "'tz'", ",", "None", ")", "if", "str", "(", "mytz", ")", "!=", "str", "(", "othertz", ")", ":", "return", "self", ".", "astype", "(", "object", ")", "raise", "AssertionError", "(", "\"possible recursion in \"", "\"coerce_to_target_dtype: {} {}\"", ".", "format", "(", "self", ",", "other", ")", ")", "elif", "(", "self", ".", "is_timedelta", "or", "is_timedelta64_dtype", "(", "dtype", ")", ")", ":", "# not a timedelta", "if", "not", "(", "is_timedelta64_dtype", "(", "dtype", ")", "and", "self", ".", "is_timedelta", ")", ":", "return", "self", ".", "astype", "(", "object", ")", "raise", "AssertionError", "(", "\"possible recursion in \"", "\"coerce_to_target_dtype: {} {}\"", ".", "format", "(", "self", ",", "other", ")", ")", "try", ":", "return", "self", ".", "astype", "(", "dtype", ")", "except", "(", "ValueError", ",", "TypeError", ",", "OverflowError", ")", ":", "pass", "return", "self", ".", "astype", "(", "object", ")" ]
coerce the current block to a dtype compat for other we will return a block, possibly object, and not raise we can also safely try to coerce to the same dtype and will receive the same block
[ "coerce", "the", "current", "block", "to", "a", "dtype", "compat", "for", "other", "we", "will", "return", "a", "block", "possibly", "object", "and", "not", "raise" ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/internals/blocks.py#L1014-L1073
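Observable outcomes of the branch logic above, reached here through Series.where, one of this method's callers (illustrative, not a trace of the exact call path):

import pandas as pd

s = pd.Series([1, 2])                        # int64 block

# int block vs. float value: a common dtype exists, so astype(float64).
print(s.where([True, False], 2.5).dtype)     # float64

# int block vs. string value: no common dtype, falls back to object.
print(s.where([True, False], 'x').dtype)     # object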
19,563
pandas-dev/pandas
pandas/core/internals/blocks.py
Block._interpolate_with_fill
def _interpolate_with_fill(self, method='pad', axis=0, inplace=False, limit=None, fill_value=None, coerce=False, downcast=None): """ fillna but using the interpolate machinery """ inplace = validate_bool_kwarg(inplace, 'inplace') # if we are coercing, then don't force the conversion # if the block can't hold the type if coerce: if not self._can_hold_na: if inplace: return [self] else: return [self.copy()] values = self.values if inplace else self.values.copy() values, fill_value = self._try_coerce_args(values, fill_value) values = missing.interpolate_2d(values, method=method, axis=axis, limit=limit, fill_value=fill_value, dtype=self.dtype) values = self._try_coerce_result(values) blocks = [self.make_block_same_class(values, ndim=self.ndim)] return self._maybe_downcast(blocks, downcast)
python
def _interpolate_with_fill(self, method='pad', axis=0, inplace=False, limit=None, fill_value=None, coerce=False, downcast=None): """ fillna but using the interpolate machinery """ inplace = validate_bool_kwarg(inplace, 'inplace') # if we are coercing, then don't force the conversion # if the block can't hold the type if coerce: if not self._can_hold_na: if inplace: return [self] else: return [self.copy()] values = self.values if inplace else self.values.copy() values, fill_value = self._try_coerce_args(values, fill_value) values = missing.interpolate_2d(values, method=method, axis=axis, limit=limit, fill_value=fill_value, dtype=self.dtype) values = self._try_coerce_result(values) blocks = [self.make_block_same_class(values, ndim=self.ndim)] return self._maybe_downcast(blocks, downcast)
[ "def", "_interpolate_with_fill", "(", "self", ",", "method", "=", "'pad'", ",", "axis", "=", "0", ",", "inplace", "=", "False", ",", "limit", "=", "None", ",", "fill_value", "=", "None", ",", "coerce", "=", "False", ",", "downcast", "=", "None", ")", ":", "inplace", "=", "validate_bool_kwarg", "(", "inplace", ",", "'inplace'", ")", "# if we are coercing, then don't force the conversion", "# if the block can't hold the type", "if", "coerce", ":", "if", "not", "self", ".", "_can_hold_na", ":", "if", "inplace", ":", "return", "[", "self", "]", "else", ":", "return", "[", "self", ".", "copy", "(", ")", "]", "values", "=", "self", ".", "values", "if", "inplace", "else", "self", ".", "values", ".", "copy", "(", ")", "values", ",", "fill_value", "=", "self", ".", "_try_coerce_args", "(", "values", ",", "fill_value", ")", "values", "=", "missing", ".", "interpolate_2d", "(", "values", ",", "method", "=", "method", ",", "axis", "=", "axis", ",", "limit", "=", "limit", ",", "fill_value", "=", "fill_value", ",", "dtype", "=", "self", ".", "dtype", ")", "values", "=", "self", ".", "_try_coerce_result", "(", "values", ")", "blocks", "=", "[", "self", ".", "make_block_same_class", "(", "values", ",", "ndim", "=", "self", ".", "ndim", ")", "]", "return", "self", ".", "_maybe_downcast", "(", "blocks", ",", "downcast", ")" ]
fillna but using the interpolate machinery
[ "fillna", "but", "using", "the", "interpolate", "machinery" ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/internals/blocks.py#L1119-L1143
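The pad/backfill methods route through this machinery; at the public level (fillna's method argument is contemporary with this commit; newer pandas prefers .ffill()/.bfill()):

import numpy as np
import pandas as pd

s = pd.Series([1.0, np.nan, np.nan, 4.0])
print(s.fillna(method='ffill'))            # 1.0, 1.0, 1.0, 4.0
print(s.fillna(method='bfill', limit=1))   # 1.0, NaN, 4.0, 4.0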
19,564
pandas-dev/pandas
pandas/core/internals/blocks.py
Block._interpolate
def _interpolate(self, method=None, index=None, values=None, fill_value=None, axis=0, limit=None, limit_direction='forward', limit_area=None, inplace=False, downcast=None, **kwargs): """ interpolate using scipy wrappers """ inplace = validate_bool_kwarg(inplace, 'inplace') data = self.values if inplace else self.values.copy() # only deal with floats if not self.is_float: if not self.is_integer: return self data = data.astype(np.float64) if fill_value is None: fill_value = self.fill_value if method in ('krogh', 'piecewise_polynomial', 'pchip'): if not index.is_monotonic: raise ValueError("{0} interpolation requires that the " "index be monotonic.".format(method)) # process 1-d slices in the axis direction def func(x): # process a 1-d slice, returning it # should the axis argument be handled below in apply_along_axis? # i.e. not an arg to missing.interpolate_1d return missing.interpolate_1d(index, x, method=method, limit=limit, limit_direction=limit_direction, limit_area=limit_area, fill_value=fill_value, bounds_error=False, **kwargs) # interp each column independently interp_values = np.apply_along_axis(func, axis, data) blocks = [self.make_block_same_class(interp_values)] return self._maybe_downcast(blocks, downcast)
python
def _interpolate(self, method=None, index=None, values=None, fill_value=None, axis=0, limit=None, limit_direction='forward', limit_area=None, inplace=False, downcast=None, **kwargs): """ interpolate using scipy wrappers """ inplace = validate_bool_kwarg(inplace, 'inplace') data = self.values if inplace else self.values.copy() # only deal with floats if not self.is_float: if not self.is_integer: return self data = data.astype(np.float64) if fill_value is None: fill_value = self.fill_value if method in ('krogh', 'piecewise_polynomial', 'pchip'): if not index.is_monotonic: raise ValueError("{0} interpolation requires that the " "index be monotonic.".format(method)) # process 1-d slices in the axis direction def func(x): # process a 1-d slice, returning it # should the axis argument be handled below in apply_along_axis? # i.e. not an arg to missing.interpolate_1d return missing.interpolate_1d(index, x, method=method, limit=limit, limit_direction=limit_direction, limit_area=limit_area, fill_value=fill_value, bounds_error=False, **kwargs) # interp each column independently interp_values = np.apply_along_axis(func, axis, data) blocks = [self.make_block_same_class(interp_values)] return self._maybe_downcast(blocks, downcast)
[ "def", "_interpolate", "(", "self", ",", "method", "=", "None", ",", "index", "=", "None", ",", "values", "=", "None", ",", "fill_value", "=", "None", ",", "axis", "=", "0", ",", "limit", "=", "None", ",", "limit_direction", "=", "'forward'", ",", "limit_area", "=", "None", ",", "inplace", "=", "False", ",", "downcast", "=", "None", ",", "*", "*", "kwargs", ")", ":", "inplace", "=", "validate_bool_kwarg", "(", "inplace", ",", "'inplace'", ")", "data", "=", "self", ".", "values", "if", "inplace", "else", "self", ".", "values", ".", "copy", "(", ")", "# only deal with floats", "if", "not", "self", ".", "is_float", ":", "if", "not", "self", ".", "is_integer", ":", "return", "self", "data", "=", "data", ".", "astype", "(", "np", ".", "float64", ")", "if", "fill_value", "is", "None", ":", "fill_value", "=", "self", ".", "fill_value", "if", "method", "in", "(", "'krogh'", ",", "'piecewise_polynomial'", ",", "'pchip'", ")", ":", "if", "not", "index", ".", "is_monotonic", ":", "raise", "ValueError", "(", "\"{0} interpolation requires that the \"", "\"index be monotonic.\"", ".", "format", "(", "method", ")", ")", "# process 1-d slices in the axis direction", "def", "func", "(", "x", ")", ":", "# process a 1-d slice, returning it", "# should the axis argument be handled below in apply_along_axis?", "# i.e. not an arg to missing.interpolate_1d", "return", "missing", ".", "interpolate_1d", "(", "index", ",", "x", ",", "method", "=", "method", ",", "limit", "=", "limit", ",", "limit_direction", "=", "limit_direction", ",", "limit_area", "=", "limit_area", ",", "fill_value", "=", "fill_value", ",", "bounds_error", "=", "False", ",", "*", "*", "kwargs", ")", "# interp each column independently", "interp_values", "=", "np", ".", "apply_along_axis", "(", "func", ",", "axis", ",", "data", ")", "blocks", "=", "[", "self", ".", "make_block_same_class", "(", "interp_values", ")", "]", "return", "self", ".", "_maybe_downcast", "(", "blocks", ",", "downcast", ")" ]
interpolate using scipy wrappers
[ "interpolate", "using", "scipy", "wrappers" ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/internals/blocks.py#L1145-L1184
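Non-fill methods land here; 'linear' needs no scipy, while methods such as 'pchip' or 'krogh' do (and, per the check above, require a monotonic index):

import numpy as np
import pandas as pd

s = pd.Series([0.0, np.nan, np.nan, 3.0])
print(s.interpolate(method='linear'))             # 0.0, 1.0, 2.0, 3.0

# Integer blocks are promoted first, mirroring data.astype(np.float64) above:
print(pd.Series([1, 2, 3]).interpolate().dtype)   # float64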
19,565
pandas-dev/pandas
pandas/core/internals/blocks.py
Block.take_nd
def take_nd(self, indexer, axis, new_mgr_locs=None, fill_tuple=None): """ Take values according to indexer and return them as a block.bb """ # algos.take_nd dispatches for DatetimeTZBlock, CategoricalBlock # so need to preserve types # sparse is treated like an ndarray, but needs .get_values() shaping values = self.values if self.is_sparse: values = self.get_values() if fill_tuple is None: fill_value = self.fill_value new_values = algos.take_nd(values, indexer, axis=axis, allow_fill=False, fill_value=fill_value) else: fill_value = fill_tuple[0] new_values = algos.take_nd(values, indexer, axis=axis, allow_fill=True, fill_value=fill_value) if new_mgr_locs is None: if axis == 0: slc = libinternals.indexer_as_slice(indexer) if slc is not None: new_mgr_locs = self.mgr_locs[slc] else: new_mgr_locs = self.mgr_locs[indexer] else: new_mgr_locs = self.mgr_locs if not is_dtype_equal(new_values.dtype, self.dtype): return self.make_block(new_values, new_mgr_locs) else: return self.make_block_same_class(new_values, new_mgr_locs)
python
def take_nd(self, indexer, axis, new_mgr_locs=None, fill_tuple=None):
    """
    Take values according to indexer and return them as a block.
    """

    # algos.take_nd dispatches for DatetimeTZBlock, CategoricalBlock
    # so need to preserve types
    # sparse is treated like an ndarray, but needs .get_values() shaping

    values = self.values
    if self.is_sparse:
        values = self.get_values()

    if fill_tuple is None:
        fill_value = self.fill_value
        new_values = algos.take_nd(values, indexer, axis=axis,
                                   allow_fill=False, fill_value=fill_value)
    else:
        fill_value = fill_tuple[0]
        new_values = algos.take_nd(values, indexer, axis=axis,
                                   allow_fill=True, fill_value=fill_value)

    if new_mgr_locs is None:
        if axis == 0:
            slc = libinternals.indexer_as_slice(indexer)
            if slc is not None:
                new_mgr_locs = self.mgr_locs[slc]
            else:
                new_mgr_locs = self.mgr_locs[indexer]
        else:
            new_mgr_locs = self.mgr_locs

    if not is_dtype_equal(new_values.dtype, self.dtype):
        return self.make_block(new_values, new_mgr_locs)
    else:
        return self.make_block_same_class(new_values, new_mgr_locs)
[ "def", "take_nd", "(", "self", ",", "indexer", ",", "axis", ",", "new_mgr_locs", "=", "None", ",", "fill_tuple", "=", "None", ")", ":", "# algos.take_nd dispatches for DatetimeTZBlock, CategoricalBlock", "# so need to preserve types", "# sparse is treated like an ndarray, but needs .get_values() shaping", "values", "=", "self", ".", "values", "if", "self", ".", "is_sparse", ":", "values", "=", "self", ".", "get_values", "(", ")", "if", "fill_tuple", "is", "None", ":", "fill_value", "=", "self", ".", "fill_value", "new_values", "=", "algos", ".", "take_nd", "(", "values", ",", "indexer", ",", "axis", "=", "axis", ",", "allow_fill", "=", "False", ",", "fill_value", "=", "fill_value", ")", "else", ":", "fill_value", "=", "fill_tuple", "[", "0", "]", "new_values", "=", "algos", ".", "take_nd", "(", "values", ",", "indexer", ",", "axis", "=", "axis", ",", "allow_fill", "=", "True", ",", "fill_value", "=", "fill_value", ")", "if", "new_mgr_locs", "is", "None", ":", "if", "axis", "==", "0", ":", "slc", "=", "libinternals", ".", "indexer_as_slice", "(", "indexer", ")", "if", "slc", "is", "not", "None", ":", "new_mgr_locs", "=", "self", ".", "mgr_locs", "[", "slc", "]", "else", ":", "new_mgr_locs", "=", "self", ".", "mgr_locs", "[", "indexer", "]", "else", ":", "new_mgr_locs", "=", "self", ".", "mgr_locs", "if", "not", "is_dtype_equal", "(", "new_values", ".", "dtype", ",", "self", ".", "dtype", ")", ":", "return", "self", ".", "make_block", "(", "new_values", ",", "new_mgr_locs", ")", "else", ":", "return", "self", ".", "make_block_same_class", "(", "new_values", ",", "new_mgr_locs", ")" ]
Take values according to indexer and return them as a block.
[ "Take", "values", "according", "to", "indexer", "and", "return", "them", "as", "a", "block", ".", "bb" ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/internals/blocks.py#L1186-L1222
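For context, the public counterpart of the block-level take above is DataFrame.take, which selects by position along either axis; a small usage sketch:

import pandas as pd

df = pd.DataFrame({"a": [10, 20, 30], "b": [1.5, 2.5, 3.5]})
print(df.take([2, 0, 0]))      # rows by position, repeats allowed
print(df.take([1], axis=1))    # column "b" by position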
19,566
pandas-dev/pandas
pandas/core/internals/blocks.py
Block.diff
def diff(self, n, axis=1): """ return block for the diff of the values """ new_values = algos.diff(self.values, n, axis=axis) return [self.make_block(values=new_values)]
python
def diff(self, n, axis=1): """ return block for the diff of the values """ new_values = algos.diff(self.values, n, axis=axis) return [self.make_block(values=new_values)]
[ "def", "diff", "(", "self", ",", "n", ",", "axis", "=", "1", ")", ":", "new_values", "=", "algos", ".", "diff", "(", "self", ".", "values", ",", "n", ",", "axis", "=", "axis", ")", "return", "[", "self", ".", "make_block", "(", "values", "=", "new_values", ")", "]" ]
return block for the diff of the values
[ "return", "block", "for", "the", "diff", "of", "the", "values" ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/internals/blocks.py#L1224-L1227
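algos.diff is what powers the user-facing diff; a quick sketch of the behaviour at the DataFrame level:

import pandas as pd

df = pd.DataFrame({"x": [1.0, 4.0, 9.0, 16.0]})
print(df.diff())            # first discrete difference, NaN in row 0
print(df.diff(periods=2))   # difference against two rows back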
19,567
pandas-dev/pandas
pandas/core/internals/blocks.py
Block.shift
def shift(self, periods, axis=0, fill_value=None): """ shift the block by periods, possibly upcast """ # convert integer to float if necessary. need to do a lot more than # that, handle boolean etc also new_values, fill_value = maybe_upcast(self.values, fill_value) # make sure array sent to np.roll is c_contiguous f_ordered = new_values.flags.f_contiguous if f_ordered: new_values = new_values.T axis = new_values.ndim - axis - 1 if np.prod(new_values.shape): new_values = np.roll(new_values, ensure_platform_int(periods), axis=axis) axis_indexer = [slice(None)] * self.ndim if periods > 0: axis_indexer[axis] = slice(None, periods) else: axis_indexer[axis] = slice(periods, None) new_values[tuple(axis_indexer)] = fill_value # restore original order if f_ordered: new_values = new_values.T return [self.make_block(new_values)]
python
def shift(self, periods, axis=0, fill_value=None): """ shift the block by periods, possibly upcast """ # convert integer to float if necessary. need to do a lot more than # that, handle boolean etc also new_values, fill_value = maybe_upcast(self.values, fill_value) # make sure array sent to np.roll is c_contiguous f_ordered = new_values.flags.f_contiguous if f_ordered: new_values = new_values.T axis = new_values.ndim - axis - 1 if np.prod(new_values.shape): new_values = np.roll(new_values, ensure_platform_int(periods), axis=axis) axis_indexer = [slice(None)] * self.ndim if periods > 0: axis_indexer[axis] = slice(None, periods) else: axis_indexer[axis] = slice(periods, None) new_values[tuple(axis_indexer)] = fill_value # restore original order if f_ordered: new_values = new_values.T return [self.make_block(new_values)]
[ "def", "shift", "(", "self", ",", "periods", ",", "axis", "=", "0", ",", "fill_value", "=", "None", ")", ":", "# convert integer to float if necessary. need to do a lot more than", "# that, handle boolean etc also", "new_values", ",", "fill_value", "=", "maybe_upcast", "(", "self", ".", "values", ",", "fill_value", ")", "# make sure array sent to np.roll is c_contiguous", "f_ordered", "=", "new_values", ".", "flags", ".", "f_contiguous", "if", "f_ordered", ":", "new_values", "=", "new_values", ".", "T", "axis", "=", "new_values", ".", "ndim", "-", "axis", "-", "1", "if", "np", ".", "prod", "(", "new_values", ".", "shape", ")", ":", "new_values", "=", "np", ".", "roll", "(", "new_values", ",", "ensure_platform_int", "(", "periods", ")", ",", "axis", "=", "axis", ")", "axis_indexer", "=", "[", "slice", "(", "None", ")", "]", "*", "self", ".", "ndim", "if", "periods", ">", "0", ":", "axis_indexer", "[", "axis", "]", "=", "slice", "(", "None", ",", "periods", ")", "else", ":", "axis_indexer", "[", "axis", "]", "=", "slice", "(", "periods", ",", "None", ")", "new_values", "[", "tuple", "(", "axis_indexer", ")", "]", "=", "fill_value", "# restore original order", "if", "f_ordered", ":", "new_values", "=", "new_values", ".", "T", "return", "[", "self", ".", "make_block", "(", "new_values", ")", "]" ]
shift the block by periods, possibly upcast
[ "shift", "the", "block", "by", "periods", "possibly", "upcast" ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/internals/blocks.py#L1229-L1257
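The roll-and-fill idea in Block.shift above can be sketched without pandas internals; shift_1d below is a hypothetical helper that mirrors the np.roll plus slice-assignment steps:

import numpy as np

def shift_1d(arr, periods, fill_value=np.nan):
    # np.roll wraps values around the end, so the wrapped-in slots
    # must then be overwritten with the fill value.
    out = np.roll(arr.astype(float), periods)
    if periods > 0:
        out[:periods] = fill_value
    elif periods < 0:
        out[periods:] = fill_value
    return out

print(shift_1d(np.array([1, 2, 3, 4]), 1))    # [nan  1.  2.  3.]
print(shift_1d(np.array([1, 2, 3, 4]), -2))   # [ 3.  4. nan nan]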
19,568
pandas-dev/pandas
pandas/core/internals/blocks.py
Block._unstack
def _unstack(self, unstacker_func, new_columns, n_rows, fill_value): """Return a list of unstacked blocks of self Parameters ---------- unstacker_func : callable Partially applied unstacker. new_columns : Index All columns of the unstacked BlockManager. n_rows : int Only used in ExtensionBlock.unstack fill_value : int Only used in ExtensionBlock.unstack Returns ------- blocks : list of Block New blocks of unstacked values. mask : array_like of bool The mask of columns of `blocks` we should keep. """ unstacker = unstacker_func(self.values.T) new_items = unstacker.get_new_columns() new_placement = new_columns.get_indexer(new_items) new_values, mask = unstacker.get_new_values() mask = mask.any(0) new_values = new_values.T[mask] new_placement = new_placement[mask] blocks = [make_block(new_values, placement=new_placement)] return blocks, mask
python
def _unstack(self, unstacker_func, new_columns, n_rows, fill_value): """Return a list of unstacked blocks of self Parameters ---------- unstacker_func : callable Partially applied unstacker. new_columns : Index All columns of the unstacked BlockManager. n_rows : int Only used in ExtensionBlock.unstack fill_value : int Only used in ExtensionBlock.unstack Returns ------- blocks : list of Block New blocks of unstacked values. mask : array_like of bool The mask of columns of `blocks` we should keep. """ unstacker = unstacker_func(self.values.T) new_items = unstacker.get_new_columns() new_placement = new_columns.get_indexer(new_items) new_values, mask = unstacker.get_new_values() mask = mask.any(0) new_values = new_values.T[mask] new_placement = new_placement[mask] blocks = [make_block(new_values, placement=new_placement)] return blocks, mask
[ "def", "_unstack", "(", "self", ",", "unstacker_func", ",", "new_columns", ",", "n_rows", ",", "fill_value", ")", ":", "unstacker", "=", "unstacker_func", "(", "self", ".", "values", ".", "T", ")", "new_items", "=", "unstacker", ".", "get_new_columns", "(", ")", "new_placement", "=", "new_columns", ".", "get_indexer", "(", "new_items", ")", "new_values", ",", "mask", "=", "unstacker", ".", "get_new_values", "(", ")", "mask", "=", "mask", ".", "any", "(", "0", ")", "new_values", "=", "new_values", ".", "T", "[", "mask", "]", "new_placement", "=", "new_placement", "[", "mask", "]", "blocks", "=", "[", "make_block", "(", "new_values", ",", "placement", "=", "new_placement", ")", "]", "return", "blocks", ",", "mask" ]
Return a list of unstacked blocks of self Parameters ---------- unstacker_func : callable Partially applied unstacker. new_columns : Index All columns of the unstacked BlockManager. n_rows : int Only used in ExtensionBlock.unstack fill_value : int Only used in ExtensionBlock.unstack Returns ------- blocks : list of Block New blocks of unstacked values. mask : array_like of bool The mask of columns of `blocks` we should keep.
[ "Return", "a", "list", "of", "unstacked", "blocks", "of", "self" ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/internals/blocks.py#L1372-L1403
19,569
pandas-dev/pandas
pandas/core/internals/blocks.py
Block.quantile
def quantile(self, qs, interpolation='linear', axis=0):
    """
    compute the quantiles of the block

    Parameters
    ----------
    qs: a scalar or list of the quantiles to be computed
    interpolation: type of interpolation, default 'linear'
    axis: axis to compute, default 0

    Returns
    -------
    Block
    """
    if self.is_datetimetz:
        # TODO: cleanup this special case.
        # We need to operate on i8 values for datetimetz
        # but `Block.get_values()` returns an ndarray of objects
        # right now. We need an API for "values to do numeric-like ops on"
        values = self.values.asi8

        # TODO: NonConsolidatableMixin shape
        # Usual shape inconsistencies for ExtensionBlocks
        if self.ndim > 1:
            values = values[None, :]
    else:
        values = self.get_values()
        values, _ = self._try_coerce_args(values, values)

    is_empty = values.shape[axis] == 0
    orig_scalar = not is_list_like(qs)
    if orig_scalar:
        # make list-like, unpack later
        qs = [qs]

    if is_empty:
        if self.ndim == 1:
            result = self._na_value
        else:
            # create the array of na_values
            # 2d len(values) * len(qs)
            result = np.repeat(np.array([self.fill_value] * len(qs)),
                               len(values)).reshape(len(values),
                                                    len(qs))
    else:
        # asarray needed for Sparse, see GH#24600
        # TODO: Why self.values and not values?
        mask = np.asarray(isna(self.values))
        result = nanpercentile(values, np.array(qs) * 100,
                               axis=axis, na_value=self.fill_value,
                               mask=mask, ndim=self.ndim,
                               interpolation=interpolation)

    result = np.array(result, copy=False)
    if self.ndim > 1:
        result = result.T

    if orig_scalar and not lib.is_scalar(result):
        # result could be scalar in case with is_empty and self.ndim == 1
        assert result.shape[-1] == 1, result.shape
        result = result[..., 0]
        result = lib.item_from_zerodim(result)

    ndim = getattr(result, 'ndim', None) or 0
    result = self._try_coerce_result(result)
    return make_block(result,
                      placement=np.arange(len(result)),
                      ndim=ndim)
python
def quantile(self, qs, interpolation='linear', axis=0):
    """
    compute the quantiles of the block

    Parameters
    ----------
    qs: a scalar or list of the quantiles to be computed
    interpolation: type of interpolation, default 'linear'
    axis: axis to compute, default 0

    Returns
    -------
    Block
    """
    if self.is_datetimetz:
        # TODO: cleanup this special case.
        # We need to operate on i8 values for datetimetz
        # but `Block.get_values()` returns an ndarray of objects
        # right now. We need an API for "values to do numeric-like ops on"
        values = self.values.asi8

        # TODO: NonConsolidatableMixin shape
        # Usual shape inconsistencies for ExtensionBlocks
        if self.ndim > 1:
            values = values[None, :]
    else:
        values = self.get_values()
        values, _ = self._try_coerce_args(values, values)

    is_empty = values.shape[axis] == 0
    orig_scalar = not is_list_like(qs)
    if orig_scalar:
        # make list-like, unpack later
        qs = [qs]

    if is_empty:
        if self.ndim == 1:
            result = self._na_value
        else:
            # create the array of na_values
            # 2d len(values) * len(qs)
            result = np.repeat(np.array([self.fill_value] * len(qs)),
                               len(values)).reshape(len(values),
                                                    len(qs))
    else:
        # asarray needed for Sparse, see GH#24600
        # TODO: Why self.values and not values?
        mask = np.asarray(isna(self.values))
        result = nanpercentile(values, np.array(qs) * 100,
                               axis=axis, na_value=self.fill_value,
                               mask=mask, ndim=self.ndim,
                               interpolation=interpolation)

    result = np.array(result, copy=False)
    if self.ndim > 1:
        result = result.T

    if orig_scalar and not lib.is_scalar(result):
        # result could be scalar in case with is_empty and self.ndim == 1
        assert result.shape[-1] == 1, result.shape
        result = result[..., 0]
        result = lib.item_from_zerodim(result)

    ndim = getattr(result, 'ndim', None) or 0
    result = self._try_coerce_result(result)
    return make_block(result,
                      placement=np.arange(len(result)),
                      ndim=ndim)
[ "def", "quantile", "(", "self", ",", "qs", ",", "interpolation", "=", "'linear'", ",", "axis", "=", "0", ")", ":", "if", "self", ".", "is_datetimetz", ":", "# TODO: cleanup this special case.", "# We need to operate on i8 values for datetimetz", "# but `Block.get_values()` returns an ndarray of objects", "# right now. We need an API for \"values to do numeric-like ops on\"", "values", "=", "self", ".", "values", ".", "asi8", "# TODO: NonConsolidatableMixin shape", "# Usual shape inconsistencies for ExtensionBlocks", "if", "self", ".", "ndim", ">", "1", ":", "values", "=", "values", "[", "None", ",", ":", "]", "else", ":", "values", "=", "self", ".", "get_values", "(", ")", "values", ",", "_", "=", "self", ".", "_try_coerce_args", "(", "values", ",", "values", ")", "is_empty", "=", "values", ".", "shape", "[", "axis", "]", "==", "0", "orig_scalar", "=", "not", "is_list_like", "(", "qs", ")", "if", "orig_scalar", ":", "# make list-like, unpack later", "qs", "=", "[", "qs", "]", "if", "is_empty", ":", "if", "self", ".", "ndim", "==", "1", ":", "result", "=", "self", ".", "_na_value", "else", ":", "# create the array of na_values", "# 2d len(values) * len(qs)", "result", "=", "np", ".", "repeat", "(", "np", ".", "array", "(", "[", "self", ".", "fill_value", "]", "*", "len", "(", "qs", ")", ")", ",", "len", "(", "values", ")", ")", ".", "reshape", "(", "len", "(", "values", ")", ",", "len", "(", "qs", ")", ")", "else", ":", "# asarray needed for Sparse, see GH#24600", "# TODO: Why self.values and not values?", "mask", "=", "np", ".", "asarray", "(", "isna", "(", "self", ".", "values", ")", ")", "result", "=", "nanpercentile", "(", "values", ",", "np", ".", "array", "(", "qs", ")", "*", "100", ",", "axis", "=", "axis", ",", "na_value", "=", "self", ".", "fill_value", ",", "mask", "=", "mask", ",", "ndim", "=", "self", ".", "ndim", ",", "interpolation", "=", "interpolation", ")", "result", "=", "np", ".", "array", "(", "result", ",", "copy", "=", "False", ")", "if", "self", ".", "ndim", ">", "1", ":", "result", "=", "result", ".", "T", "if", "orig_scalar", "and", "not", "lib", ".", "is_scalar", "(", "result", ")", ":", "# result could be scalar in case with is_empty and self.ndim == 1", "assert", "result", ".", "shape", "[", "-", "1", "]", "==", "1", ",", "result", ".", "shape", "result", "=", "result", "[", "...", ",", "0", "]", "result", "=", "lib", ".", "item_from_zerodim", "(", "result", ")", "ndim", "=", "getattr", "(", "result", ",", "'ndim'", ",", "None", ")", "or", "0", "result", "=", "self", ".", "_try_coerce_result", "(", "result", ")", "return", "make_block", "(", "result", ",", "placement", "=", "np", ".", "arange", "(", "len", "(", "result", ")", ")", ",", "ndim", "=", "ndim", ")" ]
compute the quantiles of the block

Parameters
----------
qs: a scalar or list of the quantiles to be computed
interpolation: type of interpolation, default 'linear'
axis: axis to compute, default 0

Returns
-------
Block
[ "compute", "the", "quantiles", "of", "the" ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/internals/blocks.py#L1405-L1472
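At the user level this code is reached through DataFrame.quantile; note how the scalar-vs-list distinction tracked by orig_scalar above shows up in the return type:

import pandas as pd

df = pd.DataFrame({"a": [1.0, 2.0, 3.0, 4.0]})
print(df.quantile(0.5))            # scalar q -> Series
print(df.quantile([0.25, 0.75]))   # list-like q -> DataFrame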
19,570
pandas-dev/pandas
pandas/core/internals/blocks.py
NonConsolidatableMixIn.putmask
def putmask(self, mask, new, align=True, inplace=False, axis=0, transpose=False): """ putmask the data to the block; we must be a single block and not generate other blocks return the resulting block Parameters ---------- mask : the condition to respect new : a ndarray/object align : boolean, perform alignment on other/cond, default is True inplace : perform inplace modification, default is False Returns ------- a new block, the result of the putmask """ inplace = validate_bool_kwarg(inplace, 'inplace') # use block's copy logic. # .values may be an Index which does shallow copy by default new_values = self.values if inplace else self.copy().values new_values, new = self._try_coerce_args(new_values, new) if isinstance(new, np.ndarray) and len(new) == len(mask): new = new[mask] mask = _safe_reshape(mask, new_values.shape) new_values[mask] = new new_values = self._try_coerce_result(new_values) return [self.make_block(values=new_values)]
python
def putmask(self, mask, new, align=True, inplace=False, axis=0, transpose=False): """ putmask the data to the block; we must be a single block and not generate other blocks return the resulting block Parameters ---------- mask : the condition to respect new : a ndarray/object align : boolean, perform alignment on other/cond, default is True inplace : perform inplace modification, default is False Returns ------- a new block, the result of the putmask """ inplace = validate_bool_kwarg(inplace, 'inplace') # use block's copy logic. # .values may be an Index which does shallow copy by default new_values = self.values if inplace else self.copy().values new_values, new = self._try_coerce_args(new_values, new) if isinstance(new, np.ndarray) and len(new) == len(mask): new = new[mask] mask = _safe_reshape(mask, new_values.shape) new_values[mask] = new new_values = self._try_coerce_result(new_values) return [self.make_block(values=new_values)]
[ "def", "putmask", "(", "self", ",", "mask", ",", "new", ",", "align", "=", "True", ",", "inplace", "=", "False", ",", "axis", "=", "0", ",", "transpose", "=", "False", ")", ":", "inplace", "=", "validate_bool_kwarg", "(", "inplace", ",", "'inplace'", ")", "# use block's copy logic.", "# .values may be an Index which does shallow copy by default", "new_values", "=", "self", ".", "values", "if", "inplace", "else", "self", ".", "copy", "(", ")", ".", "values", "new_values", ",", "new", "=", "self", ".", "_try_coerce_args", "(", "new_values", ",", "new", ")", "if", "isinstance", "(", "new", ",", "np", ".", "ndarray", ")", "and", "len", "(", "new", ")", "==", "len", "(", "mask", ")", ":", "new", "=", "new", "[", "mask", "]", "mask", "=", "_safe_reshape", "(", "mask", ",", "new_values", ".", "shape", ")", "new_values", "[", "mask", "]", "=", "new", "new_values", "=", "self", ".", "_try_coerce_result", "(", "new_values", ")", "return", "[", "self", ".", "make_block", "(", "values", "=", "new_values", ")", "]" ]
putmask the data to the block; we must be a single block and not generate other blocks return the resulting block Parameters ---------- mask : the condition to respect new : a ndarray/object align : boolean, perform alignment on other/cond, default is True inplace : perform inplace modification, default is False Returns ------- a new block, the result of the putmask
[ "putmask", "the", "data", "to", "the", "block", ";", "we", "must", "be", "a", "single", "block", "and", "not", "generate", "other", "blocks" ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/internals/blocks.py#L1564-L1597
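The same masked assignment is available publicly; Series.mask (and NumPy's in-place np.putmask) express the core operation the block method implements, a quick sketch:

import numpy as np
import pandas as pd

s = pd.Series([1.0, 2.0, 3.0, 4.0])
print(s.mask(s > 2, 99.0))       # replace where the condition holds

arr = np.array([1.0, 2.0, 3.0, 4.0])
np.putmask(arr, arr > 2, 99.0)   # the in-place NumPy analogue
print(arr)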
19,571
pandas-dev/pandas
pandas/core/internals/blocks.py
NonConsolidatableMixIn._get_unstack_items
def _get_unstack_items(self, unstacker, new_columns): """ Get the placement, values, and mask for a Block unstack. This is shared between ObjectBlock and ExtensionBlock. They differ in that ObjectBlock passes the values, while ExtensionBlock passes the dummy ndarray of positions to be used by a take later. Parameters ---------- unstacker : pandas.core.reshape.reshape._Unstacker new_columns : Index All columns of the unstacked BlockManager. Returns ------- new_placement : ndarray[int] The placement of the new columns in `new_columns`. new_values : Union[ndarray, ExtensionArray] The first return value from _Unstacker.get_new_values. mask : ndarray[bool] The second return value from _Unstacker.get_new_values. """ # shared with ExtensionBlock new_items = unstacker.get_new_columns() new_placement = new_columns.get_indexer(new_items) new_values, mask = unstacker.get_new_values() mask = mask.any(0) return new_placement, new_values, mask
python
def _get_unstack_items(self, unstacker, new_columns): """ Get the placement, values, and mask for a Block unstack. This is shared between ObjectBlock and ExtensionBlock. They differ in that ObjectBlock passes the values, while ExtensionBlock passes the dummy ndarray of positions to be used by a take later. Parameters ---------- unstacker : pandas.core.reshape.reshape._Unstacker new_columns : Index All columns of the unstacked BlockManager. Returns ------- new_placement : ndarray[int] The placement of the new columns in `new_columns`. new_values : Union[ndarray, ExtensionArray] The first return value from _Unstacker.get_new_values. mask : ndarray[bool] The second return value from _Unstacker.get_new_values. """ # shared with ExtensionBlock new_items = unstacker.get_new_columns() new_placement = new_columns.get_indexer(new_items) new_values, mask = unstacker.get_new_values() mask = mask.any(0) return new_placement, new_values, mask
[ "def", "_get_unstack_items", "(", "self", ",", "unstacker", ",", "new_columns", ")", ":", "# shared with ExtensionBlock", "new_items", "=", "unstacker", ".", "get_new_columns", "(", ")", "new_placement", "=", "new_columns", ".", "get_indexer", "(", "new_items", ")", "new_values", ",", "mask", "=", "unstacker", ".", "get_new_values", "(", ")", "mask", "=", "mask", ".", "any", "(", "0", ")", "return", "new_placement", ",", "new_values", ",", "mask" ]
Get the placement, values, and mask for a Block unstack. This is shared between ObjectBlock and ExtensionBlock. They differ in that ObjectBlock passes the values, while ExtensionBlock passes the dummy ndarray of positions to be used by a take later. Parameters ---------- unstacker : pandas.core.reshape.reshape._Unstacker new_columns : Index All columns of the unstacked BlockManager. Returns ------- new_placement : ndarray[int] The placement of the new columns in `new_columns`. new_values : Union[ndarray, ExtensionArray] The first return value from _Unstacker.get_new_values. mask : ndarray[bool] The second return value from _Unstacker.get_new_values.
[ "Get", "the", "placement", "values", "and", "mask", "for", "a", "Block", "unstack", "." ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/internals/blocks.py#L1602-L1632
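A short sketch of what the unstacker produces, seen from the public Series.unstack:

import pandas as pd

s = pd.Series(
    [1, 2, 3, 4],
    index=pd.MultiIndex.from_product([["a", "b"], ["x", "y"]]),
)
# The inner level becomes the new columns; the mask returned above
# tracks which of those columns actually received any values.
print(s.unstack())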
19,572
pandas-dev/pandas
pandas/core/internals/blocks.py
ExtensionBlock._maybe_coerce_values
def _maybe_coerce_values(self, values): """Unbox to an extension array. This will unbox an ExtensionArray stored in an Index or Series. ExtensionArrays pass through. No dtype coercion is done. Parameters ---------- values : Index, Series, ExtensionArray Returns ------- ExtensionArray """ if isinstance(values, (ABCIndexClass, ABCSeries)): values = values._values return values
python
def _maybe_coerce_values(self, values): """Unbox to an extension array. This will unbox an ExtensionArray stored in an Index or Series. ExtensionArrays pass through. No dtype coercion is done. Parameters ---------- values : Index, Series, ExtensionArray Returns ------- ExtensionArray """ if isinstance(values, (ABCIndexClass, ABCSeries)): values = values._values return values
[ "def", "_maybe_coerce_values", "(", "self", ",", "values", ")", ":", "if", "isinstance", "(", "values", ",", "(", "ABCIndexClass", ",", "ABCSeries", ")", ")", ":", "values", "=", "values", ".", "_values", "return", "values" ]
Unbox to an extension array. This will unbox an ExtensionArray stored in an Index or Series. ExtensionArrays pass through. No dtype coercion is done. Parameters ---------- values : Index, Series, ExtensionArray Returns ------- ExtensionArray
[ "Unbox", "to", "an", "extension", "array", "." ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/internals/blocks.py#L1651-L1667
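The unboxing above is the flip side of what users see: the public .array accessor returns the underlying ExtensionArray that the block stores; a small illustration:

import pandas as pd

s = pd.Series(pd.Categorical(["a", "b", "a"]))
print(type(s.array))        # the Categorical ExtensionArray itself
print(s.array.categories)   # Index(['a', 'b'], dtype='object')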
19,573
pandas-dev/pandas
pandas/core/internals/blocks.py
ExtensionBlock.setitem
def setitem(self, indexer, value): """Set the value inplace, returning a same-typed block. This differs from Block.setitem by not allowing setitem to change the dtype of the Block. Parameters ---------- indexer : tuple, list-like, array-like, slice The subset of self.values to set value : object The value being set Returns ------- Block Notes ----- `indexer` is a direct slice/positional indexer. `value` must be a compatible shape. """ if isinstance(indexer, tuple): # we are always 1-D indexer = indexer[0] check_setitem_lengths(indexer, value, self.values) self.values[indexer] = value return self
python
def setitem(self, indexer, value): """Set the value inplace, returning a same-typed block. This differs from Block.setitem by not allowing setitem to change the dtype of the Block. Parameters ---------- indexer : tuple, list-like, array-like, slice The subset of self.values to set value : object The value being set Returns ------- Block Notes ----- `indexer` is a direct slice/positional indexer. `value` must be a compatible shape. """ if isinstance(indexer, tuple): # we are always 1-D indexer = indexer[0] check_setitem_lengths(indexer, value, self.values) self.values[indexer] = value return self
[ "def", "setitem", "(", "self", ",", "indexer", ",", "value", ")", ":", "if", "isinstance", "(", "indexer", ",", "tuple", ")", ":", "# we are always 1-D", "indexer", "=", "indexer", "[", "0", "]", "check_setitem_lengths", "(", "indexer", ",", "value", ",", "self", ".", "values", ")", "self", ".", "values", "[", "indexer", "]", "=", "value", "return", "self" ]
Set the value inplace, returning a same-typed block. This differs from Block.setitem by not allowing setitem to change the dtype of the Block. Parameters ---------- indexer : tuple, list-like, array-like, slice The subset of self.values to set value : object The value being set Returns ------- Block Notes ----- `indexer` is a direct slice/positional indexer. `value` must be a compatible shape.
[ "Set", "the", "value", "inplace", "returning", "a", "same", "-", "typed", "block", "." ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/internals/blocks.py#L1693-L1721
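Positional assignment on an extension-backed Series is handled at the block layer by a setitem like the one above; the notable point is that the dtype is preserved rather than upcast. A sketch on a nullable-integer column:

import pandas as pd

s = pd.Series(pd.array([1, 2, None], dtype="Int64"))
s.iloc[0] = 7       # same-typed block: no upcast to object/float
print(s.dtype)      # Int64
print(s)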
19,574
pandas-dev/pandas
pandas/core/internals/blocks.py
ExtensionBlock.take_nd
def take_nd(self, indexer, axis=0, new_mgr_locs=None, fill_tuple=None): """ Take values according to indexer and return them as a block. """ if fill_tuple is None: fill_value = None else: fill_value = fill_tuple[0] # axis doesn't matter; we are really a single-dim object # but are passed the axis depending on the calling routing # if its REALLY axis 0, then this will be a reindex and not a take new_values = self.values.take(indexer, fill_value=fill_value, allow_fill=True) if self.ndim == 1 and new_mgr_locs is None: new_mgr_locs = [0] else: if new_mgr_locs is None: new_mgr_locs = self.mgr_locs return self.make_block_same_class(new_values, new_mgr_locs)
python
def take_nd(self, indexer, axis=0, new_mgr_locs=None, fill_tuple=None): """ Take values according to indexer and return them as a block. """ if fill_tuple is None: fill_value = None else: fill_value = fill_tuple[0] # axis doesn't matter; we are really a single-dim object # but are passed the axis depending on the calling routing # if its REALLY axis 0, then this will be a reindex and not a take new_values = self.values.take(indexer, fill_value=fill_value, allow_fill=True) if self.ndim == 1 and new_mgr_locs is None: new_mgr_locs = [0] else: if new_mgr_locs is None: new_mgr_locs = self.mgr_locs return self.make_block_same_class(new_values, new_mgr_locs)
[ "def", "take_nd", "(", "self", ",", "indexer", ",", "axis", "=", "0", ",", "new_mgr_locs", "=", "None", ",", "fill_tuple", "=", "None", ")", ":", "if", "fill_tuple", "is", "None", ":", "fill_value", "=", "None", "else", ":", "fill_value", "=", "fill_tuple", "[", "0", "]", "# axis doesn't matter; we are really a single-dim object", "# but are passed the axis depending on the calling routing", "# if its REALLY axis 0, then this will be a reindex and not a take", "new_values", "=", "self", ".", "values", ".", "take", "(", "indexer", ",", "fill_value", "=", "fill_value", ",", "allow_fill", "=", "True", ")", "if", "self", ".", "ndim", "==", "1", "and", "new_mgr_locs", "is", "None", ":", "new_mgr_locs", "=", "[", "0", "]", "else", ":", "if", "new_mgr_locs", "is", "None", ":", "new_mgr_locs", "=", "self", ".", "mgr_locs", "return", "self", ".", "make_block_same_class", "(", "new_values", ",", "new_mgr_locs", ")" ]
Take values according to indexer and return them as a block.
[ "Take", "values", "according", "to", "indexer", "and", "return", "them", "as", "a", "block", "." ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/internals/blocks.py#L1733-L1754
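The allow_fill=True call above uses the ExtensionArray take protocol, in which -1 marks positions to be filled; a direct illustration on a nullable-integer array:

import pandas as pd

arr = pd.array([10, 20, 30], dtype="Int64")
# -1 entries are filled (here with the array's NA) when allow_fill=True.
print(arr.take([0, -1, 2], allow_fill=True))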
19,575
pandas-dev/pandas
pandas/core/internals/blocks.py
ExtensionBlock.shift
def shift(self, periods: int, axis: libinternals.BlockPlacement = 0, fill_value: Any = None) -> List['ExtensionBlock']: """ Shift the block by `periods`. Dispatches to underlying ExtensionArray and re-boxes in an ExtensionBlock. """ return [ self.make_block_same_class( self.values.shift(periods=periods, fill_value=fill_value), placement=self.mgr_locs, ndim=self.ndim) ]
python
def shift(self, periods: int, axis: libinternals.BlockPlacement = 0, fill_value: Any = None) -> List['ExtensionBlock']: """ Shift the block by `periods`. Dispatches to underlying ExtensionArray and re-boxes in an ExtensionBlock. """ return [ self.make_block_same_class( self.values.shift(periods=periods, fill_value=fill_value), placement=self.mgr_locs, ndim=self.ndim) ]
[ "def", "shift", "(", "self", ",", "periods", ":", "int", ",", "axis", ":", "libinternals", ".", "BlockPlacement", "=", "0", ",", "fill_value", ":", "Any", "=", "None", ")", "->", "List", "[", "'ExtensionBlock'", "]", ":", "return", "[", "self", ".", "make_block_same_class", "(", "self", ".", "values", ".", "shift", "(", "periods", "=", "periods", ",", "fill_value", "=", "fill_value", ")", ",", "placement", "=", "self", ".", "mgr_locs", ",", "ndim", "=", "self", ".", "ndim", ")", "]" ]
Shift the block by `periods`. Dispatches to underlying ExtensionArray and re-boxes in an ExtensionBlock.
[ "Shift", "the", "block", "by", "periods", "." ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/internals/blocks.py#L1816-L1830
19,576
pandas-dev/pandas
pandas/core/internals/blocks.py
DatetimeBlock._astype
def _astype(self, dtype, **kwargs):
    """
    these automatically copy, so copy=True has no effect
    raise on an exception if raise == True
    """
    dtype = pandas_dtype(dtype)

    # if we are passed a datetime64[ns, tz]
    if is_datetime64tz_dtype(dtype):
        values = self.values
        if getattr(values, 'tz', None) is None:
            values = DatetimeIndex(values).tz_localize('UTC')
        values = values.tz_convert(dtype.tz)
        return self.make_block(values)

    # delegate
    return super()._astype(dtype=dtype, **kwargs)
python
def _astype(self, dtype, **kwargs):
    """
    these automatically copy, so copy=True has no effect
    raise on an exception if raise == True
    """
    dtype = pandas_dtype(dtype)

    # if we are passed a datetime64[ns, tz]
    if is_datetime64tz_dtype(dtype):
        values = self.values
        if getattr(values, 'tz', None) is None:
            values = DatetimeIndex(values).tz_localize('UTC')
        values = values.tz_convert(dtype.tz)
        return self.make_block(values)

    # delegate
    return super()._astype(dtype=dtype, **kwargs)
[ "def", "_astype", "(", "self", ",", "dtype", ",", "*", "*", "kwargs", ")", ":", "dtype", "=", "pandas_dtype", "(", "dtype", ")", "# if we are passed a datetime64[ns, tz]", "if", "is_datetime64tz_dtype", "(", "dtype", ")", ":", "values", "=", "self", ".", "values", "if", "getattr", "(", "values", ",", "'tz'", ",", "None", ")", "is", "None", ":", "values", "=", "DatetimeIndex", "(", "values", ")", ".", "tz_localize", "(", "'UTC'", ")", "values", "=", "values", ".", "tz_convert", "(", "dtype", ".", "tz", ")", "return", "self", ".", "make_block", "(", "values", ")", "# delegate", "return", "super", "(", ")", ".", "_astype", "(", "dtype", "=", "dtype", ",", "*", "*", "kwargs", ")" ]
these automatically copy, so copy=True has no effect
raise on an exception if raise == True
[ "these", "automatically", "copy", "so", "copy", "=", "True", "has", "no", "effect", "raise", "on", "an", "except", "if", "raise", "==", "True" ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/internals/blocks.py#L2084-L2100
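Seen from the public API, the tz branch above is what makes a naive-to-aware astype work at this revision: the values are localized to UTC first and then converted. A quick sketch:

import pandas as pd

s = pd.Series(pd.date_range("2019-01-01", periods=2))
print(s.dtype)                                  # datetime64[ns], naive
print(s.astype("datetime64[ns, US/Eastern]"))   # localized via UTC, then converted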
19,577
pandas-dev/pandas
pandas/core/internals/blocks.py
DatetimeBlock._try_coerce_args
def _try_coerce_args(self, values, other): """ Coerce values and other to dtype 'i8'. NaN and NaT convert to the smallest i8, and will correctly round-trip to NaT if converted back in _try_coerce_result. values is always ndarray-like, other may not be Parameters ---------- values : ndarray-like other : ndarray-like or scalar Returns ------- base-type values, base-type other """ values = values.view('i8') if isinstance(other, bool): raise TypeError elif is_null_datetimelike(other): other = tslibs.iNaT elif isinstance(other, (datetime, np.datetime64, date)): other = self._box_func(other) if getattr(other, 'tz') is not None: raise TypeError("cannot coerce a Timestamp with a tz on a " "naive Block") other = other.asm8.view('i8') elif hasattr(other, 'dtype') and is_datetime64_dtype(other): other = other.astype('i8', copy=False).view('i8') else: # coercion issues # let higher levels handle raise TypeError(other) return values, other
python
def _try_coerce_args(self, values, other): """ Coerce values and other to dtype 'i8'. NaN and NaT convert to the smallest i8, and will correctly round-trip to NaT if converted back in _try_coerce_result. values is always ndarray-like, other may not be Parameters ---------- values : ndarray-like other : ndarray-like or scalar Returns ------- base-type values, base-type other """ values = values.view('i8') if isinstance(other, bool): raise TypeError elif is_null_datetimelike(other): other = tslibs.iNaT elif isinstance(other, (datetime, np.datetime64, date)): other = self._box_func(other) if getattr(other, 'tz') is not None: raise TypeError("cannot coerce a Timestamp with a tz on a " "naive Block") other = other.asm8.view('i8') elif hasattr(other, 'dtype') and is_datetime64_dtype(other): other = other.astype('i8', copy=False).view('i8') else: # coercion issues # let higher levels handle raise TypeError(other) return values, other
[ "def", "_try_coerce_args", "(", "self", ",", "values", ",", "other", ")", ":", "values", "=", "values", ".", "view", "(", "'i8'", ")", "if", "isinstance", "(", "other", ",", "bool", ")", ":", "raise", "TypeError", "elif", "is_null_datetimelike", "(", "other", ")", ":", "other", "=", "tslibs", ".", "iNaT", "elif", "isinstance", "(", "other", ",", "(", "datetime", ",", "np", ".", "datetime64", ",", "date", ")", ")", ":", "other", "=", "self", ".", "_box_func", "(", "other", ")", "if", "getattr", "(", "other", ",", "'tz'", ")", "is", "not", "None", ":", "raise", "TypeError", "(", "\"cannot coerce a Timestamp with a tz on a \"", "\"naive Block\"", ")", "other", "=", "other", ".", "asm8", ".", "view", "(", "'i8'", ")", "elif", "hasattr", "(", "other", ",", "'dtype'", ")", "and", "is_datetime64_dtype", "(", "other", ")", ":", "other", "=", "other", ".", "astype", "(", "'i8'", ",", "copy", "=", "False", ")", ".", "view", "(", "'i8'", ")", "else", ":", "# coercion issues", "# let higher levels handle", "raise", "TypeError", "(", "other", ")", "return", "values", ",", "other" ]
Coerce values and other to dtype 'i8'. NaN and NaT convert to the smallest i8, and will correctly round-trip to NaT if converted back in _try_coerce_result. values is always ndarray-like, other may not be Parameters ---------- values : ndarray-like other : ndarray-like or scalar Returns ------- base-type values, base-type other
[ "Coerce", "values", "and", "other", "to", "dtype", "i8", ".", "NaN", "and", "NaT", "convert", "to", "the", "smallest", "i8", "and", "will", "correctly", "round", "-", "trip", "to", "NaT", "if", "converted", "back", "in", "_try_coerce_result", ".", "values", "is", "always", "ndarray", "-", "like", "other", "may", "not", "be" ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/internals/blocks.py#L2109-L2145
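The i8 view and the NaT sentinel used above can be seen directly with NumPy; NaT is stored as the minimum int64 (iNaT), which is what lets it round-trip:

import numpy as np

dt = np.array(["2019-01-01", "NaT"], dtype="datetime64[ns]")
i8 = dt.view("i8")
print(i8)                         # [1546300800000000000 -9223372036854775808]
print(i8.view("datetime64[ns]"))  # round-trips, NaT included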
19,578
pandas-dev/pandas
pandas/core/internals/blocks.py
DatetimeTZBlock.get_values
def get_values(self, dtype=None):
    """
    Returns an ndarray of values.

    Parameters
    ----------
    dtype : np.dtype
        Only `object`-like dtypes are respected here (not sure why).

    Returns
    -------
    values : ndarray
        When ``dtype=object``, then an object-dtype ndarray of
        boxed values is returned.
        Otherwise, an M8[ns] ndarray is returned.

        DatetimeArray is always 1-d. ``get_values`` will reshape
        the return value to be the same dimensionality as the
        block.
    """
    values = self.values
    if is_object_dtype(dtype):
        values = values._box_values(values._data)

    values = np.asarray(values)

    if self.ndim == 2:
        # Ensure that our shape is correct for DataFrame.
        # ExtensionArrays are always 1-D, even in a DataFrame when
        # the analogous NumPy-backed column would be a 2-D ndarray.
        values = values.reshape(1, -1)
    return values
python
def get_values(self, dtype=None):
    """
    Returns an ndarray of values.

    Parameters
    ----------
    dtype : np.dtype
        Only `object`-like dtypes are respected here (not sure why).

    Returns
    -------
    values : ndarray
        When ``dtype=object``, then an object-dtype ndarray of
        boxed values is returned.
        Otherwise, an M8[ns] ndarray is returned.

        DatetimeArray is always 1-d. ``get_values`` will reshape
        the return value to be the same dimensionality as the
        block.
    """
    values = self.values
    if is_object_dtype(dtype):
        values = values._box_values(values._data)

    values = np.asarray(values)

    if self.ndim == 2:
        # Ensure that our shape is correct for DataFrame.
        # ExtensionArrays are always 1-D, even in a DataFrame when
        # the analogous NumPy-backed column would be a 2-D ndarray.
        values = values.reshape(1, -1)
    return values
[ "def", "get_values", "(", "self", ",", "dtype", "=", "None", ")", ":", "values", "=", "self", ".", "values", "if", "is_object_dtype", "(", "dtype", ")", ":", "values", "=", "values", ".", "_box_values", "(", "values", ".", "_data", ")", "values", "=", "np", ".", "asarray", "(", "values", ")", "if", "self", ".", "ndim", "==", "2", ":", "# Ensure that our shape is correct for DataFrame.", "# ExtensionArrays are always 1-D, even in a DataFrame when", "# the analogous NumPy-backed column would be a 2-D ndarray.", "values", "=", "values", ".", "reshape", "(", "1", ",", "-", "1", ")", "return", "values" ]
Returns an ndarray of values.

Parameters
----------
dtype : np.dtype
    Only `object`-like dtypes are respected here (not sure why).

Returns
-------
values : ndarray
    When ``dtype=object``, then an object-dtype ndarray of
    boxed values is returned.
    Otherwise, an M8[ns] ndarray is returned.

    DatetimeArray is always 1-d. ``get_values`` will reshape
    the return value to be the same dimensionality as the
    block.
[ "Returns", "an", "ndarray", "of", "values", "." ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/internals/blocks.py#L2245-L2277
19,579
pandas-dev/pandas
pandas/core/internals/blocks.py
DatetimeTZBlock._try_coerce_args
def _try_coerce_args(self, values, other): """ localize and return i8 for the values Parameters ---------- values : ndarray-like other : ndarray-like or scalar Returns ------- base-type values, base-type other """ # asi8 is a view, needs copy values = _block_shape(values.view("i8"), ndim=self.ndim) if isinstance(other, ABCSeries): other = self._holder(other) if isinstance(other, bool): raise TypeError elif is_datetime64_dtype(other): # add the tz back other = self._holder(other, dtype=self.dtype) elif is_null_datetimelike(other): other = tslibs.iNaT elif isinstance(other, self._holder): if other.tz != self.values.tz: raise ValueError("incompatible or non tz-aware value") other = _block_shape(other.asi8, ndim=self.ndim) elif isinstance(other, (np.datetime64, datetime, date)): other = tslibs.Timestamp(other) tz = getattr(other, 'tz', None) # test we can have an equal time zone if tz is None or str(tz) != str(self.values.tz): raise ValueError("incompatible or non tz-aware value") other = other.value else: raise TypeError(other) return values, other
python
def _try_coerce_args(self, values, other): """ localize and return i8 for the values Parameters ---------- values : ndarray-like other : ndarray-like or scalar Returns ------- base-type values, base-type other """ # asi8 is a view, needs copy values = _block_shape(values.view("i8"), ndim=self.ndim) if isinstance(other, ABCSeries): other = self._holder(other) if isinstance(other, bool): raise TypeError elif is_datetime64_dtype(other): # add the tz back other = self._holder(other, dtype=self.dtype) elif is_null_datetimelike(other): other = tslibs.iNaT elif isinstance(other, self._holder): if other.tz != self.values.tz: raise ValueError("incompatible or non tz-aware value") other = _block_shape(other.asi8, ndim=self.ndim) elif isinstance(other, (np.datetime64, datetime, date)): other = tslibs.Timestamp(other) tz = getattr(other, 'tz', None) # test we can have an equal time zone if tz is None or str(tz) != str(self.values.tz): raise ValueError("incompatible or non tz-aware value") other = other.value else: raise TypeError(other) return values, other
[ "def", "_try_coerce_args", "(", "self", ",", "values", ",", "other", ")", ":", "# asi8 is a view, needs copy", "values", "=", "_block_shape", "(", "values", ".", "view", "(", "\"i8\"", ")", ",", "ndim", "=", "self", ".", "ndim", ")", "if", "isinstance", "(", "other", ",", "ABCSeries", ")", ":", "other", "=", "self", ".", "_holder", "(", "other", ")", "if", "isinstance", "(", "other", ",", "bool", ")", ":", "raise", "TypeError", "elif", "is_datetime64_dtype", "(", "other", ")", ":", "# add the tz back", "other", "=", "self", ".", "_holder", "(", "other", ",", "dtype", "=", "self", ".", "dtype", ")", "elif", "is_null_datetimelike", "(", "other", ")", ":", "other", "=", "tslibs", ".", "iNaT", "elif", "isinstance", "(", "other", ",", "self", ".", "_holder", ")", ":", "if", "other", ".", "tz", "!=", "self", ".", "values", ".", "tz", ":", "raise", "ValueError", "(", "\"incompatible or non tz-aware value\"", ")", "other", "=", "_block_shape", "(", "other", ".", "asi8", ",", "ndim", "=", "self", ".", "ndim", ")", "elif", "isinstance", "(", "other", ",", "(", "np", ".", "datetime64", ",", "datetime", ",", "date", ")", ")", ":", "other", "=", "tslibs", ".", "Timestamp", "(", "other", ")", "tz", "=", "getattr", "(", "other", ",", "'tz'", ",", "None", ")", "# test we can have an equal time zone", "if", "tz", "is", "None", "or", "str", "(", "tz", ")", "!=", "str", "(", "self", ".", "values", ".", "tz", ")", ":", "raise", "ValueError", "(", "\"incompatible or non tz-aware value\"", ")", "other", "=", "other", ".", "value", "else", ":", "raise", "TypeError", "(", "other", ")", "return", "values", ",", "other" ]
localize and return i8 for the values Parameters ---------- values : ndarray-like other : ndarray-like or scalar Returns ------- base-type values, base-type other
[ "localize", "and", "return", "i8", "for", "the", "values" ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/internals/blocks.py#L2294-L2336
19,580
pandas-dev/pandas
pandas/core/internals/blocks.py
DatetimeTZBlock.diff
def diff(self, n, axis=0):
    """1st discrete difference

    Parameters
    ----------
    n : int, number of periods to diff
    axis : int, axis to diff upon. default 0

    Returns
    -------
    A list with a new TimeDeltaBlock.

    Notes
    -----
    The arguments here are mimicking shift so they are called correctly
    by apply.
    """
    if axis == 0:
        # Cannot currently calculate diff across multiple blocks since this
        # function is invoked via apply
        raise NotImplementedError
    new_values = (self.values - self.shift(n, axis=axis)[0].values).asi8

    # Reshape the new_values like how algos.diff does for timedelta data
    new_values = new_values.reshape(1, len(new_values))
    new_values = new_values.astype('timedelta64[ns]')
    return [TimeDeltaBlock(new_values, placement=self.mgr_locs.indexer)]
python
def diff(self, n, axis=0):
    """1st discrete difference

    Parameters
    ----------
    n : int, number of periods to diff
    axis : int, axis to diff upon. default 0

    Returns
    -------
    A list with a new TimeDeltaBlock.

    Notes
    -----
    The arguments here are mimicking shift so they are called correctly
    by apply.
    """
    if axis == 0:
        # Cannot currently calculate diff across multiple blocks since this
        # function is invoked via apply
        raise NotImplementedError
    new_values = (self.values - self.shift(n, axis=axis)[0].values).asi8

    # Reshape the new_values like how algos.diff does for timedelta data
    new_values = new_values.reshape(1, len(new_values))
    new_values = new_values.astype('timedelta64[ns]')
    return [TimeDeltaBlock(new_values, placement=self.mgr_locs.indexer)]
[ "def", "diff", "(", "self", ",", "n", ",", "axis", "=", "0", ")", ":", "if", "axis", "==", "0", ":", "# Cannot currently calculate diff across multiple blocks since this", "# function is invoked via apply", "raise", "NotImplementedError", "new_values", "=", "(", "self", ".", "values", "-", "self", ".", "shift", "(", "n", ",", "axis", "=", "axis", ")", "[", "0", "]", ".", "values", ")", ".", "asi8", "# Reshape the new_values like how algos.diff does for timedelta data", "new_values", "=", "new_values", ".", "reshape", "(", "1", ",", "len", "(", "new_values", ")", ")", "new_values", "=", "new_values", ".", "astype", "(", "'timedelta64[ns]'", ")", "return", "[", "TimeDeltaBlock", "(", "new_values", ",", "placement", "=", "self", ".", "mgr_locs", ".", "indexer", ")", "]" ]
1st discrete difference

Parameters
----------
n : int, number of periods to diff
axis : int, axis to diff upon. default 0

Returns
-------
A list with a new TimeDeltaBlock.

Notes
-----
The arguments here are mimicking shift so they are called correctly
by apply.
[ "1st", "discrete", "difference" ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/internals/blocks.py#L2362-L2388
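From the public side, differencing a tz-aware datetime Series produces timedeltas, matching the TimeDeltaBlock returned above:

import pandas as pd

s = pd.Series(pd.date_range("2019-01-01", periods=3, tz="UTC"))
d = s.diff()
print(d.dtype)   # timedelta64[ns]
print(d)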
19,581
pandas-dev/pandas
pandas/core/internals/blocks.py
TimeDeltaBlock._try_coerce_args
def _try_coerce_args(self, values, other): """ Coerce values and other to int64, with null values converted to iNaT. values is always ndarray-like, other may not be Parameters ---------- values : ndarray-like other : ndarray-like or scalar Returns ------- base-type values, base-type other """ values = values.view('i8') if isinstance(other, bool): raise TypeError elif is_null_datetimelike(other): other = tslibs.iNaT elif isinstance(other, (timedelta, np.timedelta64)): other = Timedelta(other).value elif hasattr(other, 'dtype') and is_timedelta64_dtype(other): other = other.astype('i8', copy=False).view('i8') else: # coercion issues # let higher levels handle raise TypeError(other) return values, other
python
def _try_coerce_args(self, values, other): """ Coerce values and other to int64, with null values converted to iNaT. values is always ndarray-like, other may not be Parameters ---------- values : ndarray-like other : ndarray-like or scalar Returns ------- base-type values, base-type other """ values = values.view('i8') if isinstance(other, bool): raise TypeError elif is_null_datetimelike(other): other = tslibs.iNaT elif isinstance(other, (timedelta, np.timedelta64)): other = Timedelta(other).value elif hasattr(other, 'dtype') and is_timedelta64_dtype(other): other = other.astype('i8', copy=False).view('i8') else: # coercion issues # let higher levels handle raise TypeError(other) return values, other
[ "def", "_try_coerce_args", "(", "self", ",", "values", ",", "other", ")", ":", "values", "=", "values", ".", "view", "(", "'i8'", ")", "if", "isinstance", "(", "other", ",", "bool", ")", ":", "raise", "TypeError", "elif", "is_null_datetimelike", "(", "other", ")", ":", "other", "=", "tslibs", ".", "iNaT", "elif", "isinstance", "(", "other", ",", "(", "timedelta", ",", "np", ".", "timedelta64", ")", ")", ":", "other", "=", "Timedelta", "(", "other", ")", ".", "value", "elif", "hasattr", "(", "other", ",", "'dtype'", ")", "and", "is_timedelta64_dtype", "(", "other", ")", ":", "other", "=", "other", ".", "astype", "(", "'i8'", ",", "copy", "=", "False", ")", ".", "view", "(", "'i8'", ")", "else", ":", "# coercion issues", "# let higher levels handle", "raise", "TypeError", "(", "other", ")", "return", "values", ",", "other" ]
Coerce values and other to int64, with null values converted to iNaT. values is always ndarray-like, other may not be Parameters ---------- values : ndarray-like other : ndarray-like or scalar Returns ------- base-type values, base-type other
[ "Coerce", "values", "and", "other", "to", "int64", "with", "null", "values", "converted", "to", "iNaT", ".", "values", "is", "always", "ndarray", "-", "like", "other", "may", "not", "be" ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/internals/blocks.py#L2477-L2506
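The i8 conversion for a scalar timedelta is just Timedelta.value, the nanosecond integer the coercion above produces; for instance:

from pandas import Timedelta

print(Timedelta("1 day").value)   # 86400000000000 nanoseconds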
19,582
pandas-dev/pandas
pandas/core/internals/blocks.py
ObjectBlock._replace_single
def _replace_single(self, to_replace, value, inplace=False, filter=None,
                    regex=False, convert=True, mask=None):
    """
    Replace elements by the given value.

    Parameters
    ----------
    to_replace : object or pattern
        Scalar to replace or regular expression to match.
    value : object
        Replacement object.
    inplace : bool, default False
        Perform inplace modification.
    filter : list, optional
    regex : bool, default False
        If true, perform regular expression substitution.
    convert : bool, default True
        If true, try to coerce any object types to better types.
    mask : array-like of bool, optional
        True indicates the corresponding element is ignored.

    Returns
    -------
    a new block, the result after replacing
    """
    inplace = validate_bool_kwarg(inplace, 'inplace')

    # to_replace is regex compilable
    to_rep_re = regex and is_re_compilable(to_replace)

    # regex is regex compilable
    regex_re = is_re_compilable(regex)

    # only one will survive
    if to_rep_re and regex_re:
        raise AssertionError('only one of to_replace and regex can be '
                             'regex compilable')

    # if regex was passed as something that can be a regex (rather than a
    # boolean)
    if regex_re:
        to_replace = regex

    regex = regex_re or to_rep_re

    # try to get the pattern attribute (compiled re) or it's a string
    try:
        pattern = to_replace.pattern
    except AttributeError:
        pattern = to_replace

    # if the pattern is not empty and to_replace is either a string or a
    # regex
    if regex and pattern:
        rx = re.compile(to_replace)
    else:
        # if the thing to replace is not a string or compiled regex call
        # the superclass method -> to_replace is some kind of object
        return super().replace(to_replace, value, inplace=inplace,
                               filter=filter, regex=regex)

    new_values = self.values if inplace else self.values.copy()

    # deal with replacing values with objects (strings) that match but
    # whose replacement is not a string (numeric, nan, object)
    if isna(value) or not isinstance(value, str):

        def re_replacer(s):
            try:
                return value if rx.search(s) is not None else s
            except TypeError:
                return s

    else:
        # value is guaranteed to be a string here, s can be either a string
        # or null if it's null it gets returned
        def re_replacer(s):
            try:
                return rx.sub(value, s)
            except TypeError:
                return s

    f = np.vectorize(re_replacer, otypes=[self.dtype])

    if filter is None:
        filt = slice(None)
    else:
        filt = self.mgr_locs.isin(filter).nonzero()[0]

    if mask is None:
        new_values[filt] = f(new_values[filt])
    else:
        new_values[filt][mask] = f(new_values[filt][mask])

    # convert
    block = self.make_block(new_values)
    if convert:
        block = block.convert(by_item=True, numeric=False)
    return block
python
def _replace_single(self, to_replace, value, inplace=False, filter=None,
                    regex=False, convert=True, mask=None):
    """
    Replace elements by the given value.

    Parameters
    ----------
    to_replace : object or pattern
        Scalar to replace or regular expression to match.
    value : object
        Replacement object.
    inplace : bool, default False
        Perform inplace modification.
    filter : list, optional
    regex : bool, default False
        If true, perform regular expression substitution.
    convert : bool, default True
        If true, try to coerce any object types to better types.
    mask : array-like of bool, optional
        True indicates the corresponding element is ignored.

    Returns
    -------
    a new block, the result after replacing
    """
    inplace = validate_bool_kwarg(inplace, 'inplace')

    # to_replace is regex compilable
    to_rep_re = regex and is_re_compilable(to_replace)

    # regex is regex compilable
    regex_re = is_re_compilable(regex)

    # only one will survive
    if to_rep_re and regex_re:
        raise AssertionError('only one of to_replace and regex can be '
                             'regex compilable')

    # if regex was passed as something that can be a regex (rather than a
    # boolean)
    if regex_re:
        to_replace = regex

    regex = regex_re or to_rep_re

    # try to get the pattern attribute (compiled re) or it's a string
    try:
        pattern = to_replace.pattern
    except AttributeError:
        pattern = to_replace

    # if the pattern is not empty and to_replace is either a string or a
    # regex
    if regex and pattern:
        rx = re.compile(to_replace)
    else:
        # if the thing to replace is not a string or compiled regex call
        # the superclass method -> to_replace is some kind of object
        return super().replace(to_replace, value, inplace=inplace,
                               filter=filter, regex=regex)

    new_values = self.values if inplace else self.values.copy()

    # deal with replacing values with objects (strings) that match but
    # whose replacement is not a string (numeric, nan, object)
    if isna(value) or not isinstance(value, str):

        def re_replacer(s):
            try:
                return value if rx.search(s) is not None else s
            except TypeError:
                return s

    else:
        # value is guaranteed to be a string here, s can be either a string
        # or null if it's null it gets returned
        def re_replacer(s):
            try:
                return rx.sub(value, s)
            except TypeError:
                return s

    f = np.vectorize(re_replacer, otypes=[self.dtype])

    if filter is None:
        filt = slice(None)
    else:
        filt = self.mgr_locs.isin(filter).nonzero()[0]

    if mask is None:
        new_values[filt] = f(new_values[filt])
    else:
        new_values[filt][mask] = f(new_values[filt][mask])

    # convert
    block = self.make_block(new_values)
    if convert:
        block = block.convert(by_item=True, numeric=False)
    return block
[ "def", "_replace_single", "(", "self", ",", "to_replace", ",", "value", ",", "inplace", "=", "False", ",", "filter", "=", "None", ",", "regex", "=", "False", ",", "convert", "=", "True", ",", "mask", "=", "None", ")", ":", "inplace", "=", "validate_bool_kwarg", "(", "inplace", ",", "'inplace'", ")", "# to_replace is regex compilable", "to_rep_re", "=", "regex", "and", "is_re_compilable", "(", "to_replace", ")", "# regex is regex compilable", "regex_re", "=", "is_re_compilable", "(", "regex", ")", "# only one will survive", "if", "to_rep_re", "and", "regex_re", ":", "raise", "AssertionError", "(", "'only one of to_replace and regex can be '", "'regex compilable'", ")", "# if regex was passed as something that can be a regex (rather than a", "# boolean)", "if", "regex_re", ":", "to_replace", "=", "regex", "regex", "=", "regex_re", "or", "to_rep_re", "# try to get the pattern attribute (compiled re) or it's a string", "try", ":", "pattern", "=", "to_replace", ".", "pattern", "except", "AttributeError", ":", "pattern", "=", "to_replace", "# if the pattern is not empty and to_replace is either a string or a", "# regex", "if", "regex", "and", "pattern", ":", "rx", "=", "re", ".", "compile", "(", "to_replace", ")", "else", ":", "# if the thing to replace is not a string or compiled regex call", "# the superclass method -> to_replace is some kind of object", "return", "super", "(", ")", ".", "replace", "(", "to_replace", ",", "value", ",", "inplace", "=", "inplace", ",", "filter", "=", "filter", ",", "regex", "=", "regex", ")", "new_values", "=", "self", ".", "values", "if", "inplace", "else", "self", ".", "values", ".", "copy", "(", ")", "# deal with replacing values with objects (strings) that match but", "# whose replacement is not a string (numeric, nan, object)", "if", "isna", "(", "value", ")", "or", "not", "isinstance", "(", "value", ",", "str", ")", ":", "def", "re_replacer", "(", "s", ")", ":", "try", ":", "return", "value", "if", "rx", ".", "search", "(", "s", ")", "is", "not", "None", "else", "s", "except", "TypeError", ":", "return", "s", "else", ":", "# value is guaranteed to be a string here, s can be either a string", "# or null if it's null it gets returned", "def", "re_replacer", "(", "s", ")", ":", "try", ":", "return", "rx", ".", "sub", "(", "value", ",", "s", ")", "except", "TypeError", ":", "return", "s", "f", "=", "np", ".", "vectorize", "(", "re_replacer", ",", "otypes", "=", "[", "self", ".", "dtype", "]", ")", "if", "filter", "is", "None", ":", "filt", "=", "slice", "(", "None", ")", "else", ":", "filt", "=", "self", ".", "mgr_locs", ".", "isin", "(", "filter", ")", ".", "nonzero", "(", ")", "[", "0", "]", "if", "mask", "is", "None", ":", "new_values", "[", "filt", "]", "=", "f", "(", "new_values", "[", "filt", "]", ")", "else", ":", "new_values", "[", "filt", "]", "[", "mask", "]", "=", "f", "(", "new_values", "[", "filt", "]", "[", "mask", "]", ")", "# convert", "block", "=", "self", ".", "make_block", "(", "new_values", ")", "if", "convert", ":", "block", "=", "block", ".", "convert", "(", "by_item", "=", "True", ",", "numeric", "=", "False", ")", "return", "block" ]
Replace elements by the given value.

        Parameters
        ----------
        to_replace : object or pattern
            Scalar to replace or regular expression to match.
        value : object
            Replacement object.
        inplace : bool, default False
            Perform inplace modification.
        filter : list, optional
        regex : bool, default False
            If true, perform regular expression substitution.
        convert : bool, default True
            If true, try to coerce any object types to better types.
        mask : array-like of bool, optional
            True indicates the corresponding element is ignored.

        Returns
        -------
        a new block, the result after replacing
[ "Replace", "elements", "by", "the", "given", "value", "." ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/internals/blocks.py#L2744-L2841
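Worth noting for readers of this entry: `_replace_single` is the internal path behind the public `Series.replace`/`DataFrame.replace` regex handling. A minimal user-level sketch of the behavior it implements (the sample values are illustrative, not from the source):

import pandas as pd

s = pd.Series(['bat', 'foo', 'bait'])

# Element-wise regex substitution; elements that do not match (or are not
# strings) pass through unchanged.
print(s.replace(regex=r'^ba.$', value='new'))
# 0     new
# 1     foo
# 2    bait
# dtype: object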
19,583
pandas-dev/pandas
pandas/io/excel/_xlsxwriter.py
_XlsxStyler.convert
def convert(cls, style_dict, num_format_str=None): """ converts a style_dict to an xlsxwriter format dict Parameters ---------- style_dict : style dictionary to convert num_format_str : optional number format string """ # Create a XlsxWriter format object. props = {} if num_format_str is not None: props['num_format'] = num_format_str if style_dict is None: return props if 'borders' in style_dict: style_dict = style_dict.copy() style_dict['border'] = style_dict.pop('borders') for style_group_key, style_group in style_dict.items(): for src, dst in cls.STYLE_MAPPING.get(style_group_key, []): # src is a sequence of keys into a nested dict # dst is a flat key if dst in props: continue v = style_group for k in src: try: v = v[k] except (KeyError, TypeError): break else: props[dst] = v if isinstance(props.get('pattern'), str): # TODO: support other fill patterns props['pattern'] = 0 if props['pattern'] == 'none' else 1 for k in ['border', 'top', 'right', 'bottom', 'left']: if isinstance(props.get(k), str): try: props[k] = ['none', 'thin', 'medium', 'dashed', 'dotted', 'thick', 'double', 'hair', 'mediumDashed', 'dashDot', 'mediumDashDot', 'dashDotDot', 'mediumDashDotDot', 'slantDashDot'].index(props[k]) except ValueError: props[k] = 2 if isinstance(props.get('font_script'), str): props['font_script'] = ['baseline', 'superscript', 'subscript'].index(props['font_script']) if isinstance(props.get('underline'), str): props['underline'] = {'none': 0, 'single': 1, 'double': 2, 'singleAccounting': 33, 'doubleAccounting': 34}[props['underline']] return props
python
def convert(cls, style_dict, num_format_str=None): """ converts a style_dict to an xlsxwriter format dict Parameters ---------- style_dict : style dictionary to convert num_format_str : optional number format string """ # Create a XlsxWriter format object. props = {} if num_format_str is not None: props['num_format'] = num_format_str if style_dict is None: return props if 'borders' in style_dict: style_dict = style_dict.copy() style_dict['border'] = style_dict.pop('borders') for style_group_key, style_group in style_dict.items(): for src, dst in cls.STYLE_MAPPING.get(style_group_key, []): # src is a sequence of keys into a nested dict # dst is a flat key if dst in props: continue v = style_group for k in src: try: v = v[k] except (KeyError, TypeError): break else: props[dst] = v if isinstance(props.get('pattern'), str): # TODO: support other fill patterns props['pattern'] = 0 if props['pattern'] == 'none' else 1 for k in ['border', 'top', 'right', 'bottom', 'left']: if isinstance(props.get(k), str): try: props[k] = ['none', 'thin', 'medium', 'dashed', 'dotted', 'thick', 'double', 'hair', 'mediumDashed', 'dashDot', 'mediumDashDot', 'dashDotDot', 'mediumDashDotDot', 'slantDashDot'].index(props[k]) except ValueError: props[k] = 2 if isinstance(props.get('font_script'), str): props['font_script'] = ['baseline', 'superscript', 'subscript'].index(props['font_script']) if isinstance(props.get('underline'), str): props['underline'] = {'none': 0, 'single': 1, 'double': 2, 'singleAccounting': 33, 'doubleAccounting': 34}[props['underline']] return props
[ "def", "convert", "(", "cls", ",", "style_dict", ",", "num_format_str", "=", "None", ")", ":", "# Create a XlsxWriter format object.", "props", "=", "{", "}", "if", "num_format_str", "is", "not", "None", ":", "props", "[", "'num_format'", "]", "=", "num_format_str", "if", "style_dict", "is", "None", ":", "return", "props", "if", "'borders'", "in", "style_dict", ":", "style_dict", "=", "style_dict", ".", "copy", "(", ")", "style_dict", "[", "'border'", "]", "=", "style_dict", ".", "pop", "(", "'borders'", ")", "for", "style_group_key", ",", "style_group", "in", "style_dict", ".", "items", "(", ")", ":", "for", "src", ",", "dst", "in", "cls", ".", "STYLE_MAPPING", ".", "get", "(", "style_group_key", ",", "[", "]", ")", ":", "# src is a sequence of keys into a nested dict", "# dst is a flat key", "if", "dst", "in", "props", ":", "continue", "v", "=", "style_group", "for", "k", "in", "src", ":", "try", ":", "v", "=", "v", "[", "k", "]", "except", "(", "KeyError", ",", "TypeError", ")", ":", "break", "else", ":", "props", "[", "dst", "]", "=", "v", "if", "isinstance", "(", "props", ".", "get", "(", "'pattern'", ")", ",", "str", ")", ":", "# TODO: support other fill patterns", "props", "[", "'pattern'", "]", "=", "0", "if", "props", "[", "'pattern'", "]", "==", "'none'", "else", "1", "for", "k", "in", "[", "'border'", ",", "'top'", ",", "'right'", ",", "'bottom'", ",", "'left'", "]", ":", "if", "isinstance", "(", "props", ".", "get", "(", "k", ")", ",", "str", ")", ":", "try", ":", "props", "[", "k", "]", "=", "[", "'none'", ",", "'thin'", ",", "'medium'", ",", "'dashed'", ",", "'dotted'", ",", "'thick'", ",", "'double'", ",", "'hair'", ",", "'mediumDashed'", ",", "'dashDot'", ",", "'mediumDashDot'", ",", "'dashDotDot'", ",", "'mediumDashDotDot'", ",", "'slantDashDot'", "]", ".", "index", "(", "props", "[", "k", "]", ")", "except", "ValueError", ":", "props", "[", "k", "]", "=", "2", "if", "isinstance", "(", "props", ".", "get", "(", "'font_script'", ")", ",", "str", ")", ":", "props", "[", "'font_script'", "]", "=", "[", "'baseline'", ",", "'superscript'", ",", "'subscript'", "]", ".", "index", "(", "props", "[", "'font_script'", "]", ")", "if", "isinstance", "(", "props", ".", "get", "(", "'underline'", ")", ",", "str", ")", ":", "props", "[", "'underline'", "]", "=", "{", "'none'", ":", "0", ",", "'single'", ":", "1", ",", "'double'", ":", "2", ",", "'singleAccounting'", ":", "33", ",", "'doubleAccounting'", ":", "34", "}", "[", "props", "[", "'underline'", "]", "]", "return", "props" ]
converts a style_dict to an xlsxwriter format dict Parameters ---------- style_dict : style dictionary to convert num_format_str : optional number format string
[ "converts", "a", "style_dict", "to", "an", "xlsxwriter", "format", "dict" ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/io/excel/_xlsxwriter.py#L85-L147
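A hedged illustration of the conversion above, assuming a nested, openpyxl-like style dict of the shape pandas' Excel formatting layer produces, and the usual STYLE_MAPPING entries (STYLE_MAPPING is defined elsewhere in this module):

from pandas.io.excel._xlsxwriter import _XlsxStyler

style = {'font': {'bold': True},
         'borders': {'top': 'thin'}}   # 'borders' is normalized to 'border'

props = _XlsxStyler.convert(style, num_format_str='0.00')
# Expected, under the assumptions above:
# {'num_format': '0.00', 'bold': True, 'top': 1}   # 'thin' -> border index 1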
19,584
pandas-dev/pandas
pandas/core/reshape/reshape.py
_unstack_extension_series
def _unstack_extension_series(series, level, fill_value): """ Unstack an ExtensionArray-backed Series. The ExtensionDtype is preserved. Parameters ---------- series : Series A Series with an ExtensionArray for values level : Any The level name or number. fill_value : Any The user-level (not physical storage) fill value to use for missing values introduced by the reshape. Passed to ``series.values.take``. Returns ------- DataFrame Each column of the DataFrame will have the same dtype as the input Series. """ # Implementation note: the basic idea is to # 1. Do a regular unstack on a dummy array of integers # 2. Followup with a columnwise take. # We use the dummy take to discover newly-created missing values # introduced by the reshape. from pandas.core.reshape.concat import concat dummy_arr = np.arange(len(series)) # fill_value=-1, since we will do a series.values.take later result = _Unstacker(dummy_arr, series.index, level=level, fill_value=-1).get_result() out = [] values = extract_array(series, extract_numpy=False) for col, indices in result.iteritems(): out.append(Series(values.take(indices.values, allow_fill=True, fill_value=fill_value), name=col, index=result.index)) return concat(out, axis='columns', copy=False, keys=result.columns)
python
def _unstack_extension_series(series, level, fill_value): """ Unstack an ExtensionArray-backed Series. The ExtensionDtype is preserved. Parameters ---------- series : Series A Series with an ExtensionArray for values level : Any The level name or number. fill_value : Any The user-level (not physical storage) fill value to use for missing values introduced by the reshape. Passed to ``series.values.take``. Returns ------- DataFrame Each column of the DataFrame will have the same dtype as the input Series. """ # Implementation note: the basic idea is to # 1. Do a regular unstack on a dummy array of integers # 2. Followup with a columnwise take. # We use the dummy take to discover newly-created missing values # introduced by the reshape. from pandas.core.reshape.concat import concat dummy_arr = np.arange(len(series)) # fill_value=-1, since we will do a series.values.take later result = _Unstacker(dummy_arr, series.index, level=level, fill_value=-1).get_result() out = [] values = extract_array(series, extract_numpy=False) for col, indices in result.iteritems(): out.append(Series(values.take(indices.values, allow_fill=True, fill_value=fill_value), name=col, index=result.index)) return concat(out, axis='columns', copy=False, keys=result.columns)
[ "def", "_unstack_extension_series", "(", "series", ",", "level", ",", "fill_value", ")", ":", "# Implementation note: the basic idea is to", "# 1. Do a regular unstack on a dummy array of integers", "# 2. Followup with a columnwise take.", "# We use the dummy take to discover newly-created missing values", "# introduced by the reshape.", "from", "pandas", ".", "core", ".", "reshape", ".", "concat", "import", "concat", "dummy_arr", "=", "np", ".", "arange", "(", "len", "(", "series", ")", ")", "# fill_value=-1, since we will do a series.values.take later", "result", "=", "_Unstacker", "(", "dummy_arr", ",", "series", ".", "index", ",", "level", "=", "level", ",", "fill_value", "=", "-", "1", ")", ".", "get_result", "(", ")", "out", "=", "[", "]", "values", "=", "extract_array", "(", "series", ",", "extract_numpy", "=", "False", ")", "for", "col", ",", "indices", "in", "result", ".", "iteritems", "(", ")", ":", "out", ".", "append", "(", "Series", "(", "values", ".", "take", "(", "indices", ".", "values", ",", "allow_fill", "=", "True", ",", "fill_value", "=", "fill_value", ")", ",", "name", "=", "col", ",", "index", "=", "result", ".", "index", ")", ")", "return", "concat", "(", "out", ",", "axis", "=", "'columns'", ",", "copy", "=", "False", ",", "keys", "=", "result", ".", "columns", ")" ]
Unstack an ExtensionArray-backed Series. The ExtensionDtype is preserved. Parameters ---------- series : Series A Series with an ExtensionArray for values level : Any The level name or number. fill_value : Any The user-level (not physical storage) fill value to use for missing values introduced by the reshape. Passed to ``series.values.take``. Returns ------- DataFrame Each column of the DataFrame will have the same dtype as the input Series.
[ "Unstack", "an", "ExtensionArray", "-", "backed", "Series", "." ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/reshape/reshape.py#L411-L454
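A sketch of the user-visible effect: unstacking a Series backed by an extension array keeps the extension dtype in every resulting column (example values are illustrative; 'Int64' is the nullable integer extension dtype):

import pandas as pd

idx = pd.MultiIndex.from_product([['a', 'b'], [1, 2]])
s = pd.Series([0, 1, 2, 3], index=idx, dtype='Int64')

result = s.unstack()
print(result.dtypes)
# 1    Int64
# 2    Int64
# dtype: object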
19,585
pandas-dev/pandas
pandas/core/reshape/reshape.py
stack
def stack(frame, level=-1, dropna=True): """ Convert DataFrame to Series with multi-level Index. Columns become the second level of the resulting hierarchical index Returns ------- stacked : Series """ def factorize(index): if index.is_unique: return index, np.arange(len(index)) codes, categories = _factorize_from_iterable(index) return categories, codes N, K = frame.shape # Will also convert negative level numbers and check if out of bounds. level_num = frame.columns._get_level_number(level) if isinstance(frame.columns, MultiIndex): return _stack_multi_columns(frame, level_num=level_num, dropna=dropna) elif isinstance(frame.index, MultiIndex): new_levels = list(frame.index.levels) new_codes = [lab.repeat(K) for lab in frame.index.codes] clev, clab = factorize(frame.columns) new_levels.append(clev) new_codes.append(np.tile(clab, N).ravel()) new_names = list(frame.index.names) new_names.append(frame.columns.name) new_index = MultiIndex(levels=new_levels, codes=new_codes, names=new_names, verify_integrity=False) else: levels, (ilab, clab) = zip(*map(factorize, (frame.index, frame.columns))) codes = ilab.repeat(K), np.tile(clab, N).ravel() new_index = MultiIndex(levels=levels, codes=codes, names=[frame.index.name, frame.columns.name], verify_integrity=False) if frame._is_homogeneous_type: # For homogeneous EAs, frame.values will coerce to object. So # we concatenate instead. dtypes = list(frame.dtypes.values) dtype = dtypes[0] if is_extension_array_dtype(dtype): arr = dtype.construct_array_type() new_values = arr._concat_same_type([ col._values for _, col in frame.iteritems() ]) new_values = _reorder_for_extension_array_stack(new_values, N, K) else: # homogeneous, non-EA new_values = frame.values.ravel() else: # non-homogeneous new_values = frame.values.ravel() if dropna: mask = notna(new_values) new_values = new_values[mask] new_index = new_index[mask] return frame._constructor_sliced(new_values, index=new_index)
python
def stack(frame, level=-1, dropna=True): """ Convert DataFrame to Series with multi-level Index. Columns become the second level of the resulting hierarchical index Returns ------- stacked : Series """ def factorize(index): if index.is_unique: return index, np.arange(len(index)) codes, categories = _factorize_from_iterable(index) return categories, codes N, K = frame.shape # Will also convert negative level numbers and check if out of bounds. level_num = frame.columns._get_level_number(level) if isinstance(frame.columns, MultiIndex): return _stack_multi_columns(frame, level_num=level_num, dropna=dropna) elif isinstance(frame.index, MultiIndex): new_levels = list(frame.index.levels) new_codes = [lab.repeat(K) for lab in frame.index.codes] clev, clab = factorize(frame.columns) new_levels.append(clev) new_codes.append(np.tile(clab, N).ravel()) new_names = list(frame.index.names) new_names.append(frame.columns.name) new_index = MultiIndex(levels=new_levels, codes=new_codes, names=new_names, verify_integrity=False) else: levels, (ilab, clab) = zip(*map(factorize, (frame.index, frame.columns))) codes = ilab.repeat(K), np.tile(clab, N).ravel() new_index = MultiIndex(levels=levels, codes=codes, names=[frame.index.name, frame.columns.name], verify_integrity=False) if frame._is_homogeneous_type: # For homogeneous EAs, frame.values will coerce to object. So # we concatenate instead. dtypes = list(frame.dtypes.values) dtype = dtypes[0] if is_extension_array_dtype(dtype): arr = dtype.construct_array_type() new_values = arr._concat_same_type([ col._values for _, col in frame.iteritems() ]) new_values = _reorder_for_extension_array_stack(new_values, N, K) else: # homogeneous, non-EA new_values = frame.values.ravel() else: # non-homogeneous new_values = frame.values.ravel() if dropna: mask = notna(new_values) new_values = new_values[mask] new_index = new_index[mask] return frame._constructor_sliced(new_values, index=new_index)
[ "def", "stack", "(", "frame", ",", "level", "=", "-", "1", ",", "dropna", "=", "True", ")", ":", "def", "factorize", "(", "index", ")", ":", "if", "index", ".", "is_unique", ":", "return", "index", ",", "np", ".", "arange", "(", "len", "(", "index", ")", ")", "codes", ",", "categories", "=", "_factorize_from_iterable", "(", "index", ")", "return", "categories", ",", "codes", "N", ",", "K", "=", "frame", ".", "shape", "# Will also convert negative level numbers and check if out of bounds.", "level_num", "=", "frame", ".", "columns", ".", "_get_level_number", "(", "level", ")", "if", "isinstance", "(", "frame", ".", "columns", ",", "MultiIndex", ")", ":", "return", "_stack_multi_columns", "(", "frame", ",", "level_num", "=", "level_num", ",", "dropna", "=", "dropna", ")", "elif", "isinstance", "(", "frame", ".", "index", ",", "MultiIndex", ")", ":", "new_levels", "=", "list", "(", "frame", ".", "index", ".", "levels", ")", "new_codes", "=", "[", "lab", ".", "repeat", "(", "K", ")", "for", "lab", "in", "frame", ".", "index", ".", "codes", "]", "clev", ",", "clab", "=", "factorize", "(", "frame", ".", "columns", ")", "new_levels", ".", "append", "(", "clev", ")", "new_codes", ".", "append", "(", "np", ".", "tile", "(", "clab", ",", "N", ")", ".", "ravel", "(", ")", ")", "new_names", "=", "list", "(", "frame", ".", "index", ".", "names", ")", "new_names", ".", "append", "(", "frame", ".", "columns", ".", "name", ")", "new_index", "=", "MultiIndex", "(", "levels", "=", "new_levels", ",", "codes", "=", "new_codes", ",", "names", "=", "new_names", ",", "verify_integrity", "=", "False", ")", "else", ":", "levels", ",", "(", "ilab", ",", "clab", ")", "=", "zip", "(", "*", "map", "(", "factorize", ",", "(", "frame", ".", "index", ",", "frame", ".", "columns", ")", ")", ")", "codes", "=", "ilab", ".", "repeat", "(", "K", ")", ",", "np", ".", "tile", "(", "clab", ",", "N", ")", ".", "ravel", "(", ")", "new_index", "=", "MultiIndex", "(", "levels", "=", "levels", ",", "codes", "=", "codes", ",", "names", "=", "[", "frame", ".", "index", ".", "name", ",", "frame", ".", "columns", ".", "name", "]", ",", "verify_integrity", "=", "False", ")", "if", "frame", ".", "_is_homogeneous_type", ":", "# For homogeneous EAs, frame.values will coerce to object. So", "# we concatenate instead.", "dtypes", "=", "list", "(", "frame", ".", "dtypes", ".", "values", ")", "dtype", "=", "dtypes", "[", "0", "]", "if", "is_extension_array_dtype", "(", "dtype", ")", ":", "arr", "=", "dtype", ".", "construct_array_type", "(", ")", "new_values", "=", "arr", ".", "_concat_same_type", "(", "[", "col", ".", "_values", "for", "_", ",", "col", "in", "frame", ".", "iteritems", "(", ")", "]", ")", "new_values", "=", "_reorder_for_extension_array_stack", "(", "new_values", ",", "N", ",", "K", ")", "else", ":", "# homogeneous, non-EA", "new_values", "=", "frame", ".", "values", ".", "ravel", "(", ")", "else", ":", "# non-homogeneous", "new_values", "=", "frame", ".", "values", ".", "ravel", "(", ")", "if", "dropna", ":", "mask", "=", "notna", "(", "new_values", ")", "new_values", "=", "new_values", "[", "mask", "]", "new_index", "=", "new_index", "[", "mask", "]", "return", "frame", ".", "_constructor_sliced", "(", "new_values", ",", "index", "=", "new_index", ")" ]
Convert DataFrame to Series with multi-level Index. Columns become the second level of the resulting hierarchical index Returns ------- stacked : Series
[ "Convert", "DataFrame", "to", "Series", "with", "multi", "-", "level", "Index", ".", "Columns", "become", "the", "second", "level", "of", "the", "resulting", "hierarchical", "index" ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/reshape/reshape.py#L457-L524
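For reference, the user-level behavior this implements: the columns become the innermost level of the resulting index.

import pandas as pd

df = pd.DataFrame([[0, 1], [2, 3]],
                  index=['cat', 'dog'],
                  columns=['weight', 'height'])

print(df.stack())
# cat  weight    0
#      height    1
# dog  weight    2
#      height    3
# dtype: int64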
19,586
pandas-dev/pandas
pandas/core/reshape/reshape.py
make_axis_dummies
def make_axis_dummies(frame, axis='minor', transform=None): """ Construct 1-0 dummy variables corresponding to designated axis labels Parameters ---------- frame : DataFrame axis : {'major', 'minor'}, default 'minor' transform : function, default None Function to apply to axis labels first. For example, to get "day of week" dummies in a time series regression you might call:: make_axis_dummies(panel, axis='major', transform=lambda d: d.weekday()) Returns ------- dummies : DataFrame Column names taken from chosen axis """ numbers = {'major': 0, 'minor': 1} num = numbers.get(axis, axis) items = frame.index.levels[num] codes = frame.index.codes[num] if transform is not None: mapped_items = items.map(transform) codes, items = _factorize_from_iterable(mapped_items.take(codes)) values = np.eye(len(items), dtype=float) values = values.take(codes, axis=0) return DataFrame(values, columns=items, index=frame.index)
python
def make_axis_dummies(frame, axis='minor', transform=None): """ Construct 1-0 dummy variables corresponding to designated axis labels Parameters ---------- frame : DataFrame axis : {'major', 'minor'}, default 'minor' transform : function, default None Function to apply to axis labels first. For example, to get "day of week" dummies in a time series regression you might call:: make_axis_dummies(panel, axis='major', transform=lambda d: d.weekday()) Returns ------- dummies : DataFrame Column names taken from chosen axis """ numbers = {'major': 0, 'minor': 1} num = numbers.get(axis, axis) items = frame.index.levels[num] codes = frame.index.codes[num] if transform is not None: mapped_items = items.map(transform) codes, items = _factorize_from_iterable(mapped_items.take(codes)) values = np.eye(len(items), dtype=float) values = values.take(codes, axis=0) return DataFrame(values, columns=items, index=frame.index)
[ "def", "make_axis_dummies", "(", "frame", ",", "axis", "=", "'minor'", ",", "transform", "=", "None", ")", ":", "numbers", "=", "{", "'major'", ":", "0", ",", "'minor'", ":", "1", "}", "num", "=", "numbers", ".", "get", "(", "axis", ",", "axis", ")", "items", "=", "frame", ".", "index", ".", "levels", "[", "num", "]", "codes", "=", "frame", ".", "index", ".", "codes", "[", "num", "]", "if", "transform", "is", "not", "None", ":", "mapped_items", "=", "items", ".", "map", "(", "transform", ")", "codes", ",", "items", "=", "_factorize_from_iterable", "(", "mapped_items", ".", "take", "(", "codes", ")", ")", "values", "=", "np", ".", "eye", "(", "len", "(", "items", ")", ",", "dtype", "=", "float", ")", "values", "=", "values", ".", "take", "(", "codes", ",", "axis", "=", "0", ")", "return", "DataFrame", "(", "values", ",", "columns", "=", "items", ",", "index", "=", "frame", ".", "index", ")" ]
Construct 1-0 dummy variables corresponding to designated axis labels Parameters ---------- frame : DataFrame axis : {'major', 'minor'}, default 'minor' transform : function, default None Function to apply to axis labels first. For example, to get "day of week" dummies in a time series regression you might call:: make_axis_dummies(panel, axis='major', transform=lambda d: d.weekday()) Returns ------- dummies : DataFrame Column names taken from chosen axis
[ "Construct", "1", "-", "0", "dummy", "variables", "corresponding", "to", "designated", "axis", "labels" ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/reshape/reshape.py#L970-L1003
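The core trick above, an identity matrix row-indexed by integer codes, is easy to see in isolation; a small self-contained sketch (the names are illustrative):

import numpy as np

items = ['x', 'y', 'z']            # level values
codes = np.array([0, 2, 1, 0])     # per-row code into `items`

dummies = np.eye(len(items))[codes]    # same as values.take(codes, axis=0)
# array([[1., 0., 0.],
#        [0., 0., 1.],
#        [0., 1., 0.],
#        [1., 0., 0.]])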
19,587
pandas-dev/pandas
pandas/core/reshape/reshape.py
_reorder_for_extension_array_stack
def _reorder_for_extension_array_stack(arr, n_rows, n_columns): """ Re-orders the values when stacking multiple extension-arrays. The indirect stacking method used for EAs requires a followup take to get the order correct. Parameters ---------- arr : ExtensionArray n_rows, n_columns : int The number of rows and columns in the original DataFrame. Returns ------- taken : ExtensionArray The original `arr` with elements re-ordered appropriately Examples -------- >>> arr = np.array(['a', 'b', 'c', 'd', 'e', 'f']) >>> _reorder_for_extension_array_stack(arr, 2, 3) array(['a', 'c', 'e', 'b', 'd', 'f'], dtype='<U1') >>> _reorder_for_extension_array_stack(arr, 3, 2) array(['a', 'd', 'b', 'e', 'c', 'f'], dtype='<U1') """ # final take to get the order correct. # idx is an indexer like # [c0r0, c1r0, c2r0, ..., # c0r1, c1r1, c2r1, ...] idx = np.arange(n_rows * n_columns).reshape(n_columns, n_rows).T.ravel() return arr.take(idx)
python
def _reorder_for_extension_array_stack(arr, n_rows, n_columns): """ Re-orders the values when stacking multiple extension-arrays. The indirect stacking method used for EAs requires a followup take to get the order correct. Parameters ---------- arr : ExtensionArray n_rows, n_columns : int The number of rows and columns in the original DataFrame. Returns ------- taken : ExtensionArray The original `arr` with elements re-ordered appropriately Examples -------- >>> arr = np.array(['a', 'b', 'c', 'd', 'e', 'f']) >>> _reorder_for_extension_array_stack(arr, 2, 3) array(['a', 'c', 'e', 'b', 'd', 'f'], dtype='<U1') >>> _reorder_for_extension_array_stack(arr, 3, 2) array(['a', 'd', 'b', 'e', 'c', 'f'], dtype='<U1') """ # final take to get the order correct. # idx is an indexer like # [c0r0, c1r0, c2r0, ..., # c0r1, c1r1, c2r1, ...] idx = np.arange(n_rows * n_columns).reshape(n_columns, n_rows).T.ravel() return arr.take(idx)
[ "def", "_reorder_for_extension_array_stack", "(", "arr", ",", "n_rows", ",", "n_columns", ")", ":", "# final take to get the order correct.", "# idx is an indexer like", "# [c0r0, c1r0, c2r0, ...,", "# c0r1, c1r1, c2r1, ...]", "idx", "=", "np", ".", "arange", "(", "n_rows", "*", "n_columns", ")", ".", "reshape", "(", "n_columns", ",", "n_rows", ")", ".", "T", ".", "ravel", "(", ")", "return", "arr", ".", "take", "(", "idx", ")" ]
Re-orders the values when stacking multiple extension-arrays. The indirect stacking method used for EAs requires a followup take to get the order correct. Parameters ---------- arr : ExtensionArray n_rows, n_columns : int The number of rows and columns in the original DataFrame. Returns ------- taken : ExtensionArray The original `arr` with elements re-ordered appropriately Examples -------- >>> arr = np.array(['a', 'b', 'c', 'd', 'e', 'f']) >>> _reorder_for_extension_array_stack(arr, 2, 3) array(['a', 'c', 'e', 'b', 'd', 'f'], dtype='<U1') >>> _reorder_for_extension_array_stack(arr, 3, 2) array(['a', 'd', 'b', 'e', 'c', 'f'], dtype='<U1')
[ "Re", "-", "orders", "the", "values", "when", "stacking", "multiple", "extension", "-", "arrays", "." ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/reshape/reshape.py#L1006-L1038
19,588
pandas-dev/pandas
pandas/io/sas/sas_xport.py
_parse_float_vec
def _parse_float_vec(vec): """ Parse a vector of float values representing IBM 8 byte floats into native 8 byte floats. """ dtype = np.dtype('>u4,>u4') vec1 = vec.view(dtype=dtype) xport1 = vec1['f0'] xport2 = vec1['f1'] # Start by setting first half of ieee number to first half of IBM # number sans exponent ieee1 = xport1 & 0x00ffffff # The fraction bit to the left of the binary point in the ieee # format was set and the number was shifted 0, 1, 2, or 3 # places. This will tell us how to adjust the ibm exponent to be a # power of 2 ieee exponent and how to shift the fraction bits to # restore the correct magnitude. shift = np.zeros(len(vec), dtype=np.uint8) shift[np.where(xport1 & 0x00200000)] = 1 shift[np.where(xport1 & 0x00400000)] = 2 shift[np.where(xport1 & 0x00800000)] = 3 # shift the ieee number down the correct number of places then # set the second half of the ieee number to be the second half # of the ibm number shifted appropriately, ored with the bits # from the first half that would have been shifted in if we # could shift a double. All we are worried about are the low # order 3 bits of the first half since we're only shifting by # 1, 2, or 3. ieee1 >>= shift ieee2 = (xport2 >> shift) | ((xport1 & 0x00000007) << (29 + (3 - shift))) # clear the 1 bit to the left of the binary point ieee1 &= 0xffefffff # set the exponent of the ieee number to be the actual exponent # plus the shift count + 1023. Or this into the first half of the # ieee number. The ibm exponent is excess 64 but is adjusted by 65 # since during conversion to ibm format the exponent is # incremented by 1 and the fraction bits left 4 positions to the # right of the radix point. (had to add >> 24 because C treats & # 0x7f as 0x7f000000 and Python doesn't) ieee1 |= ((((((xport1 >> 24) & 0x7f) - 65) << 2) + shift + 1023) << 20) | (xport1 & 0x80000000) ieee = np.empty((len(ieee1),), dtype='>u4,>u4') ieee['f0'] = ieee1 ieee['f1'] = ieee2 ieee = ieee.view(dtype='>f8') ieee = ieee.astype('f8') return ieee
python
def _parse_float_vec(vec): """ Parse a vector of float values representing IBM 8 byte floats into native 8 byte floats. """ dtype = np.dtype('>u4,>u4') vec1 = vec.view(dtype=dtype) xport1 = vec1['f0'] xport2 = vec1['f1'] # Start by setting first half of ieee number to first half of IBM # number sans exponent ieee1 = xport1 & 0x00ffffff # The fraction bit to the left of the binary point in the ieee # format was set and the number was shifted 0, 1, 2, or 3 # places. This will tell us how to adjust the ibm exponent to be a # power of 2 ieee exponent and how to shift the fraction bits to # restore the correct magnitude. shift = np.zeros(len(vec), dtype=np.uint8) shift[np.where(xport1 & 0x00200000)] = 1 shift[np.where(xport1 & 0x00400000)] = 2 shift[np.where(xport1 & 0x00800000)] = 3 # shift the ieee number down the correct number of places then # set the second half of the ieee number to be the second half # of the ibm number shifted appropriately, ored with the bits # from the first half that would have been shifted in if we # could shift a double. All we are worried about are the low # order 3 bits of the first half since we're only shifting by # 1, 2, or 3. ieee1 >>= shift ieee2 = (xport2 >> shift) | ((xport1 & 0x00000007) << (29 + (3 - shift))) # clear the 1 bit to the left of the binary point ieee1 &= 0xffefffff # set the exponent of the ieee number to be the actual exponent # plus the shift count + 1023. Or this into the first half of the # ieee number. The ibm exponent is excess 64 but is adjusted by 65 # since during conversion to ibm format the exponent is # incremented by 1 and the fraction bits left 4 positions to the # right of the radix point. (had to add >> 24 because C treats & # 0x7f as 0x7f000000 and Python doesn't) ieee1 |= ((((((xport1 >> 24) & 0x7f) - 65) << 2) + shift + 1023) << 20) | (xport1 & 0x80000000) ieee = np.empty((len(ieee1),), dtype='>u4,>u4') ieee['f0'] = ieee1 ieee['f1'] = ieee2 ieee = ieee.view(dtype='>f8') ieee = ieee.astype('f8') return ieee
[ "def", "_parse_float_vec", "(", "vec", ")", ":", "dtype", "=", "np", ".", "dtype", "(", "'>u4,>u4'", ")", "vec1", "=", "vec", ".", "view", "(", "dtype", "=", "dtype", ")", "xport1", "=", "vec1", "[", "'f0'", "]", "xport2", "=", "vec1", "[", "'f1'", "]", "# Start by setting first half of ieee number to first half of IBM", "# number sans exponent", "ieee1", "=", "xport1", "&", "0x00ffffff", "# The fraction bit to the left of the binary point in the ieee", "# format was set and the number was shifted 0, 1, 2, or 3", "# places. This will tell us how to adjust the ibm exponent to be a", "# power of 2 ieee exponent and how to shift the fraction bits to", "# restore the correct magnitude.", "shift", "=", "np", ".", "zeros", "(", "len", "(", "vec", ")", ",", "dtype", "=", "np", ".", "uint8", ")", "shift", "[", "np", ".", "where", "(", "xport1", "&", "0x00200000", ")", "]", "=", "1", "shift", "[", "np", ".", "where", "(", "xport1", "&", "0x00400000", ")", "]", "=", "2", "shift", "[", "np", ".", "where", "(", "xport1", "&", "0x00800000", ")", "]", "=", "3", "# shift the ieee number down the correct number of places then", "# set the second half of the ieee number to be the second half", "# of the ibm number shifted appropriately, ored with the bits", "# from the first half that would have been shifted in if we", "# could shift a double. All we are worried about are the low", "# order 3 bits of the first half since we're only shifting by", "# 1, 2, or 3.", "ieee1", ">>=", "shift", "ieee2", "=", "(", "xport2", ">>", "shift", ")", "|", "(", "(", "xport1", "&", "0x00000007", ")", "<<", "(", "29", "+", "(", "3", "-", "shift", ")", ")", ")", "# clear the 1 bit to the left of the binary point", "ieee1", "&=", "0xffefffff", "# set the exponent of the ieee number to be the actual exponent", "# plus the shift count + 1023. Or this into the first half of the", "# ieee number. The ibm exponent is excess 64 but is adjusted by 65", "# since during conversion to ibm format the exponent is", "# incremented by 1 and the fraction bits left 4 positions to the", "# right of the radix point. (had to add >> 24 because C treats &", "# 0x7f as 0x7f000000 and Python doesn't)", "ieee1", "|=", "(", "(", "(", "(", "(", "(", "xport1", ">>", "24", ")", "&", "0x7f", ")", "-", "65", ")", "<<", "2", ")", "+", "shift", "+", "1023", ")", "<<", "20", ")", "|", "(", "xport1", "&", "0x80000000", ")", "ieee", "=", "np", ".", "empty", "(", "(", "len", "(", "ieee1", ")", ",", ")", ",", "dtype", "=", "'>u4,>u4'", ")", "ieee", "[", "'f0'", "]", "=", "ieee1", "ieee", "[", "'f1'", "]", "=", "ieee2", "ieee", "=", "ieee", ".", "view", "(", "dtype", "=", "'>f8'", ")", "ieee", "=", "ieee", ".", "astype", "(", "'f8'", ")", "return", "ieee" ]
Parse a vector of float values representing IBM 8 byte floats into native 8 byte floats.
[ "Parse", "a", "vector", "of", "float", "values", "representing", "IBM", "8", "byte", "floats", "into", "native", "8", "byte", "floats", "." ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/io/sas/sas_xport.py#L170-L224
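The vectorized bit-twiddling above implements the textbook IBM-to-IEEE conversion. As a scalar sanity check against the definition (value = sign * 0.<fraction, base 16> * 16**(exponent - 64)), the byte string below is the IBM encoding of 1.0; this is a hand-constructed example, not taken from the source:

raw = bytes([0x41, 0x10, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00])

sign = -1.0 if raw[0] & 0x80 else 1.0
exponent = (raw[0] & 0x7F) - 64                     # excess-64 exponent
fraction = int.from_bytes(raw[1:], 'big') / 16**14  # 14 hex digits of fraction
print(sign * fraction * 16**exponent)               # 1.0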
19,589
pandas-dev/pandas
pandas/io/sas/sas_xport.py
XportReader._record_count
def _record_count(self):
        """
        Get number of records in file.

        This may be suboptimal because we have to seek to the end of
        the file.

        Side effect: moves the file position back to record_start.
        """

        self.filepath_or_buffer.seek(0, 2)
        total_records_length = (self.filepath_or_buffer.tell() -
                                self.record_start)

        if total_records_length % 80 != 0:
            warnings.warn("xport file may be corrupted")

        if self.record_length > 80:
            self.filepath_or_buffer.seek(self.record_start)
            return total_records_length // self.record_length

        self.filepath_or_buffer.seek(-80, 2)
        last_card = self.filepath_or_buffer.read(80)
        last_card = np.frombuffer(last_card, dtype=np.uint64)

        # 8 byte blank
        ix = np.flatnonzero(last_card == 2314885530818453536)

        if len(ix) == 0:
            tail_pad = 0
        else:
            tail_pad = 8 * len(ix)

        self.filepath_or_buffer.seek(self.record_start)

        return (total_records_length - tail_pad) // self.record_length
python
def _record_count(self):
        """
        Get number of records in file.

        This may be suboptimal because we have to seek to the end of
        the file.

        Side effect: moves the file position back to record_start.
        """

        self.filepath_or_buffer.seek(0, 2)
        total_records_length = (self.filepath_or_buffer.tell() -
                                self.record_start)

        if total_records_length % 80 != 0:
            warnings.warn("xport file may be corrupted")

        if self.record_length > 80:
            self.filepath_or_buffer.seek(self.record_start)
            return total_records_length // self.record_length

        self.filepath_or_buffer.seek(-80, 2)
        last_card = self.filepath_or_buffer.read(80)
        last_card = np.frombuffer(last_card, dtype=np.uint64)

        # 8 byte blank
        ix = np.flatnonzero(last_card == 2314885530818453536)

        if len(ix) == 0:
            tail_pad = 0
        else:
            tail_pad = 8 * len(ix)

        self.filepath_or_buffer.seek(self.record_start)

        return (total_records_length - tail_pad) // self.record_length
[ "def", "_record_count", "(", "self", ")", ":", "self", ".", "filepath_or_buffer", ".", "seek", "(", "0", ",", "2", ")", "total_records_length", "=", "(", "self", ".", "filepath_or_buffer", ".", "tell", "(", ")", "-", "self", ".", "record_start", ")", "if", "total_records_length", "%", "80", "!=", "0", ":", "warnings", ".", "warn", "(", "\"xport file may be corrupted\"", ")", "if", "self", ".", "record_length", ">", "80", ":", "self", ".", "filepath_or_buffer", ".", "seek", "(", "self", ".", "record_start", ")", "return", "total_records_length", "//", "self", ".", "record_length", "self", ".", "filepath_or_buffer", ".", "seek", "(", "-", "80", ",", "2", ")", "last_card", "=", "self", ".", "filepath_or_buffer", ".", "read", "(", "80", ")", "last_card", "=", "np", ".", "frombuffer", "(", "last_card", ",", "dtype", "=", "np", ".", "uint64", ")", "# 8 byte blank", "ix", "=", "np", ".", "flatnonzero", "(", "last_card", "==", "2314885530818453536", ")", "if", "len", "(", "ix", ")", "==", "0", ":", "tail_pad", "=", "0", "else", ":", "tail_pad", "=", "8", "*", "len", "(", "ix", ")", "self", ".", "filepath_or_buffer", ".", "seek", "(", "self", ".", "record_start", ")", "return", "(", "total_records_length", "-", "tail_pad", ")", "//", "self", ".", "record_length" ]
Get number of records in file.

        This may be suboptimal because we have to seek to the end of
        the file.

        Side effect: moves the file position back to record_start.
[ "Get", "number", "of", "records", "in", "file", "." ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/io/sas/sas_xport.py#L364-L399
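One detail worth spelling out: the magic constant 2314885530818453536 is simply eight ASCII blanks viewed as an unsigned 64-bit integer (xport pads the final 80-byte card with spaces), and since every byte is 0x20, endianness does not matter:

import numpy as np

blanks = int(np.frombuffer(b' ' * 8, dtype=np.uint64)[0])
print(blanks)        # 2314885530818453536
print(hex(blanks))   # 0x2020202020202020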
19,590
pandas-dev/pandas
pandas/io/sas/sas_xport.py
XportReader.get_chunk
def get_chunk(self, size=None): """ Reads lines from Xport file and returns as dataframe Parameters ---------- size : int, defaults to None Number of lines to read. If None, reads whole file. Returns ------- DataFrame """ if size is None: size = self._chunksize return self.read(nrows=size)
python
def get_chunk(self, size=None): """ Reads lines from Xport file and returns as dataframe Parameters ---------- size : int, defaults to None Number of lines to read. If None, reads whole file. Returns ------- DataFrame """ if size is None: size = self._chunksize return self.read(nrows=size)
[ "def", "get_chunk", "(", "self", ",", "size", "=", "None", ")", ":", "if", "size", "is", "None", ":", "size", "=", "self", ".", "_chunksize", "return", "self", ".", "read", "(", "nrows", "=", "size", ")" ]
Reads lines from Xport file and returns as dataframe Parameters ---------- size : int, defaults to None Number of lines to read. If None, reads whole file. Returns ------- DataFrame
[ "Reads", "lines", "from", "Xport", "file", "and", "returns", "as", "dataframe" ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/io/sas/sas_xport.py#L401-L416
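At the user level this powers chunked reading of xport files; a hedged sketch, where the file path and `process` are placeholders:

import pandas as pd

reader = pd.read_sas('data.xpt', format='xport', chunksize=10000)
for chunk in reader:     # each chunk is a DataFrame produced via get_chunk/read
    process(chunk)       # placeholder for user code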
19,591
pandas-dev/pandas
pandas/core/internals/managers.py
construction_error
def construction_error(tot_items, block_shape, axes, e=None): """ raise a helpful message about our construction """ passed = tuple(map(int, [tot_items] + list(block_shape))) # Correcting the user facing error message during dataframe construction if len(passed) <= 2: passed = passed[::-1] implied = tuple(len(ax) for ax in axes) # Correcting the user facing error message during dataframe construction if len(implied) <= 2: implied = implied[::-1] if passed == implied and e is not None: raise e if block_shape[0] == 0: raise ValueError("Empty data passed with indices specified.") raise ValueError("Shape of passed values is {0}, indices imply {1}".format( passed, implied))
python
def construction_error(tot_items, block_shape, axes, e=None): """ raise a helpful message about our construction """ passed = tuple(map(int, [tot_items] + list(block_shape))) # Correcting the user facing error message during dataframe construction if len(passed) <= 2: passed = passed[::-1] implied = tuple(len(ax) for ax in axes) # Correcting the user facing error message during dataframe construction if len(implied) <= 2: implied = implied[::-1] if passed == implied and e is not None: raise e if block_shape[0] == 0: raise ValueError("Empty data passed with indices specified.") raise ValueError("Shape of passed values is {0}, indices imply {1}".format( passed, implied))
[ "def", "construction_error", "(", "tot_items", ",", "block_shape", ",", "axes", ",", "e", "=", "None", ")", ":", "passed", "=", "tuple", "(", "map", "(", "int", ",", "[", "tot_items", "]", "+", "list", "(", "block_shape", ")", ")", ")", "# Correcting the user facing error message during dataframe construction", "if", "len", "(", "passed", ")", "<=", "2", ":", "passed", "=", "passed", "[", ":", ":", "-", "1", "]", "implied", "=", "tuple", "(", "len", "(", "ax", ")", "for", "ax", "in", "axes", ")", "# Correcting the user facing error message during dataframe construction", "if", "len", "(", "implied", ")", "<=", "2", ":", "implied", "=", "implied", "[", ":", ":", "-", "1", "]", "if", "passed", "==", "implied", "and", "e", "is", "not", "None", ":", "raise", "e", "if", "block_shape", "[", "0", "]", "==", "0", ":", "raise", "ValueError", "(", "\"Empty data passed with indices specified.\"", ")", "raise", "ValueError", "(", "\"Shape of passed values is {0}, indices imply {1}\"", ".", "format", "(", "passed", ",", "implied", ")", ")" ]
raise a helpful message about our construction
[ "raise", "a", "helpful", "message", "about", "our", "construction" ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/internals/managers.py#L1670-L1687
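The shape reversal above exists so the reported shapes match the (rows, columns) order users expect; for example (message wording as of this commit, to the best of my reading):

import numpy as np
import pandas as pd

try:
    pd.DataFrame(np.zeros((3, 2)), columns=['a', 'b', 'c'])
except ValueError as err:
    print(err)   # Shape of passed values is (3, 2), indices imply (3, 3)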
19,592
pandas-dev/pandas
pandas/core/internals/managers.py
_simple_blockify
def _simple_blockify(tuples, dtype):
    """ return a list with a single block whose values have a single dtype; if
    dtype is not None, coerce to this dtype
    """
    values, placement = _stack_arrays(tuples, dtype)

    # CHECK DTYPE?
    if dtype is not None and values.dtype != dtype:  # pragma: no cover
        values = values.astype(dtype)

    block = make_block(values, placement=placement)
    return [block]
python
def _simple_blockify(tuples, dtype):
    """ return a list with a single block whose values have a single dtype; if
    dtype is not None, coerce to this dtype
    """
    values, placement = _stack_arrays(tuples, dtype)

    # CHECK DTYPE?
    if dtype is not None and values.dtype != dtype:  # pragma: no cover
        values = values.astype(dtype)

    block = make_block(values, placement=placement)
    return [block]
[ "def", "_simple_blockify", "(", "tuples", ",", "dtype", ")", ":", "values", ",", "placement", "=", "_stack_arrays", "(", "tuples", ",", "dtype", ")", "# CHECK DTYPE?", "if", "dtype", "is", "not", "None", "and", "values", ".", "dtype", "!=", "dtype", ":", "# pragma: no cover", "values", "=", "values", ".", "astype", "(", "dtype", ")", "block", "=", "make_block", "(", "values", ",", "placement", "=", "placement", ")", "return", "[", "block", "]" ]
return a list with a single block whose values have a single dtype; if
    dtype is not None, coerce to this dtype
[ "return", "a", "single", "array", "of", "a", "block", "that", "has", "a", "single", "dtype", ";", "if", "dtype", "is", "not", "None", "coerce", "to", "this", "dtype" ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/internals/managers.py#L1792-L1803
19,593
pandas-dev/pandas
pandas/core/internals/managers.py
_multi_blockify
def _multi_blockify(tuples, dtype=None): """ return an array of blocks that potentially have different dtypes """ # group by dtype grouper = itertools.groupby(tuples, lambda x: x[2].dtype) new_blocks = [] for dtype, tup_block in grouper: values, placement = _stack_arrays(list(tup_block), dtype) block = make_block(values, placement=placement) new_blocks.append(block) return new_blocks
python
def _multi_blockify(tuples, dtype=None): """ return an array of blocks that potentially have different dtypes """ # group by dtype grouper = itertools.groupby(tuples, lambda x: x[2].dtype) new_blocks = [] for dtype, tup_block in grouper: values, placement = _stack_arrays(list(tup_block), dtype) block = make_block(values, placement=placement) new_blocks.append(block) return new_blocks
[ "def", "_multi_blockify", "(", "tuples", ",", "dtype", "=", "None", ")", ":", "# group by dtype", "grouper", "=", "itertools", ".", "groupby", "(", "tuples", ",", "lambda", "x", ":", "x", "[", "2", "]", ".", "dtype", ")", "new_blocks", "=", "[", "]", "for", "dtype", ",", "tup_block", "in", "grouper", ":", "values", ",", "placement", "=", "_stack_arrays", "(", "list", "(", "tup_block", ")", ",", "dtype", ")", "block", "=", "make_block", "(", "values", ",", "placement", "=", "placement", ")", "new_blocks", ".", "append", "(", "block", ")", "return", "new_blocks" ]
return an array of blocks that potentially have different dtypes
[ "return", "an", "array", "of", "blocks", "that", "potentially", "have", "different", "dtypes" ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/internals/managers.py#L1806-L1820
19,594
pandas-dev/pandas
pandas/core/internals/managers.py
_interleaved_dtype
def _interleaved_dtype( blocks: List[Block] ) -> Optional[Union[np.dtype, ExtensionDtype]]: """Find the common dtype for `blocks`. Parameters ---------- blocks : List[Block] Returns ------- dtype : Optional[Union[np.dtype, ExtensionDtype]] None is returned when `blocks` is empty. """ if not len(blocks): return None return find_common_type([b.dtype for b in blocks])
python
def _interleaved_dtype( blocks: List[Block] ) -> Optional[Union[np.dtype, ExtensionDtype]]: """Find the common dtype for `blocks`. Parameters ---------- blocks : List[Block] Returns ------- dtype : Optional[Union[np.dtype, ExtensionDtype]] None is returned when `blocks` is empty. """ if not len(blocks): return None return find_common_type([b.dtype for b in blocks])
[ "def", "_interleaved_dtype", "(", "blocks", ":", "List", "[", "Block", "]", ")", "->", "Optional", "[", "Union", "[", "np", ".", "dtype", ",", "ExtensionDtype", "]", "]", ":", "if", "not", "len", "(", "blocks", ")", ":", "return", "None", "return", "find_common_type", "(", "[", "b", ".", "dtype", "for", "b", "in", "blocks", "]", ")" ]
Find the common dtype for `blocks`. Parameters ---------- blocks : List[Block] Returns ------- dtype : Optional[Union[np.dtype, ExtensionDtype]] None is returned when `blocks` is empty.
[ "Find", "the", "common", "dtype", "for", "blocks", "." ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/internals/managers.py#L1864-L1881
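The effect of the common-dtype computation is visible whenever mixed-dtype blocks are interleaved into a single ndarray:

import numpy as np
import pandas as pd

df = pd.DataFrame({'i': np.array([1, 2], dtype='int64'),
                   'f': np.array([0.5, 1.5], dtype='float64')})

# Interleaving an int64 block with a float64 block upcasts to float64.
print(df.values.dtype)   # float64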
19,595
pandas-dev/pandas
pandas/core/internals/managers.py
_consolidate
def _consolidate(blocks): """ Merge blocks having same dtype, exclude non-consolidating blocks """ # sort by _can_consolidate, dtype gkey = lambda x: x._consolidate_key grouper = itertools.groupby(sorted(blocks, key=gkey), gkey) new_blocks = [] for (_can_consolidate, dtype), group_blocks in grouper: merged_blocks = _merge_blocks(list(group_blocks), dtype=dtype, _can_consolidate=_can_consolidate) new_blocks = _extend_blocks(merged_blocks, new_blocks) return new_blocks
python
def _consolidate(blocks): """ Merge blocks having same dtype, exclude non-consolidating blocks """ # sort by _can_consolidate, dtype gkey = lambda x: x._consolidate_key grouper = itertools.groupby(sorted(blocks, key=gkey), gkey) new_blocks = [] for (_can_consolidate, dtype), group_blocks in grouper: merged_blocks = _merge_blocks(list(group_blocks), dtype=dtype, _can_consolidate=_can_consolidate) new_blocks = _extend_blocks(merged_blocks, new_blocks) return new_blocks
[ "def", "_consolidate", "(", "blocks", ")", ":", "# sort by _can_consolidate, dtype", "gkey", "=", "lambda", "x", ":", "x", ".", "_consolidate_key", "grouper", "=", "itertools", ".", "groupby", "(", "sorted", "(", "blocks", ",", "key", "=", "gkey", ")", ",", "gkey", ")", "new_blocks", "=", "[", "]", "for", "(", "_can_consolidate", ",", "dtype", ")", ",", "group_blocks", "in", "grouper", ":", "merged_blocks", "=", "_merge_blocks", "(", "list", "(", "group_blocks", ")", ",", "dtype", "=", "dtype", ",", "_can_consolidate", "=", "_can_consolidate", ")", "new_blocks", "=", "_extend_blocks", "(", "merged_blocks", ",", "new_blocks", ")", "return", "new_blocks" ]
Merge blocks having same dtype, exclude non-consolidating blocks
[ "Merge", "blocks", "having", "same", "dtype", "exclude", "non", "-", "consolidating", "blocks" ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/internals/managers.py#L1884-L1898
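Consolidation can be observed through the block manager (internal API, so subject to change; shown only to illustrate what _consolidate achieves):

import pandas as pd

df = pd.DataFrame({'a': [1, 2]})
df['b'] = [3, 4]                 # setitem adds a second int64 block

print(len(df._data.blocks))      # 2 -- not yet consolidated
df._consolidate_inplace()        # merges blocks of the same dtype
print(len(df._data.blocks))      # 1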
19,596
pandas-dev/pandas
pandas/core/internals/managers.py
_compare_or_regex_search
def _compare_or_regex_search(a, b, regex=False): """ Compare two array_like inputs of the same shape or two scalar values Calls operator.eq or re.search, depending on regex argument. If regex is True, perform an element-wise regex matching. Parameters ---------- a : array_like or scalar b : array_like or scalar regex : bool, default False Returns ------- mask : array_like of bool """ if not regex: op = lambda x: operator.eq(x, b) else: op = np.vectorize(lambda x: bool(re.search(b, x)) if isinstance(x, str) else False) is_a_array = isinstance(a, np.ndarray) is_b_array = isinstance(b, np.ndarray) # numpy deprecation warning to have i8 vs integer comparisons if is_datetimelike_v_numeric(a, b): result = False # numpy deprecation warning if comparing numeric vs string-like elif is_numeric_v_string_like(a, b): result = False else: result = op(a) if is_scalar(result) and (is_a_array or is_b_array): type_names = [type(a).__name__, type(b).__name__] if is_a_array: type_names[0] = 'ndarray(dtype={dtype})'.format(dtype=a.dtype) if is_b_array: type_names[1] = 'ndarray(dtype={dtype})'.format(dtype=b.dtype) raise TypeError( "Cannot compare types {a!r} and {b!r}".format(a=type_names[0], b=type_names[1])) return result
python
def _compare_or_regex_search(a, b, regex=False): """ Compare two array_like inputs of the same shape or two scalar values Calls operator.eq or re.search, depending on regex argument. If regex is True, perform an element-wise regex matching. Parameters ---------- a : array_like or scalar b : array_like or scalar regex : bool, default False Returns ------- mask : array_like of bool """ if not regex: op = lambda x: operator.eq(x, b) else: op = np.vectorize(lambda x: bool(re.search(b, x)) if isinstance(x, str) else False) is_a_array = isinstance(a, np.ndarray) is_b_array = isinstance(b, np.ndarray) # numpy deprecation warning to have i8 vs integer comparisons if is_datetimelike_v_numeric(a, b): result = False # numpy deprecation warning if comparing numeric vs string-like elif is_numeric_v_string_like(a, b): result = False else: result = op(a) if is_scalar(result) and (is_a_array or is_b_array): type_names = [type(a).__name__, type(b).__name__] if is_a_array: type_names[0] = 'ndarray(dtype={dtype})'.format(dtype=a.dtype) if is_b_array: type_names[1] = 'ndarray(dtype={dtype})'.format(dtype=b.dtype) raise TypeError( "Cannot compare types {a!r} and {b!r}".format(a=type_names[0], b=type_names[1])) return result
[ "def", "_compare_or_regex_search", "(", "a", ",", "b", ",", "regex", "=", "False", ")", ":", "if", "not", "regex", ":", "op", "=", "lambda", "x", ":", "operator", ".", "eq", "(", "x", ",", "b", ")", "else", ":", "op", "=", "np", ".", "vectorize", "(", "lambda", "x", ":", "bool", "(", "re", ".", "search", "(", "b", ",", "x", ")", ")", "if", "isinstance", "(", "x", ",", "str", ")", "else", "False", ")", "is_a_array", "=", "isinstance", "(", "a", ",", "np", ".", "ndarray", ")", "is_b_array", "=", "isinstance", "(", "b", ",", "np", ".", "ndarray", ")", "# numpy deprecation warning to have i8 vs integer comparisons", "if", "is_datetimelike_v_numeric", "(", "a", ",", "b", ")", ":", "result", "=", "False", "# numpy deprecation warning if comparing numeric vs string-like", "elif", "is_numeric_v_string_like", "(", "a", ",", "b", ")", ":", "result", "=", "False", "else", ":", "result", "=", "op", "(", "a", ")", "if", "is_scalar", "(", "result", ")", "and", "(", "is_a_array", "or", "is_b_array", ")", ":", "type_names", "=", "[", "type", "(", "a", ")", ".", "__name__", ",", "type", "(", "b", ")", ".", "__name__", "]", "if", "is_a_array", ":", "type_names", "[", "0", "]", "=", "'ndarray(dtype={dtype})'", ".", "format", "(", "dtype", "=", "a", ".", "dtype", ")", "if", "is_b_array", ":", "type_names", "[", "1", "]", "=", "'ndarray(dtype={dtype})'", ".", "format", "(", "dtype", "=", "b", ".", "dtype", ")", "raise", "TypeError", "(", "\"Cannot compare types {a!r} and {b!r}\"", ".", "format", "(", "a", "=", "type_names", "[", "0", "]", ",", "b", "=", "type_names", "[", "1", "]", ")", ")", "return", "result" ]
Compare two array_like inputs of the same shape or two scalar values Calls operator.eq or re.search, depending on regex argument. If regex is True, perform an element-wise regex matching. Parameters ---------- a : array_like or scalar b : array_like or scalar regex : bool, default False Returns ------- mask : array_like of bool
[ "Compare", "two", "array_like", "inputs", "of", "the", "same", "shape", "or", "two", "scalar", "values" ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/internals/managers.py#L1901-L1949
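The regex branch above is essentially a vectorized re.search with non-strings mapped to False; the same approach, standalone:

import re
import numpy as np

a = np.array(['foo', 'bar', 42], dtype=object)
pattern = r'^ba'

searcher = np.vectorize(lambda x: bool(re.search(pattern, x))
                        if isinstance(x, str) else False)
print(searcher(a))   # [False  True False]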
19,597
pandas-dev/pandas
pandas/core/internals/managers.py
items_overlap_with_suffix
def items_overlap_with_suffix(left, lsuffix, right, rsuffix): """ If two indices overlap, add suffixes to overlapping entries. If corresponding suffix is empty, the entry is simply converted to string. """ to_rename = left.intersection(right) if len(to_rename) == 0: return left, right else: if not lsuffix and not rsuffix: raise ValueError('columns overlap but no suffix specified: ' '{rename}'.format(rename=to_rename)) def renamer(x, suffix): """Rename the left and right indices. If there is overlap, and suffix is not None, add suffix, otherwise, leave it as-is. Parameters ---------- x : original column name suffix : str or None Returns ------- x : renamed column name """ if x in to_rename and suffix is not None: return '{x}{suffix}'.format(x=x, suffix=suffix) return x lrenamer = partial(renamer, suffix=lsuffix) rrenamer = partial(renamer, suffix=rsuffix) return (_transform_index(left, lrenamer), _transform_index(right, rrenamer))
python
def items_overlap_with_suffix(left, lsuffix, right, rsuffix): """ If two indices overlap, add suffixes to overlapping entries. If corresponding suffix is empty, the entry is simply converted to string. """ to_rename = left.intersection(right) if len(to_rename) == 0: return left, right else: if not lsuffix and not rsuffix: raise ValueError('columns overlap but no suffix specified: ' '{rename}'.format(rename=to_rename)) def renamer(x, suffix): """Rename the left and right indices. If there is overlap, and suffix is not None, add suffix, otherwise, leave it as-is. Parameters ---------- x : original column name suffix : str or None Returns ------- x : renamed column name """ if x in to_rename and suffix is not None: return '{x}{suffix}'.format(x=x, suffix=suffix) return x lrenamer = partial(renamer, suffix=lsuffix) rrenamer = partial(renamer, suffix=rsuffix) return (_transform_index(left, lrenamer), _transform_index(right, rrenamer))
[ "def", "items_overlap_with_suffix", "(", "left", ",", "lsuffix", ",", "right", ",", "rsuffix", ")", ":", "to_rename", "=", "left", ".", "intersection", "(", "right", ")", "if", "len", "(", "to_rename", ")", "==", "0", ":", "return", "left", ",", "right", "else", ":", "if", "not", "lsuffix", "and", "not", "rsuffix", ":", "raise", "ValueError", "(", "'columns overlap but no suffix specified: '", "'{rename}'", ".", "format", "(", "rename", "=", "to_rename", ")", ")", "def", "renamer", "(", "x", ",", "suffix", ")", ":", "\"\"\"Rename the left and right indices.\n\n If there is overlap, and suffix is not None, add\n suffix, otherwise, leave it as-is.\n\n Parameters\n ----------\n x : original column name\n suffix : str or None\n\n Returns\n -------\n x : renamed column name\n \"\"\"", "if", "x", "in", "to_rename", "and", "suffix", "is", "not", "None", ":", "return", "'{x}{suffix}'", ".", "format", "(", "x", "=", "x", ",", "suffix", "=", "suffix", ")", "return", "x", "lrenamer", "=", "partial", "(", "renamer", ",", "suffix", "=", "lsuffix", ")", "rrenamer", "=", "partial", "(", "renamer", ",", "suffix", "=", "rsuffix", ")", "return", "(", "_transform_index", "(", "left", ",", "lrenamer", ")", ",", "_transform_index", "(", "right", ",", "rrenamer", ")", ")" ]
If two indices overlap, add suffixes to overlapping entries. If corresponding suffix is empty, the entry is simply converted to string.
[ "If", "two", "indices", "overlap", "add", "suffixes", "to", "overlapping", "entries", "." ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/internals/managers.py#L1956-L1994
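User-level behavior for this helper, seen through merge (the suffix values are illustrative):

import pandas as pd

left = pd.DataFrame({'key': [1], 'val': [10]})
right = pd.DataFrame({'key': [1], 'val': [20]})

merged = pd.merge(left, right, on='key', suffixes=('_l', '_r'))
print(list(merged.columns))   # ['key', 'val_l', 'val_r']

# With empty suffixes the overlap cannot be resolved, and the helper raises:
# ValueError: columns overlap but no suffix specified: Index(['val'], dtype='object')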
19,598
pandas-dev/pandas
pandas/core/internals/managers.py
_transform_index
def _transform_index(index, func, level=None): """ Apply function to all values found in index. This includes transforming multiindex entries separately. Only apply function to one level of the MultiIndex if level is specified. """ if isinstance(index, MultiIndex): if level is not None: items = [tuple(func(y) if i == level else y for i, y in enumerate(x)) for x in index] else: items = [tuple(func(y) for y in x) for x in index] return MultiIndex.from_tuples(items, names=index.names) else: items = [func(x) for x in index] return Index(items, name=index.name, tupleize_cols=False)
python
def _transform_index(index, func, level=None):
    """
    Apply function to all values found in index.

    This includes transforming multiindex entries separately.
    Only apply function to one level of the MultiIndex if level is specified.

    """
    if isinstance(index, MultiIndex):
        if level is not None:
            items = [tuple(func(y) if i == level else y
                           for i, y in enumerate(x)) for x in index]
        else:
            items = [tuple(func(y) for y in x) for x in index]
        return MultiIndex.from_tuples(items, names=index.names)
    else:
        items = [func(x) for x in index]
        return Index(items, name=index.name, tupleize_cols=False)
[ "def", "_transform_index", "(", "index", ",", "func", ",", "level", "=", "None", ")", ":", "if", "isinstance", "(", "index", ",", "MultiIndex", ")", ":", "if", "level", "is", "not", "None", ":", "items", "=", "[", "tuple", "(", "func", "(", "y", ")", "if", "i", "==", "level", "else", "y", "for", "i", ",", "y", "in", "enumerate", "(", "x", ")", ")", "for", "x", "in", "index", "]", "else", ":", "items", "=", "[", "tuple", "(", "func", "(", "y", ")", "for", "y", "in", "x", ")", "for", "x", "in", "index", "]", "return", "MultiIndex", ".", "from_tuples", "(", "items", ",", "names", "=", "index", ".", "names", ")", "else", ":", "items", "=", "[", "func", "(", "x", ")", "for", "x", "in", "index", "]", "return", "Index", "(", "items", ",", "name", "=", "index", ".", "name", ",", "tupleize_cols", "=", "False", ")" ]
Apply function to all values found in index.

This includes transforming multiindex entries separately.
Only apply function to one level of the MultiIndex if level is specified.
[ "Apply", "function", "to", "all", "values", "found", "in", "index", "." ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/internals/managers.py#L1997-L2014
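A small sketch of what _transform_index does in the level case, reproduced with public API only (the index and function are made-up examples; the helper itself is private):

import pandas as pd

mi = pd.MultiIndex.from_tuples([('a', 1), ('b', 2)], names=['key', 'num'])

# Mirror _transform_index(mi, str.upper, level=0): apply the function
# to level 0 only, leaving the other level untouched.
items = [tuple(str.upper(y) if i == 0 else y for i, y in enumerate(x))
         for x in mi]
print(pd.MultiIndex.from_tuples(items, names=mi.names).tolist())
# [('A', 1), ('B', 2)]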
19,599
pandas-dev/pandas
pandas/core/internals/managers.py
concatenate_block_managers
def concatenate_block_managers(mgrs_indexers, axes, concat_axis, copy):
    """
    Concatenate block managers into one.

    Parameters
    ----------
    mgrs_indexers : list of (BlockManager, {axis: indexer,...}) tuples
    axes : list of Index
    concat_axis : int
    copy : bool

    """
    concat_plans = [get_mgr_concatenation_plan(mgr, indexers)
                    for mgr, indexers in mgrs_indexers]
    concat_plan = combine_concat_plans(concat_plans, concat_axis)
    blocks = []

    for placement, join_units in concat_plan:

        if len(join_units) == 1 and not join_units[0].indexers:
            b = join_units[0].block
            values = b.values
            if copy:
                values = values.copy()
            elif not copy:
                values = values.view()
            b = b.make_block_same_class(values, placement=placement)
        elif is_uniform_join_units(join_units):
            b = join_units[0].block.concat_same_type(
                [ju.block for ju in join_units], placement=placement)
        else:
            b = make_block(
                concatenate_join_units(join_units, concat_axis, copy=copy),
                placement=placement)
        blocks.append(b)

    return BlockManager(blocks, axes)
python
def concatenate_block_managers(mgrs_indexers, axes, concat_axis, copy):
    """
    Concatenate block managers into one.

    Parameters
    ----------
    mgrs_indexers : list of (BlockManager, {axis: indexer,...}) tuples
    axes : list of Index
    concat_axis : int
    copy : bool

    """
    concat_plans = [get_mgr_concatenation_plan(mgr, indexers)
                    for mgr, indexers in mgrs_indexers]
    concat_plan = combine_concat_plans(concat_plans, concat_axis)
    blocks = []

    for placement, join_units in concat_plan:

        if len(join_units) == 1 and not join_units[0].indexers:
            b = join_units[0].block
            values = b.values
            if copy:
                values = values.copy()
            elif not copy:
                values = values.view()
            b = b.make_block_same_class(values, placement=placement)
        elif is_uniform_join_units(join_units):
            b = join_units[0].block.concat_same_type(
                [ju.block for ju in join_units], placement=placement)
        else:
            b = make_block(
                concatenate_join_units(join_units, concat_axis, copy=copy),
                placement=placement)
        blocks.append(b)

    return BlockManager(blocks, axes)
[ "def", "concatenate_block_managers", "(", "mgrs_indexers", ",", "axes", ",", "concat_axis", ",", "copy", ")", ":", "concat_plans", "=", "[", "get_mgr_concatenation_plan", "(", "mgr", ",", "indexers", ")", "for", "mgr", ",", "indexers", "in", "mgrs_indexers", "]", "concat_plan", "=", "combine_concat_plans", "(", "concat_plans", ",", "concat_axis", ")", "blocks", "=", "[", "]", "for", "placement", ",", "join_units", "in", "concat_plan", ":", "if", "len", "(", "join_units", ")", "==", "1", "and", "not", "join_units", "[", "0", "]", ".", "indexers", ":", "b", "=", "join_units", "[", "0", "]", ".", "block", "values", "=", "b", ".", "values", "if", "copy", ":", "values", "=", "values", ".", "copy", "(", ")", "elif", "not", "copy", ":", "values", "=", "values", ".", "view", "(", ")", "b", "=", "b", ".", "make_block_same_class", "(", "values", ",", "placement", "=", "placement", ")", "elif", "is_uniform_join_units", "(", "join_units", ")", ":", "b", "=", "join_units", "[", "0", "]", ".", "block", ".", "concat_same_type", "(", "[", "ju", ".", "block", "for", "ju", "in", "join_units", "]", ",", "placement", "=", "placement", ")", "else", ":", "b", "=", "make_block", "(", "concatenate_join_units", "(", "join_units", ",", "concat_axis", ",", "copy", "=", "copy", ")", ",", "placement", "=", "placement", ")", "blocks", ".", "append", "(", "b", ")", "return", "BlockManager", "(", "blocks", ",", "axes", ")" ]
Concatenate block managers into one.

Parameters
----------
mgrs_indexers : list of (BlockManager, {axis: indexer,...}) tuples
axes : list of Index
concat_axis : int
copy : bool
[ "Concatenate", "block", "managers", "into", "one", "." ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/internals/managers.py#L2038-L2074
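As rough orientation (the call-path claim here is an assumption, not stated in the record): concatenate_block_managers sits behind public concatenation such as pd.concat:

import pandas as pd

df1 = pd.DataFrame({'a': [1, 2]})
df2 = pd.DataFrame({'a': [3, 4]})

# pd.concat stitches the inputs' internal block managers into one;
# copy=False allows views of the original blocks where possible.
out = pd.concat([df1, df2], axis=0, copy=False)
print(out.shape)  # (4, 1)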